From 91a05c0709af688f73e3ba28620af2c1ab6d78f4 Mon Sep 17 00:00:00 2001 From: Jean-Francois Chevrette Date: Fri, 17 Jul 2015 08:31:16 -0400 Subject: [PATCH 001/101] source cluster env.sh on kube-up/down/push --- .gitignore | 3 +++ cluster/kube-down.sh | 5 +++++ cluster/kube-push.sh | 5 +++++ cluster/kube-up.sh | 5 +++++ 4 files changed, 18 insertions(+) diff --git a/.gitignore b/.gitignore index beea0662db4..857638fd6d1 100644 --- a/.gitignore +++ b/.gitignore @@ -42,6 +42,9 @@ Session.vim .vagrant network_closure.sh +# Local cluster env variables +/cluster/env.sh + # Compiled binaries in third_party /third_party/pkg diff --git a/cluster/kube-down.sh b/cluster/kube-down.sh index 3c1bc9b480b..75864024731 100755 --- a/cluster/kube-down.sh +++ b/cluster/kube-down.sh @@ -21,6 +21,11 @@ set -o nounset set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. + +if [ -f "${KUBE_ROOT}/cluster/env.sh" ]; then + source "${KUBE_ROOT}/cluster/env.sh" +fi + source "${KUBE_ROOT}/cluster/kube-env.sh" source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh" diff --git a/cluster/kube-push.sh b/cluster/kube-push.sh index 522e9cb8bd0..192f71c7592 100755 --- a/cluster/kube-push.sh +++ b/cluster/kube-push.sh @@ -24,6 +24,11 @@ set -o nounset set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. + +if [ -f "${KUBE_ROOT}/cluster/env.sh" ]; then + source "${KUBE_ROOT}/cluster/env.sh" +fi + source "${KUBE_ROOT}/cluster/kube-env.sh" source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh" diff --git a/cluster/kube-up.sh b/cluster/kube-up.sh index 326a38486a9..0701a2b3b22 100755 --- a/cluster/kube-up.sh +++ b/cluster/kube-up.sh @@ -25,6 +25,11 @@ set -o nounset set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. + +if [ -f "${KUBE_ROOT}/cluster/env.sh" ]; then + source "${KUBE_ROOT}/cluster/env.sh" +fi + source "${KUBE_ROOT}/cluster/kube-env.sh" source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh" From 3a6701c85f5878141ac9bf1fbd7034031feea979 Mon Sep 17 00:00:00 2001 From: Jimmi Dyson Date: Wed, 26 Aug 2015 13:22:25 +0100 Subject: [PATCH 002/101] Ensure container metrics are registered with prometheus even without cadvisor http server Fixes #13200 --- pkg/kubelet/cadvisor/cadvisor_linux.go | 48 ++++++++++++++------------ 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/pkg/kubelet/cadvisor/cadvisor_linux.go b/pkg/kubelet/cadvisor/cadvisor_linux.go index 649998031cc..ea8d50f1d99 100644 --- a/pkg/kubelet/cadvisor/cadvisor_linux.go +++ b/pkg/kubelet/cadvisor/cadvisor_linux.go @@ -64,12 +64,9 @@ func New(port uint) (Interface, error) { Manager: m, } - // Export the HTTP endpoint if a port was specified. - if port > 0 { - err = cadvisorClient.exportHTTP(port) - if err != nil { - return nil, err - } + err = cadvisorClient.exportHTTP(port) + if err != nil { + return nil, err } return cadvisorClient, nil } @@ -79,30 +76,35 @@ func (cc *cadvisorClient) Start() error { } func (cc *cadvisorClient) exportHTTP(port uint) error { + // Register the handlers regardless as this registers the prometheus + // collector properly. mux := http.NewServeMux() err := cadvisorHttp.RegisterHandlers(mux, cc, "", "", "", "", "/metrics") if err != nil { return err } - serv := &http.Server{ - Addr: fmt.Sprintf(":%d", port), - Handler: mux, - } - - // TODO(vmarmol): Remove this when the cAdvisor port is once again free. - // If export failed, retry in the background until we are able to bind. - // This allows an existing cAdvisor to be killed before this one registers. 
- go func() { - defer util.HandleCrash() - - err := serv.ListenAndServe() - for err != nil { - glog.Infof("Failed to register cAdvisor on port %d, retrying. Error: %v", port, err) - time.Sleep(time.Minute) - err = serv.ListenAndServe() + // Only start the http server if port > 0 + if port > 0 { + serv := &http.Server{ + Addr: fmt.Sprintf(":%d", port), + Handler: mux, } - }() + + // TODO(vmarmol): Remove this when the cAdvisor port is once again free. + // If export failed, retry in the background until we are able to bind. + // This allows an existing cAdvisor to be killed before this one registers. + go func() { + defer util.HandleCrash() + + err := serv.ListenAndServe() + for err != nil { + glog.Infof("Failed to register cAdvisor on port %d, retrying. Error: %v", port, err) + time.Sleep(time.Minute) + err = serv.ListenAndServe() + } + }() + } return nil } From 0b6030f50ce54d5caa8f131880ef8fa2d5df8001 Mon Sep 17 00:00:00 2001 From: markturansky Date: Mon, 13 Jul 2015 15:10:04 -0400 Subject: [PATCH 003/101] added better matching for PV access modes --- pkg/api/helpers.go | 56 ++++++++ pkg/api/helpers_test.go | 33 +++++ .../persistentvolume_index_test.go | 130 ++++++++++++++++++ pkg/controller/persistentvolume/types.go | 130 +++++++++++++++--- pkg/kubectl/describe.go | 5 +- pkg/kubectl/resource_printer.go | 5 +- pkg/volume/util.go | 32 ----- 7 files changed, 337 insertions(+), 54 deletions(-) diff --git a/pkg/api/helpers.go b/pkg/api/helpers.go index e79e6e202ed..099a8d81135 100644 --- a/pkg/api/helpers.go +++ b/pkg/api/helpers.go @@ -20,6 +20,7 @@ import ( "crypto/md5" "fmt" "reflect" + "strings" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/conversion" @@ -179,3 +180,58 @@ func LoadBalancerStatusDeepCopy(lb *LoadBalancerStatus) *LoadBalancerStatus { } return c } + +// GetAccessModesAsString returns a string representation of an array of access modes. +// modes, when present, are always in the same order: RWO,ROX,RWX. 
+func GetAccessModesAsString(modes []PersistentVolumeAccessMode) string { + modes = removeDuplicateAccessModes(modes) + modesStr := []string{} + if containsAccessMode(modes, ReadWriteOnce) { + modesStr = append(modesStr, "RWO") + } + if containsAccessMode(modes, ReadOnlyMany) { + modesStr = append(modesStr, "ROX") + } + if containsAccessMode(modes, ReadWriteMany) { + modesStr = append(modesStr, "RWX") + } + return strings.Join(modesStr, ",") +} + +// GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString +func GetAccessModesFromString(modes string) []PersistentVolumeAccessMode { + strmodes := strings.Split(modes, ",") + accessModes := []PersistentVolumeAccessMode{} + for _, s := range strmodes { + s = strings.Trim(s, " ") + switch { + case s == "RWO": + accessModes = append(accessModes, ReadWriteOnce) + case s == "ROX": + accessModes = append(accessModes, ReadOnlyMany) + case s == "RWX": + accessModes = append(accessModes, ReadWriteMany) + } + } + return accessModes +} + +// removeDuplicateAccessModes returns an array of access modes without any duplicates +func removeDuplicateAccessModes(modes []PersistentVolumeAccessMode) []PersistentVolumeAccessMode { + accessModes := []PersistentVolumeAccessMode{} + for _, m := range modes { + if !containsAccessMode(accessModes, m) { + accessModes = append(accessModes, m) + } + } + return accessModes +} + +func containsAccessMode(modes []PersistentVolumeAccessMode, mode PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} diff --git a/pkg/api/helpers_test.go b/pkg/api/helpers_test.go index 4b4bc4f1a86..6cbe4726939 100644 --- a/pkg/api/helpers_test.go +++ b/pkg/api/helpers_test.go @@ -142,3 +142,36 @@ func TestAddToNodeAddresses(t *testing.T) { } } } + +func TestGetAccessModesFromString(t *testing.T) { + modes := GetAccessModesFromString("ROX") + if !containsAccessMode(modes, ReadOnlyMany) { + t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes) + } + + modes = GetAccessModesFromString("ROX,RWX") + if !containsAccessMode(modes, ReadOnlyMany) { + t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes) + } + if !containsAccessMode(modes, ReadWriteMany) { + t.Errorf("Expected mode %s, but got %+v", ReadWriteMany, modes) + } + + modes = GetAccessModesFromString("RWO,ROX,RWX") + if !containsAccessMode(modes, ReadOnlyMany) { + t.Errorf("Expected mode %s, but got %+v", ReadOnlyMany, modes) + } + if !containsAccessMode(modes, ReadWriteMany) { + t.Errorf("Expected mode %s, but got %+v", ReadWriteMany, modes) + } +} + +func TestRemoveDuplicateAccessModes(t *testing.T) { + modes := []PersistentVolumeAccessMode{ + ReadWriteOnce, ReadOnlyMany, ReadOnlyMany, ReadOnlyMany, + } + modes = removeDuplicateAccessModes(modes) + if len(modes) != 2 { + t.Errorf("Expected 2 distinct modes in set but found %v", len(modes)) + } +} diff --git a/pkg/controller/persistentvolume/persistentvolume_index_test.go b/pkg/controller/persistentvolume/persistentvolume_index_test.go index 1c7d72da1f6..143c74a81e3 100644 --- a/pkg/controller/persistentvolume/persistentvolume_index_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_index_test.go @@ -216,6 +216,136 @@ func TestSort(t *testing.T) { } } +func TestAllPossibleAccessModes(t *testing.T) { + index := NewPersistentVolumeOrderedIndex() + for _, pv := range createTestVolumes() { + index.Add(pv) + } + + // the mock PVs creates contain 2 types of accessmodes: RWO+ROX and RWO+ROW+RWX + possibleModes 
:= index.allPossibleMatchingAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce}) + if len(possibleModes) != 2 { + t.Errorf("Expected 2 arrays of modes that match RWO, but got %v", len(possibleModes)) + } + for _, m := range possibleModes { + if !contains(m, api.ReadWriteOnce) { + t.Errorf("AccessModes does not contain %s", api.ReadWriteOnce) + } + } + + possibleModes = index.allPossibleMatchingAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteMany}) + if len(possibleModes) != 1 { + t.Errorf("Expected 1 array of modes that match RWX, but got %v", len(possibleModes)) + } + if !contains(possibleModes[0], api.ReadWriteMany) { + t.Errorf("AccessModes does not contain %s", api.ReadWriteOnce) + } + +} + +func TestFindingVolumeWithDifferentAccessModes(t *testing.T) { + gce := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{UID: "001", Name: "gce"}, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")}, + PersistentVolumeSource: api.PersistentVolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}}, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + }, + }, + } + + ebs := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{UID: "002", Name: "ebs"}, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")}, + PersistentVolumeSource: api.PersistentVolumeSource{AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{}}, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + }, + }, + } + + nfs := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{UID: "003", Name: "nfs"}, + Spec: api.PersistentVolumeSpec{ + Capacity: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")}, + PersistentVolumeSource: api.PersistentVolumeSource{NFS: &api.NFSVolumeSource{}}, + AccessModes: []api.PersistentVolumeAccessMode{ + api.ReadWriteOnce, + api.ReadOnlyMany, + api.ReadWriteMany, + }, + }, + } + + claim := &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claim01", + Namespace: "myns", + }, + Spec: api.PersistentVolumeClaimSpec{ + AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce}, + Resources: api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("1G")}}, + }, + } + + index := NewPersistentVolumeOrderedIndex() + index.Add(gce) + index.Add(ebs) + index.Add(nfs) + + volume, _ := index.FindBestMatchForClaim(claim) + if volume.Name != ebs.Name { + t.Errorf("Expected %s but got volume %s instead", ebs.Name, volume.Name) + } + + claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany} + volume, _ = index.FindBestMatchForClaim(claim) + if volume.Name != gce.Name { + t.Errorf("Expected %s but got volume %s instead", gce.Name, volume.Name) + } + + // order of the requested modes should not matter + claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteMany, api.ReadWriteOnce, api.ReadOnlyMany} + volume, _ = index.FindBestMatchForClaim(claim) + if volume.Name != nfs.Name { + t.Errorf("Expected %s but got volume %s instead", nfs.Name, volume.Name) + } + + // fewer modes requested should still match + claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteMany} + volume, _ = index.FindBestMatchForClaim(claim) + if volume.Name != nfs.Name { + t.Errorf("Expected %s but got volume %s instead", nfs.Name, 
volume.Name) + } + + // pretend the exact match is bound. should get the next level up of modes. + ebs.Spec.ClaimRef = &api.ObjectReference{} + claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteOnce} + volume, _ = index.FindBestMatchForClaim(claim) + if volume.Name != gce.Name { + t.Errorf("Expected %s but got volume %s instead", gce.Name, volume.Name) + } + + // continue up the levels of modes. + gce.Spec.ClaimRef = &api.ObjectReference{} + claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteOnce} + volume, _ = index.FindBestMatchForClaim(claim) + if volume.Name != nfs.Name { + t.Errorf("Expected %s but got volume %s instead", nfs.Name, volume.Name) + } + + // partial mode request + gce.Spec.ClaimRef = nil + claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadOnlyMany} + volume, _ = index.FindBestMatchForClaim(claim) + if volume.Name != gce.Name { + t.Errorf("Expected %s but got volume %s instead", gce.Name, volume.Name) + } +} + func createTestVolumes() []*api.PersistentVolume { // these volumes are deliberately out-of-order to test indexing and sorting return []*api.PersistentVolume{ diff --git a/pkg/controller/persistentvolume/types.go b/pkg/controller/persistentvolume/types.go index 4e29a88674c..bb0e094f766 100644 --- a/pkg/controller/persistentvolume/types.go +++ b/pkg/controller/persistentvolume/types.go @@ -23,7 +23,6 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/client/unversioned/cache" - "k8s.io/kubernetes/pkg/volume" ) // persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes indexed by AccessModes and ordered by storage capacity. @@ -42,7 +41,7 @@ func NewPersistentVolumeOrderedIndex() *persistentVolumeOrderedIndex { // accessModesIndexFunc is an indexing function that returns a persistent volume's AccessModes as a string func accessModesIndexFunc(obj interface{}) ([]string, error) { if pv, ok := obj.(*api.PersistentVolume); ok { - modes := volume.GetAccessModesAsString(pv.Spec.AccessModes) + modes := api.GetAccessModesAsString(pv.Spec.AccessModes) return []string{modes}, nil } return []string{""}, fmt.Errorf("object is not a persistent volume: %v", obj) @@ -75,23 +74,38 @@ type matchPredicate func(compareThis, toThis *api.PersistentVolume) bool // Find returns the nearest PV from the ordered list or nil if a match is not found func (pvIndex *persistentVolumeOrderedIndex) Find(pv *api.PersistentVolume, matchPredicate matchPredicate) (*api.PersistentVolume, error) { - volumes, err := pvIndex.ListByAccessModes(pv.Spec.AccessModes) - if err != nil { - return nil, err - } + // the 'pv' argument is a synthetic PV with capacity and accessmodes set according to the user's PersistentVolumeClaim. + // the synthetic pv arg is, therefore, a request for a storage resource. + // + // PVs are indexed by their access modes to allow easier searching. Each index is the string representation of a set of access modes. + // There is a finite number of possible sets and PVs will only be indexed in one of them (whichever index matches the PV's modes). + // + // A request for resources will always specify its desired access modes. Any matching PV must have at least that number + // of access modes, but it can have more. For example, a user asks for ReadWriteOnce but a GCEPD is available, which is ReadWriteOnce+ReadOnlyMany. 
+ // + // Searches are performed against a set of access modes, so we can attempt not only the exact matching modes but also + // potential matches (the GCEPD example above). + allPossibleModes := pvIndex.allPossibleMatchingAccessModes(pv.Spec.AccessModes) - // volumes are sorted by size but some may be bound. - // remove bound volumes for easy binary search by size - unboundVolumes := []*api.PersistentVolume{} - for _, v := range volumes { - if v.Spec.ClaimRef == nil { - unboundVolumes = append(unboundVolumes, v) + for _, modes := range allPossibleModes { + volumes, err := pvIndex.ListByAccessModes(modes) + if err != nil { + return nil, err } - } - i := sort.Search(len(unboundVolumes), func(i int) bool { return matchPredicate(pv, unboundVolumes[i]) }) - if i < len(unboundVolumes) { - return unboundVolumes[i], nil + // volumes are sorted by size but some may be bound. + // remove bound volumes for easy binary search by size + unboundVolumes := []*api.PersistentVolume{} + for _, v := range volumes { + if v.Spec.ClaimRef == nil { + unboundVolumes = append(unboundVolumes, v) + } + } + + i := sort.Search(len(unboundVolumes), func(i int) bool { return matchPredicate(pv, unboundVolumes[i]) }) + if i < len(unboundVolumes) { + return unboundVolumes[i], nil + } } return nil, nil } @@ -139,3 +153,87 @@ func matchStorageCapacity(pvA, pvB *api.PersistentVolume) bool { bSize := bQty.Value() return aSize <= bSize } + +// allPossibleMatchingAccessModes returns an array of AccessMode arrays that can satisfy a user's requested modes. +// +// see comments in the Find func above regarding indexing. +// +// allPossibleMatchingAccessModes gets all stringified accessmodes from the index and returns all those that +// contain at least all of the requested mode. +// +// For example, assume the index contains 2 types of PVs where the stringified accessmodes are: +// +// "RWO,ROX" -- some number of GCEPDs +// "RWO,ROX,RWX" -- some number of NFS volumes +// +// A request for RWO could be satisfied by both sets of indexed volumes, so allPossibleMatchingAccessModes returns: +// +// [][]api.PersistentVolumeAccessMode { +// []api.PersistentVolumeAccessMode { +// api.ReadWriteOnce, api.ReadOnlyMany, +// }, +// []api.PersistentVolumeAccessMode { +// api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany, +// }, +// } +// +// A request for RWX can be satisfied by only one set of indexed volumes, so the return is: +// +// [][]api.PersistentVolumeAccessMode { +// []api.PersistentVolumeAccessMode { +// api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany, +// }, +// } +// +// This func returns modes with ascending levels of modes to give the user what is closest to what they actually asked for. +// +func (pvIndex *persistentVolumeOrderedIndex) allPossibleMatchingAccessModes(requestedModes []api.PersistentVolumeAccessMode) [][]api.PersistentVolumeAccessMode { + matchedModes := [][]api.PersistentVolumeAccessMode{} + keys := pvIndex.Indexer.ListIndexFuncValues("accessmodes") + for _, key := range keys { + indexedModes := api.GetAccessModesFromString(key) + if containedInAll(indexedModes, requestedModes) { + matchedModes = append(matchedModes, indexedModes) + } + } + + // sort by the number of modes in each array with the fewest number of modes coming first. + // this allows searching for volumes by the minimum number of modes required of the possible matches. 
+ sort.Sort(byAccessModes{matchedModes}) + return matchedModes +} + +func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool { + for _, m := range modes { + if m == mode { + return true + } + } + return false +} + +func containedInAll(indexedModes []api.PersistentVolumeAccessMode, requestedModes []api.PersistentVolumeAccessMode) bool { + for _, mode := range requestedModes { + if !contains(indexedModes, mode) { + return false + } + } + return true +} + +// byAccessModes is used to order access modes by size, with the fewest modes first +type byAccessModes struct { + modes [][]api.PersistentVolumeAccessMode +} + +func (c byAccessModes) Less(i, j int) bool { + return len(c.modes[i]) < len(c.modes[j]) +} + +func (c byAccessModes) Swap(i, j int) { + c.modes[i], c.modes[j] = c.modes[j], c.modes[i] +} + +func (c byAccessModes) Len() int { + return len(c.modes) +} diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index 55c7964714e..7d3756c066b 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -34,7 +34,6 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util" - "k8s.io/kubernetes/pkg/volume" ) // Describer generates output for the named resource or an error @@ -599,7 +598,7 @@ func (d *PersistentVolumeDescriber) Describe(namespace, name string) (string, er fmt.Fprintf(out, "Claim:\t%s\n", "") } fmt.Fprintf(out, "Reclaim Policy:\t%v\n", pv.Spec.PersistentVolumeReclaimPolicy) - fmt.Fprintf(out, "Access Modes:\t%s\n", volume.GetAccessModesAsString(pv.Spec.AccessModes)) + fmt.Fprintf(out, "Access Modes:\t%s\n", api.GetAccessModesAsString(pv.Spec.AccessModes)) fmt.Fprintf(out, "Capacity:\t%s\n", storage.String()) fmt.Fprintf(out, "Message:\t%s\n", pv.Status.Message) fmt.Fprintf(out, "Source:\n") @@ -642,7 +641,7 @@ func (d *PersistentVolumeClaimDescriber) Describe(namespace, name string) (strin capacity := "" accessModes := "" if pvc.Spec.VolumeName != "" { - accessModes = volume.GetAccessModesAsString(pvc.Status.AccessModes) + accessModes = api.GetAccessModesAsString(pvc.Status.AccessModes) storage = pvc.Status.Capacity[api.ResourceStorage] capacity = storage.String() } diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index 3d649a6efc1..f20efc31691 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -40,7 +40,6 @@ import ( "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/jsonpath" - "k8s.io/kubernetes/pkg/volume" ) // GetPrinter takes a format type, an optional format argument. 
It will return true @@ -872,7 +871,7 @@ func printPersistentVolume(pv *api.PersistentVolume, w io.Writer, withNamespace claimRefUID += pv.Spec.ClaimRef.Name } - modesStr := volume.GetAccessModesAsString(pv.Spec.AccessModes) + modesStr := api.GetAccessModesAsString(pv.Spec.AccessModes) aQty := pv.Spec.Capacity[api.ResourceStorage] aSize := aQty.String() @@ -917,7 +916,7 @@ func printPersistentVolumeClaim(pvc *api.PersistentVolumeClaim, w io.Writer, wit capacity := "" accessModes := "" if pvc.Spec.VolumeName != "" { - accessModes = volume.GetAccessModesAsString(pvc.Status.AccessModes) + accessModes = api.GetAccessModesAsString(pvc.Status.AccessModes) storage = pvc.Status.Capacity[api.ResourceStorage] capacity = storage.String() } diff --git a/pkg/volume/util.go b/pkg/volume/util.go index d350374f9f4..c8336496af8 100644 --- a/pkg/volume/util.go +++ b/pkg/volume/util.go @@ -31,38 +31,6 @@ import ( "github.com/golang/glog" ) -func GetAccessModesAsString(modes []api.PersistentVolumeAccessMode) string { - modesAsString := "" - - if contains(modes, api.ReadWriteOnce) { - appendAccessMode(&modesAsString, "RWO") - } - if contains(modes, api.ReadOnlyMany) { - appendAccessMode(&modesAsString, "ROX") - } - if contains(modes, api.ReadWriteMany) { - appendAccessMode(&modesAsString, "RWX") - } - - return modesAsString -} - -func appendAccessMode(modes *string, mode string) { - if *modes != "" { - *modes += "," - } - *modes += mode -} - -func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool { - for _, m := range modes { - if m == mode { - return true - } - } - return false -} - // ScrubPodVolumeAndWatchUntilCompletion is intended for use with volume Recyclers. This function will // save the given Pod to the API and watch it until it completes, fails, or the pod's ActiveDeadlineSeconds is exceeded, whichever comes first. // An attempt to delete a scrubber pod is always attempted before returning. From bcdf89a9091ccc3cf30234f3f28e0f906af2dd01 Mon Sep 17 00:00:00 2001 From: Jan Safranek Date: Thu, 27 Aug 2015 17:50:05 +0200 Subject: [PATCH 004/101] Rebase the gluster image to CentOS. Using the same base for NFS and Gluster images should reduce the download size. Also, CentOS image is ~30MB smaller than Ubuntu 14.04. --- test/images/volumes-tester/gluster/Dockerfile | 6 +-- test/images/volumes-tester/gluster/Makefile | 2 +- .../volumes-tester/gluster/gluster.repo | 50 +++++++++++++++++++ 3 files changed, 54 insertions(+), 4 deletions(-) create mode 100644 test/images/volumes-tester/gluster/gluster.repo diff --git a/test/images/volumes-tester/gluster/Dockerfile b/test/images/volumes-tester/gluster/Dockerfile index 6ab22c27a8c..fa3f0d332f5 100644 --- a/test/images/volumes-tester/gluster/Dockerfile +++ b/test/images/volumes-tester/gluster/Dockerfile @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM ubuntu:14.04 +FROM centos MAINTAINER Jan Safranek, jsafrane@redhat.com -ENV DEBIAN_FRONTEND noninteractive -RUN apt-get update -qq && apt-get install -y glusterfs-server -qq +ADD gluster.repo /etc/yum.repos.d/ +RUN yum -y install hostname glusterfs-server && yum clean all ADD glusterd.vol /etc/glusterfs/ ADD run_gluster.sh /usr/local/bin/ ADD index.html /vol/ diff --git a/test/images/volumes-tester/gluster/Makefile b/test/images/volumes-tester/gluster/Makefile index 5c476af6b9b..b52ce19dece 100644 --- a/test/images/volumes-tester/gluster/Makefile +++ b/test/images/volumes-tester/gluster/Makefile @@ -1,6 +1,6 @@ all: push -TAG = 0.1 +TAG = 0.2 container: docker build -t gcr.io/google_containers/volume-gluster . # Build new image and automatically tag it as latest diff --git a/test/images/volumes-tester/gluster/gluster.repo b/test/images/volumes-tester/gluster/gluster.repo new file mode 100644 index 00000000000..08e85903851 --- /dev/null +++ b/test/images/volumes-tester/gluster/gluster.repo @@ -0,0 +1,50 @@ +[epel] +name=Extra Packages for Enterprise Linux 7 - $basearch +#baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch +mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch +failovermethod=priority +enabled=1 +gpgcheck=1 +gpgkey=https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7 + +[epel-debuginfo] +name=Extra Packages for Enterprise Linux 7 - $basearch - Debug +#baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch/debug +mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-7&arch=$basearch +failovermethod=priority +enabled=0 +gpgcheck=1 +gpgkey=https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7 + +[epel-source] +name=Extra Packages for Enterprise Linux 7 - $basearch - Source +#baseurl=http://download.fedoraproject.org/pub/epel/7/SRPMS +mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-7&arch=$basearch +failovermethod=priority +enabled=0 +gpgcheck=1 +gpgkey=https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7 + +[glusterfs-epel] +name=GlusterFS is a clustered file-system capable of scaling to several petabytes. +baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/$basearch/ +enabled=1 +skip_if_unavailable=1 +gpgcheck=1 +gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key + +[glusterfs-noarch-epel] +name=GlusterFS is a clustered file-system capable of scaling to several petabytes. +baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/noarch +enabled=1 +skip_if_unavailable=1 +gpgcheck=1 +gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key + +[glusterfs-source-epel] +name=GlusterFS is a clustered file-system capable of scaling to several petabytes. - Source +baseurl=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/epel-$releasever/SRPMS +enabled=0 +skip_if_unavailable=1 +gpgcheck=1 +gpgkey=http://download.gluster.org/pub/gluster/glusterfs/LATEST/EPEL.repo/pub.key From 2145371c45369f178eca1f9a5a8a6c59806947fe Mon Sep 17 00:00:00 2001 From: "Timothy St. Clair" Date: Fri, 28 Aug 2015 10:10:05 -0500 Subject: [PATCH 005/101] Plumb through configuration option to disable watch cache because we are seeing anomolies on our cluster. 
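As a rough illustration (hypothetical invocation; only the flag name and its default come from this patch), the cache can then be disabled at startup with:

    kube-apiserver --watch-cache=false

Leaving the flag unset keeps the previous behavior, since it defaults to true.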
--- cmd/kube-apiserver/app/server.go | 4 ++++ hack/verify-flags/known-flags.txt | 1 + pkg/master/master.go | 10 ++++++---- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 954d11364d9..ff478fab969 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -107,6 +107,7 @@ type APIServer struct { KubeletConfig client.KubeletConfig ClusterName string EnableProfiling bool + EnableWatchCache bool MaxRequestsInFlight int MinRequestTimeout int LongRunningRequestRE string @@ -222,6 +223,8 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) { fs.Var(&s.RuntimeConfig, "runtime-config", "A set of key=value pairs that describe runtime configuration that may be passed to the apiserver. api/ key can be used to turn on/off specific api versions. api/all and api/legacy are special keys to control all and legacy api versions respectively.") fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster") fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/") + // TODO: enable cache in integration tests. + fs.BoolVar(&s.EnableWatchCache, "watch-cache", true, "Enable watch caching in the apiserver") fs.StringVar(&s.ExternalHost, "external-hostname", "", "The hostname to use when generating externalized URLs for this master (e.g. Swagger API Docs.)") fs.IntVar(&s.MaxRequestsInFlight, "max-requests-inflight", 400, "The maximum number of requests in flight at a given time. When the server exceeds this, it rejects requests. Zero for no limit.") fs.IntVar(&s.MinRequestTimeout, "min-request-timeout", 1800, "An optional field indicating the minimum number of seconds a handler must keep a request open before timing it out. 
Currently only honored by the watch request handler, which picks a randomized value above this number as the connection timeout, to spread out load.") @@ -429,6 +432,7 @@ func (s *APIServer) Run(_ []string) error { EnableUISupport: true, EnableSwaggerSupport: true, EnableProfiling: s.EnableProfiling, + EnableWatchCache: s.EnableWatchCache, EnableIndex: true, APIPrefix: s.APIPrefix, ExpAPIPrefix: s.ExpAPIPrefix, diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt index 7a3c37ae1e8..e84f0545afa 100644 --- a/hack/verify-flags/known-flags.txt +++ b/hack/verify-flags/known-flags.txt @@ -248,6 +248,7 @@ update-period upgrade-target use-kubernetes-cluster-service user-whitelist +watch-cache watch-only whitelist-override-label www-prefix diff --git a/pkg/master/master.go b/pkg/master/master.go index c1a9ab96ab6..aa7802eddf0 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -112,6 +112,7 @@ type Config struct { // allow downstream consumers to disable the index route EnableIndex bool EnableProfiling bool + EnableWatchCache bool APIPrefix string ExpAPIPrefix string CorsAllowedOriginList []string @@ -189,6 +190,7 @@ type Master struct { enableUISupport bool enableSwaggerSupport bool enableProfiling bool + enableWatchCache bool apiPrefix string expAPIPrefix string corsAllowedOriginList []string @@ -344,6 +346,7 @@ func New(c *Config) *Master { enableUISupport: c.EnableUISupport, enableSwaggerSupport: c.EnableSwaggerSupport, enableProfiling: c.EnableProfiling, + enableWatchCache: c.EnableWatchCache, apiPrefix: c.APIPrefix, expAPIPrefix: c.ExpAPIPrefix, corsAllowedOriginList: c.CorsAllowedOriginList, @@ -430,10 +433,9 @@ func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) // init initializes master. func (m *Master) init(c *Config) { - enableCacher := true healthzChecks := []healthz.HealthzChecker{} m.clock = util.RealClock{} - podStorage := podetcd.NewStorage(c.DatabaseStorage, enableCacher, c.KubeletClient) + podStorage := podetcd.NewStorage(c.DatabaseStorage, c.EnableWatchCache, c.KubeletClient) podTemplateStorage := podtemplateetcd.NewREST(c.DatabaseStorage) @@ -449,10 +451,10 @@ func (m *Master) init(c *Config) { namespaceStorage, namespaceStatusStorage, namespaceFinalizeStorage := namespaceetcd.NewREST(c.DatabaseStorage) m.namespaceRegistry = namespace.NewRegistry(namespaceStorage) - endpointsStorage := endpointsetcd.NewREST(c.DatabaseStorage, enableCacher) + endpointsStorage := endpointsetcd.NewREST(c.DatabaseStorage, c.EnableWatchCache) m.endpointRegistry = endpoint.NewRegistry(endpointsStorage) - nodeStorage, nodeStatusStorage := nodeetcd.NewREST(c.DatabaseStorage, enableCacher, c.KubeletClient) + nodeStorage, nodeStatusStorage := nodeetcd.NewREST(c.DatabaseStorage, c.EnableWatchCache, c.KubeletClient) m.nodeRegistry = minion.NewRegistry(nodeStorage) serviceStorage := serviceetcd.NewREST(c.DatabaseStorage) From 97ddc9781c4c401cd5d14738dcdc08a92e913336 Mon Sep 17 00:00:00 2001 From: Quinton Hoole Date: Fri, 28 Aug 2015 10:48:13 -0700 Subject: [PATCH 006/101] Create fewer pods in Scheduler Predicates e2e test to speed things up. 
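Back-of-the-envelope effect (illustrative numbers, not taken from any real cluster): with the per-pod CPU limit raised from 100m to 500m, a node with 2000 millicores of spare capacity is saturated by 2000/500 = 4 pause pods instead of 2000/100 = 20, so the test starts roughly one fifth as many pods while still filling the cluster.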
--- test/e2e/scheduler_predicates.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go index 7f6b3abadc0..13ac3c3c836 100644 --- a/test/e2e/scheduler_predicates.go +++ b/test/e2e/scheduler_predicates.go @@ -241,7 +241,7 @@ var _ = Describe("SchedulerPredicates", func() { cleanupPods(c, ns) }) - // This test verifies we don't allow scheduling of pods in a way that sum of limits of pods is greater than machines capacit. + // This test verifies we don't allow scheduling of pods in a way that sum of limits of pods is greater than machines capacity. // It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods. // It is so because we need to have precise control on what's running in the cluster. It("validates resource limits of pods that are allowed to run.", func() { @@ -267,9 +267,10 @@ var _ = Describe("SchedulerPredicates", func() { } var podsNeededForSaturation int + milliCpuPerPod := int64(500) for name, leftCapacity := range nodeToCapacityMap { Logf("Node: %v has capacity: %v", name, leftCapacity) - podsNeededForSaturation += (int)(leftCapacity / 100) + podsNeededForSaturation += (int)(leftCapacity / milliCpuPerPod) } By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster CPU and trying to start another one", podsNeededForSaturation)) @@ -289,7 +290,7 @@ var _ = Describe("SchedulerPredicates", func() { Image: "gcr.io/google_containers/pause:go", Resources: api.ResourceRequirements{ Limits: api.ResourceList{ - "cpu": *resource.NewMilliQuantity(100, "DecimalSI"), + "cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"), }, }, }, @@ -313,7 +314,7 @@ var _ = Describe("SchedulerPredicates", func() { Image: "gcr.io/google_containers/pause:go", Resources: api.ResourceRequirements{ Limits: api.ResourceList{ - "cpu": *resource.NewMilliQuantity(100, "DecimalSI"), + "cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"), }, }, }, From bf170a688ca51b9bc011760ba1ca59aae6d1e838 Mon Sep 17 00:00:00 2001 From: Pedro Roque Marques Date: Fri, 28 Aug 2015 15:34:22 -0700 Subject: [PATCH 007/101] Update annotations in the kubelet. When the pod annotations are updated in the apiserver, update the pod. Annotations may be used to convey attributes that are required to the pod execution, such as networking parameters. --- pkg/kubelet/config/config.go | 59 ++++++++++++++++++++++++++++++- pkg/kubelet/config/config_test.go | 42 +++++++++++++++++++--- 2 files changed, 96 insertions(+), 5 deletions(-) diff --git a/pkg/kubelet/config/config.go b/pkg/kubelet/config/config.go index 6b0736d3b54..0de727354bd 100644 --- a/pkg/kubelet/config/config.go +++ b/pkg/kubelet/config/config.go @@ -333,6 +333,61 @@ func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventReco return } +// Annotations that the kubelet adds to the pod. +var localAnnotations = []string{ + kubelet.ConfigSourceAnnotationKey, + kubelet.ConfigMirrorAnnotationKey, + kubelet.ConfigFirstSeenAnnotationKey, +} + +func isLocalAnnotationKey(key string) bool { + for _, localKey := range localAnnotations { + if key == localKey { + return true + } + } + return false +} + +// isAnnotationMapEqual returns true if the existing annotation Map is equal to candidate except +// for local annotations. 
+func isAnnotationMapEqual(existingMap, candidateMap map[string]string) bool { + if candidateMap == nil { + return true + } + for k, v := range candidateMap { + if existingValue, ok := existingMap[k]; ok && existingValue == v { + continue + } + return false + } + for k := range existingMap { + if isLocalAnnotationKey(k) { + continue + } + // stale entry in existing map. + if _, exists := candidateMap[k]; !exists { + return false + } + } + return true +} + +// updateAnnotations returns an Annotation map containing the api annotation map plus +// locally managed annotations +func updateAnnotations(existing, ref *api.Pod) { + annotations := make(map[string]string, len(ref.Annotations)+len(localAnnotations)) + for k, v := range ref.Annotations { + annotations[k] = v + } + for _, k := range localAnnotations { + if v, ok := existing.Annotations[k]; ok { + annotations[k] = v + } + } + existing.Annotations = annotations +} + // checkAndUpdatePod updates existing if ref makes a meaningful change and returns true, or // returns false if there was no update. func checkAndUpdatePod(existing, ref *api.Pod) bool { @@ -340,13 +395,15 @@ func checkAndUpdatePod(existing, ref *api.Pod) bool { // like the source annotation or the UID (to ensure safety) if reflect.DeepEqual(existing.Spec, ref.Spec) && reflect.DeepEqual(existing.DeletionTimestamp, ref.DeletionTimestamp) && - reflect.DeepEqual(existing.DeletionGracePeriodSeconds, ref.DeletionGracePeriodSeconds) { + reflect.DeepEqual(existing.DeletionGracePeriodSeconds, ref.DeletionGracePeriodSeconds) && + isAnnotationMapEqual(existing.Annotations, ref.Annotations) { return false } // this is an update existing.Spec = ref.Spec existing.DeletionTimestamp = ref.DeletionTimestamp existing.DeletionGracePeriodSeconds = ref.DeletionGracePeriodSeconds + updateAnnotations(existing, ref) return true } diff --git a/pkg/kubelet/config/config_test.go b/pkg/kubelet/config/config_test.go index c213e07d618..c3d39202f8f 100644 --- a/pkg/kubelet/config/config_test.go +++ b/pkg/kubelet/config/config_test.go @@ -22,6 +22,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/unversioned/record" + "k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/kubelet" "k8s.io/kubernetes/pkg/securitycontext" "k8s.io/kubernetes/pkg/types" @@ -55,10 +56,9 @@ func (s sortedPods) Less(i, j int) bool { func CreateValidPod(name, namespace, source string) *api.Pod { return &api.Pod{ ObjectMeta: api.ObjectMeta{ - UID: types.UID(name), // for the purpose of testing, this is unique enough - Name: name, - Namespace: namespace, - Annotations: map[string]string{kubelet.ConfigSourceAnnotationKey: source}, + UID: types.UID(name), // for the purpose of testing, this is unique enough + Name: name, + Namespace: namespace, }, Spec: api.PodSpec{ RestartPolicy: api.RestartPolicyAlways, @@ -95,9 +95,11 @@ func expectPodUpdate(t *testing.T, ch <-chan kubelet.PodUpdate, expected ...kube // TODO: consider mock out recordFirstSeen in config.go for _, pod := range update.Pods { delete(pod.Annotations, kubelet.ConfigFirstSeenAnnotationKey) + delete(pod.Annotations, kubelet.ConfigSourceAnnotationKey) } for _, pod := range expected[i].Pods { delete(pod.Annotations, kubelet.ConfigFirstSeenAnnotationKey) + delete(pod.Annotations, kubelet.ConfigSourceAnnotationKey) } if !api.Semantic.DeepEqual(expected[i], update) { t.Fatalf("Expected %#v, Got %#v", expected[i], update) @@ -259,3 +261,35 @@ func TestNewPodAddedUpdatedSet(t *testing.T) { CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo4", 
"new", "test")), CreatePodUpdate(kubelet.UPDATE, NoneSource, pod)) } + +func TestPodUpdateAnnotations(t *testing.T) { + channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental) + + pod := CreateValidPod("foo2", "new", "test") + pod.Annotations = make(map[string]string, 0) + pod.Annotations["kubernetes.io/blah"] = "blah" + + clone, err := conversion.NewCloner().DeepCopy(pod) + if err != nil { + t.Fatalf("%v", err) + } + + podUpdate := CreatePodUpdate(kubelet.SET, NoneSource, CreateValidPod("foo1", "new", "test"), clone.(*api.Pod), CreateValidPod("foo3", "new", "test")) + channel <- podUpdate + expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo1", "new", "test"), pod, CreateValidPod("foo3", "new", "test"))) + + pod.Annotations["kubenetes.io/blah"] = "superblah" + podUpdate = CreatePodUpdate(kubelet.SET, NoneSource, CreateValidPod("foo1", "new", "test"), pod, CreateValidPod("foo3", "new", "test")) + channel <- podUpdate + expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod)) + + pod.Annotations["kubernetes.io/otherblah"] = "doh" + podUpdate = CreatePodUpdate(kubelet.SET, NoneSource, CreateValidPod("foo1", "new", "test"), pod, CreateValidPod("foo3", "new", "test")) + channel <- podUpdate + expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod)) + + delete(pod.Annotations, "kubernetes.io/blah") + podUpdate = CreatePodUpdate(kubelet.SET, NoneSource, CreateValidPod("foo1", "new", "test"), pod, CreateValidPod("foo3", "new", "test")) + channel <- podUpdate + expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod)) +} From 4393116422dc0e5fab4d90e0238f6385a11602d7 Mon Sep 17 00:00:00 2001 From: Guohua Ouyang Date: Mon, 31 Aug 2015 15:14:27 +0800 Subject: [PATCH 008/101] Use "kubectl replace" in update-storage-objects.sh Signed-off-by: Guohua Ouyang --- cluster/update-storage-objects.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/update-storage-objects.sh b/cluster/update-storage-objects.sh index 239127e4fef..e4c0c830452 100755 --- a/cluster/update-storage-objects.sh +++ b/cluster/update-storage-objects.sh @@ -84,7 +84,7 @@ do echo "Looks like ${instance} got deleted. Ignoring it" continue fi - output=$("${KUBECTL}" update -f "${filename}" --namespace="${namespace}") || true + output=$("${KUBECTL}" replace -f "${filename}" --namespace="${namespace}") || true rm "${filename}" if [ -n "${output:-}" ] then From 85729403fd21c068361da6ff43d72c7847987692 Mon Sep 17 00:00:00 2001 From: huangyuqi Date: Mon, 31 Aug 2015 19:15:11 +0000 Subject: [PATCH 009/101] Add ubuntu kube-push for upgrading of k8s cluster --- cluster/ubuntu/util.sh | 84 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 83 insertions(+), 1 deletion(-) diff --git a/cluster/ubuntu/util.sh b/cluster/ubuntu/util.sh index 61e5e8dc85b..1fb63b1a1a7 100755 --- a/cluster/ubuntu/util.sh +++ b/cluster/ubuntu/util.sh @@ -434,9 +434,91 @@ function kube-down { wait } + +# Perform common upgrade setup tasks +function prepare-push() { + #Not yet support upgrading by using local binaries. 
+ if [[ $KUBE_VERSION == "" ]]; then + echo "Upgrading nodes to local binaries is not yet supported.Please specify the version" + exit 1 + fi + # Run build.sh to get the latest release + source "${KUBE_ROOT}/cluster/ubuntu/build.sh" +} + +# Update a kubernetes master with latest release +function push-master { + source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}" + setClusterInfo + ii=0 + for i in ${nodes}; do + if [[ "${roles[${ii}]}" == "a" || "${roles[${ii}]}" == "ai" ]]; then + echo "Cleaning on master ${i#*@}" + ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop' || true + provision-master + elif [[ "${roles[${ii}]}" == "i" ]]; then + continue + else + echo "unsupported role for ${i}. please check" + exit 1 + fi + ((ii=ii+1)) + done + verify-cluster +} + +# Update a kubernetes node with latest release +function push-node() { + source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}" + node=${1} + setClusterInfo + ii=0 + for i in ${nodes}; do + if [[ "${roles[${ii}]}" == "i" || "${roles[${ii}]}" == "ai" && $i == *$node ]]; then + echo "Cleaning on node ${i#*@}" + ssh -t $i 'sudo -p "[sudo] stop the all process: " service etcd stop' || true + provision-minion $i + else + echo "unsupported role for ${i}, or nodes ${i} don't exist. please check" + exit 1 + fi + ((ii=ii+1)) + done + verify-cluster +} + # Update a kubernetes cluster with latest source function kube-push { - echo "not implemented" + prepare-push + #stop all the kube's process & etcd + source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}" + for i in ${nodes}; do + echo "Cleaning on node ${i#*@}" + ssh -t $i 'sudo -p "[sudo] stop all process: " service etcd stop' || true + ssh -t $i 'rm -f /opt/bin/kube* /etc/init/kube* /etc/init.d/kube* /etc/default/kube*; rm -rf ~/kube' || true + done + #Update all nodes with the lasted release + if [[ ! -f "ubuntu/binaries/master/kube-apiserver" ]]; then + echo "There is no latest release of kubernetes,please check first" + exit 1 + fi + #provision all nodes,include master&nodes + setClusterInfo + ii=0 + for i in ${nodes}; do + if [[ "${roles[${ii}]}" == "a" ]]; then + provision-master + elif [[ "${roles[${ii}]}" == "i" ]]; then + provision-minion $i + elif [[ "${roles[${ii}]}" == "ai" ]]; then + provision-masterandminion + else + echo "unsupported role for ${i}. please check" + exit 1 + fi + ((ii=ii+1)) + done + verify-cluster } # Perform preparations required to run e2e tests From a2ca4954f48b2588d28e5e8c2d24ed6e4cb76123 Mon Sep 17 00:00:00 2001 From: Jerzy Szczepkowski Date: Tue, 1 Sep 2015 08:07:39 +0200 Subject: [PATCH 010/101] Fixed kind to resource convertion in scale client. Fixed kind to resource convertion in scale client. --- pkg/client/unversioned/scale.go | 35 +++++++++++---------------------- 1 file changed, 12 insertions(+), 23 deletions(-) diff --git a/pkg/client/unversioned/scale.go b/pkg/client/unversioned/scale.go index c0d4b662e37..4152ec9b2fb 100644 --- a/pkg/client/unversioned/scale.go +++ b/pkg/client/unversioned/scale.go @@ -17,9 +17,7 @@ limitations under the License. package unversioned import ( - "fmt" - "strings" - + "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/expapi" ) @@ -50,30 +48,21 @@ func newScales(c *ExperimentalClient, namespace string) *scales { // Get takes the reference to scale subresource and returns the subresource or error, if one occurs. 
func (c *scales) Get(kind string, name string) (result *expapi.Scale, err error) { result = &expapi.Scale{} - if strings.ToLower(kind) == "replicationcontroller" { - kind = "replicationControllers" - err = c.client.Get().Namespace(c.ns).Resource(kind).Name(name).SubResource("scale").Do().Into(result) - return - } - err = fmt.Errorf("Kind not supported: %s", kind) + resource, _ := meta.KindToResource(kind, false) + err = c.client.Get().Namespace(c.ns).Resource(resource).Name(name).SubResource("scale").Do().Into(result) return } func (c *scales) Update(kind string, scale *expapi.Scale) (result *expapi.Scale, err error) { result = &expapi.Scale{} - if strings.ToLower(kind) == "replicationcontroller" { - kind = "replicationControllers" - - err = c.client.Put(). - Namespace(scale.Namespace). - Resource(kind). - Name(scale.Name). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return - } - err = fmt.Errorf("Kind not supported: %s", kind) + resource, _ := meta.KindToResource(kind, false) + err = c.client.Put(). + Namespace(scale.Namespace). + Resource(resource). + Name(scale.Name). + SubResource("scale"). + Body(scale). + Do(). + Into(result) return } From 064b09ff0b24da7d5d3892cb3008dcedce459906 Mon Sep 17 00:00:00 2001 From: Marcin Wielgus Date: Fri, 28 Aug 2015 12:24:00 +0200 Subject: [PATCH 011/101] MetricsClient for HorizontalPodAutoscaler --- .../app/controllermanager.go | 4 +- .../horizontalpodautoscaler_controller.go | 130 +++----------- ...horizontalpodautoscaler_controller_test.go | 77 +++----- .../autoscaler/metrics/metrics_client.go | 168 ++++++++++++++++++ .../autoscaler/metrics/metrics_client_test.go | 131 ++++++++++++++ 5 files changed, 350 insertions(+), 160 deletions(-) create mode 100644 pkg/controller/autoscaler/metrics/metrics_client.go create mode 100644 pkg/controller/autoscaler/metrics/metrics_client_test.go diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index ae13517a9cd..ce1161c27d6 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -36,6 +36,7 @@ import ( clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/controller/autoscaler" + "k8s.io/kubernetes/pkg/controller/autoscaler/metrics" "k8s.io/kubernetes/pkg/controller/endpoint" "k8s.io/kubernetes/pkg/controller/namespace" "k8s.io/kubernetes/pkg/controller/node" @@ -276,7 +277,8 @@ func (s *CMServer) Run(_ []string) error { if err != nil { glog.Fatalf("Invalid API configuration: %v", err) } - horizontalPodAutoscalerController := autoscalercontroller.New(kubeClient, expClient) + horizontalPodAutoscalerController := autoscalercontroller.New(kubeClient, expClient, + metrics.NewHeapsterMetricsClient(kubeClient)) horizontalPodAutoscalerController.Run(s.HorizontalPodAutoscalerSyncPeriod) } diff --git a/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go b/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go index e376c92d249..a3cce83833b 100644 --- a/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go +++ b/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go @@ -17,64 +17,33 @@ limitations under the License. 
package autoscalercontroller import ( - "encoding/json" "fmt" - "strings" "time" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/resource" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/controller/autoscaler/metrics" "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util" - - heapster "k8s.io/heapster/api/v1/types" -) - -const ( - heapsterNamespace = "kube-system" - heapsterService = "monitoring-heapster" ) type HorizontalPodAutoscalerController struct { - client client.Interface - expClient client.ExperimentalInterface + client client.Interface + expClient client.ExperimentalInterface + metricsClient metrics.MetricsClient } -// Aggregates results into ResourceConsumption. Also returns number of -// pods included in the aggregation. -type metricAggregator func(heapster.MetricResultList) (expapi.ResourceConsumption, int) - -type metricDefinition struct { - name string - aggregator metricAggregator -} - -var resourceDefinitions = map[api.ResourceName]metricDefinition{ - //TODO: add memory - api.ResourceCPU: {"cpu-usage", - func(metrics heapster.MetricResultList) (expapi.ResourceConsumption, int) { - sum, count := calculateSumFromLatestSample(metrics) - value := "0" - if count > 0 { - // assumes that cpu usage is in millis - value = fmt.Sprintf("%dm", sum/uint64(count)) - } - return expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(value)}, count - }}, -} - -var heapsterQueryStart, _ = time.ParseDuration("-5m") var downscaleForbiddenWindow, _ = time.ParseDuration("20m") var upscaleForbiddenWindow, _ = time.ParseDuration("3m") -func New(client client.Interface, expClient client.ExperimentalInterface) *HorizontalPodAutoscalerController { +func New(client client.Interface, expClient client.ExperimentalInterface, metricsClient metrics.MetricsClient) *HorizontalPodAutoscalerController { return &HorizontalPodAutoscalerController{ - client: client, - expClient: expClient, + client: client, + expClient: expClient, + metricsClient: metricsClient, } } @@ -100,57 +69,18 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error { glog.Warningf("Failed to query scale subresource for %s: %v", reference, err) continue } - podList, err := a.client.Pods(hpa.Spec.ScaleRef.Namespace). - List(labels.SelectorFromSet(labels.Set(scale.Status.Selector)), fields.Everything()) + currentReplicas := scale.Status.Replicas + currentConsumption, err := a.metricsClient.ResourceConsumption(hpa.Spec.ScaleRef.Namespace).Get(hpa.Spec.Target.Resource, + scale.Status.Selector) + // TODO: what to do on partial errors (like metrics obtained for 75% of pods). if err != nil { - glog.Warningf("Failed to get pod list for %s: %v", reference, err) - continue - } - podNames := []string{} - for _, pod := range podList.Items { - podNames = append(podNames, pod.Name) - } - - metricSpec, metricDefined := resourceDefinitions[hpa.Spec.Target.Resource] - if !metricDefined { - glog.Warningf("Heapster metric not defined for %s %v", reference, hpa.Spec.Target.Resource) - continue - } - now := time.Now() - - startTime := now.Add(heapsterQueryStart) - metricPath := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s", - hpa.Spec.ScaleRef.Namespace, - strings.Join(podNames, ","), - metricSpec.name) - - resultRaw, err := a.client.Services(heapsterNamespace). - ProxyGet(heapsterService, metricPath, map[string]string{"start": startTime.Format(time.RFC3339)}). 
- DoRaw() - - if err != nil { - glog.Warningf("Failed to get pods metrics for %s: %v", reference, err) - continue - } - - var metrics heapster.MetricResultList - err = json.Unmarshal(resultRaw, &metrics) - if err != nil { - glog.Warningf("Failed to unmarshall heapster response: %v", err) - continue - } - - glog.Infof("Metrics available for %s: %s", reference, string(resultRaw)) - - currentConsumption, count := metricSpec.aggregator(metrics) - if count != len(podList.Items) { - glog.Warningf("Metrics obtained for %d/%d of pods", count, len(podList.Items)) + glog.Warningf("Error while getting metrics for %s: %v", reference, err) continue } // if the ratio is 1.2 we want to have 2 replicas - desiredReplicas := 1 + int((currentConsumption.Quantity.MilliValue()*int64(count))/hpa.Spec.Target.Quantity.MilliValue()) + desiredReplicas := 1 + int((currentConsumption.Quantity.MilliValue()*int64(currentReplicas))/hpa.Spec.Target.Quantity.MilliValue()) if desiredReplicas < hpa.Spec.MinCount { desiredReplicas = hpa.Spec.MinCount @@ -158,18 +88,17 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error { if desiredReplicas > hpa.Spec.MaxCount { desiredReplicas = hpa.Spec.MaxCount } - + now := time.Now() rescale := false - - if desiredReplicas != count { + if desiredReplicas != currentReplicas { // Going down - if desiredReplicas < count && (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil || + if desiredReplicas < currentReplicas && (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil || hpa.Status.LastScaleTimestamp.Add(downscaleForbiddenWindow).Before(now)) { rescale = true } // Going up - if desiredReplicas > count && (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil || + if desiredReplicas > currentReplicas && (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil || hpa.Status.LastScaleTimestamp.Add(upscaleForbiddenWindow).Before(now)) { rescale = true } @@ -185,9 +114,9 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error { } status := expapi.HorizontalPodAutoscalerStatus{ - CurrentReplicas: count, + CurrentReplicas: currentReplicas, DesiredReplicas: desiredReplicas, - CurrentConsumption: ¤tConsumption, + CurrentConsumption: currentConsumption, } hpa.Status = &status if rescale { @@ -203,22 +132,3 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error { } return nil } - -func calculateSumFromLatestSample(metrics heapster.MetricResultList) (uint64, int) { - sum := uint64(0) - count := 0 - for _, metrics := range metrics.Items { - var newest *heapster.MetricPoint - newest = nil - for _, metricPoint := range metrics.Metrics { - if newest == nil || newest.Timestamp.Before(metricPoint.Timestamp) { - newest = &metricPoint - } - } - if newest != nil { - sum += newest.Value - count++ - } - } - return sum, count -} diff --git a/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go b/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go index 3049edca0df..276dcff2c84 100644 --- a/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go +++ b/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go @@ -17,39 +17,33 @@ limitations under the License. 
package autoscalercontroller import ( - "encoding/json" "fmt" "net/http" "net/http/httptest" "testing" - "time" "k8s.io/kubernetes/pkg/api" _ "k8s.io/kubernetes/pkg/api/latest" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/testapi" client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/controller/autoscaler/metrics" "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" "github.com/golang/glog" "github.com/stretchr/testify/assert" - - heapster "k8s.io/heapster/api/v1/types" ) const ( namespace = api.NamespaceDefault rcName = "app-rc" podNameLabel = "app" - podName = "p1" hpaName = "foo" hpaListHandler = "HpaList" scaleHandler = "Scale" - podListHandler = "PodList" - heapsterHandler = "Heapster" updateHpaHandler = "HpaUpdate" ) @@ -58,6 +52,26 @@ type serverResponse struct { obj interface{} } +type fakeMetricsClient struct { + consumption metrics.ResourceConsumptionClient +} + +type fakeResourceConsumptionClient struct { + metrics map[api.ResourceName]expapi.ResourceConsumption +} + +func (f *fakeMetricsClient) ResourceConsumption(namespace string) metrics.ResourceConsumptionClient { + return f.consumption +} + +func (f *fakeResourceConsumptionClient) Get(resource api.ResourceName, selector map[string]string) (*expapi.ResourceConsumption, error) { + consumption, found := f.metrics[resource] + if !found { + return nil, fmt.Errorf("resource not found: %v", resource) + } + return &consumption, nil +} + func makeTestServer(t *testing.T, responses map[string]*serverResponse) (*httptest.Server, map[string]*util.FakeHandler) { handlers := map[string]*util.FakeHandler{} @@ -73,16 +87,6 @@ func makeTestServer(t *testing.T, responses map[string]*serverResponse) (*httpte return &handler } - mkRawHandler := func(url string, response serverResponse) *util.FakeHandler { - handler := util.FakeHandler{ - StatusCode: response.statusCode, - ResponseBody: *response.obj.(*string), - } - mux.Handle(url, &handler) - glog.Infof("Will handle %s", url) - return &handler - } - if responses[hpaListHandler] != nil { handlers[hpaListHandler] = mkHandler("/experimental/v1/horizontalpodautoscalers", *responses[hpaListHandler]) } @@ -92,16 +96,6 @@ func makeTestServer(t *testing.T, responses map[string]*serverResponse) (*httpte fmt.Sprintf("/experimental/v1/namespaces/%s/replicationcontrollers/%s/scale", namespace, rcName), *responses[scaleHandler]) } - if responses[podListHandler] != nil { - handlers[podListHandler] = mkHandler(fmt.Sprintf("/api/v1/namespaces/%s/pods", namespace), *responses[podListHandler]) - } - - if responses[heapsterHandler] != nil { - handlers[heapsterHandler] = mkRawHandler( - fmt.Sprintf("/api/v1/proxy/namespaces/kube-system/services/monitoring-heapster/api/v1/model/namespaces/%s/pod-list/%s/metrics/cpu-usage", - namespace, podName), *responses[heapsterHandler]) - } - if responses[updateHpaHandler] != nil { handlers[updateHpaHandler] = mkHandler(fmt.Sprintf("/experimental/v1/namespaces/%s/horizontalpodautoscalers/%s", namespace, hpaName), *responses[updateHpaHandler]) @@ -150,21 +144,6 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { }, }} - podListResponse := serverResponse{http.StatusOK, &api.PodList{ - Items: []api.Pod{ - { - ObjectMeta: api.ObjectMeta{ - Name: podName, - Namespace: namespace, - }, - }}}} - timestamp := time.Now() - metrics := heapster.MetricResultList{ - Items: []heapster.MetricResult{{ - Metrics: []heapster.MetricPoint{{timestamp, 650}}, - LatestTimestamp: timestamp, - }}} - status := 
expapi.HorizontalPodAutoscalerStatus{ CurrentReplicas: 1, DesiredReplicas: 3, @@ -189,16 +168,10 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { Status: &status, }} - heapsterRawResponse, _ := json.Marshal(&metrics) - heapsterStrResponse := string(heapsterRawResponse) - heapsterResponse := serverResponse{http.StatusOK, &heapsterStrResponse} - testServer, handlers := makeTestServer(t, map[string]*serverResponse{ hpaListHandler: &hpaResponse, scaleHandler: &scaleResponse, - podListHandler: &podListResponse, - heapsterHandler: &heapsterResponse, updateHpaHandler: &updateHpaResponse, }) @@ -206,7 +179,13 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { kubeClient := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) expClient := client.NewExperimentalOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) - hpaController := New(kubeClient, expClient) + fakeRC := fakeResourceConsumptionClient{metrics: map[api.ResourceName]expapi.ResourceConsumption{ + api.ResourceCPU: {Resource: api.ResourceCPU, Quantity: resource.MustParse("650m")}, + }} + fake := fakeMetricsClient{consumption: &fakeRC} + + hpaController := New(kubeClient, expClient, &fake) + err := hpaController.reconcileAutoscalers() if err != nil { t.Fatal("Failed to reconcile: %v", err) diff --git a/pkg/controller/autoscaler/metrics/metrics_client.go b/pkg/controller/autoscaler/metrics/metrics_client.go new file mode 100644 index 00000000000..b061a0c209c --- /dev/null +++ b/pkg/controller/autoscaler/metrics/metrics_client.go @@ -0,0 +1,168 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/golang/glog" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + + heapster "k8s.io/heapster/api/v1/types" +) + +const ( + heapsterNamespace = "kube-system" + heapsterService = "monitoring-heapster" +) + +var heapsterQueryStart, _ = time.ParseDuration("-5m") + +// An interface for getting metrics for pods. +type MetricsClient interface { + ResourceConsumption(namespace string) ResourceConsumptionClient +} + +type ResourceConsumptionClient interface { + // Gets average resource consumption for pods under the given selector. + Get(resourceName api.ResourceName, selector map[string]string) (*expapi.ResourceConsumption, error) +} + +// Aggregates results into ResourceConsumption. Also returns number of +// pods included in the aggregation. 
+type metricAggregator func(heapster.MetricResultList) (expapi.ResourceConsumption, int) + +type metricDefinition struct { + name string + aggregator metricAggregator +} + +// Heapster-based implementation of MetricsClient +type HeapsterMetricsClient struct { + client client.Interface +} + +type HeapsterResourceConsumptionClient struct { + namespace string + client client.Interface + resourceDefinitions map[api.ResourceName]metricDefinition +} + +func NewHeapsterMetricsClient(client client.Interface) *HeapsterMetricsClient { + return &HeapsterMetricsClient{client: client} +} + +var heapsterMetricDefinitions = map[api.ResourceName]metricDefinition{ + //TODO: add memory + api.ResourceCPU: {"cpu-usage", + func(metrics heapster.MetricResultList) (expapi.ResourceConsumption, int) { + sum, count := calculateSumFromLatestSample(metrics) + value := "0" + if count > 0 { + // assumes that cpu usage is in millis + value = fmt.Sprintf("%dm", sum/uint64(count)) + } + return expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(value)}, count + }}, +} + +func (h *HeapsterMetricsClient) ResourceConsumption(namespace string) ResourceConsumptionClient { + return &HeapsterResourceConsumptionClient{ + namespace: namespace, + client: h.client, + resourceDefinitions: heapsterMetricDefinitions, + } +} + +func (h *HeapsterResourceConsumptionClient) Get(resourceName api.ResourceName, selector map[string]string) (*expapi.ResourceConsumption, error) { + podList, err := h.client.Pods(h.namespace). + List(labels.SelectorFromSet(labels.Set(selector)), fields.Everything()) + + if err != nil { + return nil, fmt.Errorf("failed to get pod list: %v", err) + } + podNames := []string{} + for _, pod := range podList.Items { + podNames = append(podNames, pod.Name) + } + return h.getForPods(resourceName, podNames) +} + +func (h *HeapsterResourceConsumptionClient) getForPods(resourceName api.ResourceName, podNames []string) (*expapi.ResourceConsumption, error) { + + metricSpec, metricDefined := h.resourceDefinitions[resourceName] + if !metricDefined { + return nil, fmt.Errorf("heapster metric not defined for %v", resourceName) + } + now := time.Now() + + startTime := now.Add(heapsterQueryStart) + metricPath := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s", + h.namespace, + strings.Join(podNames, ","), + metricSpec.name) + + resultRaw, err := h.client.Services(heapsterNamespace). + ProxyGet(heapsterService, metricPath, map[string]string{"start": startTime.Format(time.RFC3339)}). 
+ DoRaw() + + if err != nil { + return nil, fmt.Errorf("failed to get pods metrics: %v", err) + } + + var metrics heapster.MetricResultList + err = json.Unmarshal(resultRaw, &metrics) + if err != nil { + return nil, fmt.Errorf("failed to unmarshall heapster response: %v", err) + } + + glog.Infof("Metrics available: %s", string(resultRaw)) + + currentConsumption, count := metricSpec.aggregator(metrics) + if count != len(podNames) { + return nil, fmt.Errorf("metrics obtained for %d/%d of pods", count, len(podNames)) + } + + return ¤tConsumption, nil +} + +func calculateSumFromLatestSample(metrics heapster.MetricResultList) (uint64, int) { + sum := uint64(0) + count := 0 + for _, metrics := range metrics.Items { + var newest *heapster.MetricPoint + newest = nil + for _, metricPoint := range metrics.Metrics { + if newest == nil || newest.Timestamp.Before(metricPoint.Timestamp) { + newest = &metricPoint + } + } + if newest != nil { + sum += newest.Value + count++ + } + } + return sum, count +} diff --git a/pkg/controller/autoscaler/metrics/metrics_client_test.go b/pkg/controller/autoscaler/metrics/metrics_client_test.go new file mode 100644 index 00000000000..22277e8806a --- /dev/null +++ b/pkg/controller/autoscaler/metrics/metrics_client_test.go @@ -0,0 +1,131 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "k8s.io/kubernetes/pkg/api" + _ "k8s.io/kubernetes/pkg/api/latest" + "k8s.io/kubernetes/pkg/api/testapi" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util" + + heapster "k8s.io/heapster/api/v1/types" + + "github.com/golang/glog" + "github.com/stretchr/testify/assert" +) + +const ( + namespace = "test-namespace" + podName = "pod1" + podListHandler = "podlisthandler" + heapsterHandler = "heapsterhandler" +) + +type serverResponse struct { + statusCode int + obj interface{} +} + +func makeTestServer(t *testing.T, responses map[string]*serverResponse) (*httptest.Server, map[string]*util.FakeHandler) { + + handlers := map[string]*util.FakeHandler{} + mux := http.NewServeMux() + + mkHandler := func(url string, response serverResponse) *util.FakeHandler { + handler := util.FakeHandler{ + StatusCode: response.statusCode, + ResponseBody: runtime.EncodeOrDie(testapi.Codec(), response.obj.(runtime.Object)), + } + mux.Handle(url, &handler) + glog.Infof("Will handle %s", url) + return &handler + } + + mkRawHandler := func(url string, response serverResponse) *util.FakeHandler { + handler := util.FakeHandler{ + StatusCode: response.statusCode, + ResponseBody: *response.obj.(*string), + } + mux.Handle(url, &handler) + glog.Infof("Will handle %s", url) + return &handler + } + + if responses[podListHandler] != nil { + handlers[podListHandler] = mkHandler(fmt.Sprintf("/api/v1/namespaces/%s/pods", namespace), *responses[podListHandler]) + } + + if responses[heapsterHandler] != nil { + handlers[heapsterHandler] = mkRawHandler( + fmt.Sprintf("/api/v1/proxy/namespaces/kube-system/services/monitoring-heapster/api/v1/model/namespaces/%s/pod-list/%s/metrics/cpu-usage", + namespace, podName), *responses[heapsterHandler]) + } + + mux.HandleFunc("/", func(res http.ResponseWriter, req *http.Request) { + t.Errorf("unexpected request: %v", req.RequestURI) + res.WriteHeader(http.StatusNotFound) + }) + return httptest.NewServer(mux), handlers +} + +func TestHeapsterResourceConsumptionGet(t *testing.T) { + + podListResponse := serverResponse{http.StatusOK, &api.PodList{ + Items: []api.Pod{ + { + ObjectMeta: api.ObjectMeta{ + Name: podName, + Namespace: namespace, + }, + }}}} + + timestamp := time.Now() + metrics := heapster.MetricResultList{ + Items: []heapster.MetricResult{{ + Metrics: []heapster.MetricPoint{{timestamp, 650}}, + LatestTimestamp: timestamp, + }}} + heapsterRawResponse, _ := json.Marshal(&metrics) + heapsterStrResponse := string(heapsterRawResponse) + heapsterResponse := serverResponse{http.StatusOK, &heapsterStrResponse} + + testServer, _ := makeTestServer(t, + map[string]*serverResponse{ + heapsterHandler: &heapsterResponse, + podListHandler: &podListResponse, + }) + + defer testServer.Close() + kubeClient := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + + metricsClient := NewHeapsterMetricsClient(kubeClient) + + val, err := metricsClient.ResourceConsumption(namespace).Get(api.ResourceCPU, map[string]string{"app": "test"}) + if err != nil { + t.Fatalf("Error while getting consumption: %v", err) + } + assert.Equal(t, int64(650), val.Quantity.MilliValue()) +} From 97e505849f8699811d70e007891dce3f669d2e53 Mon Sep 17 00:00:00 2001 From: Brian Grant Date: Thu, 27 Aug 2015 21:12:06 +0000 Subject: [PATCH 012/101] Start on expanding code expectations (aka "The bar") --- 
docs/devel/api-conventions.md | 7 +++ docs/devel/api_changes.md | 90 +++++++++++++++++++++++++++++-- docs/devel/coding-conventions.md | 51 ++++++++++++++++-- docs/devel/development.md | 2 + docs/devel/faster_reviews.md | 32 +++++++++-- docs/devel/kubectl-conventions.md | 27 +++++++++- docs/troubleshooting.md | 5 +- 7 files changed, 199 insertions(+), 15 deletions(-) diff --git a/docs/devel/api-conventions.md b/docs/devel/api-conventions.md index f00dde1e4c8..746d56cba59 100644 --- a/docs/devel/api-conventions.md +++ b/docs/devel/api-conventions.md @@ -713,6 +713,13 @@ Annotations have very different intended usage from labels. We expect them to be In fact, experimental API fields, including to represent fields of newer alpha/beta API versions in the older, stable storage version, may be represented as annotations with the prefix `experimental.kubernetes.io/`. +Other advice regarding use of labels, annotations, and other generic map keys by Kubernetes components and tools: + - Key names should be all lowercase, with words separated by dashes, such as `desired-replicas` + - Prefix the key with `kubernetes.io/` or `foo.kubernetes.io/`, preferably the latter if the label/annotation is specific to `foo` + - For instance, prefer `service-account.kubernetes.io/name` over `kubernetes.io/service-account.name` + - Use annotations to store API extensions that the controller responsible for the resource doesn't need to know about, experimental fields that aren't intended to be generally used API fields, etc. Beware that annotations aren't automatically handled by the API conversion machinery. + + [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/devel/api-conventions.md?pixel)]() diff --git a/docs/devel/api_changes.md b/docs/devel/api_changes.md index 72c38b7ff5e..289123d514d 100644 --- a/docs/devel/api_changes.md +++ b/docs/devel/api_changes.md @@ -189,17 +189,82 @@ API call might POST an object in API v7beta1 format, which uses the cleaner form (since v7beta1 is "beta"). When the user reads the object back in the v7beta1 API it would be unacceptable to have lost all but `Params[0]`. This means that, even though it is ugly, a compatible change must be made to the v6 -API. However, this is very challenging to do correctly. It generally requires +API. + +However, this is very challenging to do correctly. It often requires multiple representations of the same information in the same API resource, which -need to be kept in sync in the event that either is changed. However, if -the new representation is more expressive than the old, this breaks -backward compatibility, since clients that only understood the old representation +need to be kept in sync in the event that either is changed. For example, +let's say you decide to rename a field within the same API version. In this case, +you add units to `height` and `width`. You implement this by adding duplicate +fields: + +```go +type Frobber struct { + Height *int `json:"height"` + Width *int `json:"width"` + HeightInInches *int `json:"heightInInches"` + WidthInInches *int `json:"widthInInches"` +} +``` + +You convert all of the fields to pointers in order to distinguish between unset and +set to 0, and then set each corresponding field from the other in the defaulting +pass (e.g., `heightInInches` from `height`, and vice versa), which runs just prior +to conversion. 
That works fine when the user creates a resource from a hand-written +configuration -- clients can write either field and read either field, but what about +creation or update from the output of GET, or update via PATCH (see +[In-place updates](../user-guide/managing-deployments.md#in-place-updates-of-resources))? +In this case, the two fields will conflict, because only one field would be updated +in the case of an old client that was only aware of the old field (e.g., `height`). + +Say the client creates: + +```json +{ + "height": 10, + "width": 5 +} +``` + +and GETs: + +```json +{ + "height": 10, + "heightInInches": 10, + "width": 5, + "widthInInches": 5 +} +``` + +then PUTs back: + +```json +{ + "height": 13, + "heightInInches": 10, + "width": 5, + "widthInInches": 5 +} +``` + +The update should not fail, because it would have worked before `heightInInches` was added. + +Therefore, when there are duplicate fields, the old field MUST take precedence +over the new, and the new field should be set to match by the server upon write. +A new client would be aware of the old field as well as the new, and so can ensure +that the old field is either unset or is set consistently with the new field. However, +older clients would be unaware of the new field. Please avoid introducing duplicate +fields due to the complexity they incur in the API. + +A new representation, even in a new API version, that is more expressive than an old one +breaks backward compatibility, since clients that only understood the old representation would not be aware of the new representation nor its semantics. Examples of proposals that have run into this challenge include [generalized label selectors](http://issues.k8s.io/341) and [pod-level security context](http://prs.k8s.io/12823). -As another interesting example, enumerated values provide a unique challenge. +As another interesting example, enumerated values cause similar challenges. Adding a new value to an enumerated set is *not* a compatible change. Clients which assume they know how to handle all possible values of a given field will not be able to handle the new values. However, removing value from an @@ -227,6 +292,21 @@ the release notes for the next release by labeling the PR with the "release-note If you found that your change accidentally broke clients, it should be reverted. +In short, the expected API evolution is as follows: +* `experimental/v1alpha1` -> +* `newapigroup/v1alpha1` -> ... -> `newapigroup/v1alphaN` -> +* `newapigroup/v1beta1` -> ... -> `newapigroup/v1betaN` -> +* `newapigroup/v1` -> +* `newapigroup/v2alpha1` -> ... + +While in experimental we have no obligation to move forward with the API at all and may delete or break it at any time. + +While in alpha we expect to move forward with it, but may break it. + +Once in beta we will preserve forward compatibility, but may introduce new versions and delete old ones. + +v1 must be backward-compatible for an extended length of time. 
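To make the duplicate-field rule above concrete, here is a minimal defaulting sketch (illustrative only, not code from the Kubernetes tree; it assumes nothing beyond the `Frobber` fields shown earlier and would run in the defaulting pass before conversion):

```go
package v6

// Frobber repeats the earlier example: Height/Width are the legacy fields,
// HeightInInches/WidthInInches the newer duplicates. Pointers distinguish
// "unset" from "explicitly 0".
type Frobber struct {
	Height         *int `json:"height"`
	Width          *int `json:"width"`
	HeightInInches *int `json:"heightInInches"`
	WidthInInches  *int `json:"widthInInches"`
}

// defaultHeight keeps the two height fields in sync. The old field wins
// whenever it is set, so a PUT from an old client that only changed
// `height` is not clobbered by a stale `heightInInches`; a value written
// only through the new field is mirrored back into the old one.
func defaultHeight(f *Frobber) {
	switch {
	case f.Height != nil:
		f.HeightInInches = f.Height
	case f.HeightInInches != nil:
		f.Height = f.HeightInInches
	}
}
```

The same treatment would apply to `width`/`widthInInches`.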
+ ## Changing versioned APIs For most changes, you will probably find it easiest to change the versioned diff --git a/docs/devel/coding-conventions.md b/docs/devel/coding-conventions.md index ac3d353f4a2..1569d1aa0d8 100644 --- a/docs/devel/coding-conventions.md +++ b/docs/devel/coding-conventions.md @@ -30,12 +30,57 @@ Documentation for other releases can be found at -Coding style advice for contributors +Code conventions - Bash - https://google-styleguide.googlecode.com/svn/trunk/shell.xml + - Ensure that build, release, test, and cluster-management scripts run on OS X - Go - - https://github.com/golang/go/wiki/CodeReviewComments - - https://gist.github.com/lavalamp/4bd23295a9f32706a48f + - Ensure your code passes the [presubmit checks](development.md#hooks) + - [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) + - [Effective Go](https://golang.org/doc/effective_go.html) + - Comment your code. + - [Go's commenting conventions](http://blog.golang.org/godoc-documenting-go-code) + - If reviewers ask questions about why the code is the way it is, that's a sign that comments might be helpful. + - Command-line flags should use dashes, not underscores + - Naming + - Please consider package name when selecting an interface name, and avoid redundancy. + - e.g.: `storage.Interface` is better than `storage.StorageInterface`. + - Do not use uppercase characters, underscores, or dashes in package names. + - Please consider parent directory name when choosing a package name. + - so pkg/controllers/autoscaler/foo.go should say `package autoscaler` not `package autoscalercontroller`. + - Unless there's a good reason, the `package foo` line should match the name of the directory in which the .go file exists. + - Importers can use a different name if they need to disambiguate. + - API conventions + - [API changes](api_changes.md) + - [API conventions](api-conventions.md) + - [Kubectl conventions](kubectl-conventions.md) + - [Logging conventions](logging.md) + +Testing conventions + - All new packages and most new significant functionality must come with unit tests + - Table-driven tests are preferred for testing multiple scenarios/inputs; for example, see [TestNamespaceAuthorization](../../test/integration/auth_test.go) + - Significant features should come with integration (test/integration) and/or end-to-end (test/e2e) tests + - Including new kubectl commands and major features of existing commands + - Unit tests must pass on OS X and Windows platforms - if you use Linux specific features, your test case must either be skipped on windows or compiled out (skipped is better when running Linux specific commands, compiled out is required when your code does not compile on Windows). + +Directory and file conventions + - Avoid package sprawl. Find an appropriate subdirectory for new packages. (See [#4851](http://issues.k8s.io/4851) for discussion.) + - Libraries with no more appropriate home belong in new package subdirectories of pkg/util + - Avoid general utility packages. Packages called "util" are suspect. Instead, derive a name that describes your desired function. For example, the utility functions dealing with waiting for operations are in the "wait" package and include functionality like Poll. So the full name is wait.Poll + - Go source files and directories use underscores, not dashes + - Package directories should generally avoid using separators as much as possible (when packages are multiple words, they usually should be in nested subdirectories). 
+ - Document directories and filenames should use dashes rather than underscores + - Contrived examples that illustrate system features belong in /docs/user-guide or /docs/admin, depending on whether it is a feature primarily intended for users that deploy applications or cluster administrators, respectively. Actual application examples belong in /examples. + - Examples should also illustrate [best practices for using the system](../user-guide/config-best-practices.md) + - Third-party code + - Third-party Go code is managed using Godeps + - Other third-party code belongs in /third_party + - Third-party code must include licenses + - This includes modified third-party code and excerpts, as well + +Coding advice + - Go + - [Go landmines](https://gist.github.com/lavalamp/4bd23295a9f32706a48f) diff --git a/docs/devel/development.md b/docs/devel/development.md index a266f7cbf56..44ceee1cf5e 100644 --- a/docs/devel/development.md +++ b/docs/devel/development.md @@ -112,6 +112,8 @@ fixups (e.g. automated doc formatting), use one or more commits for the changes to tooling and a final commit to apply the fixup en masse. This makes reviews much easier. +See [Faster Reviews](faster_reviews.md) for more details. + ## godep and dependency management Kubernetes uses [godep](https://github.com/tools/godep) to manage dependencies. It is not strictly required for building Kubernetes but it is required when managing dependencies under the Godeps/ tree, and is required by a number of the build and test scripts. Please make sure that ``godep`` is installed and in your ``$PATH``. diff --git a/docs/devel/faster_reviews.md b/docs/devel/faster_reviews.md index 3ea030d35e9..0c70e4356e0 100644 --- a/docs/devel/faster_reviews.md +++ b/docs/devel/faster_reviews.md @@ -53,15 +53,24 @@ later, just as soon as they have more free time (ha!). Let's talk about how to avoid this. +## 0. Familiarize yourself with project conventions + +* [Development guide](development.md) +* [Coding conventions](coding-conventions.md) +* [API conventions](api-conventions.md) +* [Kubectl conventions](kubectl-conventions.md) + ## 1. Don't build a cathedral in one PR Are you sure FeatureX is something the Kubernetes team wants or will accept, or that it is implemented to fit with other changes in flight? Are you willing to bet a few days or weeks of work on it? If you have any doubt at all about the -usefulness of your feature or the design - make a proposal doc or a sketch PR -or both. Write or code up just enough to express the idea and the design and -why you made those choices, then get feedback on this. Now, when we ask you to -change a bunch of facets of the design, you don't have to re-write it all. +usefulness of your feature or the design - make a proposal doc (in docs/proposals; +for example [the QoS proposal](http://prs.k8s.io/11713)) or a sketch PR (e.g., just +the API or Go interface) or both. Write or code up just enough to express the idea +and the design and why you made those choices, then get feedback on this. Be clear +about what type of feedback you are asking for. Now, if we ask you to change a +bunch of facets of the design, you won't have to re-write it all. ## 2. Smaller diffs are exponentially better @@ -154,7 +163,20 @@ commit and re-push. Your reviewer can then look at that commit on its own - so much faster to review than starting over. We might still ask you to clean up your commits at the very end, for the sake -of a more readable history. 
+of a more readable history, but don't do this until asked, typically at the point +where the PR would otherwise be tagged LGTM. + +General squashing guidelines: + +* Sausage => squash + + When there are several commits to fix bugs in the original commit(s), address reviewer feedback, etc. Really we only want to see the end state and commit message for the whole PR. + +* Layers => don't squash + + When there are independent changes layered upon each other to achieve a single goal. For instance, writing a code munger could be one commit, applying it could be another, and adding a precommit check could be a third. One could argue they should be separate PRs, but there's really no way to test/review the munger without seeing it applied, and there needs to be a precommit check to ensure the munged output doesn't immediately get out of date. + +A commit, as much as possible, should be a single logical change. Each commit should always have a good title line (<70 characters) and include an additional description paragraph describing in more detail the change intended. Do not link pull requests by `#` in a commit description, because GitHub creates lots of spam. Instead, reference other PRs via the PR your commit is in. ## 8. KISS, YAGNI, MVP, etc diff --git a/docs/devel/kubectl-conventions.md b/docs/devel/kubectl-conventions.md index 5739708c6a5..a37e589922c 100644 --- a/docs/devel/kubectl-conventions.md +++ b/docs/devel/kubectl-conventions.md @@ -34,7 +34,7 @@ Documentation for other releases can be found at Kubectl Conventions =================== -Updated: 8/12/2015 +Updated: 8/27/2015 **Table of Contents** @@ -77,6 +77,31 @@ Updated: 8/12/2015 * Flags are all lowercase, with words separated by hyphens * Flag names and single-character aliases should have the same meaning across all commands * Command-line flags corresponding to API fields should accept API enums exactly (e.g., --restart=Always) +* Do not reuse flags for different semantic purposes, and do not use different flag names for the same semantic purpose -- grep for `"Flags()"` before adding a new flag +* Use short flags sparingly, only for the most frequently used options, prefer lowercase over uppercase for the most common cases, try to stick to well known conventions for UNIX commands and/or Docker, where they exist, and update this list when adding new short flags + * `-f`: Resource file + * also used for `--follow` in `logs`, but should be deprecated in favor of `-F` + * `-l`: Label selector + * also used for `--labels` in `expose`, but should be deprecated + * `-L`: Label columns + * `-c`: Container + * also used for `--client` in `version`, but should be deprecated + * `-i`: Attach stdin + * `-t`: Allocate TTY + * also used for `--template`, but deprecated + * `-w`: Watch (currently also used for `--www` in `proxy`, but should be deprecated) + * `-p`: Previous + * also used for `--pod` in `exec`, but deprecated + * also used for `--patch` in `patch`, but should be deprecated + * also used for `--port` in `proxy`, but should be deprecated + * `-P`: Static file prefix in `proxy`, but should be deprecated + * `-r`: Replicas + * `-u`: Unix socket + * `-v`: Verbose logging level +* `--dry-run`: Don't modify the live state; simulate the mutation and display the output +* `--local`: Don't contact the server; just do local read, transformation, generation, etc. 
and display the output +* `--output-version=...`: Convert the output to a different API group/version +* `--validate`: Validate the resource schema ## Output conventions diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index eafddd94c38..7f80407c0b8 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -76,7 +76,10 @@ If you have what looks like a bug, or you would like to make a feature request, Before you file an issue, please search existing issues to see if your issue is already covered. -If filing a bug, please include detailed information about how to reproduce the problem. +If filing a bug, please include detailed information about how to reproduce the problem, such as: +* Kubernetes version: `kubectl version` +* Cloud provider, OS distro, network configuration, and Docker version +* Steps to reproduce the problem [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/troubleshooting.md?pixel)]() From d6c43b0606dbebfeec7829291da8cc418b7ea56a Mon Sep 17 00:00:00 2001 From: Vanya Tarasov Date: Tue, 1 Sep 2015 15:41:32 -0700 Subject: [PATCH 013/101] Recognize cloud-platform scope on GCP GCP credential provider currently requires presence of 'devstorage.*' scope, however it fails to recognize 'cloud-platform' scope that implies it. --- pkg/credentialprovider/gcp/metadata.go | 20 +++++++------ pkg/credentialprovider/gcp/metadata_test.go | 33 +++++++++++++++++++++ 2 files changed, 44 insertions(+), 9 deletions(-) diff --git a/pkg/credentialprovider/gcp/metadata.go b/pkg/credentialprovider/gcp/metadata.go index 60c771955ab..8ab929315f1 100644 --- a/pkg/credentialprovider/gcp/metadata.go +++ b/pkg/credentialprovider/gcp/metadata.go @@ -27,14 +27,15 @@ import ( ) const ( - metadataUrl = "http://metadata.google.internal./computeMetadata/v1/" - metadataAttributes = metadataUrl + "instance/attributes/" - dockerConfigKey = metadataAttributes + "google-dockercfg" - dockerConfigUrlKey = metadataAttributes + "google-dockercfg-url" - metadataScopes = metadataUrl + "instance/service-accounts/default/scopes" - metadataToken = metadataUrl + "instance/service-accounts/default/token" - metadataEmail = metadataUrl + "instance/service-accounts/default/email" - storageScopePrefix = "https://www.googleapis.com/auth/devstorage" + metadataUrl = "http://metadata.google.internal./computeMetadata/v1/" + metadataAttributes = metadataUrl + "instance/attributes/" + dockerConfigKey = metadataAttributes + "google-dockercfg" + dockerConfigUrlKey = metadataAttributes + "google-dockercfg-url" + metadataScopes = metadataUrl + "instance/service-accounts/default/scopes" + metadataToken = metadataUrl + "instance/service-accounts/default/token" + metadataEmail = metadataUrl + "instance/service-accounts/default/email" + storageScopePrefix = "https://www.googleapis.com/auth/devstorage" + cloudPlatformScopePrefix = "https://www.googleapis.com/auth/cloud-platform" ) // For these urls, the parts of the host name can be glob, for example '*.gcr.io" will match @@ -150,7 +151,8 @@ func (g *containerRegistryProvider) Enabled() bool { } for _, v := range scopes { - if strings.HasPrefix(v, storageScopePrefix) { + // cloudPlatformScope implies storage scope. 
+ if strings.HasPrefix(v, storageScopePrefix) || strings.HasPrefix(v, cloudPlatformScopePrefix) { return true } } diff --git a/pkg/credentialprovider/gcp/metadata_test.go b/pkg/credentialprovider/gcp/metadata_test.go index 0187700e1c6..fbcbdd2f1fd 100644 --- a/pkg/credentialprovider/gcp/metadata_test.go +++ b/pkg/credentialprovider/gcp/metadata_test.go @@ -275,6 +275,39 @@ func TestContainerRegistryNoStorageScope(t *testing.T) { } } +func TestComputePlatformScopeSubstitutesStorageScope(t *testing.T) { + const ( + defaultEndpoint = "/computeMetadata/v1/instance/service-accounts/default/" + scopeEndpoint = defaultEndpoint + "scopes" + ) + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Only serve the URL key and the value endpoint + if scopeEndpoint == r.URL.Path { + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + fmt.Fprint(w, `["https://www.googleapis.com/auth/compute.read_write","https://www.googleapis.com/auth/cloud-platform.read-only"]`) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) + defer server.Close() + + // Make a transport that reroutes all traffic to the example server + transport := &http.Transport{ + Proxy: func(req *http.Request) (*url.URL, error) { + return url.Parse(server.URL + req.URL.Path) + }, + } + + provider := &containerRegistryProvider{ + metadataProvider{Client: &http.Client{Transport: transport}}, + } + + if !provider.Enabled() { + t.Errorf("Provider is unexpectedly disabled") + } +} + func TestAllProvidersNoMetadata(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotFound) From 02206bed619131c92b350ec20ee585be61f7e333 Mon Sep 17 00:00:00 2001 From: feihujiang Date: Mon, 31 Aug 2015 10:44:41 +0800 Subject: [PATCH 014/101] Print valid container names when the command not specify the container --- pkg/kubectl/cmd/log.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/pkg/kubectl/cmd/log.go b/pkg/kubectl/cmd/log.go index e81ffda05ee..0e8d7103f94 100644 --- a/pkg/kubectl/cmd/log.go +++ b/pkg/kubectl/cmd/log.go @@ -21,6 +21,7 @@ import ( "io" "os" "strconv" + "strings" "github.com/spf13/cobra" "k8s.io/kubernetes/pkg/api" @@ -116,15 +117,18 @@ func RunLog(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string return err } - var container string - if cmdutil.GetFlagString(cmd, "container") != "" { - // [-c CONTAINER] - container = p.containerName - } else { + // [-c CONTAINER] + container := p.containerName + if len(container) == 0 { // [CONTAINER] (container as arg not flag) is supported as legacy behavior. See PR #10519 for more details. if len(args) == 1 { if len(pod.Spec.Containers) != 1 { - return fmt.Errorf("POD %s has more than one container; please specify the container to print logs for", pod.ObjectMeta.Name) + podContainersNames := []string{} + for _, container := range pod.Spec.Containers { + podContainersNames = append(podContainersNames, container.Name) + } + + return fmt.Errorf("Pod %s has the following containers: %s; please specify the container to print logs for with -c", pod.ObjectMeta.Name, strings.Join(podContainersNames, ", ")) } container = pod.Spec.Containers[0].Name } else { From af1d7de874959087897264a7a60ad23ffb6a7d27 Mon Sep 17 00:00:00 2001 From: hurf Date: Tue, 1 Sep 2015 21:30:15 +0800 Subject: [PATCH 015/101] Change default output of `run` command Use simple message instead of print result in `get` form. 
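In practice the intended behavior is roughly the following (a sketch for illustration; the exact confirmation wording comes from `cmdutil.PrintSuccess` and is an assumption here, not text taken from the patch):

```sh
# Without an explicit output format, print a short confirmation
# instead of the get-style table (wording approximate):
$ kubectl run nginx --image=nginx
replicationcontroller "nginx" created

# With -o/--output set, print the created object as before:
$ kubectl run nginx --image=nginx -o yaml
```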
--- pkg/kubectl/cmd/run.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index 877265f8fba..c2e1398c818 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -220,7 +220,13 @@ func Run(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *cob return fmt.Errorf("cannot attach to %s: not implemented", kind) } } - return f.PrintObject(cmd, obj, cmdOut) + + outputFormat := cmdutil.GetFlagString(cmd, "output") + if outputFormat != "" { + return f.PrintObject(cmd, obj, cmdOut) + } + cmdutil.PrintSuccess(mapper, false, cmdOut, mapping.Resource, args[0], "created") + return nil } func waitForPodRunning(c *client.Client, pod *api.Pod, out io.Writer) error { From 3c4ab920c0b4b7f19e902b09742bf145eb6dacf4 Mon Sep 17 00:00:00 2001 From: Casey D Date: Wed, 2 Sep 2015 09:47:00 -0700 Subject: [PATCH 016/101] Update Calico-Ubuntu install guide --- docs/getting-started-guides/ubuntu-calico.md | 36 +++++++++++++++----- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/docs/getting-started-guides/ubuntu-calico.md b/docs/getting-started-guides/ubuntu-calico.md index 80b96341776..e83a097b69f 100644 --- a/docs/getting-started-guides/ubuntu-calico.md +++ b/docs/getting-started-guides/ubuntu-calico.md @@ -35,7 +35,7 @@ Kubernetes Deployment On Bare-metal Ubuntu Nodes with Calico Networking ## Introduction -This document describes how to deploy Kubernetes on ubuntu bare metal nodes with Calico Networking plugin. See [projectcalico.org](http://projectcalico.org) for more information on what Calico is, and [the calicoctl github](https://github.com/projectcalico/calico-docker) for more information on the command-line tool, `calicoctl`. +This document describes how to deploy Kubernetes on Ubuntu bare metal nodes with Calico Networking plugin. See [projectcalico.org](http://projectcalico.org) for more information on what Calico is, and [the calicoctl github](https://github.com/projectcalico/calico-docker) for more information on the command-line tool, `calicoctl`. This guide will set up a simple Kubernetes cluster with a master and two nodes. We will start the following processes with systemd: @@ -54,7 +54,8 @@ On each Node: ## Prerequisites 1. This guide uses `systemd` and thus uses Ubuntu 15.04 which supports systemd natively. -2. All Kubernetes nodes should have the latest docker stable version installed. At the time of writing, that is Docker 1.7.0. +2. All machines should have the latest docker stable version installed. At the time of writing, that is Docker 1.7.0. + - To install docker, follow [these instructions](https://docs.docker.com/installation/ubuntulinux/) 3. All hosts should be able to communicate with each other, as well as the internet, to download the necessary files. 4. This demo assumes that none of the hosts have been configured with any Kubernetes or Calico software yet. @@ -122,8 +123,6 @@ sudo systemctl start kube-controller-manager.service sudo systemctl start kube-scheduler.service ``` -> *You may want to consider checking their status after to ensure everything is running.* - ### Install Calico on Master In order to allow the master to route to pods on our nodes, we will launch the calico-node daemon on our master. This will allow it to learn routes over BGP from the other calico-node daemons in the cluster. The docker daemon should already be running before calico is started. 
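A quick sanity check for this step (a suggestion layered on top of the guide, assuming the `calicoctl` binary used throughout this document is on the PATH and provides a `status` subcommand):

```sh
# calico-node runs as a Docker container; confirm it is up and ask
# calicoctl what it reports for the local Felix/BIRD processes.
sudo docker ps | grep calico-node
sudo calicoctl status
```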
@@ -176,6 +175,7 @@ sudo mv -f network-environment /etc
 
 Instead of using docker's default interface (docker0), we will configure a new one to use desired IP ranges
 
 ```
+sudo apt-get install -y bridge-utils
 sudo brctl addbr cbr0
 sudo ifconfig cbr0 up
 sudo ifconfig cbr0 /24
@@ -197,9 +197,12 @@ The Docker daemon must be started and told to use the already configured cbr0 in
 
 2.) Find the line that reads `ExecStart=/usr/bin/docker -d -H fd://` and append the following flags: `--bridge=cbr0 --iptables=false --ip-masq=false`
 
-3.) Reload systemctl with `sudo systemctl daemon-reload`
+3.) Reload systemctl and restart docker.
 
-4.) Restart docker with with `sudo systemctl restart docker`
+```
+sudo systemctl daemon-reload
+sudo systemctl restart docker
+```
 
 ### Install Calico on the Node
@@ -241,6 +244,10 @@ kubernetes/cluster/ubuntu/build.sh
 
 # Add binaries to /usr/bin
 sudo cp -f binaries/minion/* /usr/bin
+
+# Get the iptables based kube-proxy recommended for this demo
+sudo wget https://github.com/projectcalico/calico-kubernetes/releases/download/v0.1.1/kube-proxy -P /usr/bin/
+sudo chmod +x /usr/bin/kube-proxy
 ```
 
 2.) Install and launch the sample systemd processes settings for launching Kubernetes services
@@ -256,6 +263,14 @@ sudo systemctl start kube-kubelet.service
 
 >*You may want to consider checking their status after to ensure everything is running*
 
+## Install the DNS Addon
+
+Most Kubernetes deployments will require the DNS addon for service discovery. For more on DNS service discovery, check [here](../../cluster/addons/dns/).
+
+The config repository for this guide comes with manifest files to start the DNS addon. To install DNS, do the following on your Master node.
+
+Replace the master IP placeholder in `calico-kubernetes-ubuntu-demo-master/dns/skydns-rc.yaml` with your Master's IP address. Then, create `skydns-rc.yaml` and `skydns-svc.yaml` using `kubectl create -f`.
+
 ## Launch other Services With Calico-Kubernetes
 
 At this point, you have a fully functioning cluster running on kubernetes with a master and 2 nodes networked with Calico. You can now follow any of the [standard documentation](../../examples/) to set up other services on your cluster.
@@ -268,12 +283,15 @@ With this sample configuration, because the containers have private `192.168.0.0
 
 The simplest method for enabling connectivity from containers to the internet is to use an iptables masquerade rule. This is the standard mechanism [recommended](../../docs/admin/networking.md#google-compute-engine-gce) in the Kubernetes GCE environment.
 
-We need to NAT traffic that has a destination outside of the cluster. Internal traffic includes the master/nodes, and the container IP pools. Assuming that the master and nodes are in the `172.25.0.0/24` subnet, the cbr0 IP ranges are all in the `192.168.0.0/16` network, and the nodes use the interface `eth0` for external connectivity, a suitable masquerade chain would look like this:
+We need to NAT traffic that has a destination outside of the cluster. Internal traffic includes the master/nodes, and the container IP pools. A suitable masquerade chain would follow the pattern below, replacing the following variables:
+- `CONTAINER_SUBNET`: The cluster-wide subnet from which container IPs are chosen. All cbr0 bridge subnets fall within this range. The above example uses `192.168.0.0/16`.
+- `KUBERNETES_HOST_SUBNET`: The subnet from which Kubernetes node / master IP addresses have been chosen.
+- `HOST_INTERFACE`: The interface on the Kubernetes node which is used for external connectivity.
The above example uses `eth0`
 
 ```
 sudo iptables -t nat -N KUBE-OUTBOUND-NAT
-sudo iptables -t nat -A KUBE-OUTBOUND-NAT -d 192.168.0.0/16 -o eth0 -j RETURN
-sudo iptables -t nat -A KUBE-OUTBOUND-NAT -d 172.25.0.0/24 -o eth0 -j RETURN
+sudo iptables -t nat -A KUBE-OUTBOUND-NAT -d <CONTAINER_SUBNET> -o <HOST_INTERFACE> -j RETURN
+sudo iptables -t nat -A KUBE-OUTBOUND-NAT -d <KUBERNETES_HOST_SUBNET> -o <HOST_INTERFACE> -j RETURN
 sudo iptables -t nat -A KUBE-OUTBOUND-NAT -j MASQUERADE
 sudo iptables -t nat -A POSTROUTING -j KUBE-OUTBOUND-NAT
 ```

From 207dfe54f07186f8733cbaff9852c23b9d3ee407 Mon Sep 17 00:00:00 2001
From: Yash Parikh
Date: Tue, 25 Aug 2015 16:47:33 -0700
Subject: [PATCH 017/101] Fixes #11471 Readiness probe for kube-dns RC (HA)

---
 cluster/addons/dns/skydns-rc.yaml.in | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/cluster/addons/dns/skydns-rc.yaml.in b/cluster/addons/dns/skydns-rc.yaml.in
index 57afe179222..eab8772faf5 100644
--- a/cluster/addons/dns/skydns-rc.yaml.in
+++ b/cluster/addons/dns/skydns-rc.yaml.in
@@ -1,22 +1,22 @@
 apiVersion: v1
 kind: ReplicationController
 metadata:
-  name: kube-dns-v8
+  name: kube-dns-v9
   namespace: kube-system
   labels:
     k8s-app: kube-dns
-    version: v8
+    version: v9
     kubernetes.io/cluster-service: "true"
 spec:
   replicas: {{ pillar['dns_replicas'] }}
   selector:
     k8s-app: kube-dns
-    version: v8
+    version: v9
   template:
     metadata:
       labels:
         k8s-app: kube-dns
-        version: v8
+        version: v9
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
@@ -73,6 +73,13 @@ spec:
           scheme: HTTP
         initialDelaySeconds: 30
         timeoutSeconds: 5
+      readinessProbe:
+        httpGet:
+          path: /healthz
+          port: 8080
+          scheme: HTTP
+        initialDelaySeconds: 1
+        timeoutSeconds: 5
       - name: healthz
         image: gcr.io/google_containers/exechealthz:1.0
         resources:

From c9beb5b39c8359b10e77aca6f849ef9c1ca9a76a Mon Sep 17 00:00:00 2001
From: Janet Kuo
Date: Tue, 1 Sep 2015 18:08:03 -0700
Subject: [PATCH 018/101] Fix the bug that rolling-update throws error when using generateName

---
 pkg/kubectl/rolling_updater.go | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/pkg/kubectl/rolling_updater.go b/pkg/kubectl/rolling_updater.go
index 448f34b8733..5aa4d63e387 100644
--- a/pkg/kubectl/rolling_updater.go
+++ b/pkg/kubectl/rolling_updater.go
@@ -395,7 +395,7 @@ func (r *RollingUpdater) pollForReadyPods(interval, timeout time.Duration, oldRc
 // Existing controllers are validated to ensure their sourceIdAnnotation
 // matches sourceId; if there's a mismatch, an error is returned.
 func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *api.ReplicationController, sourceId string) (*api.ReplicationController, bool, error) {
-	existing, err := r.c.ReplicationControllers(controller.Namespace).Get(controller.Name)
+	existingRc, err := r.existingController(controller)
 	if err != nil {
 		if !errors.IsNotFound(err) {
 			// There was an error trying to find the controller; don't assume we
@@ -416,13 +416,23 @@ func (r *RollingUpdater) getOrCreateTargetControllerWithClient(controller *api.R
 		return newRc, false, err
 	}
 	// Validate and use the existing controller.
- annotations := existing.Annotations + annotations := existingRc.Annotations source := annotations[sourceIdAnnotation] _, ok := annotations[desiredReplicasAnnotation] if source != sourceId || !ok { return nil, false, fmt.Errorf("Missing/unexpected annotations for controller %s, expected %s : %s", controller.Name, sourceId, annotations) } - return existing, true, nil + return existingRc, true, nil +} + +// existingController verifies if the controller already exists +func (r *RollingUpdater) existingController(controller *api.ReplicationController) (*api.ReplicationController, error) { + // without rc name but generate name, there's no existing rc + if len(controller.Name) == 0 && len(controller.GenerateName) > 0 { + return nil, errors.NewNotFound("ReplicationController", controller.Name) + } + // controller name is required to get rc back + return r.c.ReplicationControllers(controller.Namespace).Get(controller.Name) } // cleanupWithClients performs cleanup tasks after the rolling update. Update From d3398e2aed77bbd288f9ca8209ac98b5d145a20c Mon Sep 17 00:00:00 2001 From: Isaac Hollander McCreery Date: Fri, 28 Aug 2015 15:34:12 -0700 Subject: [PATCH 019/101] GCE tokens behavior to new format --- cluster/gce/configure-vm.sh | 3 ++- pkg/cloudprovider/providers/gce/gce.go | 3 ++- pkg/cloudprovider/providers/gce/token_source.go | 13 ++++++++----- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh index 6e41453649f..c199a6643c5 100644 --- a/cluster/gce/configure-vm.sh +++ b/cluster/gce/configure-vm.sh @@ -540,10 +540,11 @@ grains: - kubernetes-master cloud: gce EOF - if ! [[ -z "${PROJECT_ID:-}" ]] && ! [[ -z "${TOKEN_URL:-}" ]] && ! [[ -z "${NODE_NETWORK:-}" ]] ; then + if ! [[ -z "${PROJECT_ID:-}" ]] && ! [[ -z "${TOKEN_URL:-}" ]] && ! [[ -z "${TOKEN_BODY:-}" ]] && ! 
[[ -z "${NODE_NETWORK:-}" ]] ; then cat </etc/gce.conf [global] token-url = ${TOKEN_URL} +token-body = ${TOKEN_BODY} project-id = ${PROJECT_ID} network-name = ${NODE_NETWORK} EOF diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index fbe172ab23f..3829956b98e 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -61,6 +61,7 @@ type GCECloud struct { type Config struct { Global struct { TokenURL string `gcfg:"token-url"` + TokenBody string `gcfg:"token-body"` ProjectID string `gcfg:"project-id"` NetworkName string `gcfg:"network-name"` } @@ -159,7 +160,7 @@ func newGCECloud(config io.Reader) (*GCECloud, error) { } } if cfg.Global.TokenURL != "" { - tokenSource = newAltTokenSource(cfg.Global.TokenURL) + tokenSource = newAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody) } } client := oauth2.NewClient(oauth2.NoContext, tokenSource) diff --git a/pkg/cloudprovider/providers/gce/token_source.go b/pkg/cloudprovider/providers/gce/token_source.go index 4bf33246ca0..e5e327d03c8 100644 --- a/pkg/cloudprovider/providers/gce/token_source.go +++ b/pkg/cloudprovider/providers/gce/token_source.go @@ -19,6 +19,7 @@ package gce_cloud import ( "encoding/json" "net/http" + "strings" "time" "k8s.io/kubernetes/pkg/util" @@ -59,6 +60,7 @@ func init() { type altTokenSource struct { oauthClient *http.Client tokenURL string + tokenBody string throttle util.RateLimiter } @@ -73,7 +75,7 @@ func (a *altTokenSource) Token() (*oauth2.Token, error) { } func (a *altTokenSource) token() (*oauth2.Token, error) { - req, err := http.NewRequest("GET", a.tokenURL, nil) + req, err := http.NewRequest("POST", a.tokenURL, strings.NewReader(a.tokenBody)) if err != nil { return nil, err } @@ -86,23 +88,24 @@ func (a *altTokenSource) token() (*oauth2.Token, error) { return nil, err } var tok struct { - AccessToken string `json:"accessToken"` - ExpiryTimeSeconds int64 `json:"expiryTimeSeconds,string"` + AccessToken string `json:"accessToken"` + ExpireTime time.Time `json:"expireTime"` } if err := json.NewDecoder(res.Body).Decode(&tok); err != nil { return nil, err } return &oauth2.Token{ AccessToken: tok.AccessToken, - Expiry: time.Unix(tok.ExpiryTimeSeconds, 0), + Expiry: tok.ExpireTime, }, nil } -func newAltTokenSource(tokenURL string) oauth2.TokenSource { +func newAltTokenSource(tokenURL, tokenBody string) oauth2.TokenSource { client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource("")) a := &altTokenSource{ oauthClient: client, tokenURL: tokenURL, + tokenBody: tokenBody, throttle: util.NewTokenBucketRateLimiter(tokenURLQPS, tokenURLBurst), } return oauth2.ReuseTokenSource(nil, a) From e3bb7834d9ffe4119d8007cbdf4865b2c961a253 Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Wed, 2 Sep 2015 14:05:33 -0700 Subject: [PATCH 020/101] Don't override KUBE_RELEASE_RUN_TESTS if set in enviroment --- hack/jenkins/build.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hack/jenkins/build.sh b/hack/jenkins/build.sh index 5caf3900760..b63407aed98 100755 --- a/hack/jenkins/build.sh +++ b/hack/jenkins/build.sh @@ -31,9 +31,11 @@ set -o xtrace # space. export HOME=${WORKSPACE} # Nothing should want Jenkins $HOME export PATH=$PATH:/usr/local/go/bin -export KUBE_RELEASE_RUN_TESTS=n export KUBE_SKIP_CONFIRMATIONS=y +: ${KUBE_RELEASE_RUN_TESTS:="n"} +export KUBE_RELEASE_RUN_TESTS + # Clean stuff out. Assume the last build left the tree in an odd # state. 
rm -rf ~/.kube* From 97b4a64e2ce0d6db030e6a65c22ee7c4745ce85b Mon Sep 17 00:00:00 2001 From: Wojciech Tyczynski Date: Tue, 1 Sep 2015 15:27:13 +0200 Subject: [PATCH 021/101] Refactor registry etcd delete tests --- pkg/api/rest/resttest/resttest.go | 263 +++++++++--------- pkg/registry/controller/etcd/etcd_test.go | 66 +---- pkg/registry/daemon/etcd/etcd_test.go | 66 +---- pkg/registry/deployment/etcd/etcd_test.go | 65 +---- pkg/registry/endpoint/etcd/etcd_test.go | 90 ++---- .../horizontalpodautoscaler/etcd/etcd_test.go | 53 ++-- .../horizontalpodautoscaler/strategy.go | 25 +- pkg/registry/limitrange/etcd/etcd_test.go | 29 ++ pkg/registry/minion/etcd/etcd_test.go | 59 +--- pkg/registry/namespace/etcd/etcd_test.go | 75 ++--- pkg/registry/namespace/strategy.go | 7 +- .../persistentvolume/etcd/etcd_test.go | 89 ++---- pkg/registry/persistentvolume/strategy.go | 3 +- .../persistentvolumeclaim/etcd/etcd_test.go | 125 ++------- .../persistentvolumeclaim/strategy.go | 3 +- pkg/registry/pod/etcd/etcd_test.go | 113 +------- pkg/registry/podtemplate/etcd/etcd_test.go | 29 ++ pkg/registry/podtemplate/strategy.go | 25 +- pkg/registry/registrytest/etcd.go | 31 +++ pkg/registry/resourcequota/etcd/etcd_test.go | 58 +--- pkg/registry/secret/etcd/etcd_test.go | 29 ++ pkg/registry/service/etcd/etcd_test.go | 6 + pkg/registry/serviceaccount/etcd/etcd_test.go | 31 +++ .../thirdpartyresource/etcd/etcd_test.go | 53 ++-- 24 files changed, 497 insertions(+), 896 deletions(-) diff --git a/pkg/api/rest/resttest/resttest.go b/pkg/api/rest/resttest/resttest.go index 23b5a87221a..adf2d7e3e9d 100644 --- a/pkg/api/rest/resttest/resttest.go +++ b/pkg/api/rest/resttest/resttest.go @@ -37,11 +37,12 @@ import ( type Tester struct { *testing.T - storage rest.Storage - storageError injectErrorFunc - clusterScope bool - createOnUpdate bool - generatesName bool + storage rest.Storage + storageError injectErrorFunc + clusterScope bool + createOnUpdate bool + generatesName bool + returnDeletedObject bool } type injectErrorFunc func(err error) @@ -75,6 +76,11 @@ func (t *Tester) GeneratesName() *Tester { return t } +func (t *Tester) ReturnDeletedObject() *Tester { + t.returnDeletedObject = true + return t +} + // TestNamespace returns the namespace that will be used when creating contexts. // Returns NamespaceNone for cluster-scoped objects. func (t *Tester) TestNamespace() string { @@ -125,6 +131,7 @@ type EmitFunc func(runtime.Object, string) error type GetFunc func(api.Context, runtime.Object) (runtime.Object, error) type InitWatchFunc func() type InjectErrFunc func(err error) +type IsErrorFunc func(err error) bool type SetFunc func(api.Context, runtime.Object) error type SetRVFunc func(uint64) type UpdateFunc func(runtime.Object) runtime.Object @@ -160,50 +167,45 @@ func (t *Tester) TestUpdate(valid runtime.Object, setFn SetFunc, setRVFn SetRVFu } // Test deleting an object. -// TODO(wojtek-t): Change it to use AssignFunc instead. -func (t *Tester) TestDelete(createFn func() runtime.Object, wasGracefulFn func() bool, invalid ...runtime.Object) { - t.TestDeleteNonExist(createFn) - t.TestDeleteNoGraceful(createFn, wasGracefulFn) - t.TestDeleteInvokesValidation(invalid...) - // TODO: Test delete namespace mismatch rejection - // once #5684 is fixed. +func (t *Tester) TestDelete(valid runtime.Object, setFn SetFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) { + t.testDeleteNonExist(copyOrDie(valid)) + t.testDeleteNoGraceful(copyOrDie(valid), setFn, getFn, isNotFoundFn) } -// Test graceful deletion. 
-// TODO(wojtek-t): Change it to use AssignFunc instead. -func (t *Tester) TestDeleteGraceful(createFn func() runtime.Object, expectedGrace int64, wasGracefulFn func() bool) { - t.TestDeleteGracefulHasDefault(createFn(), expectedGrace, wasGracefulFn) - t.TestDeleteGracefulWithValue(createFn(), expectedGrace, wasGracefulFn) - t.TestDeleteGracefulUsesZeroOnNil(createFn(), 0) - t.TestDeleteGracefulExtend(createFn(), expectedGrace, wasGracefulFn) - t.TestDeleteGracefulImmediate(createFn(), expectedGrace, wasGracefulFn) +// Test gracefully deleting an object. +func (t *Tester) TestDeleteGraceful(valid runtime.Object, setFn SetFunc, getFn GetFunc, expectedGrace int64) { + t.testDeleteGracefulHasDefault(copyOrDie(valid), setFn, getFn, expectedGrace) + t.testDeleteGracefulWithValue(copyOrDie(valid), setFn, getFn, expectedGrace) + t.testDeleteGracefulUsesZeroOnNil(copyOrDie(valid), setFn, expectedGrace) + t.testDeleteGracefulExtend(copyOrDie(valid), setFn, getFn, expectedGrace) + t.testDeleteGracefulImmediate(copyOrDie(valid), setFn, getFn, expectedGrace) } // Test getting object. -func (t *Tester) TestGet(obj runtime.Object) { - t.testGetFound(obj) - t.testGetNotFound(obj) - t.testGetMimatchedNamespace(obj) +func (t *Tester) TestGet(valid runtime.Object) { + t.testGetFound(copyOrDie(valid)) + t.testGetNotFound(copyOrDie(valid)) + t.testGetMimatchedNamespace(copyOrDie(valid)) if !t.clusterScope { - t.testGetDifferentNamespace(obj) + t.testGetDifferentNamespace(copyOrDie(valid)) } } // Test listing objects. -func (t *Tester) TestList(obj runtime.Object, assignFn AssignFunc, setRVFn SetRVFunc) { +func (t *Tester) TestList(valid runtime.Object, assignFn AssignFunc, setRVFn SetRVFunc) { t.testListError() - t.testListFound(obj, assignFn) + t.testListFound(copyOrDie(valid), assignFn) t.testListNotFound(assignFn, setRVFn) - t.testListMatchLabels(obj, assignFn) + t.testListMatchLabels(copyOrDie(valid), assignFn) } // Test watching objects. func (t *Tester) TestWatch( - obj runtime.Object, initWatchFn InitWatchFunc, injectErrFn InjectErrFunc, emitFn EmitFunc, + valid runtime.Object, initWatchFn InitWatchFunc, injectErrFn InjectErrFunc, emitFn EmitFunc, labelsPass, labelsFail []labels.Set, fieldsPass, fieldsFail []fields.Set, actions []string) { t.testWatch(initWatchFn, injectErrFn) - t.testWatchLabels(copyOrDie(obj), initWatchFn, emitFn, labelsPass, labelsFail, actions) - t.testWatchFields(copyOrDie(obj), initWatchFn, emitFn, fieldsPass, fieldsFail, actions) + t.testWatchLabels(copyOrDie(valid), initWatchFn, emitFn, labelsPass, labelsFail, actions) + t.testWatchFields(copyOrDie(valid), initWatchFn, emitFn, fieldsPass, fieldsFail, actions) } // ============================================================================= @@ -512,26 +514,40 @@ func (t *Tester) testUpdateRejectsMismatchedNamespace(obj runtime.Object, setFn // ============================================================================= // Deletion tests. 
-func (t *Tester) TestDeleteInvokesValidation(invalid ...runtime.Object) { - for i, obj := range invalid { - objectMeta := t.getObjectMetaOrFail(obj) - ctx := t.TestContext() - _, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, nil) - if !errors.IsInvalid(err) { - t.Errorf("%d: Expected to get an invalid resource error, got %v", i, err) +func (t *Tester) testDeleteNoGraceful(obj runtime.Object, setFn SetFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) { + ctx := t.TestContext() + + foo := copyOrDie(obj) + t.setObjectMeta(foo, "foo1") + if err := setFn(ctx, foo); err != nil { + t.Errorf("unexpected error: %v", err) + } + objectMeta := t.getObjectMetaOrFail(foo) + obj, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(10)) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !t.returnDeletedObject { + if status, ok := obj.(*api.Status); !ok { + t.Errorf("expected status of delete, got %v", status) + } else if status.Status != api.StatusSuccess { + t.Errorf("expected success, got: %v", status.Status) } } + + _, err = getFn(ctx, foo) + if err == nil || !isNotFoundFn(err) { + t.Errorf("unexpected error: %v", err) + } } -func (t *Tester) TestDeleteNonExist(createFn func() runtime.Object) { - existing := createFn() - objectMeta := t.getObjectMetaOrFail(existing) - context := t.TestContext() +func (t *Tester) testDeleteNonExist(obj runtime.Object) { + objectMeta := t.getObjectMetaOrFail(obj) t.withStorageError(&etcd.EtcdError{ErrorCode: tools.EtcdErrorCodeNotFound}, func() { - _, err := t.storage.(rest.GracefulDeleter).Delete(context, objectMeta.Name, nil) + _, err := t.storage.(rest.GracefulDeleter).Delete(t.TestContext(), objectMeta.Name, nil) if err == nil || !errors.IsNotFound(err) { - t.Fatalf("Unexpected error: %v", err) + t.Errorf("unexpected error: %v", err) } }) } @@ -539,100 +555,77 @@ func (t *Tester) TestDeleteNonExist(createFn func() runtime.Object) { // ============================================================================= // Graceful Deletion tests. 
-func (t *Tester) TestDeleteNoGraceful(createFn func() runtime.Object, wasGracefulFn func() bool) {
-	existing := createFn()
-	objectMeta := t.getObjectMetaOrFail(existing)
-	ctx := api.WithNamespace(t.TestContext(), objectMeta.Namespace)
-	_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(10))
-	if err != nil {
+func (t *Tester) testDeleteGracefulHasDefault(obj runtime.Object, setFn SetFunc, getFn GetFunc, expectedGrace int64) {
+	ctx := t.TestContext()
+
+	foo := copyOrDie(obj)
+	t.setObjectMeta(foo, "foo1")
+	if err := setFn(ctx, foo); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if _, err := t.storage.(rest.Getter).Get(ctx, objectMeta.Name); !errors.IsNotFound(err) {
-		t.Errorf("unexpected error, object should not exist: %v", err)
-	}
-	if wasGracefulFn() {
-		t.Errorf("resource should not support graceful delete")
-	}
-}
-
-func (t *Tester) TestDeleteGracefulHasDefault(existing runtime.Object, expectedGrace int64, wasGracefulFn func() bool) {
-	objectMeta := t.getObjectMetaOrFail(existing)
-	ctx := api.WithNamespace(t.TestContext(), objectMeta.Namespace)
+	objectMeta := t.getObjectMetaOrFail(foo)
 	_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, &api.DeleteOptions{})
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if !wasGracefulFn() {
-		t.Errorf("did not gracefully delete resource")
-		return
+	if _, err := getFn(ctx, foo); err != nil {
+		t.Fatalf("did not gracefully delete resource: %v", err)
 	}
+
 	object, err := t.storage.(rest.Getter).Get(ctx, objectMeta.Name)
 	if err != nil {
-		t.Errorf("unexpected error, object should exist: %v", err)
-		return
+		t.Fatalf("unexpected error, object should exist: %v", err)
 	}
-	objectMeta, err = api.ObjectMetaFor(object)
-	if err != nil {
-		t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, object)
-	}
-	if objectMeta.DeletionTimestamp == nil {
-		t.Errorf("did not set deletion timestamp")
-	}
-	if objectMeta.DeletionGracePeriodSeconds == nil {
-		t.Fatalf("did not set deletion grace period seconds")
-	}
-	if *objectMeta.DeletionGracePeriodSeconds != expectedGrace {
-		t.Errorf("actual grace period does not match expected: %d", *objectMeta.DeletionGracePeriodSeconds)
+	objectMeta = t.getObjectMetaOrFail(object)
+	if objectMeta.DeletionTimestamp == nil || objectMeta.DeletionGracePeriodSeconds == nil || *objectMeta.DeletionGracePeriodSeconds != expectedGrace {
+		t.Errorf("unexpected deleted meta: %#v", objectMeta)
 	}
 }
 
-func (t *Tester) TestDeleteGracefulWithValue(existing runtime.Object, expectedGrace int64, wasGracefulFn func() bool) {
-	objectMeta, err := api.ObjectMetaFor(existing)
-	if err != nil {
-		t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, existing)
-	}
+func (t *Tester) testDeleteGracefulWithValue(obj runtime.Object, setFn SetFunc, getFn GetFunc, expectedGrace int64) {
+	ctx := t.TestContext()
 
-	ctx := api.WithNamespace(t.TestContext(), objectMeta.Namespace)
-	_, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace+2))
+	foo := copyOrDie(obj)
+	t.setObjectMeta(foo, "foo2")
+	if err := setFn(ctx, foo); err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	objectMeta := t.getObjectMetaOrFail(foo)
+	_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace+2))
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if !wasGracefulFn() {
-		t.Errorf("did not gracefully delete resource")
+	if _, err := getFn(ctx, foo); err != nil {
+		t.Fatalf("did not gracefully delete resource: %v", err)
 	}
+
 	object, err := t.storage.(rest.Getter).Get(ctx, objectMeta.Name)
 	if err != nil {
 		t.Errorf("unexpected error, object should exist: %v", err)
 	}
-	objectMeta, err = api.ObjectMetaFor(object)
-	if err != nil {
-		t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, object)
-	}
-	if objectMeta.DeletionTimestamp == nil {
-		t.Errorf("did not set deletion timestamp")
-	}
-	if objectMeta.DeletionGracePeriodSeconds == nil {
-		t.Fatalf("did not set deletion grace period seconds")
-	}
-	if *objectMeta.DeletionGracePeriodSeconds != expectedGrace+2 {
-		t.Errorf("actual grace period does not match expected: %d", *objectMeta.DeletionGracePeriodSeconds)
+	objectMeta = t.getObjectMetaOrFail(object)
+	if objectMeta.DeletionTimestamp == nil || objectMeta.DeletionGracePeriodSeconds == nil || *objectMeta.DeletionGracePeriodSeconds != expectedGrace+2 {
+		t.Errorf("unexpected deleted meta: %#v", objectMeta)
 	}
 }
 
-func (t *Tester) TestDeleteGracefulExtend(existing runtime.Object, expectedGrace int64, wasGracefulFn func() bool) {
-	objectMeta, err := api.ObjectMetaFor(existing)
-	if err != nil {
-		t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, existing)
-	}
+func (t *Tester) testDeleteGracefulExtend(obj runtime.Object, setFn SetFunc, getFn GetFunc, expectedGrace int64) {
+	ctx := t.TestContext()
 
-	ctx := api.WithNamespace(t.TestContext(), objectMeta.Namespace)
-	_, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace))
+	foo := copyOrDie(obj)
+	t.setObjectMeta(foo, "foo3")
+	if err := setFn(ctx, foo); err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	objectMeta := t.getObjectMetaOrFail(foo)
+	_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace))
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if !wasGracefulFn() {
-		t.Errorf("did not gracefully delete resource")
+	if _, err := getFn(ctx, foo); err != nil {
+		t.Fatalf("did not gracefully delete resource: %v", err)
 	}
+
 	// second delete duration is ignored
 	_, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace+2))
 	if err != nil {
@@ -642,35 +635,29 @@ func (t *Tester) TestDeleteGracefulExtend(existing runtime.Object, expectedGrace
 	if err != nil {
 		t.Errorf("unexpected error, object should exist: %v", err)
 	}
-	objectMeta, err = api.ObjectMetaFor(object)
-	if err != nil {
-		t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, object)
-	}
-	if objectMeta.DeletionTimestamp == nil {
-		t.Errorf("did not set deletion timestamp")
-	}
-	if objectMeta.DeletionGracePeriodSeconds == nil {
-		t.Fatalf("did not set deletion grace period seconds")
-	}
-	if *objectMeta.DeletionGracePeriodSeconds != expectedGrace {
-		t.Errorf("actual grace period does not match expected: %d", *objectMeta.DeletionGracePeriodSeconds)
+	objectMeta = t.getObjectMetaOrFail(object)
+	if objectMeta.DeletionTimestamp == nil || objectMeta.DeletionGracePeriodSeconds == nil || *objectMeta.DeletionGracePeriodSeconds != expectedGrace {
+		t.Errorf("unexpected deleted meta: %#v", objectMeta)
 	}
 }
 
-func (t *Tester) TestDeleteGracefulImmediate(existing runtime.Object, expectedGrace int64, wasGracefulFn func() bool) {
-	objectMeta, err := api.ObjectMetaFor(existing)
-	if err != nil {
-		t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, existing)
-	}
+func (t *Tester) testDeleteGracefulImmediate(obj runtime.Object, setFn SetFunc, getFn GetFunc, expectedGrace int64) {
+	ctx := t.TestContext()
 
-	ctx := api.WithNamespace(t.TestContext(), objectMeta.Namespace)
-	_, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace))
+	foo := copyOrDie(obj)
+	t.setObjectMeta(foo, "foo4")
+	if err := setFn(ctx, foo); err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	objectMeta := t.getObjectMetaOrFail(foo)
+	_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(expectedGrace))
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if !wasGracefulFn() {
-		t.Errorf("did not gracefully delete resource")
+	if _, err := getFn(ctx, foo); err != nil {
+		t.Fatalf("did not gracefully delete resource: %v", err)
 	}
+
 	// second delete is immediate, resource is deleted
 	out, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, api.NewDeleteOptions(0))
 	if err != nil {
@@ -680,19 +667,21 @@ func (t *Tester) TestDeleteGracefulImmediate(existing runtime.Object, expectedGr
 	if !errors.IsNotFound(err) {
 		t.Errorf("unexpected error, object should be deleted immediately: %v", err)
 	}
-	objectMeta, err = api.ObjectMetaFor(out)
-	if err != nil {
-		t.Errorf("unexpected error: %v", err)
-		return
-	}
+	objectMeta = t.getObjectMetaOrFail(out)
 	if objectMeta.DeletionTimestamp == nil || objectMeta.DeletionGracePeriodSeconds == nil || *objectMeta.DeletionGracePeriodSeconds != 0 {
 		t.Errorf("unexpected deleted meta: %#v", objectMeta)
 	}
 }
 
-func (t *Tester) TestDeleteGracefulUsesZeroOnNil(existing runtime.Object, expectedGrace int64) {
-	objectMeta := t.getObjectMetaOrFail(existing)
-	ctx := api.WithNamespace(t.TestContext(), objectMeta.Namespace)
+func (t *Tester) testDeleteGracefulUsesZeroOnNil(obj runtime.Object, setFn SetFunc, expectedGrace int64) {
+	ctx := t.TestContext()
+
+	foo := copyOrDie(obj)
+	t.setObjectMeta(foo, "foo5")
+	if err := setFn(ctx, foo); err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	objectMeta := t.getObjectMetaOrFail(foo)
 	_, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.Name, nil)
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
diff --git a/pkg/registry/controller/etcd/etcd_test.go b/pkg/registry/controller/etcd/etcd_test.go
index ea66f992d8a..909e529963c 100644
--- a/pkg/registry/controller/etcd/etcd_test.go
+++ b/pkg/registry/controller/etcd/etcd_test.go
@@ -19,16 +19,12 @@ package etcd
 import (
 	"testing"
 
-	"github.com/coreos/go-etcd/etcd"
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/api/rest/resttest"
-	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/registry/registrytest"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/tools"
-	"k8s.io/kubernetes/pkg/tools/etcdtest"
 )
 
 func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) {
@@ -127,6 +123,12 @@ func TestUpdate(t *testing.T) {
 	)
 }
 
+func TestDelete(t *testing.T) {
+	storage, fakeClient := newStorage(t)
+	test := registrytest.New(t, fakeClient, storage.Etcd)
+	test.TestDelete(validNewController())
+}
+
 func TestGenerationNumber(t *testing.T) {
 	storage, _ := newStorage(t)
 	modifiedSno := *validNewController()
@@ -218,59 +220,3 @@ func TestWatch(t *testing.T) {
 	)
 }
-
-func TestEtcdDeleteController(t *testing.T) {
-	ctx := api.NewDefaultContext()
-	storage, fakeClient := newStorage(t)
-	key, _ := storage.KeyFunc(ctx, validController.Name)
-	key = etcdtest.AddPrefix(key)
-
-	fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), validController), 0)
-	obj, err := storage.Delete(ctx, validController.Name, nil)
-	if err != 
nil { - t.Errorf("unexpected error: %v", err) - } - if status, ok := obj.(*api.Status); !ok { - t.Errorf("Expected status of delete, got %#v", status) - } else if status.Status != api.StatusSuccess { - t.Errorf("Expected success, got %#v", status.Status) - } - if len(fakeClient.DeletedKeys) != 1 { - t.Errorf("Expected 1 delete, found %#v", fakeClient.DeletedKeys) - } - if fakeClient.DeletedKeys[0] != key { - t.Errorf("Unexpected key: %s, expected %s", fakeClient.DeletedKeys[0], key) - } -} - -func TestDelete(t *testing.T) { - ctx := api.NewDefaultContext() - storage, fakeClient := newStorage(t) - test := resttest.New(t, storage, fakeClient.SetError) - key, _ := storage.KeyFunc(ctx, validController.Name) - key = etcdtest.AddPrefix(key) - - createFn := func() runtime.Object { - rc := *validNewController() - rc.ResourceVersion = "1" - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), &rc), - ModifiedIndex: 1, - }, - }, - } - return &rc - } - gracefulSetFn := func() bool { - // If the controller is still around after trying to delete either the delete - // failed, or we're deleting it gracefully. - if fakeClient.Data[key].R.Node != nil { - return true - } - return false - } - - test.TestDelete(createFn, gracefulSetFn) -} diff --git a/pkg/registry/daemon/etcd/etcd_test.go b/pkg/registry/daemon/etcd/etcd_test.go index 17fb74c5a66..ff289c2ddbc 100755 --- a/pkg/registry/daemon/etcd/etcd_test.go +++ b/pkg/registry/daemon/etcd/etcd_test.go @@ -19,17 +19,13 @@ package etcd import ( "testing" - "github.com/coreos/go-etcd/etcd" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/rest/resttest" - "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" - "k8s.io/kubernetes/pkg/tools/etcdtest" ) func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { @@ -121,6 +117,12 @@ func TestUpdate(t *testing.T) { ) } +func TestDelete(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestDelete(validNewDaemon()) +} + func TestGet(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) @@ -158,59 +160,3 @@ func TestWatch(t *testing.T) { }, ) } - -func TestEtcdDeleteDaemon(t *testing.T) { - ctx := api.NewDefaultContext() - storage, fakeClient := newStorage(t) - key, err := storage.KeyFunc(ctx, validDaemon.Name) - key = etcdtest.AddPrefix(key) - - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), validDaemon), 0) - obj, err := storage.Delete(ctx, validDaemon.Name, nil) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if status, ok := obj.(*api.Status); !ok { - t.Errorf("Expected status of delete, got %#v", status) - } else if status.Status != api.StatusSuccess { - t.Errorf("Expected success, got %#v", status.Status) - } - if len(fakeClient.DeletedKeys) != 1 { - t.Errorf("Expected 1 delete, found %#v", fakeClient.DeletedKeys) - } - if fakeClient.DeletedKeys[0] != key { - t.Errorf("Unexpected key: %s, expected %s", fakeClient.DeletedKeys[0], key) - } -} - -func TestDelete(t *testing.T) { - ctx := api.NewDefaultContext() - storage, fakeClient := newStorage(t) - test := resttest.New(t, storage, fakeClient.SetError) - key, _ := storage.KeyFunc(ctx, validDaemon.Name) - key = etcdtest.AddPrefix(key) - - createFn 
:= func() runtime.Object { - dc := validNewDaemon() - dc.ResourceVersion = "1" - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), dc), - ModifiedIndex: 1, - }, - }, - } - return dc - } - gracefulSetFn := func() bool { - // If the daemon is still around after trying to delete either the delete - // failed, or we're deleting it gracefully. - if fakeClient.Data[key].R.Node != nil { - return true - } - return false - } - - test.TestDelete(createFn, gracefulSetFn) -} diff --git a/pkg/registry/deployment/etcd/etcd_test.go b/pkg/registry/deployment/etcd/etcd_test.go index 6c91b34d53b..9a3a2ac6206 100755 --- a/pkg/registry/deployment/etcd/etcd_test.go +++ b/pkg/registry/deployment/etcd/etcd_test.go @@ -19,17 +19,13 @@ package etcd import ( "testing" - "github.com/coreos/go-etcd/etcd" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/rest/resttest" - "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" - "k8s.io/kubernetes/pkg/tools/etcdtest" ) func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { @@ -122,6 +118,12 @@ func TestUpdate(t *testing.T) { ) } +func TestDelete(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestDelete(validNewDeployment()) +} + func TestGet(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) @@ -157,58 +159,3 @@ func TestWatch(t *testing.T) { }, ) } - -func TestEtcdDelete(t *testing.T) { - ctx := api.NewDefaultContext() - storage, fakeClient := newStorage(t) - key, err := storage.KeyFunc(ctx, validDeployment.Name) - key = etcdtest.AddPrefix(key) - - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), validNewDeployment()), 0) - obj, err := storage.Delete(ctx, validDeployment.Name, nil) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if status, ok := obj.(*api.Status); !ok { - t.Errorf("Expected status of delete, got %#v", status) - } else if status.Status != api.StatusSuccess { - t.Errorf("Expected success, got %#v", status.Status) - } - if len(fakeClient.DeletedKeys) != 1 { - t.Errorf("Expected 1 delete, found %#v", fakeClient.DeletedKeys) - } - if fakeClient.DeletedKeys[0] != key { - t.Errorf("Unexpected key: %s, expected %s", fakeClient.DeletedKeys[0], key) - } -} - -func TestDelete(t *testing.T) { - ctx := api.NewDefaultContext() - storage, fakeClient := newStorage(t) - test := resttest.New(t, storage, fakeClient.SetError) - key, _ := storage.KeyFunc(ctx, validDeployment.Name) - key = etcdtest.AddPrefix(key) - - createFn := func() runtime.Object { - dc := validNewDeployment() - dc.ResourceVersion = "1" - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), dc), - ModifiedIndex: 1, - }, - }, - } - return dc - } - gracefulSetFn := func() bool { - // If the deployment is still around after trying to delete either the delete - // failed, or we're deleting it gracefully. 
- if fakeClient.Data[key].R.Node != nil { - return true - } - return false - } - test.TestDelete(createFn, gracefulSetFn) -} diff --git a/pkg/registry/endpoint/etcd/etcd_test.go b/pkg/registry/endpoint/etcd/etcd_test.go index 9c77fa01146..cff15c74962 100644 --- a/pkg/registry/endpoint/etcd/etcd_test.go +++ b/pkg/registry/endpoint/etcd/etcd_test.go @@ -20,15 +20,11 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/rest/resttest" - "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" - "k8s.io/kubernetes/pkg/tools/etcdtest" - "k8s.io/kubernetes/pkg/util" - - "github.com/coreos/go-etcd/etcd" ) func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { @@ -93,31 +89,9 @@ func TestUpdate(t *testing.T) { } func TestDelete(t *testing.T) { - ctx := api.NewDefaultContext() storage, fakeClient := newStorage(t) - test := resttest.New(t, storage, fakeClient.SetError) - - endpoints := validChangedEndpoints() - key, _ := storage.KeyFunc(ctx, endpoints.Name) - key = etcdtest.AddPrefix(key) - createFn := func() runtime.Object { - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), endpoints), - ModifiedIndex: 1, - }, - }, - } - return endpoints - } - gracefulSetFn := func() bool { - if fakeClient.Data[key].R.Node == nil { - return false - } - return fakeClient.Data[key].R.Node.TTL == 30 - } - test.TestDelete(createFn, gracefulSetFn) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestDelete(validNewEndpoints()) } func TestGet(t *testing.T) { @@ -132,43 +106,25 @@ func TestList(t *testing.T) { test.TestList(validNewEndpoints()) } -func TestEndpointsDecode(t *testing.T) { - storage, _ := newStorage(t) - expected := validNewEndpoints() - body, err := testapi.Codec().Encode(expected) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - actual := storage.New() - if err := testapi.Codec().DecodeInto(body, actual); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if !api.Semantic.DeepEqual(expected, actual) { - t.Errorf("mismatch: %s", util.ObjectDiff(expected, actual)) - } -} - -func TestDeleteEndpoints(t *testing.T) { - ctx := api.NewDefaultContext() +func TestWatch(t *testing.T) { storage, fakeClient := newStorage(t) - endpoints := validNewEndpoints() - name := endpoints.Name - key, _ := storage.KeyFunc(ctx, name) - key = etcdtest.AddPrefix(key) - fakeClient.ChangeIndex = 1 - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), endpoints), - ModifiedIndex: 1, - CreatedIndex: 1, - }, + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestWatch( + validNewEndpoints(), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"foo": "bar"}, }, - } - _, err := storage.Delete(ctx, name, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + // matching fields + []fields.Set{ + {"metadata.name": "foo"}, + }, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + {"name": "foo"}, + }, + ) } diff --git a/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go b/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go index b0f33a7796f..826d52dd3ea 100644 --- a/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go +++ 
b/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go @@ -21,17 +21,14 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/rest/resttest" - "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/expapi" // Ensure that expapi/v1 package is initialized. _ "k8s.io/kubernetes/pkg/expapi/v1" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" - "k8s.io/kubernetes/pkg/tools/etcdtest" - - "github.com/coreos/go-etcd/etcd" ) func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { @@ -85,30 +82,9 @@ func TestUpdate(t *testing.T) { } func TestDelete(t *testing.T) { - ctx := api.NewDefaultContext() storage, fakeClient := newStorage(t) - test := resttest.New(t, storage, fakeClient.SetError) - autoscaler := validNewHorizontalPodAutoscaler("foo2") - key, _ := storage.KeyFunc(ctx, "foo2") - key = etcdtest.AddPrefix(key) - createFn := func() runtime.Object { - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), autoscaler), - ModifiedIndex: 1, - }, - }, - } - return autoscaler - } - gracefulSetFn := func() bool { - if fakeClient.Data[key].R.Node == nil { - return false - } - return fakeClient.Data[key].R.Node.TTL == 30 - } - test.TestDelete(createFn, gracefulSetFn) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestDelete(validNewHorizontalPodAutoscaler("foo")) } func TestGet(t *testing.T) { @@ -122,3 +98,24 @@ func TestList(t *testing.T) { test := registrytest.New(t, fakeClient, storage.Etcd) test.TestList(validNewHorizontalPodAutoscaler("foo")) } + +func TestWatch(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestWatch( + validNewHorizontalPodAutoscaler("foo"), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"foo": "bar"}, + }, + // matching fields + []fields.Set{}, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + {"name": "foo"}, + }, + ) +} diff --git a/pkg/registry/horizontalpodautoscaler/strategy.go b/pkg/registry/horizontalpodautoscaler/strategy.go index 3a5487eb4ec..e0de495bcdf 100644 --- a/pkg/registry/horizontalpodautoscaler/strategy.go +++ b/pkg/registry/horizontalpodautoscaler/strategy.go @@ -74,13 +74,20 @@ func (autoscalerStrategy) AllowUnconditionalUpdate() bool { return true } -// MatchAutoscaler returns a generic matcher for a given label and field selector. 
-func MatchAutoscaler(label labels.Selector, field fields.Selector) generic.Matcher { - return generic.MatcherFunc(func(obj runtime.Object) (bool, error) { - autoscaler, ok := obj.(*expapi.HorizontalPodAutoscaler) - if !ok { - return false, fmt.Errorf("not a horizontal pod autoscaler") - } - return label.Matches(labels.Set(autoscaler.Labels)), nil - }) +func AutoscalerToSelectableFields(limitRange *expapi.HorizontalPodAutoscaler) fields.Set { + return fields.Set{} +} + +func MatchAutoscaler(label labels.Selector, field fields.Selector) generic.Matcher { + return &generic.SelectionPredicate{ + Label: label, + Field: field, + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + hpa, ok := obj.(*expapi.HorizontalPodAutoscaler) + if !ok { + return nil, nil, fmt.Errorf("given object is not a horizontal pod autoscaler.") + } + return labels.Set(hpa.ObjectMeta.Labels), AutoscalerToSelectableFields(hpa), nil + }, + } } diff --git a/pkg/registry/limitrange/etcd/etcd_test.go b/pkg/registry/limitrange/etcd/etcd_test.go index a4a3ca09a38..c08e627cc31 100644 --- a/pkg/registry/limitrange/etcd/etcd_test.go +++ b/pkg/registry/limitrange/etcd/etcd_test.go @@ -21,6 +21,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" @@ -97,6 +99,12 @@ func TestUpdate(t *testing.T) { ) } +func TestDelete(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestDelete(validNewLimitRange()) +} + func TestGet(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) @@ -108,3 +116,24 @@ func TestList(t *testing.T) { test := registrytest.New(t, fakeClient, storage.Etcd) test.TestList(validNewLimitRange()) } + +func TestWatch(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestWatch( + validNewLimitRange(), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"foo": "bar"}, + }, + // matching fields + []fields.Set{}, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + {"name": "foo"}, + }, + ) +} diff --git a/pkg/registry/minion/etcd/etcd_test.go b/pkg/registry/minion/etcd/etcd_test.go index 721883794fe..e89051eb5c7 100644 --- a/pkg/registry/minion/etcd/etcd_test.go +++ b/pkg/registry/minion/etcd/etcd_test.go @@ -22,16 +22,11 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/rest/resttest" - "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" - "k8s.io/kubernetes/pkg/tools/etcdtest" - - "github.com/coreos/go-etcd/etcd" ) type fakeConnectionInfoGetter struct { @@ -67,12 +62,6 @@ func validNewNode() *api.Node { } } -func validChangedNode() *api.Node { - node := validNewNode() - node.ResourceVersion = "1" - return node -} - func TestCreate(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd).ClusterScope() @@ -104,31 +93,9 @@ func TestUpdate(t *testing.T) { } func TestDelete(t *testing.T) { - ctx := api.NewContext() storage, fakeClient := newStorage(t) - test := resttest.New(t, storage, fakeClient.SetError).ClusterScope() - - 
node := validChangedNode() - key, _ := storage.KeyFunc(ctx, node.Name) - key = etcdtest.AddPrefix(key) - createFn := func() runtime.Object { - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), node), - ModifiedIndex: 1, - }, - }, - } - return node - } - gracefulSetFn := func() bool { - if fakeClient.Data[key].R.Node == nil { - return false - } - return fakeClient.Data[key].R.Node.TTL == 30 - } - test.TestDelete(createFn, gracefulSetFn) + test := registrytest.New(t, fakeClient, storage.Etcd).ClusterScope() + test.TestDelete(validNewNode()) } func TestGet(t *testing.T) { @@ -143,7 +110,7 @@ func TestList(t *testing.T) { test.TestList(validNewNode()) } -func TestWatchNodes(t *testing.T) { +func TestWatch(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd).ClusterScope() test.TestWatch( @@ -167,23 +134,3 @@ func TestWatchNodes(t *testing.T) { }, ) } - -func TestEtcdDeleteNode(t *testing.T) { - ctx := api.NewContext() - storage, fakeClient := newStorage(t) - node := validNewNode() - key, _ := storage.KeyFunc(ctx, node.Name) - key = etcdtest.AddPrefix(key) - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), node), 0) - _, err := storage.Delete(ctx, node.Name, nil) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - if len(fakeClient.DeletedKeys) != 1 { - t.Errorf("Expected 1 delete, found %#v", fakeClient.DeletedKeys) - } - if fakeClient.DeletedKeys[0] != key { - t.Errorf("Unexpected key: %s, expected %s", fakeClient.DeletedKeys[0], key) - } -} diff --git a/pkg/registry/namespace/etcd/etcd_test.go b/pkg/registry/namespace/etcd/etcd_test.go index a2fb036b88a..4112a20e34c 100644 --- a/pkg/registry/namespace/etcd/etcd_test.go +++ b/pkg/registry/namespace/etcd/etcd_test.go @@ -21,6 +21,8 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" @@ -59,15 +61,6 @@ func TestCreate(t *testing.T) { ) } -func expectNamespace(t *testing.T, out runtime.Object) (*api.Namespace, bool) { - namespace, ok := out.(*api.Namespace) - if !ok || namespace == nil { - t.Errorf("Expected an api.Namespace object, was %#v", out) - return nil, false - } - return namespace, true -} - func TestCreateSetsFields(t *testing.T) { storage, fakeClient := newStorage(t) namespace := validNewNamespace() @@ -93,24 +86,10 @@ func TestCreateSetsFields(t *testing.T) { } } -func TestNamespaceDecode(t *testing.T) { - storage, _ := newStorage(t) - expected := validNewNamespace() - expected.Status.Phase = api.NamespaceActive - expected.Spec.Finalizers = []api.FinalizerName{api.FinalizerKubernetes} - body, err := testapi.Codec().Encode(expected) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - actual := storage.New() - if err := testapi.Codec().DecodeInto(body, actual); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if !api.Semantic.DeepEqual(expected, actual) { - t.Errorf("mismatch: %s", util.ObjectDiff(expected, actual)) - } +func TestDelete(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd).ClusterScope().ReturnDeletedObject() + test.TestDelete(validNewNamespace()) } func TestGet(t *testing.T) { @@ -125,31 +104,27 @@ func TestList(t *testing.T) { test.TestList(validNewNamespace()) } -func 
TestDeleteNamespace(t *testing.T) { +func TestWatch(t *testing.T) { storage, fakeClient := newStorage(t) - fakeClient.ChangeIndex = 1 - ctx := api.NewContext() - key, err := storage.Etcd.KeyFunc(ctx, "foo") - key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), &api.Namespace{ - ObjectMeta: api.ObjectMeta{ - Name: "foo", - }, - Status: api.NamespaceStatus{Phase: api.NamespaceActive}, - }), - ModifiedIndex: 1, - CreatedIndex: 1, - }, + test := registrytest.New(t, fakeClient, storage.Etcd).ClusterScope() + test.TestWatch( + validNewNamespace(), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"foo": "bar"}, }, - } - _, err = storage.Delete(api.NewContext(), "foo", nil) - - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + // matching fields + []fields.Set{ + {"metadata.name": "foo"}, + {"name": "foo"}, + }, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + }, + ) } func TestDeleteNamespaceWithIncompleteFinalizers(t *testing.T) { diff --git a/pkg/registry/namespace/strategy.go b/pkg/registry/namespace/strategy.go index 9f81bf2e45f..424684f28c6 100644 --- a/pkg/registry/namespace/strategy.go +++ b/pkg/registry/namespace/strategy.go @@ -143,10 +143,11 @@ func MatchNamespace(label labels.Selector, field fields.Selector) generic.Matche } // NamespaceToSelectableFields returns a label set that represents the object -// TODO: fields are not labels, and the validation rules for them do not apply. func NamespaceToSelectableFields(namespace *api.Namespace) labels.Set { return labels.Set{ - "name": namespace.Name, - "status.phase": string(namespace.Status.Phase), + "metadata.name": namespace.Name, + "status.phase": string(namespace.Status.Phase), + // This is a bug, but we need to support it for backward compatibility. 
+ "name": namespace.Name, } } diff --git a/pkg/registry/persistentvolume/etcd/etcd_test.go b/pkg/registry/persistentvolume/etcd/etcd_test.go index 088e7ce2832..8bc00c5a2c9 100644 --- a/pkg/registry/persistentvolume/etcd/etcd_test.go +++ b/pkg/registry/persistentvolume/etcd/etcd_test.go @@ -21,15 +21,14 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/rest/resttest" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/tools/etcdtest" "k8s.io/kubernetes/pkg/util" - - "github.com/coreos/go-etcd/etcd" ) func newStorage(t *testing.T) (*REST, *StatusREST, *tools.FakeEtcdClient) { @@ -101,31 +100,9 @@ func TestUpdate(t *testing.T) { } func TestDelete(t *testing.T) { - ctx := api.NewContext() storage, _, fakeClient := newStorage(t) - test := resttest.New(t, storage, fakeClient.SetError).ClusterScope() - - pv := validChangedPersistentVolume() - key, _ := storage.KeyFunc(ctx, pv.Name) - key = etcdtest.AddPrefix(key) - createFn := func() runtime.Object { - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), pv), - ModifiedIndex: 1, - }, - }, - } - return pv - } - gracefulSetFn := func() bool { - if fakeClient.Data[key].R.Node == nil { - return false - } - return fakeClient.Data[key].R.Node.TTL == 30 - } - test.TestDelete(createFn, gracefulSetFn) + test := registrytest.New(t, fakeClient, storage.Etcd).ClusterScope().ReturnDeletedObject() + test.TestDelete(validNewPersistentVolume("foo")) } func TestGet(t *testing.T) { @@ -140,48 +117,30 @@ func TestList(t *testing.T) { test.TestList(validNewPersistentVolume("foo")) } -func TestPersistentVolumesDecode(t *testing.T) { - storage, _, _ := newStorage(t) - expected := validNewPersistentVolume("foo") - body, err := testapi.Codec().Encode(expected) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - actual := storage.New() - if err := testapi.Codec().DecodeInto(body, actual); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if !api.Semantic.DeepEqual(expected, actual) { - t.Errorf("mismatch: %s", util.ObjectDiff(expected, actual)) - } -} - -func TestDeletePersistentVolumes(t *testing.T) { - ctx := api.NewContext() +func TestWatch(t *testing.T) { storage, _, fakeClient := newStorage(t) - persistentVolume := validNewPersistentVolume("foo") - name := persistentVolume.Name - key, _ := storage.KeyFunc(ctx, name) - key = etcdtest.AddPrefix(key) - fakeClient.ChangeIndex = 1 - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), persistentVolume), - ModifiedIndex: 1, - CreatedIndex: 1, - }, + test := registrytest.New(t, fakeClient, storage.Etcd).ClusterScope() + test.TestWatch( + validNewPersistentVolume("foo"), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"foo": "bar"}, }, - } - _, err := storage.Delete(ctx, name, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + // matching fields + []fields.Set{ + {"metadata.name": "foo"}, + {"name": "foo"}, + }, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + }, + ) } -func TestEtcdUpdateStatus(t *testing.T) { +func TestUpdateStatus(t *testing.T) { storage, statusStorage, fakeClient := newStorage(t) fakeClient.TestIndex = 
true diff --git a/pkg/registry/persistentvolume/strategy.go b/pkg/registry/persistentvolume/strategy.go index d7056cedca8..2def1d0d8b2 100644 --- a/pkg/registry/persistentvolume/strategy.go +++ b/pkg/registry/persistentvolume/strategy.go @@ -103,9 +103,10 @@ func MatchPersistentVolumes(label labels.Selector, field fields.Selector) generi } // PersistentVolumeToSelectableFields returns a label set that represents the object -// TODO: fields are not labels, and the validation rules for them do not apply. func PersistentVolumeToSelectableFields(persistentvolume *api.PersistentVolume) labels.Set { return labels.Set{ + "metadata.name": persistentvolume.Name, + // This is a bug, but we need to support it for backward compatibility. "name": persistentvolume.Name, } } diff --git a/pkg/registry/persistentvolumeclaim/etcd/etcd_test.go b/pkg/registry/persistentvolumeclaim/etcd/etcd_test.go index 66f284f2aef..049b2d128c5 100644 --- a/pkg/registry/persistentvolumeclaim/etcd/etcd_test.go +++ b/pkg/registry/persistentvolumeclaim/etcd/etcd_test.go @@ -21,15 +21,14 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/api/rest/resttest" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/tools/etcdtest" "k8s.io/kubernetes/pkg/util" - - "github.com/coreos/go-etcd/etcd" ) func newStorage(t *testing.T) (*REST, *StatusREST, *tools.FakeEtcdClient) { @@ -59,12 +58,6 @@ func validNewPersistentVolumeClaim(name, ns string) *api.PersistentVolumeClaim { return pv } -func validChangedPersistentVolumeClaim() *api.PersistentVolumeClaim { - pv := validNewPersistentVolumeClaim("foo", api.NamespaceDefault) - pv.ResourceVersion = "1" - return pv -} - func TestCreate(t *testing.T) { storage, _, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) @@ -100,31 +93,9 @@ func TestUpdate(t *testing.T) { } func TestDelete(t *testing.T) { - ctx := api.NewDefaultContext() storage, _, fakeClient := newStorage(t) - test := resttest.New(t, storage, fakeClient.SetError) - - pv := validChangedPersistentVolumeClaim() - key, _ := storage.KeyFunc(ctx, pv.Name) - key = etcdtest.AddPrefix(key) - createFn := func() runtime.Object { - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), pv), - ModifiedIndex: 1, - }, - }, - } - return pv - } - gracefulSetFn := func() bool { - if fakeClient.Data[key].R.Node == nil { - return false - } - return fakeClient.Data[key].R.Node.TTL == 30 - } - test.TestDelete(createFn, gracefulSetFn) + test := registrytest.New(t, fakeClient, storage.Etcd).ReturnDeletedObject() + test.TestDelete(validNewPersistentVolumeClaim("foo", api.NamespaceDefault)) } func TestGet(t *testing.T) { @@ -139,78 +110,30 @@ func TestList(t *testing.T) { test.TestList(validNewPersistentVolumeClaim("foo", api.NamespaceDefault)) } -func TestPersistentVolumeClaimsDecode(t *testing.T) { - storage, _, _ := newStorage(t) - expected := validNewPersistentVolumeClaim("foo", api.NamespaceDefault) - body, err := testapi.Codec().Encode(expected) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - actual := storage.New() - if err := testapi.Codec().DecodeInto(body, actual); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if !api.Semantic.DeepEqual(expected, actual) { - t.Errorf("mismatch: 
%s", util.ObjectDiff(expected, actual)) - } -} - -func TestEtcdUpdatePersistentVolumeClaims(t *testing.T) { - ctx := api.NewDefaultContext() +func TestWatch(t *testing.T) { storage, _, fakeClient := newStorage(t) - persistentVolume := validChangedPersistentVolumeClaim() - - key, _ := storage.KeyFunc(ctx, "foo") - key = etcdtest.AddPrefix(key) - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), validNewPersistentVolumeClaim("foo", api.NamespaceDefault)), 0) - - _, _, err := storage.Update(ctx, persistentVolume) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - response, err := fakeClient.Get(key, false, false) - if err != nil { - t.Fatalf("Unexpected error %v", err) - } - var persistentVolumeOut api.PersistentVolumeClaim - err = testapi.Codec().DecodeInto([]byte(response.Node.Value), &persistentVolumeOut) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - persistentVolume.ObjectMeta.ResourceVersion = persistentVolumeOut.ObjectMeta.ResourceVersion - if !api.Semantic.DeepEqual(persistentVolume, &persistentVolumeOut) { - t.Errorf("Unexpected persistentVolume: %#v, expected %#v", &persistentVolumeOut, persistentVolume) - } -} - -func TestDeletePersistentVolumeClaims(t *testing.T) { - ctx := api.NewDefaultContext() - storage, _, fakeClient := newStorage(t) - pvClaim := validNewPersistentVolumeClaim("foo", api.NamespaceDefault) - name := pvClaim.Name - key, _ := storage.KeyFunc(ctx, name) - key = etcdtest.AddPrefix(key) - fakeClient.ChangeIndex = 1 - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), pvClaim), - ModifiedIndex: 1, - CreatedIndex: 1, - }, + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestWatch( + validNewPersistentVolumeClaim("foo", api.NamespaceDefault), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"foo": "bar"}, }, - } - _, err := storage.Delete(ctx, name, nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + // matching fields + []fields.Set{ + {"metadata.name": "foo"}, + {"name": "foo"}, + }, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + }, + ) } -func TestEtcdUpdateStatus(t *testing.T) { +func TestUpdateStatus(t *testing.T) { storage, statusStorage, fakeClient := newStorage(t) ctx := api.NewDefaultContext() fakeClient.TestIndex = true diff --git a/pkg/registry/persistentvolumeclaim/strategy.go b/pkg/registry/persistentvolumeclaim/strategy.go index 193b57a016e..a6185d95d3f 100644 --- a/pkg/registry/persistentvolumeclaim/strategy.go +++ b/pkg/registry/persistentvolumeclaim/strategy.go @@ -103,9 +103,10 @@ func MatchPersistentVolumeClaim(label labels.Selector, field fields.Selector) ge } // PersistentVolumeClaimToSelectableFields returns a label set that represents the object -// TODO: fields are not labels, and the validation rules for them do not apply. func PersistentVolumeClaimToSelectableFields(persistentvolumeclaim *api.PersistentVolumeClaim) labels.Set { return labels.Set{ + "metadata.name": persistentvolumeclaim.Name, + // This is a bug, but we need to support it for backward compatibility. 
"name": persistentvolumeclaim.Name, } } diff --git a/pkg/registry/pod/etcd/etcd_test.go b/pkg/registry/pod/etcd/etcd_test.go index 78d3e890b6b..2559a0e38c8 100644 --- a/pkg/registry/pod/etcd/etcd_test.go +++ b/pkg/registry/pod/etcd/etcd_test.go @@ -25,7 +25,6 @@ import ( "k8s.io/kubernetes/pkg/api/errors" etcderrors "k8s.io/kubernetes/pkg/api/errors/etcd" "k8s.io/kubernetes/pkg/api/rest" - "k8s.io/kubernetes/pkg/api/rest/resttest" "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" @@ -122,50 +121,12 @@ func TestUpdate(t *testing.T) { func TestDelete(t *testing.T) { storage, _, _, fakeClient := newStorage(t) - ctx := api.NewDefaultContext() - key, _ := storage.Etcd.KeyFunc(ctx, "foo") - key = etcdtest.AddPrefix(key) - test := resttest.New(t, storage, fakeClient.SetError) + test := registrytest.New(t, fakeClient, storage.Etcd).ReturnDeletedObject() + test.TestDelete(validNewPod()) - expectedNode := "some-node" - createFn := func() runtime.Object { - pod := validChangedPod() - pod.Spec.NodeName = expectedNode - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), pod), - ModifiedIndex: 1, - }, - }, - } - return pod - } - gracefulSetFn := func() bool { - if fakeClient.Data[key].R.Node == nil { - return false - } - obj, err := testapi.Codec().Decode([]byte(fakeClient.Data[key].R.Node.Value)) - if err != nil { - return false - } - pod := obj.(*api.Pod) - t.Logf("found object %#v", pod.ObjectMeta) - return pod.DeletionTimestamp != nil && pod.DeletionGracePeriodSeconds != nil && *pod.DeletionGracePeriodSeconds != 0 - } - test.TestDeleteGraceful(createFn, 30, gracefulSetFn) - - expectedNode = "" - test.TestDelete(createFn, gracefulSetFn) -} - -func expectPod(t *testing.T, out runtime.Object) (*api.Pod, bool) { - pod, ok := out.(*api.Pod) - if !ok || pod == nil { - t.Errorf("Expected an api.Pod object, was %#v", out) - return nil, false - } - return pod, true + scheduledPod := validNewPod() + scheduledPod.Spec.NodeName = "some-node" + test.TestDeleteGraceful(scheduledPod, 30) } func TestCreateRegistryError(t *testing.T) { @@ -200,24 +161,6 @@ func TestCreateSetsFields(t *testing.T) { } } -func TestPodDecode(t *testing.T) { - storage, _, _, _ := newStorage(t) - expected := validNewPod() - body, err := testapi.Codec().Encode(expected) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - actual := storage.New() - if err := testapi.Codec().DecodeInto(body, actual); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if !api.Semantic.DeepEqual(expected, actual) { - t.Errorf("mismatch: %s", util.ObjectDiff(expected, actual)) - } -} - func TestResourceLocation(t *testing.T) { expectedIP := "1.2.3.4" testCases := []struct { @@ -854,49 +797,3 @@ func TestEtcdUpdateStatus(t *testing.T) { t.Errorf("unexpected object: %s", util.ObjectDiff(&expected, podOut)) } } - -func TestEtcdDeletePod(t *testing.T) { - storage, _, _, fakeClient := newStorage(t) - ctx := api.NewDefaultContext() - fakeClient.TestIndex = true - - key, _ := storage.KeyFunc(ctx, "foo") - key = etcdtest.AddPrefix(key) - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "foo"}, - Spec: api.PodSpec{NodeName: "machine"}, - }), 0) - _, err := storage.Delete(ctx, "foo", api.NewDeleteOptions(0)) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - if len(fakeClient.DeletedKeys) != 1 { - t.Errorf("Expected 1 delete, found %#v", 
fakeClient.DeletedKeys) - } else if fakeClient.DeletedKeys[0] != key { - t.Errorf("Unexpected key: %s, expected %s", fakeClient.DeletedKeys[0], key) - } -} - -func TestEtcdDeletePodMultipleContainers(t *testing.T) { - storage, _, _, fakeClient := newStorage(t) - ctx := api.NewDefaultContext() - fakeClient.TestIndex = true - key, _ := storage.KeyFunc(ctx, "foo") - key = etcdtest.AddPrefix(key) - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "foo"}, - Spec: api.PodSpec{NodeName: "machine"}, - }), 0) - _, err := storage.Delete(ctx, "foo", api.NewDeleteOptions(0)) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - if len(fakeClient.DeletedKeys) != 1 { - t.Errorf("Expected 1 delete, found %#v", fakeClient.DeletedKeys) - } - if fakeClient.DeletedKeys[0] != key { - t.Errorf("Unexpected key: %s, expected %s", fakeClient.DeletedKeys[0], key) - } -} diff --git a/pkg/registry/podtemplate/etcd/etcd_test.go b/pkg/registry/podtemplate/etcd/etcd_test.go index ba32c04b2c9..8fc83cc8e3b 100644 --- a/pkg/registry/podtemplate/etcd/etcd_test.go +++ b/pkg/registry/podtemplate/etcd/etcd_test.go @@ -20,6 +20,8 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" @@ -87,6 +89,12 @@ func TestUpdate(t *testing.T) { ) } +func TestDelete(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd).ReturnDeletedObject() + test.TestDelete(validNewPodTemplate("foo")) +} + func TestGet(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) @@ -98,3 +106,24 @@ func TestList(t *testing.T) { test := registrytest.New(t, fakeClient, storage.Etcd) test.TestList(validNewPodTemplate("foo")) } + +func TestWatch(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestWatch( + validNewPodTemplate("foo"), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"foo": "bar"}, + }, + // matching fields + []fields.Set{}, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + {"name": "foo"}, + }, + ) +} diff --git a/pkg/registry/podtemplate/strategy.go b/pkg/registry/podtemplate/strategy.go index d28300e9861..a3868254db1 100644 --- a/pkg/registry/podtemplate/strategy.go +++ b/pkg/registry/podtemplate/strategy.go @@ -73,13 +73,20 @@ func (podTemplateStrategy) AllowUnconditionalUpdate() bool { return true } -// MatchPodTemplate returns a generic matcher for a given label and field selector. 
-func MatchPodTemplate(label labels.Selector, field fields.Selector) generic.Matcher { - return generic.MatcherFunc(func(obj runtime.Object) (bool, error) { - podObj, ok := obj.(*api.PodTemplate) - if !ok { - return false, fmt.Errorf("not a pod template") - } - return label.Matches(labels.Set(podObj.Labels)), nil - }) +func PodTemplateToSelectableFields(podTemplate *api.PodTemplate) fields.Set { + return fields.Set{} +} + +func MatchPodTemplate(label labels.Selector, field fields.Selector) generic.Matcher { + return &generic.SelectionPredicate{ + Label: label, + Field: field, + GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { + pt, ok := obj.(*api.PodTemplate) + if !ok { + return nil, nil, fmt.Errorf("given object is not a pod template.") + } + return labels.Set(pt.ObjectMeta.Labels), PodTemplateToSelectableFields(pt), nil + }, + } } diff --git a/pkg/registry/registrytest/etcd.go b/pkg/registry/registrytest/etcd.go index 60b5f8465aa..95f8aecc19b 100644 --- a/pkg/registry/registrytest/etcd.go +++ b/pkg/registry/registrytest/etcd.go @@ -75,6 +75,11 @@ func (t *Tester) GeneratesName() *Tester { return t } +func (t *Tester) ReturnDeletedObject() *Tester { + t.tester = t.tester.ReturnDeletedObject() + return t +} + func (t *Tester) TestCreate(valid runtime.Object, invalid ...runtime.Object) { t.tester.TestCreate( valid, @@ -99,6 +104,24 @@ func (t *Tester) TestUpdate(valid runtime.Object, validUpdateFunc UpdateFunc, in ) } +func (t *Tester) TestDelete(valid runtime.Object) { + t.tester.TestDelete( + valid, + t.setObject, + t.getObject, + isNotFoundEtcdError, + ) +} + +func (t *Tester) TestDeleteGraceful(valid runtime.Object, expectedGrace int64) { + t.tester.TestDeleteGraceful( + valid, + t.setObject, + t.getObject, + expectedGrace, + ) +} + func (t *Tester) TestGet(valid runtime.Object) { t.tester.TestGet(valid) } @@ -219,3 +242,11 @@ func (t *Tester) emitObject(obj runtime.Object, action string) error { } return nil } + +func isNotFoundEtcdError(err error) bool { + etcdError, ok := err.(*etcd.EtcdError) + if !ok { + return false + } + return etcdError.ErrorCode == tools.EtcdErrorCodeNotFound +} diff --git a/pkg/registry/resourcequota/etcd/etcd_test.go b/pkg/registry/resourcequota/etcd/etcd_test.go index b7c92d1170c..6999bd13623 100644 --- a/pkg/registry/resourcequota/etcd/etcd_test.go +++ b/pkg/registry/resourcequota/etcd/etcd_test.go @@ -30,8 +30,6 @@ import ( "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/tools/etcdtest" "k8s.io/kubernetes/pkg/util" - - "github.com/coreos/go-etcd/etcd" ) func newStorage(t *testing.T) (*REST, *StatusREST, *tools.FakeEtcdClient) { @@ -74,15 +72,6 @@ func TestCreate(t *testing.T) { ) } -func expectResourceQuota(t *testing.T, out runtime.Object) (*api.ResourceQuota, bool) { - resourcequota, ok := out.(*api.ResourceQuota) - if !ok || resourcequota == nil { - t.Errorf("Expected an api.ResourceQuota object, was %#v", out) - return nil, false - } - return resourcequota, true -} - func TestCreateRegistryError(t *testing.T) { storage, _, fakeClient := newStorage(t) fakeClient.Err = fmt.Errorf("test error") @@ -116,49 +105,10 @@ func TestCreateSetsFields(t *testing.T) { } } -func TestResourceQuotaDecode(t *testing.T) { - storage, _, _ := newStorage(t) - expected := validNewResourceQuota() - body, err := testapi.Codec().Encode(expected) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - actual := storage.New() - if err := testapi.Codec().DecodeInto(body, actual); err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if 
!api.Semantic.DeepEqual(expected, actual) { - t.Errorf("mismatch: %s", util.ObjectDiff(expected, actual)) - } -} - -func TestDeleteResourceQuota(t *testing.T) { +func TestDelete(t *testing.T) { storage, _, fakeClient := newStorage(t) - fakeClient.ChangeIndex = 1 - ctx := api.NewDefaultContext() - key, _ := storage.Etcd.KeyFunc(ctx, "foo") - key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), &api.ResourceQuota{ - ObjectMeta: api.ObjectMeta{ - Name: "foo", - Namespace: api.NamespaceDefault, - }, - Status: api.ResourceQuotaStatus{}, - }), - ModifiedIndex: 1, - CreatedIndex: 1, - }, - }, - } - _, err := storage.Delete(api.NewDefaultContext(), "foo", nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + test := registrytest.New(t, fakeClient, storage.Etcd).ReturnDeletedObject() + test.TestDelete(validNewResourceQuota()) } func TestGet(t *testing.T) { @@ -195,7 +145,7 @@ func TestWatch(t *testing.T) { ) } -func TestEtcdUpdateStatus(t *testing.T) { +func TestUpdateStatus(t *testing.T) { storage, status, fakeClient := newStorage(t) ctx := api.NewDefaultContext() diff --git a/pkg/registry/secret/etcd/etcd_test.go b/pkg/registry/secret/etcd/etcd_test.go index 469068f5e80..e7506c12123 100644 --- a/pkg/registry/secret/etcd/etcd_test.go +++ b/pkg/registry/secret/etcd/etcd_test.go @@ -20,6 +20,8 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" @@ -78,6 +80,12 @@ func TestUpdate(t *testing.T) { ) } +func TestDelete(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestDelete(validNewSecret("foo")) +} + func TestGet(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) @@ -89,3 +97,24 @@ func TestList(t *testing.T) { test := registrytest.New(t, fakeClient, storage.Etcd) test.TestList(validNewSecret("foo")) } + +func TestWatch(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestWatch( + validNewSecret("foo"), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"foo": "bar"}, + }, + // matching fields + []fields.Set{}, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + {"name": "foo"}, + }, + ) +} diff --git a/pkg/registry/service/etcd/etcd_test.go b/pkg/registry/service/etcd/etcd_test.go index 4e3a6120eb5..580d0a093a6 100644 --- a/pkg/registry/service/etcd/etcd_test.go +++ b/pkg/registry/service/etcd/etcd_test.go @@ -106,6 +106,12 @@ func TestUpdate(t *testing.T) { ) } +func TestDelete(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd).AllowCreateOnUpdate() + test.TestDelete(validService()) +} + func TestGet(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd).AllowCreateOnUpdate() diff --git a/pkg/registry/serviceaccount/etcd/etcd_test.go b/pkg/registry/serviceaccount/etcd/etcd_test.go index 43a9eecdb63..b102f965a30 100644 --- a/pkg/registry/serviceaccount/etcd/etcd_test.go +++ b/pkg/registry/serviceaccount/etcd/etcd_test.go @@ -20,6 +20,8 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/fields" + 
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" @@ -71,6 +73,12 @@ func TestUpdate(t *testing.T) { ) } +func TestDelete(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd).ReturnDeletedObject() + test.TestDelete(validNewServiceAccount("foo")) +} + func TestGet(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) @@ -82,3 +90,26 @@ func TestList(t *testing.T) { test := registrytest.New(t, fakeClient, storage.Etcd) test.TestList(validNewServiceAccount("foo")) } + +func TestWatch(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestWatch( + validNewServiceAccount("foo"), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"foo": "bar"}, + }, + // matching fields + []fields.Set{ + {"metadata.name": "foo"}, + }, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + {"name": "foo"}, + }, + ) +} diff --git a/pkg/registry/thirdpartyresource/etcd/etcd_test.go b/pkg/registry/thirdpartyresource/etcd/etcd_test.go index e95b0cd20cd..0f2cea31d53 100644 --- a/pkg/registry/thirdpartyresource/etcd/etcd_test.go +++ b/pkg/registry/thirdpartyresource/etcd/etcd_test.go @@ -20,17 +20,14 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/rest/resttest" - "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/expapi" // Ensure that expapi/v1 package is initialized. _ "k8s.io/kubernetes/pkg/expapi/v1" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" - "k8s.io/kubernetes/pkg/tools/etcdtest" - - "github.com/coreos/go-etcd/etcd" ) func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { @@ -81,30 +78,9 @@ func TestUpdate(t *testing.T) { } func TestDelete(t *testing.T) { - ctx := api.NewDefaultContext() storage, fakeClient := newStorage(t) - test := resttest.New(t, storage, fakeClient.SetError) - rsrc := validNewThirdPartyResource("foo2") - key, _ := storage.KeyFunc(ctx, "foo2") - key = etcdtest.AddPrefix(key) - createFn := func() runtime.Object { - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), rsrc), - ModifiedIndex: 1, - }, - }, - } - return rsrc - } - gracefulSetFn := func() bool { - if fakeClient.Data[key].R.Node == nil { - return false - } - return fakeClient.Data[key].R.Node.TTL == 30 - } - test.TestDeleteNoGraceful(createFn, gracefulSetFn) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestDelete(validNewThirdPartyResource("foo")) } func TestGet(t *testing.T) { @@ -118,3 +94,24 @@ func TestList(t *testing.T) { test := registrytest.New(t, fakeClient, storage.Etcd) test.TestList(validNewThirdPartyResource("foo")) } + +func TestWatch(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestWatch( + validNewThirdPartyResource("foo"), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"foo": "bar"}, + }, + // matching fields + []fields.Set{}, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + {"name": "foo"}, + }, + ) +} From 4f282efb117034362ed64204bec9ea561c01492d Mon Sep 17 00:00:00 2001 From: Ewa Socala Date: Thu, 
27 Aug 2015 10:06:36 +0200 Subject: [PATCH 022/101] Added autoscaling utils for dynamic consumption of CPU --- test/e2e/autoscaling_utils.go | 149 ++++++++++++++++++++++++++++++++++ 1 file changed, 149 insertions(+) create mode 100644 test/e2e/autoscaling_utils.go diff --git a/test/e2e/autoscaling_utils.go b/test/e2e/autoscaling_utils.go new file mode 100644 index 00000000000..fc7dbeea22e --- /dev/null +++ b/test/e2e/autoscaling_utils.go @@ -0,0 +1,149 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "strconv" + "time" + + "k8s.io/kubernetes/pkg/api" + client "k8s.io/kubernetes/pkg/client/unversioned" + "k8s.io/kubernetes/pkg/util" + + . "github.com/onsi/ginkgo" +) + +const ( + consumptionTimeInSeconds = 30 + sleepTime = 30 * time.Second + requestSizeInMilicores = 100 + port = 80 + targetPort = 8080 + timeoutRC = 120 * time.Second + image = "gcr.io/google_containers/resource_consumer:alpha" +) + +/* +ConsumingRC is a tool for testing. It helps create specified usage of CPU or memory (Warnig: memory not supported) +typical use case: +rc.ConsumeCPU(600) +// ... check your assumption here +rc.ConsumeCPU(300) +// ... check your assumption here +*/ +type ConsumingRC struct { + name string + framework *Framework + channel chan int + stop chan int +} + +// NewConsumingRC creates new ConsumingRC +func NewConsumingRC(name string, replicas int, framework *Framework) *ConsumingRC { + startService(framework.Client, framework.Namespace.Name, name, replicas) + rc := &ConsumingRC{ + name: name, + framework: framework, + channel: make(chan int), + stop: make(chan int), + } + go rc.makeConsumeCPURequests() + rc.ConsumeCPU(0) + return rc +} + +// ConsumeCPU consumes given number of CPU +func (rc *ConsumingRC) ConsumeCPU(milicores int) { + rc.channel <- milicores +} + +func (rc *ConsumingRC) makeConsumeCPURequests() { + defer GinkgoRecover() + var count int + var rest int + for { + select { + case milicores := <-rc.channel: + count = milicores / requestSizeInMilicores + rest = milicores - count*requestSizeInMilicores + case <-time.After(sleepTime): + if count > 0 { + rc.sendConsumeCPUrequests(count, requestSizeInMilicores, consumptionTimeInSeconds) + } + if rest > 0 { + go rc.sendOneConsumeCPUrequest(rest, consumptionTimeInSeconds) + } + case <-rc.stop: + return + } + } +} + +func (rc *ConsumingRC) sendConsumeCPUrequests(requests, milicores, durationSec int) { + for i := 0; i < requests; i++ { + go rc.sendOneConsumeCPUrequest(milicores, durationSec) + } +} + +// sendOneConsumeCPUrequest sends POST request for cpu consumption +func (rc *ConsumingRC) sendOneConsumeCPUrequest(milicores int, durationSec int) { + _, err := rc.framework.Client.Post(). + Prefix("proxy"). + Namespace(rc.framework.Namespace.Name). + Resource("services"). + Name(rc.name). + Suffix("ConsumeCPU"). + Param("milicores", strconv.Itoa(milicores)). + Param("durationSec", strconv.Itoa(durationSec)). + Do(). 
+ Raw() + expectNoError(err) +} + +func (rc *ConsumingRC) CleanUp() { + rc.stop <- 0 + expectNoError(DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.name)) + expectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name)) +} + +func startService(c *client.Client, ns, name string, replicas int) { + c.Services(ns).Create(&api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: name, + }, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{{ + Port: port, + TargetPort: util.NewIntOrStringFromInt(targetPort), + }}, + + Selector: map[string]string{ + "name": name, + }, + }, + }) + + config := RCConfig{ + Client: c, + Image: image, + Name: name, + Namespace: ns, + Timeout: timeoutRC, + Replicas: replicas, + } + expectNoError(RunRC(config)) +} From edfaa480cfbe3190f338acdb1b30c3f6ec57cab0 Mon Sep 17 00:00:00 2001 From: kargakis Date: Wed, 2 Sep 2015 18:24:47 +0200 Subject: [PATCH 023/101] queryparams: Handle pointer fields in structs --- pkg/conversion/queryparams/convert.go | 20 ++++++++++++++---- pkg/conversion/queryparams/convert_test.go | 24 ++++++++++++++++++++++ 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/pkg/conversion/queryparams/convert.go b/pkg/conversion/queryparams/convert.go index a9ef5b9796a..ffa50170de0 100644 --- a/pkg/conversion/queryparams/convert.go +++ b/pkg/conversion/queryparams/convert.go @@ -50,6 +50,10 @@ func formatValue(value interface{}) string { return fmt.Sprintf("%v", value) } +func isPointerKind(kind reflect.Kind) bool { + return kind == reflect.Ptr +} + func isValueKind(kind reflect.Kind) bool { switch kind { case reflect.String, reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, @@ -92,11 +96,11 @@ func Convert(obj runtime.Object) (url.Values, error) { case reflect.Ptr, reflect.Interface: sv = reflect.ValueOf(obj).Elem() default: - return nil, fmt.Errorf("Expecting a pointer or interface") + return nil, fmt.Errorf("expecting a pointer or interface") } st := sv.Type() if st.Kind() != reflect.Struct { - return nil, fmt.Errorf("Expecting a pointer to a struct") + return nil, fmt.Errorf("expecting a pointer to a struct") } for i := 0; i < st.NumField(); i++ { field := sv.Field(i) @@ -105,10 +109,18 @@ func Convert(obj runtime.Object) (url.Values, error) { continue } ft := field.Type() + + kind := ft.Kind() + if isPointerKind(kind) { + kind = ft.Elem().Kind() + if !field.IsNil() { + field = reflect.Indirect(field) + } + } switch { - case isValueKind(ft.Kind()): + case isValueKind(kind): addParam(result, tag, omitempty, field) - case ft.Kind() == reflect.Array || ft.Kind() == reflect.Slice: + case kind == reflect.Array || kind == reflect.Slice: if isValueKind(ft.Elem().Kind()) { addListOfParams(result, tag, omitempty, field) } diff --git a/pkg/conversion/queryparams/convert_test.go b/pkg/conversion/queryparams/convert_test.go index 88390a5dcd8..664e128046b 100644 --- a/pkg/conversion/queryparams/convert_test.go +++ b/pkg/conversion/queryparams/convert_test.go @@ -53,6 +53,13 @@ type foo struct { func (*foo) IsAnAPIObject() {} +type baz struct { + Ptr *int `json:"ptr"` + Bptr *bool `json:"bptr,omitempty"` +} + +func (*baz) IsAnAPIObject() {} + func validateResult(t *testing.T, input interface{}, actual, expected url.Values) { local := url.Values{} for k, v := range expected { @@ -131,6 +138,19 @@ func TestConvert(t *testing.T) { }, expected: url.Values{"str": {""}, "namedStr": {"named str"}}, }, + { + input: &baz{ + Ptr: intp(5), + Bptr: boolp(true), + }, + expected: url.Values{"ptr": {"5"}, "bptr": {"true"}}, + 
}, + { + input: &baz{ + Bptr: boolp(true), + }, + expected: url.Values{"ptr": {""}, "bptr": {"true"}}, + }, } for _, test := range tests { @@ -141,3 +161,7 @@ func TestConvert(t *testing.T) { validateResult(t, test.input, result, test.expected) } } + +func intp(n int) *int { return &n } + +func boolp(b bool) *bool { return &b } From 3f21071064b43432c50cd0787e98d780ce26560e Mon Sep 17 00:00:00 2001 From: Marcin Wielgus Date: Wed, 2 Sep 2015 12:20:11 +0200 Subject: [PATCH 024/101] HorizontalPodAutoscaler in kubectl get --- contrib/completions/bash/kubectl | 3 ++ pkg/expapi/types.go | 1 + pkg/kubectl/kubectl.go | 1 + pkg/kubectl/resource_printer.go | 49 ++++++++++++++++++++++++++++++++ 4 files changed, 54 insertions(+) diff --git a/contrib/completions/bash/kubectl b/contrib/completions/bash/kubectl index c096937e22c..365496f8aed 100644 --- a/contrib/completions/bash/kubectl +++ b/contrib/completions/bash/kubectl @@ -285,6 +285,7 @@ _kubectl_get() must_have_one_noun+=("deployment") must_have_one_noun+=("endpoints") must_have_one_noun+=("event") + must_have_one_noun+=("horizontalpodautoscaler") must_have_one_noun+=("limitrange") must_have_one_noun+=("namespace") must_have_one_noun+=("node") @@ -460,6 +461,7 @@ _kubectl_delete() must_have_one_noun+=("deployment") must_have_one_noun+=("endpoints") must_have_one_noun+=("event") + must_have_one_noun+=("horizontalpodautoscaler") must_have_one_noun+=("limitrange") must_have_one_noun+=("namespace") must_have_one_noun+=("node") @@ -848,6 +850,7 @@ _kubectl_label() must_have_one_noun+=("deployment") must_have_one_noun+=("endpoints") must_have_one_noun+=("event") + must_have_one_noun+=("horizontalpodautoscaler") must_have_one_noun+=("limitrange") must_have_one_noun+=("namespace") must_have_one_noun+=("node") diff --git a/pkg/expapi/types.go b/pkg/expapi/types.go index d424b4ce8c8..1612f2e2768 100644 --- a/pkg/expapi/types.go +++ b/pkg/expapi/types.go @@ -103,6 +103,7 @@ type HorizontalPodAutoscalerSpec struct { // HorizontalPodAutoscalerStatus contains the current status of a horizontal pod autoscaler type HorizontalPodAutoscalerStatus struct { + // TODO: Consider if it is needed. // CurrentReplicas is the number of replicas of pods managed by this autoscaler. CurrentReplicas int `json:"currentReplicas"` diff --git a/pkg/kubectl/kubectl.go b/pkg/kubectl/kubectl.go index 8e2d16718e3..15c9c0d6cb4 100644 --- a/pkg/kubectl/kubectl.go +++ b/pkg/kubectl/kubectl.go @@ -96,6 +96,7 @@ func expandResourceShortcut(resource string) string { "cs": "componentstatuses", "ev": "events", "ep": "endpoints", + "hpa": "horizontalpodautoscalers", "limits": "limitranges", "no": "nodes", "ns": "namespaces", diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index 37f6a517252..0918b52f26c 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -367,6 +367,7 @@ var persistentVolumeColumns = []string{"NAME", "LABELS", "CAPACITY", "ACCESSMODE var persistentVolumeClaimColumns = []string{"NAME", "LABELS", "STATUS", "VOLUME", "CAPACITY", "ACCESSMODES", "AGE"} var componentStatusColumns = []string{"NAME", "STATUS", "MESSAGE", "ERROR"} var thirdPartyResourceColumns = []string{"NAME", "DESCRIPTION", "VERSION(S)"} +var horizontalPodAutoscalerColumns = []string{"NAME", "REFERENCE", "TARGET", "CURRENT", "MINPODS", "MAXPODS", "AGE"} var withNamespacePrefixColumns = []string{"NAMESPACE"} // TODO(erictune): print cluster name too. 
var deploymentColumns = []string{"NAME", "UPDATEDREPLICAS", "AGE"} @@ -406,6 +407,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() { h.Handler(thirdPartyResourceColumns, printThirdPartyResourceList) h.Handler(deploymentColumns, printDeployment) h.Handler(deploymentColumns, printDeploymentList) + h.Handler(horizontalPodAutoscalerColumns, printHorizontalPodAutoscaler) + h.Handler(horizontalPodAutoscalerColumns, printHorizontalPodAutoscalerList) } func (h *HumanReadablePrinter) unknown(data []byte, w io.Writer) error { @@ -1151,6 +1154,52 @@ func printDeploymentList(list *expapi.DeploymentList, w io.Writer, withNamespace return nil } +func printHorizontalPodAutoscaler(hpa *expapi.HorizontalPodAutoscaler, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { + namespace := hpa.Namespace + name := hpa.Name + reference := fmt.Sprintf("%s/%s/%s/%s", + hpa.Spec.ScaleRef.Kind, + hpa.Spec.ScaleRef.Namespace, + hpa.Spec.ScaleRef.Name, + hpa.Spec.ScaleRef.Subresource) + target := fmt.Sprintf("%s %v", hpa.Spec.Target.Quantity.String(), hpa.Spec.Target.Resource) + + current := "" + if hpa.Status != nil && hpa.Status.CurrentConsumption != nil { + current = fmt.Sprintf("%s %v", hpa.Status.CurrentConsumption.Quantity.String(), hpa.Status.CurrentConsumption.Resource) + } + minPods := hpa.Spec.MinCount + maxPods := hpa.Spec.MaxCount + if withNamespace { + if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil { + return err + } + } + + if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d\t%d\t%s", + name, + reference, + target, + current, + minPods, + maxPods, + translateTimestamp(hpa.CreationTimestamp), + ); err != nil { + return err + } + _, err := fmt.Fprint(w, appendLabels(hpa.Labels, columnLabels)) + return err +} + +func printHorizontalPodAutoscalerList(list *expapi.HorizontalPodAutoscalerList, w io.Writer, withNamespace bool, wide bool, showAll bool, columnLabels []string) error { + for i := range list.Items { + if err := printHorizontalPodAutoscaler(&list.Items[i], w, withNamespace, wide, showAll, columnLabels); err != nil { + return err + } + } + return nil +} + func appendLabels(itemLabels map[string]string, columnLabels []string) string { var buffer bytes.Buffer From 65bfd3541f4fce8eaafa323d33bcb3f0893d6684 Mon Sep 17 00:00:00 2001 From: hurf Date: Tue, 1 Sep 2015 21:35:27 +0800 Subject: [PATCH 025/101] Make -o option working for rolling-update Use simple message to show the result. If -o is specified, print updated rc. 
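A minimal sketch of the resulting control flow (illustrative only; the helper name and the mapper type are assumptions, while the cmdutil and Factory calls are the ones used in the diff below):

```go
// printRollingUpdateResult is a hypothetical helper, shown only to summarise the
// behaviour this patch gives RunRollingUpdate: honour -o/--output when it is set,
// otherwise fall back to the short "rolling updated" success message.
func printRollingUpdateResult(f *cmdutil.Factory, cmd *cobra.Command, out io.Writer,
	mapper meta.RESTMapper, newRc *api.ReplicationController, res, oldName, message string) error {
	if outputFormat := cmdutil.GetFlagString(cmd, "output"); outputFormat != "" {
		// -o was given: print the freshly fetched, updated replication controller
		// in the requested format instead of a plain name.
		return f.PrintObject(cmd, newRc, out)
	}
	// No -o: keep the one-line success message produced by PrintSuccess.
	cmdutil.PrintSuccess(mapper, false, out, res, oldName, message)
	return nil
}
```

In the diff below the same logic is inlined at the end of RunRollingUpdate rather than factored into a helper.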
--- pkg/kubectl/cmd/rollingupdate.go | 35 +++++++++++++++++++++++++------- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/pkg/kubectl/cmd/rollingupdate.go b/pkg/kubectl/cmd/rollingupdate.go index 01017d8a243..b85b0cf6887 100644 --- a/pkg/kubectl/cmd/rollingupdate.go +++ b/pkg/kubectl/cmd/rollingupdate.go @@ -28,6 +28,7 @@ import ( "github.com/spf13/cobra" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/kubectl" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" @@ -138,6 +139,7 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg interval := cmdutil.GetFlagDuration(cmd, "poll-interval") timeout := cmdutil.GetFlagDuration(cmd, "timeout") dryrun := cmdutil.GetFlagBool(cmd, "dry-run") + outputFormat := cmdutil.GetFlagString(cmd, "output") cmdNamespace, enforceNamespace, err := f.DefaultNamespace() if err != nil { @@ -263,12 +265,17 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg } if dryrun { oldRcData := &bytes.Buffer{} - if err := f.PrintObject(cmd, oldRc, oldRcData); err != nil { - return err - } newRcData := &bytes.Buffer{} - if err := f.PrintObject(cmd, newRc, newRcData); err != nil { - return err + if outputFormat == "" { + oldRcData.WriteString(oldRc.Name) + newRcData.WriteString(newRc.Name) + } else { + if err := f.PrintObject(cmd, oldRc, oldRcData); err != nil { + return err + } + if err := f.PrintObject(cmd, newRc, newRcData); err != nil { + return err + } } fmt.Fprintf(out, "Rolling from:\n%s\nTo:\n%s\n", string(oldRcData.Bytes()), string(newRcData.Bytes())) return nil @@ -297,11 +304,25 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg return err } + message := "rolling updated" if keepOldName { - fmt.Fprintf(out, "%s\n", oldName) + newRc.Name = oldName } else { - fmt.Fprintf(out, "%s\n", newRc.Name) + message = fmt.Sprintf("rolling updated to %q", newRc.Name) } + newRc, err = client.ReplicationControllers(cmdNamespace).Get(newRc.Name) + if err != nil { + return err + } + if outputFormat != "" { + return f.PrintObject(cmd, newRc, out) + } + _, kind, err := api.Scheme.ObjectVersionAndKind(newRc) + if err != nil { + return err + } + _, res := meta.KindToResource(kind, false) + cmdutil.PrintSuccess(mapper, false, out, res, oldName, message) return nil } From 6c32e071f4ab1bcc6f63c20d8c72ceabae664cdd Mon Sep 17 00:00:00 2001 From: kargakis Date: Thu, 3 Sep 2015 15:32:19 +0200 Subject: [PATCH 026/101] Dont output nil; test nil & omitempty --- pkg/conversion/queryparams/convert.go | 8 +++++++- pkg/conversion/queryparams/convert_test.go | 6 ++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/pkg/conversion/queryparams/convert.go b/pkg/conversion/queryparams/convert.go index ffa50170de0..9be5f581e61 100644 --- a/pkg/conversion/queryparams/convert.go +++ b/pkg/conversion/queryparams/convert.go @@ -74,7 +74,13 @@ func addParam(values url.Values, tag string, omitempty bool, value reflect.Value if omitempty && zeroValue(value) { return } - values.Add(tag, fmt.Sprintf("%v", value.Interface())) + val := "" + iValue := fmt.Sprintf("%v", value.Interface()) + + if iValue != "" { + val = iValue + } + values.Add(tag, val) } func addListOfParams(values url.Values, tag string, omitempty bool, list reflect.Value) { diff --git a/pkg/conversion/queryparams/convert_test.go b/pkg/conversion/queryparams/convert_test.go index 664e128046b..e068f0d9b08 100644 --- 
a/pkg/conversion/queryparams/convert_test.go +++ b/pkg/conversion/queryparams/convert_test.go @@ -151,6 +151,12 @@ func TestConvert(t *testing.T) { }, expected: url.Values{"ptr": {""}, "bptr": {"true"}}, }, + { + input: &baz{ + Ptr: intp(5), + }, + expected: url.Values{"ptr": {"5"}}, + }, } for _, test := range tests { From 46e7f5684c07aecf13071a3d6c34a0e95734f6bb Mon Sep 17 00:00:00 2001 From: hurf Date: Mon, 31 Aug 2015 20:52:04 +0800 Subject: [PATCH 027/101] Add --dry-run option for label command With --dry-run option, label command will print the object locally without update it on server side. --- contrib/completions/bash/kubectl | 1 + docs/man/man1/kubectl-label.1 | 4 ++++ docs/user-guide/kubectl/kubectl_label.md | 3 ++- pkg/kubectl/cmd/label.go | 27 +++++++++++++++++------- 4 files changed, 26 insertions(+), 9 deletions(-) diff --git a/contrib/completions/bash/kubectl b/contrib/completions/bash/kubectl index c096937e22c..ca101d64a29 100644 --- a/contrib/completions/bash/kubectl +++ b/contrib/completions/bash/kubectl @@ -820,6 +820,7 @@ _kubectl_label() flags_completion=() flags+=("--all") + flags+=("--dry-run") flags+=("--filename=") flags_with_completion+=("--filename") flags_completion+=("__handle_filename_extension_flag json|yaml|yml") diff --git a/docs/man/man1/kubectl-label.1 b/docs/man/man1/kubectl-label.1 index e4c953edd14..22eb98fce49 100644 --- a/docs/man/man1/kubectl-label.1 +++ b/docs/man/man1/kubectl-label.1 @@ -26,6 +26,10 @@ If \-\-resource\-version is specified, then updates will use this resource versi \fB\-\-all\fP=false select all resources in the namespace of the specified resource types +.PP +\fB\-\-dry\-run\fP=false + If true, only print the object that would be sent, without sending it. + .PP \fB\-f\fP, \fB\-\-filename\fP=[] Filename, directory, or URL to a file identifying the resource to update the labels diff --git a/docs/user-guide/kubectl/kubectl_label.md b/docs/user-guide/kubectl/kubectl_label.md index 16c9dc29888..3e90c683358 100644 --- a/docs/user-guide/kubectl/kubectl_label.md +++ b/docs/user-guide/kubectl/kubectl_label.md @@ -75,6 +75,7 @@ $ kubectl label pods foo bar- ``` --all[=false]: select all resources in the namespace of the specified resource types + --dry-run[=false]: If true, only print the object that would be sent, without sending it. -f, --filename=[]: Filename, directory, or URL to a file identifying the resource to update the labels -h, --help[=false]: help for label --no-headers[=false]: When using the default output, don't print headers. @@ -120,7 +121,7 @@ $ kubectl label pods foo bar- * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-26 09:03:39.977006328 +0000 UTC +###### Auto generated by spf13/cobra at 2015-08-31 12:51:55.222410248 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_label.md?pixel)]() diff --git a/pkg/kubectl/cmd/label.go b/pkg/kubectl/cmd/label.go index 333416db8ca..b5618ba25fe 100644 --- a/pkg/kubectl/cmd/label.go +++ b/pkg/kubectl/cmd/label.go @@ -90,6 +90,8 @@ func NewCmdLabel(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().String("resource-version", "", "If non-empty, the labels update will only succeed if this is the current resource-version for the object. 
Only valid when specifying a single resource.") usage := "Filename, directory, or URL to a file identifying the resource to update the labels" kubectl.AddJsonFilenameFlag(cmd, &options.Filenames, usage) + cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without sending it.") + return cmd } @@ -219,22 +221,31 @@ func RunLabel(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri if err != nil { return err } - obj, err := cmdutil.UpdateObject(info, func(obj runtime.Object) error { - err := labelFunc(obj, overwrite, resourceVersion, lbls, remove) + + var outputObj runtime.Object + if cmdutil.GetFlagBool(cmd, "dry-run") { + err = labelFunc(info.Object, overwrite, resourceVersion, lbls, remove) + if err != nil { + return err + } + outputObj = info.Object + } else { + outputObj, err = cmdutil.UpdateObject(info, func(obj runtime.Object) error { + err := labelFunc(obj, overwrite, resourceVersion, lbls, remove) + if err != nil { + return err + } + return nil + }) if err != nil { return err } - return nil - }) - if err != nil { - return err } - outputFormat := cmdutil.GetFlagString(cmd, "output") if outputFormat == "" { cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "labeled") } else { - f.PrintObject(cmd, obj, out) + return f.PrintObject(cmd, outputObj, out) } return nil }) From f9bca7bc7df013d3ec134008a5f06fce6947707d Mon Sep 17 00:00:00 2001 From: kargakis Date: Thu, 3 Sep 2015 15:44:03 +0200 Subject: [PATCH 028/101] handle structs --- pkg/conversion/queryparams/convert.go | 18 ++++++++++++++++-- pkg/conversion/queryparams/convert_test.go | 6 +++--- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/pkg/conversion/queryparams/convert.go b/pkg/conversion/queryparams/convert.go index 9be5f581e61..450a43001a2 100644 --- a/pkg/conversion/queryparams/convert.go +++ b/pkg/conversion/queryparams/convert.go @@ -54,6 +54,10 @@ func isPointerKind(kind reflect.Kind) bool { return kind == reflect.Ptr } +func isStructKind(kind reflect.Kind) bool { + return kind == reflect.Struct +} + func isValueKind(kind reflect.Kind) bool { switch kind { case reflect.String, reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, @@ -105,9 +109,17 @@ func Convert(obj runtime.Object) (url.Values, error) { return nil, fmt.Errorf("expecting a pointer or interface") } st := sv.Type() - if st.Kind() != reflect.Struct { + if !isStructKind(st.Kind()) { return nil, fmt.Errorf("expecting a pointer to a struct") } + + // Check all object fields + convertStruct(result, st, sv) + + return result, nil +} + +func convertStruct(result url.Values, st reflect.Type, sv reflect.Value) { for i := 0; i < st.NumField(); i++ { field := sv.Field(i) tag, omitempty := jsonTag(st.Field(i)) @@ -123,6 +135,7 @@ func Convert(obj runtime.Object) (url.Values, error) { field = reflect.Indirect(field) } } + switch { case isValueKind(kind): addParam(result, tag, omitempty, field) @@ -130,7 +143,8 @@ func Convert(obj runtime.Object) (url.Values, error) { if isValueKind(ft.Elem().Kind()) { addListOfParams(result, tag, omitempty, field) } + case isStructKind(kind) && !(zeroValue(field) && omitempty): + convertStruct(result, ft, field) } } - return result, nil } diff --git a/pkg/conversion/queryparams/convert_test.go b/pkg/conversion/queryparams/convert_test.go index e068f0d9b08..c409756e44a 100644 --- a/pkg/conversion/queryparams/convert_test.go +++ b/pkg/conversion/queryparams/convert_test.go @@ -111,12 +111,12 @@ func TestConvert(t *testing.T) { }, { input: &foo{ - Str: 
"ignore embedded struct", + Str: "don't ignore embedded struct", Foobar: bar{ Float1: 5.0, }, }, - expected: url.Values{"str": {"ignore embedded struct"}}, + expected: url.Values{"str": {"don't ignore embedded struct"}, "float1": {"5"}, "float2": {"0"}}, }, { // Ignore untagged fields @@ -149,7 +149,7 @@ func TestConvert(t *testing.T) { input: &baz{ Bptr: boolp(true), }, - expected: url.Values{"ptr": {""}, "bptr": {"true"}}, + expected: url.Values{"ptr": {""}, "bptr": {"true"}}, }, { input: &baz{ From 04fc8ae3dd2c07e285d39b96be3657dea468763c Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 3 Sep 2015 10:10:11 -0400 Subject: [PATCH 029/101] s|github.com/GoogleCloudPlatform/kubernetes|github.com/kubernetes/kubernetes| --- CHANGELOG.md | 2 +- .../fluentd-elasticsearch/fluentd-es-image/README.md | 2 +- .../addons/fluentd-gcp/fluentd-gcp-image/README.md | 2 +- docs/admin/authentication.md | 4 ++-- docs/admin/node.md | 2 +- docs/design/architecture.md | 2 +- docs/devel/cherry-picks.md | 2 +- docs/devel/cli-roadmap.md | 6 +++--- docs/devel/flaky-tests.md | 2 +- docs/devel/instrumentation.md | 12 ++++++------ docs/devel/issues.md | 2 +- docs/devel/making-release-notes.md | 2 +- docs/devel/pull-requests.md | 2 +- docs/getting-started-guides/aws.md | 2 +- docs/getting-started-guides/azure.md | 2 +- docs/getting-started-guides/binary_release.md | 4 ++-- docs/getting-started-guides/coreos/azure/README.md | 2 +- .../coreos/bare_metal_calico.md | 2 +- .../coreos/bare_metal_offline.md | 10 +++++----- docs/getting-started-guides/gce.md | 2 +- docs/getting-started-guides/mesos-docker.md | 4 ++-- docs/getting-started-guides/mesos.md | 2 +- docs/getting-started-guides/rackspace.md | 4 ++-- docs/getting-started-guides/scratch.md | 4 ++-- docs/getting-started-guides/ubuntu.md | 2 +- docs/getting-started-guides/vagrant.md | 2 +- docs/proposals/autoscaling.md | 2 +- docs/proposals/deployment.md | 2 +- docs/proposals/horizontal-pod-autoscaler.md | 8 ++++---- docs/proposals/job.md | 6 +++--- docs/roadmap.md | 2 +- docs/user-guide/application-troubleshooting.md | 4 ++-- docs/user-guide/compute-resources.md | 2 +- docs/user-guide/configuring-containers.md | 4 ++-- docs/user-guide/connecting-applications.md | 2 +- docs/user-guide/debugging-services.md | 2 +- docs/user-guide/deploying-applications.md | 2 +- docs/user-guide/node-selection/README.md | 4 ++-- docs/user-guide/pods.md | 2 +- docs/user-guide/prereqs.md | 2 +- docs/user-guide/replication-controller.md | 2 +- docs/user-guide/services.md | 2 +- docs/user-guide/walkthrough/README.md | 2 +- docs/user-guide/walkthrough/k8s201.md | 4 ++-- examples/javaee/README.md | 2 +- logo_usage_guidelines.md | 8 ++++---- third_party/swagger-ui/README.md | 2 +- 47 files changed, 75 insertions(+), 75 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c45c725a88..a4daafba0fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -Please see the [Releases Page](https://github.com/GoogleCloudPlatform/kubernetes/releases) +Please see the [Releases Page](https://github.com/kubernetes/kubernetes/releases) [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/CHANGELOG.md?pixel)]() diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md index 8040f686db0..1dd1268ff1f 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/README.md @@ -2,7 +2,7 @@ This directory contains the source 
files needed to make a Docker image that collects Docker container log files using [Fluentd](http://www.fluentd.org/) and sends them to an instance of [Elasticsearch](http://www.elasticsearch.org/). -This image is designed to be used as part of the [Kubernetes](https://github.com/GoogleCloudPlatform/kubernetes) +This image is designed to be used as part of the [Kubernetes](https://github.com/kubernetes/kubernetes) cluster bring up process. The image resides at DockerHub under the name [kubernetes/fluentd-eslasticsearch](https://registry.hub.docker.com/u/kubernetes/fluentd-elasticsearch/). diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-image/README.md b/cluster/addons/fluentd-gcp/fluentd-gcp-image/README.md index f7393f02f1c..a663d2f131b 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-image/README.md +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-image/README.md @@ -2,7 +2,7 @@ This directory contains the source files needed to make a Docker image that collects Docker container log files using [Fluentd](http://www.fluentd.org/) and sends them to GCP. -This image is designed to be used as part of the [Kubernetes](https://github.com/GoogleCloudPlatform/kubernetes) +This image is designed to be used as part of the [Kubernetes](https://github.com/kubernetes/kubernetes) cluster bring up process. The image resides at DockerHub under the name [kubernetes/fluentd-gcp](https://registry.hub.docker.com/u/kubernetes/fluentd-gcp/). diff --git a/docs/admin/authentication.md b/docs/admin/authentication.md index 8ab706f2a5f..c6039a4f0e5 100644 --- a/docs/admin/authentication.md +++ b/docs/admin/authentication.md @@ -88,8 +88,8 @@ option to the apiserver during startup. The plugin is implemented in For details on how to use keystone to manage projects and users, refer to the [Keystone documentation](http://docs.openstack.org/developer/keystone/). Please note that this plugin is still experimental which means it is subject to changes. -Please refer to the [discussion](https://github.com/GoogleCloudPlatform/kubernetes/pull/11798#issuecomment-129655212) -and the [blueprint](https://github.com/GoogleCloudPlatform/kubernetes/issues/11626) for more details +Please refer to the [discussion](https://github.com/kubernetes/kubernetes/pull/11798#issuecomment-129655212) +and the [blueprint](https://github.com/kubernetes/kubernetes/issues/11626) for more details ## Plugin Development diff --git a/docs/admin/node.md b/docs/admin/node.md index 569d5c592a0..2d5520311a3 100644 --- a/docs/admin/node.md +++ b/docs/admin/node.md @@ -253,7 +253,7 @@ on each kubelet where you want to reserve resources. Node is a top-level resource in the kubernetes REST API. More details about the API object can be found at: [Node API -object](https://htmlpreview.github.io/?https://github.com/GoogleCloudPlatform/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_node). +object](https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_node). diff --git a/docs/design/architecture.md b/docs/design/architecture.md index b17345efc94..2a761dea645 100644 --- a/docs/design/architecture.md +++ b/docs/design/architecture.md @@ -51,7 +51,7 @@ The `kubelet` manages [pods](../user-guide/pods.md) and their containers, their ### `kube-proxy` -Each node also runs a simple network proxy and load balancer (see the [services FAQ](https://github.com/GoogleCloudPlatform/kubernetes/wiki/Services-FAQ) for more details). 
This reflects `services` (see [the services doc](../user-guide/services.md) for more details) as defined in the Kubernetes API on each node and can do simple TCP and UDP stream forwarding (round robin) across a set of backends. +Each node also runs a simple network proxy and load balancer (see the [services FAQ](https://github.com/kubernetes/kubernetes/wiki/Services-FAQ) for more details). This reflects `services` (see [the services doc](../user-guide/services.md) for more details) as defined in the Kubernetes API on each node and can do simple TCP and UDP stream forwarding (round robin) across a set of backends. Service endpoints are currently found via [DNS](../admin/dns.md) or through environment variables (both [Docker-links-compatible](https://docs.docker.com/userguide/dockerlinks/) and Kubernetes `{FOO}_SERVICE_HOST` and `{FOO}_SERVICE_PORT` variables are supported). These variables resolve to ports managed by the service proxy. diff --git a/docs/devel/cherry-picks.md b/docs/devel/cherry-picks.md index 519c73c3dc9..7cb6046563b 100644 --- a/docs/devel/cherry-picks.md +++ b/docs/devel/cherry-picks.md @@ -62,7 +62,7 @@ conflict***. Now that we've structured cherry picks as PRs, searching for all cherry-picks against a release is a GitHub query: For example, -[this query is all of the v0.21.x cherry-picks](https://github.com/GoogleCloudPlatform/kubernetes/pulls?utf8=%E2%9C%93&q=is%3Apr+%22automated+cherry+pick%22+base%3Arelease-0.21) +[this query is all of the v0.21.x cherry-picks](https://github.com/kubernetes/kubernetes/pulls?utf8=%E2%9C%93&q=is%3Apr+%22automated+cherry+pick%22+base%3Arelease-0.21) diff --git a/docs/devel/cli-roadmap.md b/docs/devel/cli-roadmap.md index 6908455566d..42784dbcf4e 100644 --- a/docs/devel/cli-roadmap.md +++ b/docs/devel/cli-roadmap.md @@ -34,9 +34,9 @@ Documentation for other releases can be found at # Kubernetes CLI/Configuration Roadmap See github issues with the following labels: -* [area/app-config-deployment](https://github.com/GoogleCloudPlatform/kubernetes/labels/area/app-config-deployment) -* [component/CLI](https://github.com/GoogleCloudPlatform/kubernetes/labels/component/CLI) -* [component/client](https://github.com/GoogleCloudPlatform/kubernetes/labels/component/client) +* [area/app-config-deployment](https://github.com/kubernetes/kubernetes/labels/area/app-config-deployment) +* [component/CLI](https://github.com/kubernetes/kubernetes/labels/component/CLI) +* [component/client](https://github.com/kubernetes/kubernetes/labels/component/client) diff --git a/docs/devel/flaky-tests.md b/docs/devel/flaky-tests.md index 9db9e15cd56..3a7af51e4f3 100644 --- a/docs/devel/flaky-tests.md +++ b/docs/devel/flaky-tests.md @@ -64,7 +64,7 @@ spec: - name: TEST_PACKAGE value: pkg/tools - name: REPO_SPEC - value: https://github.com/GoogleCloudPlatform/kubernetes + value: https://github.com/kubernetes/kubernetes ``` Note that we omit the labels and the selector fields of the replication controller, because they will be populated from the labels field of the pod template by default. diff --git a/docs/devel/instrumentation.md b/docs/devel/instrumentation.md index 8cc9e2b2b1c..683f9d936ea 100644 --- a/docs/devel/instrumentation.md +++ b/docs/devel/instrumentation.md @@ -44,18 +44,18 @@ We use the Prometheus monitoring system's golang client library for instrumentin 2. Give the metric a name and description. 3. Pick whether you want to distinguish different categories of things using labels on the metric. 
If so, add "Vec" to the name of the type of metric you want and add a slice of the label names to the definition. - https://github.com/GoogleCloudPlatform/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/apiserver/apiserver.go#L53 - https://github.com/GoogleCloudPlatform/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/kubelet/metrics/metrics.go#L31 + https://github.com/kubernetes/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/apiserver/apiserver.go#L53 + https://github.com/kubernetes/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/kubelet/metrics/metrics.go#L31 3. Register the metric so that prometheus will know to export it. - https://github.com/GoogleCloudPlatform/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/kubelet/metrics/metrics.go#L74 - https://github.com/GoogleCloudPlatform/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/apiserver/apiserver.go#L78 + https://github.com/kubernetes/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/kubelet/metrics/metrics.go#L74 + https://github.com/kubernetes/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/apiserver/apiserver.go#L78 4. Use the metric by calling the appropriate method for your metric type (Set, Inc/Add, or Observe, respectively for Gauge, Counter, or Histogram/Summary), first calling WithLabelValues if your metric has any labels - https://github.com/GoogleCloudPlatform/kubernetes/blob/3ce7fe8310ff081dbbd3d95490193e1d5250d2c9/pkg/kubelet/kubelet.go#L1384 - https://github.com/GoogleCloudPlatform/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/apiserver/apiserver.go#L87 + https://github.com/kubernetes/kubernetes/blob/3ce7fe8310ff081dbbd3d95490193e1d5250d2c9/pkg/kubelet/kubelet.go#L1384 + https://github.com/kubernetes/kubernetes/blob/cd3299307d44665564e1a5c77d0daa0286603ff5/pkg/apiserver/apiserver.go#L87 These are the metric type definitions if you're curious to learn about them or need more information: diff --git a/docs/devel/issues.md b/docs/devel/issues.md index 46beb9ce17e..c7bda07b7db 100644 --- a/docs/devel/issues.md +++ b/docs/devel/issues.md @@ -33,7 +33,7 @@ Documentation for other releases can be found at GitHub Issues for the Kubernetes Project ======================================== -A list quick overview of how we will review and prioritize incoming issues at https://github.com/GoogleCloudPlatform/kubernetes/issues +A list quick overview of how we will review and prioritize incoming issues at https://github.com/kubernetes/kubernetes/issues Priorities ---------- diff --git a/docs/devel/making-release-notes.md b/docs/devel/making-release-notes.md index 1efab1ac7da..871e65b48cd 100644 --- a/docs/devel/making-release-notes.md +++ b/docs/devel/making-release-notes.md @@ -66,7 +66,7 @@ With the final markdown all set, cut and paste it to the top of `CHANGELOG.md` ### 5) Update the Release page - * Switch to the [releases](https://github.com/GoogleCloudPlatform/kubernetes/releases) page. + * Switch to the [releases](https://github.com/kubernetes/kubernetes/releases) page. * Open up the release you are working on. * Cut and paste the final markdown from above into the release notes * Press Save. 
diff --git a/docs/devel/pull-requests.md b/docs/devel/pull-requests.md index 157646c0540..a81c01c57d7 100644 --- a/docs/devel/pull-requests.md +++ b/docs/devel/pull-requests.md @@ -52,7 +52,7 @@ Life of a Pull Request Unless in the last few weeks of a milestone when we need to reduce churn and stabilize, we aim to be always accepting pull requests. -Either the [on call](https://github.com/GoogleCloudPlatform/kubernetes/wiki/Kubernetes-on-call-rotation) manually or the [submit queue](https://github.com/contrib/tree/master/submit-queue) automatically will manage merging PRs. +Either the [on call](https://github.com/kubernetes/kubernetes/wiki/Kubernetes-on-call-rotation) manually or the [submit queue](https://github.com/contrib/tree/master/submit-queue) automatically will manage merging PRs. There are several requirements for the submit queue to work: * Author must have signed CLA ("cla: yes" label added to PR) diff --git a/docs/getting-started-guides/aws.md b/docs/getting-started-guides/aws.md index 526b10ab355..a55c1f92b3d 100644 --- a/docs/getting-started-guides/aws.md +++ b/docs/getting-started-guides/aws.md @@ -106,7 +106,7 @@ EC2 with user data (cloud-config). ### Command line administration tool: `kubectl` The cluster startup script will leave you with a `kubernetes` directory on your workstation. -Alternately, you can download the latest Kubernetes release from [this page](https://github.com/GoogleCloudPlatform/kubernetes/releases). +Alternately, you can download the latest Kubernetes release from [this page](https://github.com/kubernetes/kubernetes/releases). Next, add the appropriate binary folder to your `PATH` to access kubectl: diff --git a/docs/getting-started-guides/azure.md b/docs/getting-started-guides/azure.md index c0c1cf30342..1983bc8773d 100644 --- a/docs/getting-started-guides/azure.md +++ b/docs/getting-started-guides/azure.md @@ -76,7 +76,7 @@ You can create a virtual network: Now you're ready. -You can download and install the latest Kubernetes release from [this page](https://github.com/GoogleCloudPlatform/kubernetes/releases), then run the `/cluster/kube-up.sh` script to start the cluster: +You can download and install the latest Kubernetes release from [this page](https://github.com/kubernetes/kubernetes/releases), then run the `/cluster/kube-up.sh` script to start the cluster: cd kubernetes cluster/kube-up.sh diff --git a/docs/getting-started-guides/binary_release.md b/docs/getting-started-guides/binary_release.md index 49e0f12b930..d483e765558 100644 --- a/docs/getting-started-guides/binary_release.md +++ b/docs/getting-started-guides/binary_release.md @@ -37,7 +37,7 @@ You can either build a release from sources or download a pre-built release. If ### Prebuilt Binary Release -The list of binary releases is available for download from the [GitHub Kubernetes repo release page](https://github.com/GoogleCloudPlatform/kubernetes/releases). +The list of binary releases is available for download from the [GitHub Kubernetes repo release page](https://github.com/kubernetes/kubernetes/releases). Download the latest release and unpack this tar file on Linux or OS X, cd to the created `kubernetes/` directory, and then follow the getting started guide for your cloud. @@ -48,7 +48,7 @@ Get the Kubernetes source. If you are simply building a release from source the Building a release is simple. 
```bash -git clone https://github.com/GoogleCloudPlatform/kubernetes.git +git clone https://github.com/kubernetes/kubernetes.git cd kubernetes make release ``` diff --git a/docs/getting-started-guides/coreos/azure/README.md b/docs/getting-started-guides/coreos/azure/README.md index 40ec141dc1d..4bee80ff37a 100644 --- a/docs/getting-started-guides/coreos/azure/README.md +++ b/docs/getting-started-guides/coreos/azure/README.md @@ -57,7 +57,7 @@ In this guide I will demonstrate how to deploy a Kubernetes cluster to Azure clo To get started, you need to checkout the code: ```sh -git clone https://github.com/GoogleCloudPlatform/kubernetes +git clone https://github.com/kubernetes/kubernetes cd kubernetes/docs/getting-started-guides/coreos/azure/ ``` diff --git a/docs/getting-started-guides/coreos/bare_metal_calico.md b/docs/getting-started-guides/coreos/bare_metal_calico.md index 2945514301c..d2bef03e92e 100644 --- a/docs/getting-started-guides/coreos/bare_metal_calico.md +++ b/docs/getting-started-guides/coreos/bare_metal_calico.md @@ -63,7 +63,7 @@ In the next few steps you will be asked to configure these files and host them o To get the Kubernetes source, clone the GitHub repo, and build the binaries. ``` -git clone https://github.com/GoogleCloudPlatform/kubernetes.git +git clone https://github.com/kubernetes/kubernetes.git cd kubernetes ./build/release.sh ``` diff --git a/docs/getting-started-guides/coreos/bare_metal_offline.md b/docs/getting-started-guides/coreos/bare_metal_offline.md index 1083c91e128..c32a5172142 100644 --- a/docs/getting-started-guides/coreos/bare_metal_offline.md +++ b/docs/getting-started-guides/coreos/bare_metal_offline.md @@ -412,7 +412,7 @@ On the PXE server make and fill in the variables `vi /var/www/html/coreos/pxe-cl content: | [Unit] Description=Kubernetes API Server - Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Documentation=https://github.com/kubernetes/kubernetes Requires=etcd.service After=etcd.service [Service] @@ -432,7 +432,7 @@ On the PXE server make and fill in the variables `vi /var/www/html/coreos/pxe-cl content: | [Unit] Description=Kubernetes Controller Manager - Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Documentation=https://github.com/kubernetes/kubernetes Requires=kube-apiserver.service After=kube-apiserver.service [Service] @@ -448,7 +448,7 @@ On the PXE server make and fill in the variables `vi /var/www/html/coreos/pxe-cl content: | [Unit] Description=Kubernetes Scheduler - Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Documentation=https://github.com/kubernetes/kubernetes Requires=kube-apiserver.service After=kube-apiserver.service [Service] @@ -579,7 +579,7 @@ On the PXE server make and fill in the variables `vi /var/www/html/coreos/pxe-cl content: | [Unit] Description=Kubernetes Proxy - Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Documentation=https://github.com/kubernetes/kubernetes Requires=setup-network-environment.service After=setup-network-environment.service [Service] @@ -595,7 +595,7 @@ On the PXE server make and fill in the variables `vi /var/www/html/coreos/pxe-cl content: | [Unit] Description=Kubernetes Kubelet - Documentation=https://github.com/GoogleCloudPlatform/kubernetes + Documentation=https://github.com/kubernetes/kubernetes Requires=setup-network-environment.service After=setup-network-environment.service [Service] diff --git a/docs/getting-started-guides/gce.md b/docs/getting-started-guides/gce.md index 1ae240f0ba6..cf542d42de9 100644 
--- a/docs/getting-started-guides/gce.md +++ b/docs/getting-started-guides/gce.md @@ -90,7 +90,7 @@ By default, some containers will already be running on your cluster. Containers The script run by the commands above creates a cluster with the name/prefix "kubernetes". It defines one specific cluster config, so you can't run it more than once. -Alternately, you can download and install the latest Kubernetes release from [this page](https://github.com/GoogleCloudPlatform/kubernetes/releases), then run the `/cluster/kube-up.sh` script to start the cluster: +Alternately, you can download and install the latest Kubernetes release from [this page](https://github.com/kubernetes/kubernetes/releases), then run the `/cluster/kube-up.sh` script to start the cluster: ```bash cd kubernetes diff --git a/docs/getting-started-guides/mesos-docker.md b/docs/getting-started-guides/mesos-docker.md index 597841d1956..e56b230bf0a 100644 --- a/docs/getting-started-guides/mesos-docker.md +++ b/docs/getting-started-guides/mesos-docker.md @@ -151,12 +151,12 @@ host machine (mac). 1. Checkout source ``` - git clone https://github.com/GoogleCloudPlatform/kubernetes + git clone https://github.com/kubernetes/kubernetes cd kubernetes ``` By default, that will get you the bleeding edge of master branch. - You may want a [release branch](https://github.com/GoogleCloudPlatform/kubernetes/releases) instead, + You may want a [release branch](https://github.com/kubernetes/kubernetes/releases) instead, if you have trouble with master. 1. Build binaries diff --git a/docs/getting-started-guides/mesos.md b/docs/getting-started-guides/mesos.md index 888447147b9..15539984b47 100644 --- a/docs/getting-started-guides/mesos.md +++ b/docs/getting-started-guides/mesos.md @@ -85,7 +85,7 @@ ssh jclouds@${ip_address_of_master_node} Build Kubernetes-Mesos. ```bash -git clone https://github.com/GoogleCloudPlatform/kubernetes +git clone https://github.com/kubernetes/kubernetes cd kubernetes export KUBERNETES_CONTRIB=mesos make diff --git a/docs/getting-started-guides/rackspace.md b/docs/getting-started-guides/rackspace.md index 9f7a3f08a35..c0066c582bb 100644 --- a/docs/getting-started-guides/rackspace.md +++ b/docs/getting-started-guides/rackspace.md @@ -91,9 +91,9 @@ There is a specific `cluster/rackspace` directory with the scripts for the follo - A number of the items in `config-default.sh` are overridable via environment variables. - For older versions please either: * Sync back to `v0.9` with `git checkout v0.9` - * Download a [snapshot of `v0.9`](https://github.com/GoogleCloudPlatform/kubernetes/archive/v0.9.tar.gz) + * Download a [snapshot of `v0.9`](https://github.com/kubernetes/kubernetes/archive/v0.9.tar.gz) * Sync back to `v0.3` with `git checkout v0.3` - * Download a [snapshot of `v0.3`](https://github.com/GoogleCloudPlatform/kubernetes/archive/v0.3.tar.gz) + * Download a [snapshot of `v0.3`](https://github.com/kubernetes/kubernetes/archive/v0.3.tar.gz) ## Network Design diff --git a/docs/getting-started-guides/scratch.md b/docs/getting-started-guides/scratch.md index 0ac88880870..1a74a3eef21 100644 --- a/docs/getting-started-guides/scratch.md +++ b/docs/getting-started-guides/scratch.md @@ -212,7 +212,7 @@ A Kubernetes binary release includes all the Kubernetes binaries as well as the You can use a Kubernetes binary release (recommended) or build your Kubernetes binaries following the instructions in the [Developer Documentation](../devel/README.md). Only using a binary release is covered in this guide. 
-Download the [latest binary release](https://github.com/GoogleCloudPlatform/kubernetes/releases/latest) and unzip it. +Download the [latest binary release](https://github.com/kubernetes/kubernetes/releases/latest) and unzip it. Then locate `./kubernetes/server/kubernetes-server-linux-amd64.tar.gz` and unzip *that*. Then, within the second set of unzipped files, locate `./kubernetes/server/bin`, which contains all the necessary binaries. @@ -226,7 +226,7 @@ we recommend that you run these as containers, so you need an image to be built. You have several choices for Kubernetes images: - Use images hosted on Google Container Registry (GCR): - e.g `gcr.io/google_containers/hyperkube:$TAG`, where `TAG` is the latest - release tag, which can be found on the [latest releases page](https://github.com/GoogleCloudPlatform/kubernetes/releases/latest). + release tag, which can be found on the [latest releases page](https://github.com/kubernetes/kubernetes/releases/latest). - Ensure $TAG is the same tag as the release tag you are using for kubelet and kube-proxy. - The [hyperkube](../../cmd/hyperkube/) binary is an all in one binary - `hyperkube kubelet ...` runs the kublet, `hyperkube apiserver ...` runs an apiserver, etc. diff --git a/docs/getting-started-guides/ubuntu.md b/docs/getting-started-guides/ubuntu.md index 0ed0cc7719c..3c2c484e02f 100644 --- a/docs/getting-started-guides/ubuntu.md +++ b/docs/getting-started-guides/ubuntu.md @@ -69,7 +69,7 @@ Ubuntu 15 which use systemd instead of upstart. We are working around fixing thi First clone the kubernetes github repo ``` console -$ git clone https://github.com/GoogleCloudPlatform/kubernetes.git +$ git clone https://github.com/kubernetes/kubernetes.git ``` Then download all the needed binaries into given directory (cluster/ubuntu/binaries) diff --git a/docs/getting-started-guides/vagrant.md b/docs/getting-started-guides/vagrant.md index 6eb7cf674e6..49c1828c379 100644 --- a/docs/getting-started-guides/vagrant.md +++ b/docs/getting-started-guides/vagrant.md @@ -72,7 +72,7 @@ export KUBERNETES_PROVIDER=vagrant curl -sS https://get.k8s.io | bash ``` -Alternatively, you can download [Kubernetes release](https://github.com/GoogleCloudPlatform/kubernetes/releases) and extract the archive. To start your local cluster, open a shell and run: +Alternatively, you can download [Kubernetes release](https://github.com/kubernetes/kubernetes/releases) and extract the archive. To start your local cluster, open a shell and run: ```sh cd kubernetes diff --git a/docs/proposals/autoscaling.md b/docs/proposals/autoscaling.md index 9c5ec752261..ea60af7443e 100644 --- a/docs/proposals/autoscaling.md +++ b/docs/proposals/autoscaling.md @@ -47,7 +47,7 @@ done automatically based on statistical analysis and thresholds. 
* Provide a concrete proposal for implementing auto-scaling pods within Kubernetes * Implementation proposal should be in line with current discussions in existing issues: * Scale verb - [1629](http://issue.k8s.io/1629) - * Config conflicts - [Config](https://github.com/GoogleCloudPlatform/kubernetes/blob/c7cb991987193d4ca33544137a5cb7d0292cf7df/docs/config.md#automated-re-configuration-processes) + * Config conflicts - [Config](https://github.com/kubernetes/kubernetes/blob/c7cb991987193d4ca33544137a5cb7d0292cf7df/docs/config.md#automated-re-configuration-processes) * Rolling updates - [1353](http://issue.k8s.io/1353) * Multiple scalable types - [1624](http://issue.k8s.io/1624) diff --git a/docs/proposals/deployment.md b/docs/proposals/deployment.md index 0a79ca860db..6819acee5b0 100644 --- a/docs/proposals/deployment.md +++ b/docs/proposals/deployment.md @@ -260,7 +260,7 @@ Apart from the above, we want to add support for the following: ## References -- https://github.com/GoogleCloudPlatform/kubernetes/issues/1743 has most of the +- https://github.com/kubernetes/kubernetes/issues/1743 has most of the discussion that resulted in this proposal. diff --git a/docs/proposals/horizontal-pod-autoscaler.md b/docs/proposals/horizontal-pod-autoscaler.md index c10f54f7881..6ae84532fc7 100644 --- a/docs/proposals/horizontal-pod-autoscaler.md +++ b/docs/proposals/horizontal-pod-autoscaler.md @@ -61,7 +61,7 @@ HorizontalPodAutoscaler object will be bound with exactly one Scale subresource autoscaling associated replication controller/deployment through it. The main advantage of such approach is that whenever we introduce another type we want to auto-scale, we just need to implement Scale subresource for it (w/o modifying autoscaler code or API). -The wider discussion regarding Scale took place in [#1629](https://github.com/GoogleCloudPlatform/kubernetes/issues/1629). +The wider discussion regarding Scale took place in [#1629](https://github.com/kubernetes/kubernetes/issues/1629). Scale subresource will be present in API for replication controller or deployment under the following paths: @@ -192,7 +192,7 @@ The autoscaler will be implemented as a control loop. It will periodically (e.g.: every 1 minute) query pods described by ```Status.PodSelector``` of Scale subresource, and check their average CPU or memory usage from the last 1 minute (there will be API on master for this purpose, see -[#11951](https://github.com/GoogleCloudPlatform/kubernetes/issues/11951). +[#11951](https://github.com/kubernetes/kubernetes/issues/11951). Then, it will compare the current CPU or memory consumption with the Target, and adjust the count of the Scale if needed to match the target (preserving condition: MinCount <= Count <= MaxCount). @@ -265,9 +265,9 @@ Our design is in general compatible with them. and then turned-on when there is a demand for them. When a request to service with no pods arrives, kube-proxy will generate an event for autoscaler to create a new pod. - Discussed in [#3247](https://github.com/GoogleCloudPlatform/kubernetes/issues/3247). + Discussed in [#3247](https://github.com/kubernetes/kubernetes/issues/3247). * When scaling down, make more educated decision which pods to kill (e.g.: if two or more pods are on the same node, kill one of them). - Discussed in [#4301](https://github.com/GoogleCloudPlatform/kubernetes/issues/4301). + Discussed in [#4301](https://github.com/kubernetes/kubernetes/issues/4301). 
* Allow rule based autoscaling: instead of specifying the target value for metric, specify a rule, e.g.: “if average CPU consumption of pod is higher than 80% add two more replicas”. This approach was initially suggested in diff --git a/docs/proposals/job.md b/docs/proposals/job.md index 57717ea58a0..198a1437fbd 100644 --- a/docs/proposals/job.md +++ b/docs/proposals/job.md @@ -40,8 +40,8 @@ for managing pod(s) that require running once to completion even if the machine the pod is running on fails, in contrast to what ReplicationController currently offers. Several existing issues and PRs were already created regarding that particular subject: -* Job Controller [#1624](https://github.com/GoogleCloudPlatform/kubernetes/issues/1624) -* New Job resource [#7380](https://github.com/GoogleCloudPlatform/kubernetes/pull/7380) +* Job Controller [#1624](https://github.com/kubernetes/kubernetes/issues/1624) +* New Job resource [#7380](https://github.com/kubernetes/kubernetes/pull/7380) ## Use Cases @@ -181,7 +181,7 @@ Below are the possible future extensions to the Job controller: * Be able to limit the execution time for a job, similarly to ActiveDeadlineSeconds for Pods. * Be able to create a chain of jobs dependent one on another. * Be able to specify the work each of the workers should execute (see type 1 from - [this comment](https://github.com/GoogleCloudPlatform/kubernetes/issues/1624#issuecomment-97622142)) + [this comment](https://github.com/kubernetes/kubernetes/issues/1624#issuecomment-97622142)) * Be able to inspect Pods running a Job, especially after a Job has finished, e.g. by providing pointers to Pods in the JobStatus ([see comment](https://github.com/kubernetes/kubernetes/pull/11746/files#r37142628)). diff --git a/docs/roadmap.md b/docs/roadmap.md index 38c8d98bdec..2b01a0c4e77 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -35,7 +35,7 @@ Documentation for other releases can be found at We're in the process of prioritizing changes to be made after 1.0. -Please watch the [Github milestones] (https://github.com/GoogleCloudPlatform/kubernetes/milestones) for our future plans. +Please watch the [Github milestones] (https://github.com/kubernetes/kubernetes/milestones) for our future plans. diff --git a/docs/user-guide/application-troubleshooting.md b/docs/user-guide/application-troubleshooting.md index 54cb40fde7d..abe7778fc32 100644 --- a/docs/user-guide/application-troubleshooting.md +++ b/docs/user-guide/application-troubleshooting.md @@ -58,7 +58,7 @@ This is *not* a guide for people who want to debug their cluster. 
For that you ## FAQ -Users are highly encouraged to check out our [FAQ](https://github.com/GoogleCloudPlatform/kubernetes/wiki/User-FAQ) +Users are highly encouraged to check out our [FAQ](https://github.com/kubernetes/kubernetes/wiki/User-FAQ) ## Diagnosing the problem @@ -152,7 +152,7 @@ If you misspelled `command` as `commnd` then will give an error like this: ``` I0805 10:43:25.129850 46757 schema.go:126] unknown field: commnd -I0805 10:43:25.129973 46757 schema.go:129] this may be a false alarm, see https://github.com/GoogleCloudPlatform/kubernetes/issues/6842 +I0805 10:43:25.129973 46757 schema.go:129] this may be a false alarm, see https://github.com/kubernetes/kubernetes/issues/6842 pods/mypod ``` diff --git a/docs/user-guide/compute-resources.md b/docs/user-guide/compute-resources.md index 8e961d35e6e..0ff1442daa4 100644 --- a/docs/user-guide/compute-resources.md +++ b/docs/user-guide/compute-resources.md @@ -255,7 +255,7 @@ You can call `get pod` with the `-o template -t ...` option to fetch the status ```console [13:59:01] $ ./cluster/kubectl.sh get pod -o template -t '{{range.status.containerStatuses}}{{"Container Name: "}}{{.name}}{{"\r\nLastState: "}}{{.lastState}}{{end}}' simmemleak-60xbc Container Name: simmemleak -LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-07T20:58:43Z finishedAt:2015-07-07T20:58:43Z containerID:docker://0e4095bba1feccdfe7ef9fb6ebffe972b4b14285d5acdec6f0d3ae8a22fad8b2]][13:59:03] clusterScaleDoc ~/go/src/github.com/GoogleCloudPlatform/kubernetes $ +LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-07T20:58:43Z finishedAt:2015-07-07T20:58:43Z containerID:docker://0e4095bba1feccdfe7ef9fb6ebffe972b4b14285d5acdec6f0d3ae8a22fad8b2]][13:59:03] clusterScaleDoc ~/go/src/github.com/kubernetes/kubernetes $ ``` We can see that this container was terminated because `reason:OOM Killed`, where *OOM* stands for Out Of Memory. diff --git a/docs/user-guide/configuring-containers.md b/docs/user-guide/configuring-containers.md index 493b4d36d7e..76bc72fd00f 100644 --- a/docs/user-guide/configuring-containers.md +++ b/docs/user-guide/configuring-containers.md @@ -109,12 +109,12 @@ pods/hello-world `kubectl create --validate` currently warns about problems it detects, but creates the resource anyway, unless a required field is absent or a field value is invalid. Unknown API fields are ignored, so be careful. This pod was created, but with no `command`, which is an optional field, since the image may specify an `Entrypoint`. View the [Pod API -object](https://htmlpreview.github.io/?https://github.com/GoogleCloudPlatform/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_pod) +object](https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_pod) to see the list of valid fields. ## Environment variables and variable expansion -Kubernetes [does not automatically run commands in a shell](https://github.com/GoogleCloudPlatform/kubernetes/wiki/User-FAQ#use-of-environment-variables-on-the-command-line) (not all images contain shells). If you would like to run your command in a shell, such as to expand environment variables (specified using `env`), you could do the following: +Kubernetes [does not automatically run commands in a shell](https://github.com/kubernetes/kubernetes/wiki/User-FAQ#use-of-environment-variables-on-the-command-line) (not all images contain shells). 
If you would like to run your command in a shell, such as to expand environment variables (specified using `env`), you could do the following: ```yaml apiVersion: v1 diff --git a/docs/user-guide/connecting-applications.md b/docs/user-guide/connecting-applications.md index fca07b946a8..4739c32b37a 100644 --- a/docs/user-guide/connecting-applications.md +++ b/docs/user-guide/connecting-applications.md @@ -128,7 +128,7 @@ spec: app: nginx ``` -This specification will create a Service which targets TCP port 80 on any Pod with the `app=nginx` label, and expose it on an abstracted Service port (`targetPort`: is the port the container accepts traffic on, `port`: is the abstracted Service port, which can be any port other pods use to access the Service). View [service API object](https://htmlpreview.github.io/?https://github.com/GoogleCloudPlatform/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_service) to see the list of supported fields in service definition. +This specification will create a Service which targets TCP port 80 on any Pod with the `app=nginx` label, and expose it on an abstracted Service port (`targetPort`: is the port the container accepts traffic on, `port`: is the abstracted Service port, which can be any port other pods use to access the Service). View [service API object](https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_service) to see the list of supported fields in service definition. Check your Service: ```console diff --git a/docs/user-guide/debugging-services.md b/docs/user-guide/debugging-services.md index 1750f2fec3f..a439f6cff38 100644 --- a/docs/user-guide/debugging-services.md +++ b/docs/user-guide/debugging-services.md @@ -548,7 +548,7 @@ us know, so we can help investigate! Contact us on [IRC](http://webchat.freenode.net/?channels=google-containers) or [email](https://groups.google.com/forum/#!forum/google-containers) or -[GitHub](https://github.com/GoogleCloudPlatform/kubernetes). +[GitHub](https://github.com/kubernetes/kubernetes). ## More information diff --git a/docs/user-guide/deploying-applications.md b/docs/user-guide/deploying-applications.md index a8fa89357ad..6d9af19284d 100644 --- a/docs/user-guide/deploying-applications.md +++ b/docs/user-guide/deploying-applications.md @@ -76,7 +76,7 @@ spec: Some differences compared to specifying just a pod are that the `kind` is `ReplicationController`, the number of `replicas` desired is specified, and the pod specification is under the `template` field. The names of the pods don’t need to be specified explicitly because they are generated from the name of the replication controller. View the [replication controller API -object](https://htmlpreview.github.io/?https://github.com/GoogleCloudPlatform/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_replicationcontroller) +object](https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_replicationcontroller) to view the list of supported fields. 
This replication controller can be created using `create`, just as with pods: diff --git a/docs/user-guide/node-selection/README.md b/docs/user-guide/node-selection/README.md index d4e21841502..c97d1e24140 100644 --- a/docs/user-guide/node-selection/README.md +++ b/docs/user-guide/node-selection/README.md @@ -37,7 +37,7 @@ This example shows how to assign a [pod](../pods.md) to a specific [node](../../ ### Step Zero: Prerequisites -This example assumes that you have a basic understanding of Kubernetes pods and that you have [turned up a Kubernetes cluster](https://github.com/GoogleCloudPlatform/kubernetes#documentation). +This example assumes that you have a basic understanding of Kubernetes pods and that you have [turned up a Kubernetes cluster](https://github.com/kubernetes/kubernetes#documentation). ### Step One: Attach label to the node @@ -45,7 +45,7 @@ Run `kubectl get nodes` to get the names of your cluster's nodes. Pick out the o Then, to add a label to the node you've chosen, run `kubectl label nodes =`. For example, if my node name is 'kubernetes-foo-node-1.c.a-robinson.internal' and my desired label is 'disktype=ssd', then I can run `kubectl label nodes kubernetes-foo-node-1.c.a-robinson.internal disktype=ssd`. -If this fails with an "invalid command" error, you're likely using an older version of kubectl that doesn't have the `label` command. In that case, see the [previous version](https://github.com/GoogleCloudPlatform/kubernetes/blob/a053dbc313572ed60d89dae9821ecab8bfd676dc/examples/node-selection/README.md) of this guide for instructions on how to manually set labels on a node. +If this fails with an "invalid command" error, you're likely using an older version of kubectl that doesn't have the `label` command. In that case, see the [previous version](https://github.com/kubernetes/kubernetes/blob/a053dbc313572ed60d89dae9821ecab8bfd676dc/examples/node-selection/README.md) of this guide for instructions on how to manually set labels on a node. Also, note that label keys must be in the form of DNS labels (as described in the [identifiers doc](../../../docs/design/identifiers.md)), meaning that they are not allowed to contain any upper-case letters. diff --git a/docs/user-guide/pods.md b/docs/user-guide/pods.md index 4d6950688b2..ac2b6cecd16 100644 --- a/docs/user-guide/pods.md +++ b/docs/user-guide/pods.md @@ -114,7 +114,7 @@ The current best practice for pets is to create a replication controller with `r Pod is a top-level resource in the kubernetes REST API. More details about the API object can be found at: [Pod API -object](https://htmlpreview.github.io/?https://github.com/GoogleCloudPlatform/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_pod). +object](https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_pod). diff --git a/docs/user-guide/prereqs.md b/docs/user-guide/prereqs.md index c0ce1ba03e0..3918364cc41 100644 --- a/docs/user-guide/prereqs.md +++ b/docs/user-guide/prereqs.md @@ -37,7 +37,7 @@ To deploy and manage applications on Kubernetes, you’ll use the Kubernetes com ## Installing kubectl -If you downloaded a pre-compiled [release](https://github.com/GoogleCloudPlatform/kubernetes/releases), kubectl should be under `platforms//` from the tar bundle. +If you downloaded a pre-compiled [release](https://github.com/kubernetes/kubernetes/releases), kubectl should be under `platforms//` from the tar bundle. 
If you built from source, kubectl should be either under `_output/local/bin//` or `_output/dockerized/bin//`. diff --git a/docs/user-guide/replication-controller.md b/docs/user-guide/replication-controller.md index 00033ba3834..d0d968104d8 100644 --- a/docs/user-guide/replication-controller.md +++ b/docs/user-guide/replication-controller.md @@ -122,7 +122,7 @@ For instance, a service might target all pods with `tier in (frontend), environm Replication controller is a top-level resource in the kubernetes REST API. More details about the API object can be found at: [ReplicationController API -object](https://htmlpreview.github.io/?https://github.com/GoogleCloudPlatform/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_replicationcontroller). +object](https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_replicationcontroller). diff --git a/docs/user-guide/services.md b/docs/user-guide/services.md index 99242d6a476..2badff776ae 100644 --- a/docs/user-guide/services.md +++ b/docs/user-guide/services.md @@ -545,7 +545,7 @@ of which `Pods` they are actually accessing. Service is a top-level resource in the kubernetes REST API. More details about the API object can be found at: [Service API -object](https://htmlpreview.github.io/?https://github.com/GoogleCloudPlatform/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_service). +object](https://htmlpreview.github.io/?https://github.com/kubernetes/kubernetes/HEAD/docs/api-reference/definitions.html#_v1_service). diff --git a/docs/user-guide/walkthrough/README.md b/docs/user-guide/walkthrough/README.md index 14418d25c7e..fd5e5f92f01 100644 --- a/docs/user-guide/walkthrough/README.md +++ b/docs/user-guide/walkthrough/README.md @@ -35,7 +35,7 @@ Documentation for other releases can be found at For Kubernetes 101, we will cover kubectl, pods, volumes, and multiple containers -In order for the kubectl usage examples to work, make sure you have an examples directory locally, either from [a release](https://github.com/GoogleCloudPlatform/kubernetes/releases) or [the source](https://github.com/GoogleCloudPlatform/kubernetes). +In order for the kubectl usage examples to work, make sure you have an examples directory locally, either from [a release](https://github.com/kubernetes/kubernetes/releases) or [the source](https://github.com/kubernetes/kubernetes). **Table of Contents** diff --git a/docs/user-guide/walkthrough/k8s201.md b/docs/user-guide/walkthrough/k8s201.md index fa416d513ab..46b99f6d036 100644 --- a/docs/user-guide/walkthrough/k8s201.md +++ b/docs/user-guide/walkthrough/k8s201.md @@ -37,7 +37,7 @@ If you went through [Kubernetes 101](README.md), you learned about kubectl, pods For Kubernetes 201, we will pick up where 101 left off and cover some slightly more advanced topics in Kubernetes, related to application productionization, deployment and scaling. -In order for the kubectl usage examples to work, make sure you have an examples directory locally, either from [a release](https://github.com/GoogleCloudPlatform/kubernetes/releases) or [the source](https://github.com/GoogleCloudPlatform/kubernetes). +In order for the kubectl usage examples to work, make sure you have an examples directory locally, either from [a release](https://github.com/kubernetes/kubernetes/releases) or [the source](https://github.com/kubernetes/kubernetes). **Table of Contents** @@ -235,7 +235,7 @@ For more information, see [Services](../services.md). 
## Health Checking -When I write code it never crashes, right? Sadly the [Kubernetes issues list](https://github.com/GoogleCloudPlatform/kubernetes/issues) indicates otherwise... +When I write code it never crashes, right? Sadly the [Kubernetes issues list](https://github.com/kubernetes/kubernetes/issues) indicates otherwise... Rather than trying to write bug-free code, a better approach is to use a management system to perform periodic health checking and repair of your application. That way a system outside of your application itself is responsible for monitoring the diff --git a/examples/javaee/README.md b/examples/javaee/README.md index 10f76263682..b5f609c3057 100644 --- a/examples/javaee/README.md +++ b/examples/javaee/README.md @@ -37,7 +37,7 @@ The following document describes the deployment of a Java EE application using [ ### Prerequisites -https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/user-guide/prereqs.md +https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/prereqs.md ### Start MySQL Pod diff --git a/logo_usage_guidelines.md b/logo_usage_guidelines.md index 1dba3b3413e..12661a5fd7d 100644 --- a/logo_usage_guidelines.md +++ b/logo_usage_guidelines.md @@ -2,17 +2,17 @@ These guidelines provide you with guidance for using the Kubernetes logo. You can use the logo on your website or in print without pre-approval, provided you follow these basic guidelines. -You may display, modify or use the Kubernetes logo only in connection with compliant implementations of Kubernetes and related uses in the following ways. A compliant implementation is an implementation of the unmodified Google version of Kubernetes found at https://github.com/GoogleCloudPlatform/kubernetes and compatible branches thereof, together with published specifications, APIs and operational patterns.  Acceptable related uses include display, modify or use of the Kubernetes logo in connection with your compliant implementation, your integration with a compliant implementation, your support for a compliant implementation, your Kubernetes-compatible product, or in collateral, presentations, and marketing materials relating to compliant implementations of Kubernetes. +You may display, modify or use the Kubernetes logo only in connection with compliant implementations of Kubernetes and related uses in the following ways. A compliant implementation is an implementation of the unmodified Google version of Kubernetes found at https://github.com/kubernetes/kubernetes and compatible branches thereof, together with published specifications, APIs and operational patterns.  Acceptable related uses include display, modify or use of the Kubernetes logo in connection with your compliant implementation, your integration with a compliant implementation, your support for a compliant implementation, your Kubernetes-compatible product, or in collateral, presentations, and marketing materials relating to compliant implementations of Kubernetes. Use of the Kubernetes logo or other Google brands in ways not expressly covered by this document is not allowed without prior written consent from Google -- see Google's [Guidelines for Third Party Use of Google Brand Features](http://www.google.com/intl/en/permissions/guidelines.html) for more information. 
## Links to logo images -[SVG format](https://github.com/GoogleCloudPlatform/kubernetes/raw/master/logo.svg) +[SVG format](https://github.com/kubernetes/kubernetes/raw/master/logo.svg) -[PNG format](https://github.com/GoogleCloudPlatform/kubernetes/raw/master/logo.png) +[PNG format](https://github.com/kubernetes/kubernetes/raw/master/logo.png) -[PDF format](https://github.com/GoogleCloudPlatform/kubernetes/raw/master/logo.pdf) +[PDF format](https://github.com/kubernetes/kubernetes/raw/master/logo.pdf) [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/logo_usage_guidelines.md?pixel)]() diff --git a/third_party/swagger-ui/README.md b/third_party/swagger-ui/README.md index a8f7687335c..ac36ef48a7b 100644 --- a/third_party/swagger-ui/README.md +++ b/third_party/swagger-ui/README.md @@ -15,7 +15,7 @@ https://github.com/swagger-api/swagger-ui#how-to-use-it ## Local Modifications - Updated the url in index.html to "../../swaggerapi" as per instructions at: https://github.com/swagger-api/swagger-ui#how-to-use-it -- Modified swagger-ui.js to list resources and operations in sorted order: https://github.com/GoogleCloudPlatform/kubernetes/pull/3421 +- Modified swagger-ui.js to list resources and operations in sorted order: https://github.com/kubernetes/kubernetes/pull/3421 - Set supportedSubmitMethods: [] in index.html to remove "Try it out" buttons. LICENSE file has been created for compliance purposes. From 44e74b4c20e8e6562dae23931f5872a34364d554 Mon Sep 17 00:00:00 2001 From: derekwaynecarr Date: Wed, 2 Sep 2015 17:02:23 -0400 Subject: [PATCH 030/101] Disable swap memory on vagrant nodes to support qos properly --- cluster/vagrant/provision-minion.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cluster/vagrant/provision-minion.sh b/cluster/vagrant/provision-minion.sh index 68c7ad6c0f9..cff40ae0428 100755 --- a/cluster/vagrant/provision-minion.sh +++ b/cluster/vagrant/provision-minion.sh @@ -155,6 +155,10 @@ grains: docker_opts: '$(echo "$DOCKER_OPTS" | sed -e "s/'/''/g")' EOF +# QoS support requires that swap memory is disabled on each of the minions +echo "Disable swap memory to ensure proper QoS" +swapoff -a + # we will run provision to update code each time we test, so we do not want to do salt install each time if ! 
which salt-minion >/dev/null 2>&1; then # Install Salt From 535c509dd39dfbaed77d72950e8a37414edb3cf7 Mon Sep 17 00:00:00 2001 From: Brendan Burns Date: Wed, 26 Aug 2015 21:20:22 -0700 Subject: [PATCH 031/101] Update go-restful --- Godeps/Godeps.json | 4 +- .../github.com/emicklei/go-restful/CHANGES.md | 5 ++ .../github.com/emicklei/go-restful/README.md | 2 +- .../emicklei/go-restful/compress.go | 14 ++-- .../emicklei/go-restful/compress_test.go | 74 +++++++++++++++++++ .../emicklei/go-restful/compressor_pools.go | 63 ++++++++++++++++ .../emicklei/go-restful/container.go | 38 +++++++++- .../emicklei/go-restful/parameter.go | 8 +- .../github.com/emicklei/go-restful/request.go | 42 ++++++++--- .../emicklei/go-restful/response.go | 30 ++++++-- .../emicklei/go-restful/response_test.go | 28 +++---- .../go-restful/swagger/swagger_webservice.go | 9 ++- 12 files changed, 268 insertions(+), 49 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/emicklei/go-restful/compressor_pools.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 1a0e19fb676..d967a12357f 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -215,8 +215,8 @@ }, { "ImportPath": "github.com/emicklei/go-restful", - "Comment": "v1.1.3-76-gbfd6ff2", - "Rev": "bfd6ff29d2961031cec64346a92bae4cde96c868" + "Comment": "v1.1.3-98-g1f9a0ee", + "Rev": "1f9a0ee00ff93717a275e15b30cf7df356255877" }, { "ImportPath": "github.com/evanphx/json-patch", diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/CHANGES.md b/Godeps/_workspace/src/github.com/emicklei/go-restful/CHANGES.md index b34d43b1ca6..1d209676d9f 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/CHANGES.md +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/CHANGES.md @@ -1,5 +1,10 @@ Change history of go-restful = +2015-08-06 +- add support for reading entities from compressed request content +- use sync.Pool for compressors of http response and request body +- add Description to Parameter for documentation in Swagger UI + 2015-03-20 - add configurable logging diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/README.md b/Godeps/_workspace/src/github.com/emicklei/go-restful/README.md index 6e4dedf512b..b20603fb395 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/README.md +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/README.md @@ -47,7 +47,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo - Filters for intercepting the request → response flow on Service or Route level - Request-scoped variables using attributes - Containers for WebServices on different HTTP endpoints -- Content encoding (gzip,deflate) of responses +- Content encoding (gzip,deflate) of request and response payloads - Automatic responses on OPTIONS (using a filter) - Automatic CORS request handling (using a filter) - API declaration for Swagger UI (see swagger package) diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/compress.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/compress.go index c4dcca000f8..4493f4db2ec 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/compress.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/compress.go @@ -73,15 +73,13 @@ func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding strin c.writer = httpWriter var err error if ENCODING_GZIP == encoding { - c.compressor, err = gzip.NewWriterLevel(httpWriter, gzip.BestSpeed) - if err != nil { - return nil, err - } + w := 
GzipWriterPool.Get().(*gzip.Writer) + w.Reset(httpWriter) + c.compressor = w } else if ENCODING_DEFLATE == encoding { - c.compressor, err = zlib.NewWriterLevel(httpWriter, zlib.BestSpeed) - if err != nil { - return nil, err - } + w := ZlibWriterPool.Get().(*zlib.Writer) + w.Reset(httpWriter) + c.compressor = w } else { return nil, errors.New("Unknown encoding:" + encoding) } diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/compress_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/compress_test.go index 332fb221974..84a93c3fc90 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/compress_test.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/compress_test.go @@ -1,11 +1,17 @@ package restful import ( + "bytes" + "compress/gzip" + "compress/zlib" + "io" + "io/ioutil" "net/http" "net/http/httptest" "testing" ) +// go test -v -test.run TestGzip ...restful func TestGzip(t *testing.T) { EnableContentEncoding = true httpRequest, _ := http.NewRequest("GET", "/test", nil) @@ -27,6 +33,17 @@ func TestGzip(t *testing.T) { if httpWriter.Header().Get("Content-Encoding") != "gzip" { t.Fatal("Missing gzip header") } + reader, err := gzip.NewReader(httpWriter.Body) + if err != nil { + t.Fatal(err.Error()) + } + data, err := ioutil.ReadAll(reader) + if err != nil { + t.Fatal(err.Error()) + } + if got, want := string(data), "Hello World"; got != want { + t.Errorf("got %v want %v", got, want) + } } func TestDeflate(t *testing.T) { @@ -50,4 +67,61 @@ func TestDeflate(t *testing.T) { if httpWriter.Header().Get("Content-Encoding") != "deflate" { t.Fatal("Missing deflate header") } + reader, err := zlib.NewReader(httpWriter.Body) + if err != nil { + t.Fatal(err.Error()) + } + data, err := ioutil.ReadAll(reader) + if err != nil { + t.Fatal(err.Error()) + } + if got, want := string(data), "Hello World"; got != want { + t.Errorf("got %v want %v", got, want) + } +} + +func TestGzipDecompressRequestBody(t *testing.T) { + b := new(bytes.Buffer) + w := newGzipWriter() + w.Reset(b) + io.WriteString(w, `{"msg":"hi"}`) + w.Flush() + w.Close() + + req := new(Request) + httpRequest, _ := http.NewRequest("GET", "/", bytes.NewReader(b.Bytes())) + httpRequest.Header.Set("Content-Type", "application/json") + httpRequest.Header.Set("Content-Encoding", "gzip") + req.Request = httpRequest + + doCacheReadEntityBytes = false + doc := make(map[string]interface{}) + req.ReadEntity(&doc) + + if got, want := doc["msg"], "hi"; got != want { + t.Errorf("got %v want %v", got, want) + } +} + +func TestZlibDecompressRequestBody(t *testing.T) { + b := new(bytes.Buffer) + w := newZlibWriter() + w.Reset(b) + io.WriteString(w, `{"msg":"hi"}`) + w.Flush() + w.Close() + + req := new(Request) + httpRequest, _ := http.NewRequest("GET", "/", bytes.NewReader(b.Bytes())) + httpRequest.Header.Set("Content-Type", "application/json") + httpRequest.Header.Set("Content-Encoding", "deflate") + req.Request = httpRequest + + doCacheReadEntityBytes = false + doc := make(map[string]interface{}) + req.ReadEntity(&doc) + + if got, want := doc["msg"], "hi"; got != want { + t.Errorf("got %v want %v", got, want) + } } diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/compressor_pools.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/compressor_pools.go new file mode 100644 index 00000000000..5ee18296054 --- /dev/null +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/compressor_pools.go @@ -0,0 +1,63 @@ +package restful + +import ( + "bytes" + "compress/gzip" + "compress/zlib" + 
"sync" +) + +// GzipWriterPool is used to get reusable zippers. +// The Get() result must be type asserted to *gzip.Writer. +var GzipWriterPool = &sync.Pool{ + New: func() interface{} { + return newGzipWriter() + }, +} + +func newGzipWriter() *gzip.Writer { + // create with an empty bytes writer; it will be replaced before using the gzipWriter + writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) + if err != nil { + panic(err.Error()) + } + return writer +} + +// GzipReaderPool is used to get reusable zippers. +// The Get() result must be type asserted to *gzip.Reader. +var GzipReaderPool = &sync.Pool{ + New: func() interface{} { + return newGzipReader() + }, +} + +func newGzipReader() *gzip.Reader { + // create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader + w := GzipWriterPool.Get().(*gzip.Writer) + b := new(bytes.Buffer) + w.Reset(b) + w.Flush() + w.Close() + reader, err := gzip.NewReader(bytes.NewReader(b.Bytes())) + if err != nil { + panic(err.Error()) + } + return reader +} + +// ZlibWriterPool is used to get reusable zippers. +// The Get() result must be type asserted to *zlib.Writer. +var ZlibWriterPool = &sync.Pool{ + New: func() interface{} { + return newZlibWriter() + }, +} + +func newZlibWriter() *zlib.Writer { + writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed) + if err != nil { + panic(err.Error()) + } + return writer +} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/container.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/container.go index fd2e12ca44c..840d14b31e3 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/container.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/container.go @@ -11,6 +11,7 @@ import ( "os" "runtime" "strings" + "sync" "github.com/emicklei/go-restful/log" ) @@ -18,6 +19,7 @@ import ( // Container holds a collection of WebServices and a http.ServeMux to dispatch http requests. // The requests are further dispatched to routes of WebServices using a RouteSelector type Container struct { + webServicesLock sync.RWMutex webServices []*WebService ServeMux *http.ServeMux isRegisteredOnRoot bool @@ -83,6 +85,8 @@ func (c *Container) EnableContentEncoding(enabled bool) { // Add a WebService to the Container. It will detect duplicate root paths and panic in that case. func (c *Container) Add(service *WebService) *Container { + c.webServicesLock.Lock() + defer c.webServicesLock.Unlock() // If registered on root then no additional specific mapping is needed if !c.isRegisteredOnRoot { pattern := c.fixedPrefixPath(service.RootPath()) @@ -122,6 +126,19 @@ func (c *Container) Add(service *WebService) *Container { return c } +func (c *Container) Remove(ws *WebService) error { + c.webServicesLock.Lock() + defer c.webServicesLock.Unlock() + newServices := []*WebService{} + for ix := range c.webServices { + if c.webServices[ix].rootPath != ws.rootPath { + newServices = append(newServices, c.webServices[ix]) + } + } + c.webServices = newServices + return nil +} + // logStackOnRecover is the default RecoverHandleFunction and is called // when DoNotRecover is false and the recoverHandleFunc is not set for the container. // Default implementation logs the stacktrace and writes the stacktrace on the response. 
@@ -190,9 +207,16 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R } } // Find best match Route ; err is non nil if no match was found - webService, route, err := c.router.SelectRoute( - c.webServices, - httpRequest) + var webService *WebService + var route *Route + var err error + func() { + c.webServicesLock.RLock() + defer c.webServicesLock.RUnlock() + webService, route, err = c.router.SelectRoute( + c.webServices, + httpRequest) + }() if err != nil { // a non-200 response has already been written // run container filters anyway ; they should not touch the response... @@ -272,7 +296,13 @@ func (c *Container) Filter(filter FilterFunction) { // RegisteredWebServices returns the collections of added WebServices func (c Container) RegisteredWebServices() []*WebService { - return c.webServices + c.webServicesLock.RLock() + defer c.webServicesLock.RUnlock() + result := make([]*WebService, len(c.webServices)) + for ix := range c.webServices { + result[ix] = c.webServices[ix] + } + return result } // computeAllowedMethods returns a list of HTTP methods that are valid for a Request diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/parameter.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/parameter.go index 05a9987600f..a836120b5d5 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/parameter.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/parameter.go @@ -95,8 +95,14 @@ func (p *Parameter) DataType(typeName string) *Parameter { return p } -// DefaultValue sets the default value field and returnw the receiver +// DefaultValue sets the default value field and returns the receiver func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter { p.data.DefaultValue = stringRepresentation return p } + +// Description sets the description value field and returns the receiver +func (p *Parameter) Description(doc string) *Parameter { + p.data.Description = doc + return p +} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/request.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/request.go index 00a069f094d..e944b8d000f 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/request.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/request.go @@ -6,6 +6,8 @@ package restful import ( "bytes" + "compress/gzip" + "compress/zlib" "encoding/json" "encoding/xml" "io" @@ -82,15 +84,17 @@ func (r *Request) HeaderParameter(name string) string { // ReadEntity checks the Accept header and reads the content into the entityPointer // May be called multiple times in the request-response flow func (r *Request) ReadEntity(entityPointer interface{}) (err error) { + defer r.Request.Body.Close() contentType := r.Request.Header.Get(HEADER_ContentType) + contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding) if doCacheReadEntityBytes { - return r.cachingReadEntity(contentType, entityPointer) + return r.cachingReadEntity(contentType, contentEncoding, entityPointer) } // unmarshall directly from request Body - return r.decodeEntity(r.Request.Body, contentType, entityPointer) + return r.decodeEntity(r.Request.Body, contentType, contentEncoding, entityPointer) } -func (r *Request) cachingReadEntity(contentType string, entityPointer interface{}) (err error) { +func (r *Request) cachingReadEntity(contentType string, contentEncoding string, entityPointer interface{}) (err error) { var buffer []byte if r.bodyContent != nil { buffer = *r.bodyContent @@ -101,22 +105,38 @@ func (r 
*Request) cachingReadEntity(contentType string, entityPointer interface{ } r.bodyContent = &buffer } - return r.decodeEntity(bytes.NewReader(buffer), contentType, entityPointer) + return r.decodeEntity(bytes.NewReader(buffer), contentType, contentEncoding, entityPointer) } -func (r *Request) decodeEntity(reader io.Reader, contentType string, entityPointer interface{}) (err error) { - if strings.Contains(contentType, MIME_XML) { - return xml.NewDecoder(reader).Decode(entityPointer) +func (r *Request) decodeEntity(reader io.Reader, contentType string, contentEncoding string, entityPointer interface{}) (err error) { + entityReader := reader + + // check if the request body needs decompression + if ENCODING_GZIP == contentEncoding { + gzipReader := GzipReaderPool.Get().(*gzip.Reader) + gzipReader.Reset(reader) + entityReader = gzipReader + } else if ENCODING_DEFLATE == contentEncoding { + zlibReader, err := zlib.NewReader(reader) + if err != nil { + return err + } + entityReader = zlibReader } + + // decode JSON if strings.Contains(contentType, MIME_JSON) || MIME_JSON == defaultRequestContentType { - decoder := json.NewDecoder(reader) + decoder := json.NewDecoder(entityReader) decoder.UseNumber() return decoder.Decode(entityPointer) } - if MIME_XML == defaultRequestContentType { - return xml.NewDecoder(reader).Decode(entityPointer) + + // decode XML + if strings.Contains(contentType, MIME_XML) || MIME_XML == defaultRequestContentType { + return xml.NewDecoder(entityReader).Decode(entityPointer) } - return NewError(400, "Unable to unmarshal content of type:"+contentType) + + return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType) } // SetAttribute adds or replaces the attribute with the given value. diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/response.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/response.go index a33f14248c5..eb5a023563a 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/response.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/response.go @@ -28,11 +28,12 @@ type Response struct { statusCode int // HTTP status code that has been written explicity (if zero then net/http has written 200) contentLength int // number of bytes written for the response body prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses. + err error // err property is kept when WriteError is called } // Creates a new response based on a http ResponseWriter. func NewResponse(httpWriter http.ResponseWriter) *Response { - return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses} // empty content-types + return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types } // If Accept header matching fails, fall back to this type, otherwise @@ -182,6 +183,7 @@ func (r *Response) WriteJson(value interface{}, contentType string) error { // WriteError write the http status and the error string on the response. func (r *Response) WriteError(httpStatus int, err error) error { + r.err = err return r.WriteErrorString(httpStatus, err.Error()) } @@ -203,21 +205,30 @@ func (r *Response) WriteErrorString(status int, errorReason string) error { // WriteHeader is overridden to remember the Status Code that has been written. 
// Note that using this method, the status value is only written when -// - calling WriteEntity, -// - or directly calling WriteAsXml or WriteAsJson, -// - or if the status is one for which no response is allowed (i.e., -// 204 (http.StatusNoContent) or 304 (http.StatusNotModified)) +// calling WriteEntity, +// or directly calling WriteAsXml or WriteAsJson, +// or if the status is one for which no response is allowed: +// +// 202 = http.StatusAccepted +// 204 = http.StatusNoContent +// 206 = http.StatusPartialContent +// 304 = http.StatusNotModified +// +// If this behavior does not fit your need then you can write to the underlying response, such as: +// response.ResponseWriter.WriteHeader(http.StatusAccepted) func (r *Response) WriteHeader(httpStatus int) { r.statusCode = httpStatus - // if 201,204,304 then WriteEntity will not be called so we need to pass this code + // if 202,204,206,304 then WriteEntity will not be called so we need to pass this code if http.StatusNoContent == httpStatus || http.StatusNotModified == httpStatus || - http.StatusPartialContent == httpStatus { + http.StatusPartialContent == httpStatus || + http.StatusAccepted == httpStatus { r.ResponseWriter.WriteHeader(httpStatus) } } // StatusCode returns the code that has been written using WriteHeader. +// If WriteHeader, WriteEntity or WriteAsXml has not been called (yet) then return 200 OK. func (r Response) StatusCode() int { if 0 == r.statusCode { // no status code has been written yet; assume OK @@ -245,3 +256,8 @@ func (r Response) ContentLength() int { func (r Response) CloseNotify() <-chan bool { return r.ResponseWriter.(http.CloseNotifier).CloseNotify() } + +// Error returns the err created by WriteError +func (r Response) Error() error { + return r.err +} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/response_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/response_test.go index 6caa4d5274f..c66b1f33523 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/response_test.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/response_test.go @@ -9,7 +9,7 @@ import ( func TestWriteHeader(t *testing.T) { httpWriter := httptest.NewRecorder() - resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true} + resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil} resp.WriteHeader(123) if resp.StatusCode() != 123 { t.Errorf("Unexpected status code:%d", resp.StatusCode()) @@ -18,7 +18,7 @@ func TestWriteHeader(t *testing.T) { func TestNoWriteHeader(t *testing.T) { httpWriter := httptest.NewRecorder() - resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true} + resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil} if resp.StatusCode() != http.StatusOK { t.Errorf("Unexpected status code:%d", resp.StatusCode()) } @@ -31,7 +31,7 @@ type food struct { // go test -v -test.run TestMeasureContentLengthXml ...restful func TestMeasureContentLengthXml(t *testing.T) { httpWriter := httptest.NewRecorder() - resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true} + resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil} resp.WriteAsXml(food{"apple"}) if resp.ContentLength() != 76 { t.Errorf("Incorrect measured length:%d", resp.ContentLength()) @@ -41,7 +41,7 @@ func TestMeasureContentLengthXml(t *testing.T) { // go test -v -test.run TestMeasureContentLengthJson ...restful func TestMeasureContentLengthJson(t *testing.T) { httpWriter := httptest.NewRecorder() - resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 
0, true} + resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil} resp.WriteAsJson(food{"apple"}) if resp.ContentLength() != 22 { t.Errorf("Incorrect measured length:%d", resp.ContentLength()) @@ -51,7 +51,7 @@ func TestMeasureContentLengthJson(t *testing.T) { // go test -v -test.run TestMeasureContentLengthJsonNotPretty ...restful func TestMeasureContentLengthJsonNotPretty(t *testing.T) { httpWriter := httptest.NewRecorder() - resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, false} + resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, false, nil} resp.WriteAsJson(food{"apple"}) if resp.ContentLength() != 16 { t.Errorf("Incorrect measured length:%d", resp.ContentLength()) @@ -61,7 +61,7 @@ func TestMeasureContentLengthJsonNotPretty(t *testing.T) { // go test -v -test.run TestMeasureContentLengthWriteErrorString ...restful func TestMeasureContentLengthWriteErrorString(t *testing.T) { httpWriter := httptest.NewRecorder() - resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true} + resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil} resp.WriteErrorString(404, "Invalid") if resp.ContentLength() != len("Invalid") { t.Errorf("Incorrect measured length:%d", resp.ContentLength()) @@ -79,7 +79,7 @@ func TestStatusIsPassedToResponse(t *testing.T) { {write: 400, read: 200}, } { httpWriter := httptest.NewRecorder() - resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true} + resp := Response{httpWriter, "*/*", []string{"*/*"}, 0, 0, true, nil} resp.WriteHeader(each.write) if got, want := httpWriter.Code, each.read; got != want { t.Error("got %v want %v", got, want) @@ -90,7 +90,7 @@ func TestStatusIsPassedToResponse(t *testing.T) { // go test -v -test.run TestStatusCreatedAndContentTypeJson_Issue54 ...restful func TestStatusCreatedAndContentTypeJson_Issue54(t *testing.T) { httpWriter := httptest.NewRecorder() - resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true} + resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true, nil} resp.WriteHeader(201) resp.WriteAsJson(food{"Juicy"}) if httpWriter.HeaderMap.Get("Content-Type") != "application/json" { @@ -112,7 +112,7 @@ func (e errorOnWriteRecorder) Write(bytes []byte) (int, error) { // go test -v -test.run TestLastWriteErrorCaught ...restful func TestLastWriteErrorCaught(t *testing.T) { httpWriter := errorOnWriteRecorder{httptest.NewRecorder()} - resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true} + resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true, nil} err := resp.WriteAsJson(food{"Juicy"}) if err.Error() != "fail" { t.Errorf("Unexpected error message:%v", err) @@ -123,7 +123,7 @@ func TestLastWriteErrorCaught(t *testing.T) { func TestAcceptStarStar_Issue83(t *testing.T) { httpWriter := httptest.NewRecorder() // Accept Produces - resp := Response{httpWriter, "application/bogus,*/*;q=0.8", []string{"application/json"}, 0, 0, true} + resp := Response{httpWriter, "application/bogus,*/*;q=0.8", []string{"application/json"}, 0, 0, true, nil} resp.WriteEntity(food{"Juicy"}) ct := httpWriter.Header().Get("Content-Type") if "application/json" != ct { @@ -135,7 +135,7 @@ func TestAcceptStarStar_Issue83(t *testing.T) { func TestAcceptSkipStarStar_Issue83(t *testing.T) { httpWriter := httptest.NewRecorder() // Accept Produces - resp := Response{httpWriter, " application/xml ,*/* ; q=0.8", []string{"application/json", "application/xml"}, 0, 0, true} + 
resp := Response{httpWriter, " application/xml ,*/* ; q=0.8", []string{"application/json", "application/xml"}, 0, 0, true, nil} resp.WriteEntity(food{"Juicy"}) ct := httpWriter.Header().Get("Content-Type") if "application/xml" != ct { @@ -147,7 +147,7 @@ func TestAcceptSkipStarStar_Issue83(t *testing.T) { func TestAcceptXmlBeforeStarStar_Issue83(t *testing.T) { httpWriter := httptest.NewRecorder() // Accept Produces - resp := Response{httpWriter, "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", []string{"application/json"}, 0, 0, true} + resp := Response{httpWriter, "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", []string{"application/json"}, 0, 0, true, nil} resp.WriteEntity(food{"Juicy"}) ct := httpWriter.Header().Get("Content-Type") if "application/json" != ct { @@ -158,7 +158,7 @@ func TestAcceptXmlBeforeStarStar_Issue83(t *testing.T) { // go test -v -test.run TestWriteHeaderNoContent_Issue124 ...restful func TestWriteHeaderNoContent_Issue124(t *testing.T) { httpWriter := httptest.NewRecorder() - resp := Response{httpWriter, "text/plain", []string{"text/plain"}, 0, 0, true} + resp := Response{httpWriter, "text/plain", []string{"text/plain"}, 0, 0, true, nil} resp.WriteHeader(http.StatusNoContent) if httpWriter.Code != http.StatusNoContent { t.Errorf("got %d want %d", httpWriter.Code, http.StatusNoContent) @@ -168,7 +168,7 @@ func TestWriteHeaderNoContent_Issue124(t *testing.T) { // go test -v -test.run TestStatusCreatedAndContentTypeJson_Issue163 ...restful func TestStatusCreatedAndContentTypeJson_Issue163(t *testing.T) { httpWriter := httptest.NewRecorder() - resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true} + resp := Response{httpWriter, "application/json", []string{"application/json"}, 0, 0, true, nil} resp.WriteHeader(http.StatusNotModified) if httpWriter.Code != http.StatusNotModified { t.Errorf("Got %d want %d", httpWriter.Code, http.StatusNotModified) diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_webservice.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_webservice.go index d8585b4f4ee..7237253bd14 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_webservice.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_webservice.go @@ -173,7 +173,14 @@ func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Re } else { host = hostvalues[0] } - (&decl).BasePath = fmt.Sprintf("http://%s", host) + // inspect Referer for the scheme (http vs https) + scheme := "http" + if referer := req.Request.Header["Referer"]; len(referer) > 0 { + if strings.HasPrefix(referer[0], "https") { + scheme = "https" + } + } + (&decl).BasePath = fmt.Sprintf("%s://%s", scheme, host) } resp.WriteAsJson(decl) } From 06bbe006422bcd305224bedc7a57e0743b46a062 Mon Sep 17 00:00:00 2001 From: derekwaynecarr Date: Mon, 31 Aug 2015 11:19:47 -0400 Subject: [PATCH 032/101] Remove NamespaceExists from configuration --- cluster/aws/config-default.sh | 2 +- cluster/aws/config-test.sh | 2 +- cluster/azure/config-default.sh | 2 +- cluster/gce/config-default.sh | 2 +- cluster/gce/config-test.sh | 2 +- cluster/mesos/docker/docker-compose.yml | 2 +- cluster/ubuntu/config-default.sh | 2 +- cluster/vagrant/config-default.sh | 2 +- docs/admin/high-availability/kube-apiserver.yaml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh index 
ad530ed7cc0..aca9cb13aea 100644 --- a/cluster/aws/config-default.sh +++ b/cluster/aws/config-default.sh @@ -87,7 +87,7 @@ DNS_REPLICAS=1 ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" # Admission Controllers to invoke prior to persisting objects in cluster -ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota +ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota # Optional: Enable/disable public IP assignment for minions. # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! diff --git a/cluster/aws/config-test.sh b/cluster/aws/config-test.sh index 869640d7589..9bce781d45b 100755 --- a/cluster/aws/config-test.sh +++ b/cluster/aws/config-test.sh @@ -83,7 +83,7 @@ DNS_REPLICAS=1 ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" # Admission Controllers to invoke prior to persisting objects in cluster -ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota +ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota # Optional: Enable/disable public IP assignment for minions. # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! diff --git a/cluster/azure/config-default.sh b/cluster/azure/config-default.sh index 6dd8bd0ff60..aa86c0c6790 100644 --- a/cluster/azure/config-default.sh +++ b/cluster/azure/config-default.sh @@ -55,4 +55,4 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}" ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}" # Admission Controllers to invoke prior to persisting objects in cluster -ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota +ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index 7e62500c6c5..c0a18753c08 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -96,7 +96,7 @@ if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then fi # Admission Controllers to invoke prior to persisting objects in cluster -ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota +ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota # Optional: if set to true kube-up will automatically check for existing resources and clean them up. KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false} diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 4f3930caf34..9cb958ed8ac 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -100,7 +100,7 @@ if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}" fi -ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota +ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota # Optional: if set to true kube-up will automatically check for existing resources and clean them up. 
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false} diff --git a/cluster/mesos/docker/docker-compose.yml b/cluster/mesos/docker/docker-compose.yml index fdc48a7bf37..94dd95d8d1a 100644 --- a/cluster/mesos/docker/docker-compose.yml +++ b/cluster/mesos/docker/docker-compose.yml @@ -89,7 +89,7 @@ apiserver: --external-hostname=apiserver --etcd-servers=http://etcd:4001 --port=8888 - --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota + --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota --authorization-mode=AlwaysAllow --token-auth-file=/var/run/kubernetes/auth/token-users --basic-auth-file=/var/run/kubernetes/auth/basic-users diff --git a/cluster/ubuntu/config-default.sh b/cluster/ubuntu/config-default.sh index ec63f7650fb..a6052108369 100755 --- a/cluster/ubuntu/config-default.sh +++ b/cluster/ubuntu/config-default.sh @@ -35,7 +35,7 @@ export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-192.168.3.0/24} # f export FLANNEL_NET=${FLANNEL_NET:-172.16.0.0/16} # Admission Controllers to invoke prior to persisting objects in cluster -export ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,ResourceQuota,SecurityContextDeny +export ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,SecurityContextDeny SERVICE_NODE_PORT_RANGE=${SERVICE_NODE_PORT_RANGE:-"30000-32767"} diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh index 0bd26367cd1..12793fef556 100755 --- a/cluster/vagrant/config-default.sh +++ b/cluster/vagrant/config-default.sh @@ -53,7 +53,7 @@ MASTER_USER=vagrant MASTER_PASSWD=vagrant # Admission Controllers to invoke prior to persisting objects in cluster -ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota +ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota # Optional: Enable node logging. 
ENABLE_NODE_LOGGING=false diff --git a/docs/admin/high-availability/kube-apiserver.yaml b/docs/admin/high-availability/kube-apiserver.yaml index f5081925aa3..33d5cff5cdc 100644 --- a/docs/admin/high-availability/kube-apiserver.yaml +++ b/docs/admin/high-availability/kube-apiserver.yaml @@ -11,7 +11,7 @@ spec: - /bin/sh - -c - /usr/local/bin/kube-apiserver --address=127.0.0.1 --etcd-servers=http://127.0.0.1:4001 - --cloud-provider=gce --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota + --cloud-provider=gce --admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota --service-cluster-ip-range=10.0.0.0/16 --client-ca-file=/srv/kubernetes/ca.crt --basic-auth-file=/srv/kubernetes/basic_auth.csv --cluster-name=e2e-test-bburns --tls-cert-file=/srv/kubernetes/server.cert --tls-private-key-file=/srv/kubernetes/server.key From 5dc74e8dbff71ad34e2561e005752487fe5b1728 Mon Sep 17 00:00:00 2001 From: derekwaynecarr Date: Tue, 1 Sep 2015 09:27:01 -0400 Subject: [PATCH 033/101] Add support for CFS quota in kubelet --- cmd/kubelet/app/server.go | 10 ++++- contrib/mesos/pkg/executor/service/service.go | 2 + hack/verify-flags/known-flags.txt | 1 + pkg/kubelet/dockertools/docker.go | 25 ++++++++++++ pkg/kubelet/dockertools/docker_test.go | 40 +++++++++++++++++++ pkg/kubelet/dockertools/fake_manager.go | 2 +- pkg/kubelet/dockertools/manager.go | 17 +++++++- pkg/kubelet/kubelet.go | 10 ++++- 8 files changed, 101 insertions(+), 6 deletions(-) diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index f0a75f556ab..3eb671c148f 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -124,7 +124,7 @@ type KubeletServer struct { MaxPods int DockerExecHandlerName string ResolverConfig string - + CPUCFSQuota bool // Flags intended for testing // Crash immediately, rather than eating panics. @@ -189,6 +189,7 @@ func NewKubeletServer() *KubeletServer { SystemContainer: "", ConfigureCBR0: false, DockerExecHandlerName: "native", + CPUCFSQuota: false, } } @@ -255,6 +256,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.DockerExecHandlerName, "docker-exec-handler", s.DockerExecHandlerName, "Handler to use when executing a command in a container. Valid values are 'native' and 'nsenter'. Defaults to 'native'.") fs.StringVar(&s.PodCIDR, "pod-cidr", "", "The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.") fs.StringVar(&s.ResolverConfig, "resolv-conf", kubelet.ResolvConfDefault, "Resolver configuration file used as the basis for the container DNS resolution configuration.") + fs.BoolVar(&s.CPUCFSQuota, "cpu-cfs-quota", s.CPUCFSQuota, "Enable CPU CFS quota enforcement for containers that specify CPU limits") // Flags intended for testing, not recommended used in production environments. fs.BoolVar(&s.ReallyCrashForTesting, "really-crash-for-testing", s.ReallyCrashForTesting, "If true, when panics occur crash. Intended for testing.") fs.Float64Var(&s.ChaosChance, "chaos-chance", s.ChaosChance, "If > 0.0, introduce random client errors and latency. Intended for testing. 
[default=0.0]") @@ -362,6 +364,7 @@ func (s *KubeletServer) KubeletConfig() (*KubeletConfig, error) { MaxPods: s.MaxPods, DockerExecHandler: dockerExecHandler, ResolverConfig: s.ResolverConfig, + CPUCFSQuota: s.CPUCFSQuota, }, nil } @@ -604,6 +607,7 @@ func SimpleKubelet(client *client.Client, MaxPods: 32, DockerExecHandler: &dockertools.NativeExecHandler{}, ResolverConfig: kubelet.ResolvConfDefault, + CPUCFSQuota: false, } return &kcfg } @@ -774,6 +778,7 @@ type KubeletConfig struct { MaxPods int DockerExecHandler dockertools.ExecHandler ResolverConfig string + CPUCFSQuota bool } func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) { @@ -833,7 +838,8 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod kc.PodCIDR, kc.MaxPods, kc.DockerExecHandler, - kc.ResolverConfig) + kc.ResolverConfig, + kc.CPUCFSQuota) if err != nil { return nil, nil, err diff --git a/contrib/mesos/pkg/executor/service/service.go b/contrib/mesos/pkg/executor/service/service.go index a026ba7314f..522846c2d8e 100644 --- a/contrib/mesos/pkg/executor/service/service.go +++ b/contrib/mesos/pkg/executor/service/service.go @@ -262,6 +262,7 @@ func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error { MaxPods: s.MaxPods, DockerExecHandler: dockerExecHandler, ResolverConfig: s.ResolverConfig, + CPUCFSQuota: s.CPUCFSQuota, } kcfg.NodeName = kcfg.Hostname @@ -364,6 +365,7 @@ func (ks *KubeletExecutorServer) createAndInitKubelet( kc.MaxPods, kc.DockerExecHandler, kc.ResolverConfig, + kc.CPUCFSQuota, ) if err != nil { return nil, nil, err diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt index 71a9127a3ce..6aa889faac7 100644 --- a/hack/verify-flags/known-flags.txt +++ b/hack/verify-flags/known-flags.txt @@ -264,3 +264,4 @@ whitelist-override-label www-prefix retry_time file_content_in_loop +cpu-cfs-quota \ No newline at end of file diff --git a/pkg/kubelet/dockertools/docker.go b/pkg/kubelet/dockertools/docker.go index 15bbde1df76..d96c201641e 100644 --- a/pkg/kubelet/dockertools/docker.go +++ b/pkg/kubelet/dockertools/docker.go @@ -50,6 +50,9 @@ const ( minShares = 2 sharesPerCPU = 1024 milliCPUToCPU = 1000 + + // 100000 is equivalent to 100ms + quotaPeriod = 100000 ) // DockerInterface is an abstract interface for testability. It abstracts the interface of docker.Client. 
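Editor's note: to make the CFS numbers concrete, here is a small standalone sketch of the milliCPU-to-quota arithmetic that the next hunk implements, using the quotaPeriod and milliCPUToCPU constants introduced above. It is illustrative only; the sample milliCPU values are chosen for demonstration.

```go
package main

import "fmt"

// Constants as introduced in the hunk above: the CFS period is fixed at
// 100ms (100000us) and one CPU equals 1000 milliCPU.
const (
	quotaPeriod   = 100000
	milliCPUToCPU = 1000
)

// toQuota mirrors the conversion added below: a container limited to
// N milliCPU may consume N/1000 of each 100ms scheduling period.
func toQuota(milliCPU int64) (quota, period int64) {
	if milliCPU == 0 {
		return 0, 0 // no limit specified: keep Docker's default (no quota)
	}
	return (milliCPU * quotaPeriod) / milliCPUToCPU, quotaPeriod
}

func main() {
	for _, m := range []int64{250, 1000, 2500} {
		q, p := toQuota(m)
		// e.g. 250m  -> 25000us per 100000us period (25% of one core);
		//      2500m -> 250000us per period, i.e. up to 2.5 cores of CPU time.
		fmt.Printf("%dm CPU -> cfs_quota_us=%d cfs_period_us=%d\n", m, q, p)
	}
}
```

The design point behind the new flag: CPU shares only express a relative weight that matters under contention, whereas a CFS quota enforces a hard ceiling even on an otherwise idle node, which is what predictable QoS for containers with CPU limits requires.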
@@ -306,6 +309,28 @@ func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface { return client } +// milliCPUToQuota converts milliCPU to CFS quota and period values +func milliCPUToQuota(milliCPU int64) (quota int64, period int64) { + // CFS quota is measured in two values: + // - cfs_period_us=100ms (the amount of time to measure usage across) + // - cfs_quota=20ms (the amount of cpu time allowed to be used across a period) + // so in the above example, you are limited to 20% of a single CPU + // for multi-cpu environments, you just scale equivalent amounts + + if milliCPU == 0 { + // take the default behavior from docker + return + } + + // we set the period to 100ms by default + period = quotaPeriod + + // we then convert your milliCPU to a value normalized over a period + quota = (milliCPU * quotaPeriod) / milliCPUToCPU + + return +} + func milliCPUToShares(milliCPU int64) int64 { if milliCPU == 0 { // Docker converts zero milliCPU to unset, which maps to kernel default diff --git a/pkg/kubelet/dockertools/docker_test.go b/pkg/kubelet/dockertools/docker_test.go index 22161b13d0c..760b8f63a5c 100644 --- a/pkg/kubelet/dockertools/docker_test.go +++ b/pkg/kubelet/dockertools/docker_test.go @@ -737,3 +737,43 @@ func TestMakePortsAndBindings(t *testing.T) { } } } + +func TestMilliCPUToQuota(t *testing.T) { + testCases := []struct { + input int64 + quota int64 + period int64 + }{ + { + input: int64(0), + quota: int64(0), + period: int64(0), + }, + { + input: int64(200), + quota: int64(20000), + period: int64(100000), + }, + { + input: int64(500), + quota: int64(50000), + period: int64(100000), + }, + { + input: int64(1000), + quota: int64(100000), + period: int64(100000), + }, + { + input: int64(1500), + quota: int64(150000), + period: int64(100000), + }, + } + for _, testCase := range testCases { + quota, period := milliCPUToQuota(testCase.input) + if quota != testCase.quota || period != testCase.period { + t.Errorf("Input %v, expected quota %v period %v, but got quota %v period %v", testCase.input, testCase.quota, testCase.period, quota, period) + } + } +} diff --git a/pkg/kubelet/dockertools/fake_manager.go b/pkg/kubelet/dockertools/fake_manager.go index 3bb0a3cd961..03bf8a6aec3 100644 --- a/pkg/kubelet/dockertools/fake_manager.go +++ b/pkg/kubelet/dockertools/fake_manager.go @@ -46,7 +46,7 @@ func NewFakeDockerManager( fakeProcFs := procfs.NewFakeProcFs() dm := NewDockerManager(client, recorder, readinessManager, containerRefManager, machineInfo, podInfraContainerImage, qps, burst, containerLogsDir, osInterface, networkPlugin, generator, httpClient, &NativeExecHandler{}, - fakeOomAdjuster, fakeProcFs) + fakeOomAdjuster, fakeProcFs, false) dm.dockerPuller = &FakeDockerPuller{} dm.prober = prober.New(nil, readinessManager, containerRefManager, recorder) return dm diff --git a/pkg/kubelet/dockertools/manager.go b/pkg/kubelet/dockertools/manager.go index ef7ea29beb9..feb42fc8786 100644 --- a/pkg/kubelet/dockertools/manager.go +++ b/pkg/kubelet/dockertools/manager.go @@ -132,6 +132,9 @@ type DockerManager struct { // Get information from /proc mount. 
procFs procfs.ProcFsInterface + + // If true, enforce container cpu limits with CFS quota support + cpuCFSQuota bool } func NewDockerManager( @@ -150,7 +153,8 @@ func NewDockerManager( httpClient kubeletTypes.HttpGetter, execHandler ExecHandler, oomAdjuster *oom.OomAdjuster, - procFs procfs.ProcFsInterface) *DockerManager { + procFs procfs.ProcFsInterface, + cpuCFSQuota bool) *DockerManager { // Work out the location of the Docker runtime, defaulting to /var/lib/docker // if there are any problems. dockerRoot := "/var/lib/docker" @@ -201,6 +205,7 @@ func NewDockerManager( execHandler: execHandler, oomAdjuster: oomAdjuster, procFs: procFs, + cpuCFSQuota: cpuCFSQuota, } dm.runner = lifecycle.NewHandlerRunner(httpClient, dm, dm) dm.prober = prober.New(dm, readinessManager, containerRefManager, recorder) @@ -673,6 +678,7 @@ func (dm *DockerManager) runContainer( // of CPU shares. cpuShares = milliCPUToShares(cpuRequest.MilliValue()) } + _, containerName := BuildDockerName(dockerName, container) dockerOpts := docker.CreateContainerOptions{ Name: containerName, @@ -742,6 +748,15 @@ func (dm *DockerManager) runContainer( MemorySwap: -1, CPUShares: cpuShares, } + + if dm.cpuCFSQuota { + // if cpuLimit.Amount is nil, then the appropriate default value is returned to allow full usage of cpu resource. + cpuQuota, cpuPeriod := milliCPUToQuota(cpuLimit.MilliValue()) + + hc.CPUQuota = cpuQuota + hc.CPUPeriod = cpuPeriod + } + if len(opts.DNS) > 0 { hc.DNS = opts.DNS } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 5c49ddc8f48..a91466ba461 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -164,7 +164,8 @@ func NewMainKubelet( podCIDR string, pods int, dockerExecHandler dockertools.ExecHandler, - resolverConfig string) (*Kubelet, error) { + resolverConfig string, + cpuCFSQuota bool) (*Kubelet, error) { if rootDirectory == "" { return nil, fmt.Errorf("invalid root directory %q", rootDirectory) } @@ -281,6 +282,7 @@ func NewMainKubelet( pods: pods, syncLoopMonitor: util.AtomicValue{}, resolverConfig: resolverConfig, + cpuCFSQuota: cpuCFSQuota, } if plug, err := network.InitNetworkPlugin(networkPlugins, networkPluginName, &networkHost{klet}); err != nil { @@ -317,7 +319,8 @@ func NewMainKubelet( klet.httpClient, dockerExecHandler, oomAdjuster, - procFs) + procFs, + klet.cpuCFSQuota) case "rkt": conf := &rkt.Config{ Path: rktPath, @@ -556,6 +559,9 @@ type Kubelet struct { // Optionally shape the bandwidth of a pod shaper bandwidth.BandwidthShaper + + // True if container cpu limits should be enforced via cgroup CFS quota + cpuCFSQuota bool } // getRootDir returns the full path to the directory under which kubelet can From aff9ee5a40940fc0d2a9c5d4bc1ffca9c7643820 Mon Sep 17 00:00:00 2001 From: derekwaynecarr Date: Tue, 1 Sep 2015 10:22:15 -0400 Subject: [PATCH 034/101] Enable CFS quota in vagrant setup --- cluster/saltbase/salt/kubelet/default | 7 ++++++- cluster/vagrant/config-default.sh | 3 +++ cluster/vagrant/provision-master.sh | 1 + cluster/vagrant/util.sh | 1 + 4 files changed, 11 insertions(+), 1 deletion(-) diff --git a/cluster/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default index 0962711d3a0..9d228f39620 100644 --- a/cluster/saltbase/salt/kubelet/default +++ b/cluster/saltbase/salt/kubelet/default @@ -97,10 +97,15 @@ {% set pod_cidr = "--pod-cidr=" + grains['cbr-cidr'] %} {% endif %} +{% set cpu_cfs_quota = "" %} +{% if pillar['enable_cpu_cfs_quota'] is defined -%} + {% set cpu_cfs_quota = "--cpu-cfs-quota=" + pillar['enable_cpu_cfs_quota'] 
-%} +{% endif -%} + {% set test_args = "" -%} {% if pillar['kubelet_test_args'] is defined -%} {% set test_args=pillar['kubelet_test_args'] %} {% endif -%} # test_args has to be kept at the end, so they'll overwrite any prior configuration -DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{test_args}}" +DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{cpu_cfs_quota}} {{test_args}}" diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh index 0bd26367cd1..598a0556e84 100755 --- a/cluster/vagrant/config-default.sh +++ b/cluster/vagrant/config-default.sh @@ -76,6 +76,9 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}" #EXTRA_DOCKER_OPTS="-b=cbr0 --selinux-enabled --insecure-registry 10.0.0.0/8" EXTRA_DOCKER_OPTS="-b=cbr0 --insecure-registry 10.0.0.0/8" +# Flag to tell the kubelet to enable CFS quota support +ENABLE_CPU_CFS_QUOTA="${KUBE_ENABLE_CPU_CFS_QUOTA:-true}" + # Optional: Install cluster DNS. ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}" DNS_SERVER_IP="10.247.0.10" diff --git a/cluster/vagrant/provision-master.sh b/cluster/vagrant/provision-master.sh index ac2cbc69329..d94be740054 100755 --- a/cluster/vagrant/provision-master.sh +++ b/cluster/vagrant/provision-master.sh @@ -126,6 +126,7 @@ cat </srv/salt-overlay/pillar/cluster-params.sls dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")' instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")' + enable_cpu_cfs_quota: '$(echo "$ENABLE_CPU_CFS_QUOTA" | sed -e "s/'/''/g")' EOF # Configure the salt-master diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh index 39cbc883c4c..f0803411d37 100644 --- a/cluster/vagrant/util.sh +++ b/cluster/vagrant/util.sh @@ -153,6 +153,7 @@ function create-provision-scripts { echo "KUBELET_TOKEN='${KUBELET_TOKEN:-}'" echo "KUBE_PROXY_TOKEN='${KUBE_PROXY_TOKEN:-}'" echo "MASTER_EXTRA_SANS='${MASTER_EXTRA_SANS:-}'" + echo "ENABLE_CPU_CFS_QUOTA='${ENABLE_CPU_CFS_QUOTA}'" awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network.sh" awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-master.sh" ) > "${KUBE_TEMP}/master-start.sh" From cb65cfa746830f025301395f3f037479c8542f3e Mon Sep 17 00:00:00 2001 From: Jeff Lowdermilk Date: Tue, 1 Sep 2015 16:36:55 -0700 Subject: [PATCH 035/101] Print recognized file extensions in resource builder error --- contrib/completions/bash/kubectl | 48 ++++++++++++++++---------------- pkg/kubectl/bash_comp_utils.go | 10 +++++-- pkg/kubectl/resource/builder.go | 6 ++-- 3 files changed, 36 insertions(+), 28 deletions(-) diff --git a/contrib/completions/bash/kubectl b/contrib/completions/bash/kubectl index c096937e22c..d9caa16b818 100644 --- a/contrib/completions/bash/kubectl +++ b/contrib/completions/bash/kubectl @@ -256,10 +256,10 @@ _kubectl_get() flags+=("--all-namespaces") flags+=("--filename=") 
flags_with_completion+=("--filename") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") two_word_flags+=("-f") flags_with_completion+=("-f") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--help") flags+=("-h") flags+=("--label-columns=") @@ -312,10 +312,10 @@ _kubectl_describe() flags+=("--filename=") flags_with_completion+=("--filename") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") two_word_flags+=("-f") flags_with_completion+=("-f") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--help") flags+=("-h") flags+=("--selector=") @@ -349,10 +349,10 @@ _kubectl_create() flags+=("--filename=") flags_with_completion+=("--filename") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") two_word_flags+=("-f") flags_with_completion+=("-f") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--help") flags+=("-h") flags+=("--output=") @@ -378,10 +378,10 @@ _kubectl_replace() flags+=("--cascade") flags+=("--filename=") flags_with_completion+=("--filename") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") two_word_flags+=("-f") flags_with_completion+=("-f") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--force") flags+=("--grace-period=") flags+=("--help") @@ -409,10 +409,10 @@ _kubectl_patch() flags+=("--filename=") flags_with_completion+=("--filename") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") two_word_flags+=("-f") flags_with_completion+=("-f") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--help") flags+=("-h") flags+=("--output=") @@ -440,10 +440,10 @@ _kubectl_delete() flags+=("--cascade") flags+=("--filename=") flags_with_completion+=("--filename") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") two_word_flags+=("-f") flags_with_completion+=("-f") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--grace-period=") flags+=("--help") flags+=("-h") @@ -530,10 +530,10 @@ _kubectl_rolling-update() flags+=("--dry-run") flags+=("--filename=") flags_with_completion+=("--filename") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") two_word_flags+=("-f") flags_with_completion+=("-f") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--help") flags+=("-h") 
flags+=("--image=") @@ -572,10 +572,10 @@ _kubectl_scale() flags+=("--current-replicas=") flags+=("--filename=") flags_with_completion+=("--filename") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") two_word_flags+=("-f") flags_with_completion+=("-f") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--help") flags+=("-h") flags+=("--output=") @@ -743,10 +743,10 @@ _kubectl_stop() flags+=("--all") flags+=("--filename=") flags_with_completion+=("--filename") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") two_word_flags+=("-f") flags_with_completion+=("-f") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--grace-period=") flags+=("--help") flags+=("-h") @@ -777,10 +777,10 @@ _kubectl_expose() flags+=("--external-ip=") flags+=("--filename=") flags_with_completion+=("--filename") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") two_word_flags+=("-f") flags_with_completion+=("-f") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--generator=") flags+=("--help") flags+=("-h") @@ -822,10 +822,10 @@ _kubectl_label() flags+=("--all") flags+=("--filename=") flags_with_completion+=("--filename") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") two_word_flags+=("-f") flags_with_completion+=("-f") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--help") flags+=("-h") flags+=("--no-headers") @@ -876,10 +876,10 @@ _kubectl_annotate() flags+=("--all") flags+=("--filename=") flags_with_completion+=("--filename") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") two_word_flags+=("-f") flags_with_completion+=("-f") - flags_completion+=("__handle_filename_extension_flag json|yaml|yml") + flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--help") flags+=("-h") flags+=("--overwrite") diff --git a/pkg/kubectl/bash_comp_utils.go b/pkg/kubectl/bash_comp_utils.go index bf480bd05d9..a89fef07a06 100644 --- a/pkg/kubectl/bash_comp_utils.go +++ b/pkg/kubectl/bash_comp_utils.go @@ -19,12 +19,18 @@ limitations under the License. 
package kubectl import ( + "strings" + "github.com/spf13/cobra" + + "k8s.io/kubernetes/pkg/kubectl/resource" ) func AddJsonFilenameFlag(cmd *cobra.Command, value *[]string, usage string) { cmd.Flags().StringSliceVarP(value, "filename", "f", *value, usage) - - annotations := []string{"json", "yaml", "yml"} + annotations := []string{} + for _, ext := range resource.FileExtensions { + annotations = append(annotations, strings.TrimLeft(ext, ".")) + } cmd.Flags().SetAnnotation("filename", cobra.BashCompFilenameExt, annotations) } diff --git a/pkg/kubectl/resource/builder.go b/pkg/kubectl/resource/builder.go index 835fd2225a1..4386ed8d723 100644 --- a/pkg/kubectl/resource/builder.go +++ b/pkg/kubectl/resource/builder.go @@ -32,6 +32,8 @@ import ( "k8s.io/kubernetes/pkg/util/errors" ) +var FileExtensions = []string{".json", ".stdin", ".yaml", ".yml"} + // Builder provides convenience functions for taking arguments and parameters // from the command line and converting them to a list of resources to iterate // over using the Visitor interface. @@ -164,7 +166,7 @@ func (b *Builder) Path(paths ...string) *Builder { continue } - visitors, err := ExpandPathsToFileVisitors(b.mapper, p, false, []string{".json", ".stdin", ".yaml", ".yml"}, b.schema) + visitors, err := ExpandPathsToFileVisitors(b.mapper, p, false, FileExtensions, b.schema) if err != nil { b.errs = append(b.errs, fmt.Errorf("error reading %q: %v", p, err)) } @@ -646,7 +648,7 @@ func (b *Builder) visitorResult() *Result { return &Result{singular: singular, visitor: visitors, sources: b.paths} } - return &Result{err: fmt.Errorf("you must provide one or more resources by argument or filename")} + return &Result{err: fmt.Errorf("you must provide one or more resources by argument or filename (%s)", strings.Join(FileExtensions, "|"))} } // Do returns a Result object with a Visitor for the resources identified by the Builder. From 2e76842eb7baa5382058e7b11a8f314552af58f8 Mon Sep 17 00:00:00 2001 From: derekwaynecarr Date: Thu, 13 Aug 2015 10:19:27 -0400 Subject: [PATCH 036/101] Update resource quota for requests --- pkg/api/resource/quantity.go | 8 + .../resource_quota_controller.go | 110 +++-- .../resource_quota_controller_test.go | 231 ++++++++--- .../pkg/admission/resourcequota/admission.go | 99 +++-- .../admission/resourcequota/admission_test.go | 389 ++++++------------ 5 files changed, 444 insertions(+), 393 deletions(-) diff --git a/pkg/api/resource/quantity.go b/pkg/api/resource/quantity.go index 2b42e040fcc..ef8eaef9e6b 100644 --- a/pkg/api/resource/quantity.go +++ b/pkg/api/resource/quantity.go @@ -309,6 +309,14 @@ func (q *Quantity) Add(y Quantity) error { return nil } +func (q *Quantity) Sub(y Quantity) error { + if q.Format != y.Format { + return fmt.Errorf("format mismatch: %v vs. %v", q.Format, y.Format) + } + q.Amount.Sub(q.Amount, y.Amount) + return nil +} + // MarshalJSON implements the json.Marshaller interface. func (q Quantity) MarshalJSON() ([]byte, error) { return []byte(`"` + q.String() + `"`), nil diff --git a/pkg/controller/resourcequota/resource_quota_controller.go b/pkg/controller/resourcequota/resource_quota_controller.go index 3b92d2cd607..9d319aacaf8 100644 --- a/pkg/controller/resourcequota/resource_quota_controller.go +++ b/pkg/controller/resourcequota/resource_quota_controller.go @@ -17,6 +17,7 @@ limitations under the License. 
package resourcequotacontroller import ( + "fmt" "time" "github.com/golang/glog" @@ -163,18 +164,6 @@ func (rm *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (e switch k { case api.ResourcePods: value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI) - case api.ResourceMemory: - val := int64(0) - for _, pod := range filteredPods { - val = val + PodMemory(pod).Value() - } - value = resource.NewQuantity(int64(val), resource.DecimalSI) - case api.ResourceCPU: - val := int64(0) - for _, pod := range filteredPods { - val = val + PodCPU(pod).MilliValue() - } - value = resource.NewMilliQuantity(int64(val), resource.DecimalSI) case api.ResourceServices: items, err := rm.kubeClient.Services(usage.Namespace).List(labels.Everything()) if err != nil { @@ -205,6 +194,10 @@ func (rm *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (e return err } value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI) + case api.ResourceMemory: + value = PodsRequests(filteredPods, api.ResourceMemory) + case api.ResourceCPU: + value = PodsRequests(filteredPods, api.ResourceCPU) } // ignore fields we do not understand (assume another controller is tracking it) @@ -224,7 +217,73 @@ func (rm *ResourceQuotaController) syncResourceQuota(quota api.ResourceQuota) (e return nil } -// PodCPU computes total cpu usage of a pod +// PodsRequests returns sum of each resource request for each pod in list +// If a given pod in the list does not have a request for the named resource, we log the error +// but still attempt to get the most representative count +func PodsRequests(pods []*api.Pod, resourceName api.ResourceName) *resource.Quantity { + var sum *resource.Quantity + for i := range pods { + pod := pods[i] + podQuantity, err := PodRequests(pod, resourceName) + if err != nil { + // log the error, but try to keep the most accurate count possible in log + // rationale here is that you may have had pods in a namespace that did not have + // explicit requests prior to adding the quota + glog.Infof("No explicit request for resource, pod %s/%s, %s", pod.Namespace, pod.Name, resourceName) + } else { + if sum == nil { + sum = podQuantity + } else { + sum.Add(*podQuantity) + } + } + } + // if list is empty + if sum == nil { + q := resource.MustParse("0") + sum = &q + } + return sum +} + +// PodRequests returns sum of each resource request across all containers in pod +func PodRequests(pod *api.Pod, resourceName api.ResourceName) (*resource.Quantity, error) { + if !PodHasRequests(pod, resourceName) { + return nil, fmt.Errorf("Each container in pod %s/%s does not have an explicit request for resource %s.", pod.Namespace, pod.Name, resourceName) + } + var sum *resource.Quantity + for j := range pod.Spec.Containers { + value, _ := pod.Spec.Containers[j].Resources.Requests[resourceName] + if sum == nil { + sum = value.Copy() + } else { + err := sum.Add(value) + if err != nil { + return sum, err + } + } + } + // if list is empty + if sum == nil { + q := resource.MustParse("0") + sum = &q + } + return sum, nil +} + +// PodHasRequests verifies that each container in the pod has an explicit request that is non-zero for a named resource +func PodHasRequests(pod *api.Pod, resourceName api.ResourceName) bool { + for j := range pod.Spec.Containers { + value, valueSet := pod.Spec.Containers[j].Resources.Requests[resourceName] + if !valueSet || value.Value() == int64(0) { + return false + } + } + return true +} + +// PodCPU computes total cpu limit across all containers in pod 
+// TODO: Remove this once the mesos scheduler becomes request aware func PodCPU(pod *api.Pod) *resource.Quantity { val := int64(0) for j := range pod.Spec.Containers { @@ -233,29 +292,8 @@ func PodCPU(pod *api.Pod) *resource.Quantity { return resource.NewMilliQuantity(int64(val), resource.DecimalSI) } -// IsPodCPUUnbounded returns true if the cpu use is unbounded for any container in pod -func IsPodCPUUnbounded(pod *api.Pod) bool { - for j := range pod.Spec.Containers { - container := pod.Spec.Containers[j] - if container.Resources.Limits.Cpu().MilliValue() == int64(0) { - return true - } - } - return false -} - -// IsPodMemoryUnbounded returns true if the memory use is unbounded for any container in pod -func IsPodMemoryUnbounded(pod *api.Pod) bool { - for j := range pod.Spec.Containers { - container := pod.Spec.Containers[j] - if container.Resources.Limits.Memory().Value() == int64(0) { - return true - } - } - return false -} - -// PodMemory computes the memory usage of a pod +// PodMemory computes total memory limit across all containers in a pod +// TODO: Remove this once the mesos scheduler becomes request aware func PodMemory(pod *api.Pod) *resource.Quantity { val := int64(0) for j := range pod.Spec.Containers { diff --git a/pkg/controller/resourcequota/resource_quota_controller_test.go b/pkg/controller/resourcequota/resource_quota_controller_test.go index 20d0677a21c..346d6bdf438 100644 --- a/pkg/controller/resourcequota/resource_quota_controller_test.go +++ b/pkg/controller/resourcequota/resource_quota_controller_test.go @@ -17,6 +17,7 @@ limitations under the License. package resourcequotacontroller import ( + "strconv" "testing" "k8s.io/kubernetes/pkg/api" @@ -25,19 +26,39 @@ import ( "k8s.io/kubernetes/pkg/util" ) -func getResourceRequirements(cpu, memory string) api.ResourceRequirements { - res := api.ResourceRequirements{} - res.Limits = api.ResourceList{} +func getResourceList(cpu, memory string) api.ResourceList { + res := api.ResourceList{} if cpu != "" { - res.Limits[api.ResourceCPU] = resource.MustParse(cpu) + res[api.ResourceCPU] = resource.MustParse(cpu) } if memory != "" { - res.Limits[api.ResourceMemory] = resource.MustParse(memory) + res[api.ResourceMemory] = resource.MustParse(memory) } - return res } +func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements { + res := api.ResourceRequirements{} + res.Requests = requests + res.Limits = limits + return res +} + +func validPod(name string, numContainers int, resources api.ResourceRequirements) *api.Pod { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: "test"}, + Spec: api.PodSpec{}, + } + pod.Spec.Containers = make([]api.Container, 0, numContainers) + for i := 0; i < numContainers; i++ { + pod.Spec.Containers = append(pod.Spec.Containers, api.Container{ + Image: "foo:V" + strconv.Itoa(i), + Resources: resources, + }) + } + return pod +} + func TestFilterQuotaPods(t *testing.T) { pods := []api.Pod{ { @@ -105,7 +126,7 @@ func TestSyncResourceQuota(t *testing.T) { Status: api.PodStatus{Phase: api.PodRunning}, Spec: api.PodSpec{ Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}}, + Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}}, }, }, { @@ -113,7 +134,7 @@ func TestSyncResourceQuota(t *testing.T) { Status: api.PodStatus{Phase: api.PodRunning}, Spec: api.PodSpec{ 
Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}}, + Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}}, }, }, { @@ -121,7 +142,7 @@ func TestSyncResourceQuota(t *testing.T) { Status: api.PodStatus{Phase: api.PodFailed}, Spec: api.PodSpec{ Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}}, + Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}}, }, }, }, @@ -144,7 +165,7 @@ func TestSyncResourceQuota(t *testing.T) { }, Used: api.ResourceList{ api.ResourceCPU: resource.MustParse("200m"), - api.ResourceMemory: resource.MustParse("2147483648"), + api.ResourceMemory: resource.MustParse("2Gi"), api.ResourcePods: resource.MustParse("2"), }, }, @@ -177,7 +198,6 @@ func TestSyncResourceQuota(t *testing.T) { t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue) } } - } func TestSyncResourceQuotaSpecChange(t *testing.T) { @@ -269,62 +289,151 @@ func TestSyncResourceQuotaNoChange(t *testing.T) { } } -func TestIsPodCPUUnbounded(t *testing.T) { - pod := api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "pod-running"}, - Status: api.PodStatus{Phase: api.PodRunning}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "0")}}, +func TestPodHasRequests(t *testing.T) { + type testCase struct { + pod *api.Pod + resourceName api.ResourceName + expectedResult bool + } + testCases := []testCase{ + { + pod: validPod("request-cpu", 2, getResourceRequirements(getResourceList("100m", ""), getResourceList("", ""))), + resourceName: api.ResourceCPU, + expectedResult: true, + }, + { + pod: validPod("no-request-cpu", 2, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))), + resourceName: api.ResourceCPU, + expectedResult: false, + }, + { + pod: validPod("request-zero-cpu", 2, getResourceRequirements(getResourceList("0", ""), getResourceList("", ""))), + resourceName: api.ResourceCPU, + expectedResult: false, + }, + { + pod: validPod("request-memory", 2, getResourceRequirements(getResourceList("", "2Mi"), getResourceList("", ""))), + resourceName: api.ResourceMemory, + expectedResult: true, + }, + { + pod: validPod("no-request-memory", 2, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))), + resourceName: api.ResourceMemory, + expectedResult: false, + }, + { + pod: validPod("request-zero-memory", 2, getResourceRequirements(getResourceList("", "0"), getResourceList("", ""))), + resourceName: api.ResourceMemory, + expectedResult: false, }, } - if IsPodCPUUnbounded(&pod) { - t.Errorf("Expected false") - } - pod = api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "pod-running"}, - Status: api.PodStatus{Phase: api.PodRunning}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0", "0")}}, - }, - } - if !IsPodCPUUnbounded(&pod) { - t.Errorf("Expected true") - } - - pod.Spec.Containers[0].Resources = api.ResourceRequirements{} - if !IsPodCPUUnbounded(&pod) { - t.Errorf("Expected true") + for _, item := range testCases { + if actual := 
PodHasRequests(item.pod, item.resourceName); item.expectedResult != actual { + t.Errorf("Pod %s for resource %s expected %v actual %v", item.pod.Name, item.resourceName, item.expectedResult, actual) + } } } -func TestIsPodMemoryUnbounded(t *testing.T) { - pod := api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "pod-running"}, - Status: api.PodStatus{Phase: api.PodRunning}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0", "1Gi")}}, +func TestPodRequests(t *testing.T) { + type testCase struct { + pod *api.Pod + resourceName api.ResourceName + expectedResult string + expectedError bool + } + testCases := []testCase{ + { + pod: validPod("request-cpu", 2, getResourceRequirements(getResourceList("100m", ""), getResourceList("", ""))), + resourceName: api.ResourceCPU, + expectedResult: "200m", + expectedError: false, + }, + { + pod: validPod("no-request-cpu", 2, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))), + resourceName: api.ResourceCPU, + expectedResult: "", + expectedError: true, + }, + { + pod: validPod("request-zero-cpu", 2, getResourceRequirements(getResourceList("0", ""), getResourceList("", ""))), + resourceName: api.ResourceCPU, + expectedResult: "", + expectedError: true, + }, + { + pod: validPod("request-memory", 2, getResourceRequirements(getResourceList("", "500Mi"), getResourceList("", ""))), + resourceName: api.ResourceMemory, + expectedResult: "1000Mi", + expectedError: false, + }, + { + pod: validPod("no-request-memory", 2, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))), + resourceName: api.ResourceMemory, + expectedResult: "", + expectedError: true, + }, + { + pod: validPod("request-zero-memory", 2, getResourceRequirements(getResourceList("", "0"), getResourceList("", ""))), + resourceName: api.ResourceMemory, + expectedResult: "", + expectedError: true, }, } - if IsPodMemoryUnbounded(&pod) { - t.Errorf("Expected false") - } - pod = api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "pod-running"}, - Status: api.PodStatus{Phase: api.PodRunning}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0", "0")}}, - }, - } - if !IsPodMemoryUnbounded(&pod) { - t.Errorf("Expected true") - } - - pod.Spec.Containers[0].Resources = api.ResourceRequirements{} - if !IsPodMemoryUnbounded(&pod) { - t.Errorf("Expected true") + for _, item := range testCases { + actual, err := PodRequests(item.pod, item.resourceName) + if item.expectedError != (err != nil) { + t.Errorf("Unexpected error result for pod %s for resource %s expected error %v got %v", item.pod.Name, item.resourceName, item.expectedError, err) + } + if item.expectedResult != "" && (item.expectedResult != actual.String()) { + t.Errorf("Expected %s, Actual %s, pod %s for resource %s", item.expectedResult, actual.String(), item.pod.Name, item.resourceName) + } + } +} + +func TestPodsRequests(t *testing.T) { + type testCase struct { + pods []*api.Pod + resourceName api.ResourceName + expectedResult string + } + testCases := []testCase{ + { + pods: []*api.Pod{ + validPod("request-cpu-1", 1, getResourceRequirements(getResourceList("100m", ""), getResourceList("", ""))), + validPod("request-cpu-2", 1, getResourceRequirements(getResourceList("1", ""), getResourceList("", ""))), + }, + resourceName: api.ResourceCPU, + expectedResult: "1100m", + }, + { + pods: []*api.Pod{ + 
validPod("no-request-cpu-1", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))), + validPod("no-request-cpu-2", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))), + }, + resourceName: api.ResourceCPU, + expectedResult: "", + }, + { + pods: []*api.Pod{ + validPod("request-zero-cpu-1", 1, getResourceRequirements(getResourceList("0", ""), getResourceList("", ""))), + validPod("request-zero-cpu-1", 1, getResourceRequirements(getResourceList("0", ""), getResourceList("", ""))), + }, + resourceName: api.ResourceCPU, + expectedResult: "", + }, + { + pods: []*api.Pod{ + validPod("request-memory-1", 1, getResourceRequirements(getResourceList("", "500Mi"), getResourceList("", ""))), + validPod("request-memory-2", 1, getResourceRequirements(getResourceList("", "1Gi"), getResourceList("", ""))), + }, + resourceName: api.ResourceMemory, + expectedResult: "1524Mi", + }, + } + for _, item := range testCases { + actual := PodsRequests(item.pods, item.resourceName) + if item.expectedResult != "" && (item.expectedResult != actual.String()) { + t.Errorf("Expected %s, Actual %s, pod %s for resource %s", item.expectedResult, actual.String(), item.pods[0].Name, item.resourceName) + } } } diff --git a/plugin/pkg/admission/resourcequota/admission.go b/plugin/pkg/admission/resourcequota/admission.go index cfef57c4ab7..aba2d235f45 100644 --- a/plugin/pkg/admission/resourcequota/admission.go +++ b/plugin/pkg/admission/resourcequota/admission.go @@ -190,52 +190,69 @@ func IncrementUsage(a admission.Attributes, status *api.ResourceQuotaStatus, cli } } } - // handle memory/cpu constraints, and any diff of usage based on memory/cpu on updates - if a.GetResource() == "pods" && (set[api.ResourceMemory] || set[api.ResourceCPU]) { - pod := obj.(*api.Pod) - deltaCPU := resourcequotacontroller.PodCPU(pod) - deltaMemory := resourcequotacontroller.PodMemory(pod) - // if this is an update, we need to find the delta cpu/memory usage from previous state - if a.GetOperation() == admission.Update { - oldPod, err := client.Pods(a.GetNamespace()).Get(pod.Name) - if err != nil { - return false, err - } - oldCPU := resourcequotacontroller.PodCPU(oldPod) - oldMemory := resourcequotacontroller.PodMemory(oldPod) - deltaCPU = resource.NewMilliQuantity(deltaCPU.MilliValue()-oldCPU.MilliValue(), resource.DecimalSI) - deltaMemory = resource.NewQuantity(deltaMemory.Value()-oldMemory.Value(), resource.DecimalSI) - } - hardMem, hardMemFound := status.Hard[api.ResourceMemory] - if hardMemFound { - if set[api.ResourceMemory] && resourcequotacontroller.IsPodMemoryUnbounded(pod) { - return false, fmt.Errorf("Limited to %s memory, but pod has no specified memory limit", hardMem.String()) + if a.GetResource() == "pods" { + for _, resourceName := range []api.ResourceName{api.ResourceMemory, api.ResourceCPU} { + + // ignore tracking the resource if its not in the quota document + if !set[resourceName] { + continue } - used, usedFound := status.Used[api.ResourceMemory] + + hard, hardFound := status.Hard[resourceName] + if !hardFound { + continue + } + + // if we do not yet know how much of the current resource is used, we cannot accept any request + used, usedFound := status.Used[resourceName] if !usedFound { - return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.") + return false, fmt.Errorf("Unable to admit pod until quota usage stats are calculated.") } - if used.Value()+deltaMemory.Value() > hardMem.Value() { - return false, 
fmt.Errorf("Limited to %s memory", hardMem.String()) + + // the amount of resource being requested, or an error if it does not make a request that is tracked + pod := obj.(*api.Pod) + delta, err := resourcequotacontroller.PodRequests(pod, resourceName) + + if err != nil { + return false, fmt.Errorf("Must make a non-zero request for %s since it is tracked by quota.", resourceName) + } + + // if this operation is an update, we need to find the delta usage from the previous state + if a.GetOperation() == admission.Update { + oldPod, err := client.Pods(a.GetNamespace()).Get(pod.Name) + if err != nil { + return false, err + } + + // if the previous version of the resource made a resource request, we need to subtract the old request + // from the current to get the actual resource request delta. if the previous version of the pod + // made no request on the resource, then we get an err value. we ignore the err value, and delta + // will just be equal to the total resource request on the pod since there is nothing to subtract. + oldRequest, err := resourcequotacontroller.PodRequests(oldPod, resourceName) + if err == nil { + err = delta.Sub(*oldRequest) + if err != nil { + return false, err + } + } + } + + newUsage := used.Copy() + newUsage.Add(*delta) + + // make the most precise comparison possible + newUsageValue := newUsage.Value() + hardUsageValue := hard.Value() + if newUsageValue <= resource.MaxMilliValue && hardUsageValue <= resource.MaxMilliValue { + newUsageValue = newUsage.MilliValue() + hardUsageValue = hard.MilliValue() + } + + if newUsageValue > hardUsageValue { + return false, fmt.Errorf("Unable to admit pod without exceeding quota for resource %s. Limited to %s but require %s to succeed.", resourceName, hard.String(), newUsage.String()) } else { - status.Used[api.ResourceMemory] = *resource.NewQuantity(used.Value()+deltaMemory.Value(), resource.DecimalSI) - dirty = true - } - } - hardCPU, hardCPUFound := status.Hard[api.ResourceCPU] - if hardCPUFound { - if set[api.ResourceCPU] && resourcequotacontroller.IsPodCPUUnbounded(pod) { - return false, fmt.Errorf("Limited to %s CPU, but pod has no specified cpu limit", hardCPU.String()) - } - used, usedFound := status.Used[api.ResourceCPU] - if !usedFound { - return false, fmt.Errorf("Quota usage stats are not yet known, unable to admit resource until an accurate count is completed.") - } - if used.MilliValue()+deltaCPU.MilliValue() > hardCPU.MilliValue() { - return false, fmt.Errorf("Limited to %s CPU", hardCPU.String()) - } else { - status.Used[api.ResourceCPU] = *resource.NewMilliQuantity(used.MilliValue()+deltaCPU.MilliValue(), resource.DecimalSI) + status.Used[resourceName] = *newUsage dirty = true } } diff --git a/plugin/pkg/admission/resourcequota/admission_test.go b/plugin/pkg/admission/resourcequota/admission_test.go index c066f31d81a..83c0efe05b1 100644 --- a/plugin/pkg/admission/resourcequota/admission_test.go +++ b/plugin/pkg/admission/resourcequota/admission_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package resourcequota import ( + "strconv" "testing" "k8s.io/kubernetes/pkg/admission" @@ -24,21 +25,42 @@ import ( "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/client/unversioned/testclient" + "k8s.io/kubernetes/pkg/controller/resourcequota" ) -func getResourceRequirements(cpu, memory string) api.ResourceRequirements { - res := api.ResourceRequirements{} - res.Limits = api.ResourceList{} +func getResourceList(cpu, memory string) api.ResourceList { + res := api.ResourceList{} if cpu != "" { - res.Limits[api.ResourceCPU] = resource.MustParse(cpu) + res[api.ResourceCPU] = resource.MustParse(cpu) } if memory != "" { - res.Limits[api.ResourceMemory] = resource.MustParse(memory) + res[api.ResourceMemory] = resource.MustParse(memory) } - return res } +func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements { + res := api.ResourceRequirements{} + res.Requests = requests + res.Limits = limits + return res +} + +func validPod(name string, numContainers int, resources api.ResourceRequirements) *api.Pod { + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: name, Namespace: "test"}, + Spec: api.PodSpec{}, + } + pod.Spec.Containers = make([]api.Container, 0, numContainers) + for i := 0; i < numContainers; i++ { + pod.Spec.Containers = append(pod.Spec.Containers, api.Container{ + Image: "foo:V" + strconv.Itoa(i), + Resources: resources, + }) + } + return pod +} + func TestAdmissionIgnoresDelete(t *testing.T) { namespace := "default" handler := createResourceQuota(&testclient.Fake{}, nil) @@ -64,38 +86,118 @@ func TestAdmissionIgnoresSubresources(t *testing.T) { indexer.Add(quota) - newPod := &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: quota.Namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "2Gi")}}, - }} - - err := handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, "123", "pods", "", admission.Create, nil)) + newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", ""))) + err := handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, newPod.Name, "pods", "", admission.Create, nil)) if err == nil { t.Errorf("Expected an error because the pod exceeded allowed quota") } - err = handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, "123", "pods", "subresource", admission.Create, nil)) + err = handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, newPod.Name, "pods", "subresource", admission.Create, nil)) if err != nil { t.Errorf("Did not expect an error because the action went to a subresource: %v", err) } } -func TestIncrementUsagePods(t *testing.T) { - namespace := "default" - client := testclient.NewSimpleFake(&api.PodList{ - Items: []api.Pod{ - { - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}}, - }, - }, +func TestIncrementUsagePodResources(t *testing.T) { + type testCase struct { + testName string + existing *api.Pod + input *api.Pod + resourceName api.ResourceName + hard resource.Quantity + expectedUsage resource.Quantity + expectedError bool + } + testCases := []testCase{ + { + testName: "memory-allowed", + existing: validPod("a", 1, 
getResourceRequirements(getResourceList("", "100Mi"), getResourceList("", ""))), + input: validPod("b", 1, getResourceRequirements(getResourceList("", "100Mi"), getResourceList("", ""))), + resourceName: api.ResourceMemory, + hard: resource.MustParse("500Mi"), + expectedUsage: resource.MustParse("200Mi"), + expectedError: false, }, - }) + { + testName: "memory-not-allowed", + existing: validPod("a", 1, getResourceRequirements(getResourceList("", "100Mi"), getResourceList("", ""))), + input: validPod("b", 1, getResourceRequirements(getResourceList("", "450Mi"), getResourceList("", ""))), + resourceName: api.ResourceMemory, + hard: resource.MustParse("500Mi"), + expectedError: true, + }, + { + testName: "memory-no-request", + existing: validPod("a", 1, getResourceRequirements(getResourceList("", "100Mi"), getResourceList("", ""))), + input: validPod("b", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))), + resourceName: api.ResourceMemory, + hard: resource.MustParse("500Mi"), + expectedError: true, + }, + { + testName: "cpu-allowed", + existing: validPod("a", 1, getResourceRequirements(getResourceList("1", ""), getResourceList("", ""))), + input: validPod("b", 1, getResourceRequirements(getResourceList("1", ""), getResourceList("", ""))), + resourceName: api.ResourceCPU, + hard: resource.MustParse("2"), + expectedUsage: resource.MustParse("2"), + expectedError: false, + }, + { + testName: "cpu-not-allowed", + existing: validPod("a", 1, getResourceRequirements(getResourceList("1", ""), getResourceList("", ""))), + input: validPod("b", 1, getResourceRequirements(getResourceList("600m", ""), getResourceList("", ""))), + resourceName: api.ResourceCPU, + hard: resource.MustParse("1500m"), + expectedError: true, + }, + { + testName: "cpu-no-request", + existing: validPod("a", 1, getResourceRequirements(getResourceList("1", ""), getResourceList("", ""))), + input: validPod("b", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", ""))), + resourceName: api.ResourceCPU, + hard: resource.MustParse("1500m"), + expectedError: true, + }, + } + for _, item := range testCases { + podList := &api.PodList{Items: []api.Pod{*item.existing}} + client := testclient.NewSimpleFake(podList) + status := &api.ResourceQuotaStatus{ + Hard: api.ResourceList{}, + Used: api.ResourceList{}, + } + used, err := resourcequotacontroller.PodRequests(item.existing, item.resourceName) + if err != nil { + t.Errorf("Test %s, unexpected error %v", item.testName, err) + } + status.Hard[item.resourceName] = item.hard + status.Used[item.resourceName] = *used + + dirty, err := IncrementUsage(admission.NewAttributesRecord(item.input, "Pod", item.input.Namespace, item.input.Name, "pods", "", admission.Create, nil), status, client) + if err == nil && item.expectedError { + t.Errorf("Test %s, expected error", item.testName) + } + if err != nil && !item.expectedError { + t.Errorf("Test %s, unexpected error", err) + } + if !item.expectedError { + if !dirty { + t.Errorf("Test %s, expected the quota to be dirty", item.testName) + } + quantity := status.Used[item.resourceName] + if quantity.String() != item.expectedUsage.String() { + t.Errorf("Test %s, expected usage %s, actual usage %s", item.testName, item.expectedUsage.String(), quantity.String()) + } + } + } +} + +func TestIncrementUsagePods(t *testing.T) { + pod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))) + podList := &api.PodList{Items: []api.Pod{*pod}} + client := 
testclient.NewSimpleFake(podList) status := &api.ResourceQuotaStatus{ Hard: api.ResourceList{}, Used: api.ResourceList{}, @@ -103,7 +205,7 @@ func TestIncrementUsagePods(t *testing.T) { r := api.ResourcePods status.Hard[r] = resource.MustParse("2") status.Used[r] = resource.MustParse("1") - dirty, err := IncrementUsage(admission.NewAttributesRecord(&api.Pod{}, "Pod", namespace, "name", "pods", "", admission.Create, nil), status, client) + dirty, err := IncrementUsage(admission.NewAttributesRecord(&api.Pod{}, "Pod", pod.Namespace, "new-pod", "pods", "", admission.Create, nil), status, client) if err != nil { t.Errorf("Unexpected error: %v", err) } @@ -116,233 +218,10 @@ func TestIncrementUsagePods(t *testing.T) { } } -func TestIncrementUsageMemory(t *testing.T) { - namespace := "default" - client := testclient.NewSimpleFake(&api.PodList{ - Items: []api.Pod{ - { - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}}, - }, - }, - }, - }) - status := &api.ResourceQuotaStatus{ - Hard: api.ResourceList{}, - Used: api.ResourceList{}, - } - r := api.ResourceMemory - status.Hard[r] = resource.MustParse("2Gi") - status.Used[r] = resource.MustParse("1Gi") - - newPod := &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}}, - }} - dirty, err := IncrementUsage(admission.NewAttributesRecord(newPod, "Pod", namespace, "name", "pods", "", admission.Create, nil), status, client) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if !dirty { - t.Errorf("Expected the status to get incremented, therefore should have been dirty") - } - expectedVal := resource.MustParse("2Gi") - quantity := status.Used[r] - if quantity.Value() != expectedVal.Value() { - t.Errorf("Expected %v was %v", expectedVal.Value(), quantity.Value()) - } -} - -func TestExceedUsageMemory(t *testing.T) { - namespace := "default" - client := testclient.NewSimpleFake(&api.PodList{ - Items: []api.Pod{ - { - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}}, - }, - }, - }, - }) - status := &api.ResourceQuotaStatus{ - Hard: api.ResourceList{}, - Used: api.ResourceList{}, - } - r := api.ResourceMemory - status.Hard[r] = resource.MustParse("2Gi") - status.Used[r] = resource.MustParse("1Gi") - - newPod := &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "3Gi")}}, - }} - _, err := IncrementUsage(admission.NewAttributesRecord(newPod, "Pod", namespace, "name", "pods", "", admission.Create, nil), status, client) - if err == nil { - t.Errorf("Expected memory usage exceeded error") - } -} - -func TestIncrementUsageCPU(t *testing.T) { - namespace := "default" - client := testclient.NewSimpleFake(&api.PodList{ - Items: []api.Pod{ - { - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: 
"image", Resources: getResourceRequirements("100m", "1Gi")}}, - }, - }, - }, - }) - status := &api.ResourceQuotaStatus{ - Hard: api.ResourceList{}, - Used: api.ResourceList{}, - } - r := api.ResourceCPU - status.Hard[r] = resource.MustParse("200m") - status.Used[r] = resource.MustParse("100m") - - newPod := &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}}, - }} - dirty, err := IncrementUsage(admission.NewAttributesRecord(newPod, "Pod", namespace, "name", "pods", "", admission.Create, nil), status, client) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if !dirty { - t.Errorf("Expected the status to get incremented, therefore should have been dirty") - } - expectedVal := resource.MustParse("200m") - quantity := status.Used[r] - if quantity.Value() != expectedVal.Value() { - t.Errorf("Expected %v was %v", expectedVal.Value(), quantity.Value()) - } -} - -func TestUnboundedCPU(t *testing.T) { - namespace := "default" - client := testclient.NewSimpleFake(&api.PodList{ - Items: []api.Pod{ - { - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}}, - }, - }, - }, - }) - status := &api.ResourceQuotaStatus{ - Hard: api.ResourceList{}, - Used: api.ResourceList{}, - } - r := api.ResourceCPU - status.Hard[r] = resource.MustParse("200m") - status.Used[r] = resource.MustParse("100m") - - newPod := &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("0m", "1Gi")}}, - }} - _, err := IncrementUsage(admission.NewAttributesRecord(newPod, "Pod", namespace, "name", "pods", "", admission.Create, nil), status, client) - if err == nil { - t.Errorf("Expected CPU unbounded usage error") - } -} - -func TestUnboundedMemory(t *testing.T) { - namespace := "default" - client := testclient.NewSimpleFake(&api.PodList{ - Items: []api.Pod{ - { - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}}, - }, - }, - }, - }) - status := &api.ResourceQuotaStatus{ - Hard: api.ResourceList{}, - Used: api.ResourceList{}, - } - r := api.ResourceMemory - status.Hard[r] = resource.MustParse("10Gi") - status.Used[r] = resource.MustParse("1Gi") - - newPod := &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("250m", "0")}}, - }} - _, err := IncrementUsage(admission.NewAttributesRecord(newPod, "Pod", namespace, "name", "pods", "", admission.Create, nil), status, client) - if err == nil { - t.Errorf("Expected memory unbounded usage error") - } -} - -func TestExceedUsageCPU(t *testing.T) { - namespace := "default" - client := testclient.NewSimpleFake(&api.PodList{ - Items: []api.Pod{ - { - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - 
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}}, - }, - }, - }, - }) - status := &api.ResourceQuotaStatus{ - Hard: api.ResourceList{}, - Used: api.ResourceList{}, - } - r := api.ResourceCPU - status.Hard[r] = resource.MustParse("200m") - status.Used[r] = resource.MustParse("100m") - - newPod := &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("500m", "1Gi")}}, - }} - _, err := IncrementUsage(admission.NewAttributesRecord(newPod, "Pod", namespace, newPod.Name, "pods", "", admission.Create, nil), status, client) - if err == nil { - t.Errorf("Expected CPU usage exceeded error") - } -} - func TestExceedUsagePods(t *testing.T) { - namespace := "default" - client := testclient.NewSimpleFake(&api.PodList{ - Items: []api.Pod{ - { - ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace}, - Spec: api.PodSpec{ - Volumes: []api.Volume{{Name: "vol"}}, - Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "1Gi")}}, - }, - }, - }, - }) + pod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))) + podList := &api.PodList{Items: []api.Pod{*pod}} + client := testclient.NewSimpleFake(podList) status := &api.ResourceQuotaStatus{ Hard: api.ResourceList{}, Used: api.ResourceList{}, @@ -350,7 +229,7 @@ func TestExceedUsagePods(t *testing.T) { r := api.ResourcePods status.Hard[r] = resource.MustParse("1") status.Used[r] = resource.MustParse("1") - _, err := IncrementUsage(admission.NewAttributesRecord(&api.Pod{}, "Pod", namespace, "name", "pods", "", admission.Create, nil), status, client) + _, err := IncrementUsage(admission.NewAttributesRecord(&api.Pod{}, "Pod", pod.Namespace, "name", "pods", "", admission.Create, nil), status, client) if err == nil { t.Errorf("Expected error because this would exceed your quota") } From 9ca9ab1b66a2d37e3c13e771047cfa65e7970321 Mon Sep 17 00:00:00 2001 From: Sami Wagiaalla Date: Fri, 26 Jun 2015 14:51:38 -0400 Subject: [PATCH 037/101] iSCSI Plugin: Remove remounting of device in global path. This code was originally added because the first mount call did not respect the ro option. This no longer seems to be the cause so there is no need to use remount. 
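In other words, the read-only option can be honored by the initial mount performed in diskSetUp instead of being patched up afterwards with a remount. A rough sketch of that idea, assuming the generic mounter interface Mount(source, target, fstype string, options []string) error; the names readOnly, mounter, globalPDPath and dir stand in for the plugin's actual fields, and the exact option handling inside diskSetUp may differ:

    options := []string{"bind"}
    if readOnly {
            // honor ro on the first mount rather than remounting later
            options = append(options, "ro")
    }
    if err := mounter.Mount(globalPDPath, dir, "", options); err != nil {
            return err
    }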
Signed-off-by: Sami Wagiaalla --- pkg/volume/iscsi/iscsi.go | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go index 10adc7f0683..ef9dca56fb5 100644 --- a/pkg/volume/iscsi/iscsi.go +++ b/pkg/volume/iscsi/iscsi.go @@ -165,16 +165,8 @@ func (b *iscsiDiskBuilder) SetUpAt(dir string) error { err := diskSetUp(b.manager, *b, dir, b.mounter) if err != nil { glog.Errorf("iscsi: failed to setup") - return err } - globalPDPath := b.manager.MakeGlobalPDName(*b.iscsiDisk) - var options []string - if b.readOnly { - options = []string{"remount", "ro"} - } else { - options = []string{"remount", "rw"} - } - return b.mounter.Mount(globalPDPath, dir, "", options) + return err } type iscsiDiskCleaner struct { From 124929e373b638721d80c6f29e81c4e16adb07c7 Mon Sep 17 00:00:00 2001 From: James DeFelice Date: Fri, 28 Aug 2015 07:19:54 +0000 Subject: [PATCH 038/101] refactoring of child process handling code - tasks subpackage responsible for managing system process lifecycle - minion propagates SIGTERM to child procs - child procs will be SIGKILL'd upon parent process death --- contrib/mesos/pkg/minion/server.go | 204 ++++++------ contrib/mesos/pkg/minion/tasks/doc.go | 20 ++ contrib/mesos/pkg/minion/tasks/events.go | 98 ++++++ contrib/mesos/pkg/minion/tasks/task.go | 351 ++++++++++++++++++++ contrib/mesos/pkg/minion/tasks/task_test.go | 222 +++++++++++++ 5 files changed, 790 insertions(+), 105 deletions(-) create mode 100644 contrib/mesos/pkg/minion/tasks/doc.go create mode 100644 contrib/mesos/pkg/minion/tasks/events.go create mode 100644 contrib/mesos/pkg/minion/tasks/task.go create mode 100644 contrib/mesos/pkg/minion/tasks/task_test.go diff --git a/contrib/mesos/pkg/minion/server.go b/contrib/mesos/pkg/minion/server.go index 7a6adff4cc1..1ec81f7cb44 100644 --- a/contrib/mesos/pkg/minion/server.go +++ b/contrib/mesos/pkg/minion/server.go @@ -21,15 +21,15 @@ import ( "io" "io/ioutil" "os" - "os/exec" + "os/signal" "path" "strings" - "time" + "syscall" exservice "k8s.io/kubernetes/contrib/mesos/pkg/executor/service" "k8s.io/kubernetes/contrib/mesos/pkg/hyperkube" "k8s.io/kubernetes/contrib/mesos/pkg/minion/config" - "k8s.io/kubernetes/contrib/mesos/pkg/runtime" + "k8s.io/kubernetes/contrib/mesos/pkg/minion/tasks" "k8s.io/kubernetes/pkg/api/resource" client "k8s.io/kubernetes/pkg/client/unversioned" @@ -39,6 +39,11 @@ import ( "gopkg.in/natefinch/lumberjack.v2" ) +const ( + proxyLogFilename = "proxy.log" + executorLogFilename = "executor.log" +) + type MinionServer struct { // embed the executor server to be able to use its flags // TODO(sttts): get rid of this mixing of the minion and the executor server with a multiflags implementation for km @@ -48,8 +53,7 @@ type MinionServer struct { hks hyperkube.Interface clientConfig *client.Config kmBinary string - done chan struct{} // closed when shutting down - exit chan error // to signal fatal errors + tasks []*tasks.Task pathOverride string // the PATH environment for the sub-processes cgroupPrefix string // e.g. 
mesos @@ -69,15 +73,11 @@ func NewMinionServer() *MinionServer { s := &MinionServer{ KubeletExecutorServer: exservice.NewKubeletExecutorServer(), privateMountNS: false, // disabled until Docker supports customization of the parent mount namespace - done: make(chan struct{}), - exit: make(chan error), - - cgroupPrefix: config.DefaultCgroupPrefix, - logMaxSize: config.DefaultLogMaxSize(), - logMaxBackups: config.DefaultLogMaxBackups, - logMaxAgeInDays: config.DefaultLogMaxAgeInDays, - - runProxy: true, + cgroupPrefix: config.DefaultCgroupPrefix, + logMaxSize: config.DefaultLogMaxSize(), + logMaxBackups: config.DefaultLogMaxBackups, + logMaxAgeInDays: config.DefaultLogMaxAgeInDays, + runProxy: true, } // cache this for later use @@ -141,10 +141,13 @@ func (ms *MinionServer) launchProxyServer() { args = append(args, fmt.Sprintf("--hostname-override=%s", ms.KubeletExecutorServer.HostnameOverride)) } - ms.launchHyperkubeServer(hyperkube.CommandProxy, &args, "proxy.log") + ms.launchHyperkubeServer(hyperkube.CommandProxy, args, proxyLogFilename, nil) } -func (ms *MinionServer) launchExecutorServer() { +// launchExecutorServer returns a chan that closes upon kubelet-executor death. since the kubelet- +// executor doesn't support failover right now, the right thing to do is to fail completely since all +// pods will be lost upon restart and we want mesos to recover the resources from them. +func (ms *MinionServer) launchExecutorServer() <-chan struct{} { allArgs := os.Args[1:] // filter out minion flags, leaving those for the executor @@ -159,111 +162,65 @@ func (ms *MinionServer) launchExecutorServer() { } // run executor and quit minion server when this exits cleanly - err := ms.launchHyperkubeServer(hyperkube.CommandExecutor, &executorArgs, "executor.log") - if err != nil { - // just return, executor will be restarted on error - log.Error(err) - return + execDied := make(chan struct{}) + decorator := func(t *tasks.Task) *tasks.Task { + t.Finished = func(_ bool) bool { + // this func implements the task.finished spec, so when the executor exits + // we return false to indicate that it should not be restarted. we also + // close execDied to signal interested listeners. + close(execDied) + return false + } + // since we only expect to die once, and there is no restart; don't delay any longer than needed + t.RestartDelay = 0 + return t } - - log.Info("Executor exited cleanly, stopping the minion") - ms.exit <- nil + ms.launchHyperkubeServer(hyperkube.CommandExecutor, executorArgs, executorLogFilename, decorator) + return execDied } -func (ms *MinionServer) launchHyperkubeServer(server string, args *[]string, logFileName string) error { +func (ms *MinionServer) launchHyperkubeServer(server string, args []string, logFileName string, decorator func(*tasks.Task) *tasks.Task) { log.V(2).Infof("Spawning hyperkube %v with args '%+v'", server, args) - // prepare parameters - kmArgs := []string{server} - for _, arg := range *args { - kmArgs = append(kmArgs, arg) - } - - // create command - cmd := exec.Command(ms.kmBinary, kmArgs...) 
- if _, err := cmd.StdoutPipe(); err != nil { - // fatal error => terminate minion - err = fmt.Errorf("error getting stdout of %v: %v", server, err) - ms.exit <- err - return err - } - stderrLogs, err := cmd.StderrPipe() - if err != nil { - // fatal error => terminate minion - err = fmt.Errorf("error getting stderr of %v: %v", server, err) - ms.exit <- err - return err - } - - ch := make(chan struct{}) - go func() { - defer func() { - select { - case <-ch: - log.Infof("killing %v process...", server) - if err = cmd.Process.Kill(); err != nil { - log.Errorf("failed to kill %v process: %v", server, err) - } - default: - } - }() - - maxSize := ms.logMaxSize.Value() - if maxSize > 0 { - // convert to MB - maxSize = maxSize / 1024 / 1024 - if maxSize == 0 { - log.Warning("maximal log file size is rounded to 1 MB") - maxSize = 1 - } + kmArgs := append([]string{server}, args...) + maxSize := ms.logMaxSize.Value() + if maxSize > 0 { + // convert to MB + maxSize = maxSize / 1024 / 1024 + if maxSize == 0 { + log.Warning("maximal log file size is rounded to 1 MB") + maxSize = 1 } - writer := &lumberjack.Logger{ + } + + writerFunc := func() io.WriteCloser { + return &lumberjack.Logger{ Filename: logFileName, MaxSize: int(maxSize), MaxBackups: ms.logMaxBackups, MaxAge: ms.logMaxAgeInDays, } - defer writer.Close() - - log.V(2).Infof("Starting logging for %v: max log file size %d MB, keeping %d backups, for %d days", server, maxSize, ms.logMaxBackups, ms.logMaxAgeInDays) - - <-ch - written, err := io.Copy(writer, stderrLogs) - if err != nil { - log.Errorf("error writing data to %v: %v", logFileName, err) - } - - log.Infof("wrote %d bytes to %v", written, logFileName) - }() + } // use given environment, but add /usr/sbin to the path for the iptables binary used in kube-proxy + var kmEnv []string if ms.pathOverride != "" { env := os.Environ() - cmd.Env = make([]string, 0, len(env)) + kmEnv = make([]string, 0, len(env)) for _, e := range env { if !strings.HasPrefix(e, "PATH=") { - cmd.Env = append(cmd.Env, e) + kmEnv = append(kmEnv, e) } } - cmd.Env = append(cmd.Env, "PATH="+ms.pathOverride) + kmEnv = append(kmEnv, "PATH="+ms.pathOverride) } - // if the server fails to start then we exit the executor, otherwise - // wait for the proxy process to end (and release resources after). - if err := cmd.Start(); err != nil { - // fatal error => terminate minion - err = fmt.Errorf("error starting %v: %v", server, err) - ms.exit <- err - return err + t := tasks.New(server, ms.kmBinary, kmArgs, kmEnv, writerFunc) + if decorator != nil { + t = decorator(t) } - close(ch) - if err := cmd.Wait(); err != nil { - log.Errorf("%v exited with error: %v", server, err) - err = fmt.Errorf("%v exited with error: %v", server, err) - return err - } - - return nil + go t.Start() + ms.tasks = append(ms.tasks, t) } // runs the main kubelet loop, closing the kubeletFinished chan when the loop exits. 
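To illustrate the lifecycle guarantees this patch describes (SIGTERM propagated to child procs, SIGKILL on parent death): the tasks package introduced below starts each child process in its own process group bound to the parent via PDEATHSIG, and stops it by signalling the whole group. A condensed, illustrative sketch under those assumptions (names here are placeholders, not the package's API):

package example

import (
	"os/exec"
	"syscall"
)

// startSupervised launches a child in its own process group (so the whole tree
// can be signalled at once) and binds it to the parent's lifetime: the kernel
// delivers SIGKILL to the child if this process dies.
func startSupervised(bin string, args ...string) (*exec.Cmd, error) {
	cmd := exec.Command(bin, args...)
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Setpgid:   true,
		Pdeathsig: syscall.SIGKILL,
	}
	return cmd, cmd.Start()
}

// stopSupervised asks the whole process group to terminate gracefully;
// the negative pid addresses the group rather than a single process.
func stopSupervised(cmd *exec.Cmd) error {
	return syscall.Kill(-cmd.Process.Pid, syscall.SIGTERM)
}
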
@@ -295,15 +252,52 @@ func (ms *MinionServer) Run(hks hyperkube.Interface, _ []string) error { cgroupLogger("using cgroup-root %q", ms.cgroupRoot) // run subprocesses until ms.done is closed on return of this function - defer close(ms.done) if ms.runProxy { - go runtime.Until(ms.launchProxyServer, 5*time.Second, ms.done) + ms.launchProxyServer() } - go runtime.Until(ms.launchExecutorServer, 5*time.Second, ms.done) - // wait until minion exit is requested - // don't close ms.exit here to avoid panics of go routines writing an error to it - return <-ms.exit + // abort closes when the kubelet-executor dies + abort := ms.launchExecutorServer() + shouldQuit := termSignalListener(abort) + te := tasks.MergeOutput(ms.tasks, shouldQuit) + + // TODO(jdef) do something fun here, such as reporting task completion to the apiserver + + <-te.Close().Done() // we don't listen for any specific events yet; wait for all tasks to finish + return nil +} + +// termSignalListener returns a signal chan that closes when either (a) the process receives a termination +// signal: SIGTERM, SIGINT, or SIGHUP; or (b) the abort chan closes. +func termSignalListener(abort <-chan struct{}) <-chan struct{} { + shouldQuit := make(chan struct{}) + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh) + + go func() { + defer close(shouldQuit) + for { + select { + case <-abort: + log.Infof("executor died, aborting") + return + case s, ok := <-sigCh: + if !ok { + return + } + switch s { + case os.Interrupt, os.Signal(syscall.SIGTERM), os.Signal(syscall.SIGINT), os.Signal(syscall.SIGHUP): + log.Infof("received signal %q, aborting", s) + return + case os.Signal(syscall.SIGCHLD): // who cares? + default: + log.Errorf("unexpected signal: %T %#v", s, s) + } + + } + } + }() + return shouldQuit } func (ms *MinionServer) AddExecutorFlags(fs *pflag.FlagSet) { diff --git a/contrib/mesos/pkg/minion/tasks/doc.go b/contrib/mesos/pkg/minion/tasks/doc.go new file mode 100644 index 00000000000..51ad8ac5edb --- /dev/null +++ b/contrib/mesos/pkg/minion/tasks/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package tasks provides an API for supervising system processes as Task's. +// It provides stronger guarantees with respect to process lifecycle than a +// standalone kubelet running static pods. +package tasks diff --git a/contrib/mesos/pkg/minion/tasks/events.go b/contrib/mesos/pkg/minion/tasks/events.go new file mode 100644 index 00000000000..aff85af01fc --- /dev/null +++ b/contrib/mesos/pkg/minion/tasks/events.go @@ -0,0 +1,98 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tasks + +type Events interface { + // Close stops delivery of events in the completion and errors channels; callers must close this when they intend to no longer read from completion() or errors() + Close() Events + + // Completion reports Completion events as they happen + Completion() <-chan *Completion + + // Done returns a signal chan that closes when all tasks have completed and there are no more events to deliver + Done() <-chan struct{} +} + +type eventsImpl struct { + tc chan *Completion + stopForwarding chan struct{} + done <-chan struct{} +} + +func newEventsImpl(tcin <-chan *Completion, done <-chan struct{}) *eventsImpl { + ei := &eventsImpl{ + tc: make(chan *Completion), + stopForwarding: make(chan struct{}), + done: done, + } + go func() { + defer close(ei.tc) + forwardCompletionUntil(tcin, ei.tc, ei.stopForwarding, done, nil) + }() + return ei +} + +func (e *eventsImpl) Close() Events { close(e.stopForwarding); return e } +func (e *eventsImpl) Completion() <-chan *Completion { return e.tc } +func (e *eventsImpl) Done() <-chan struct{} { return e.done } + +// forwardCompletionUntil is a generic pipe that forwards objects between channels. +// if discard is closed, objects are silently dropped. +// if tap != nil then it's invoked for each object as it's read from tin, but before it's written to tch. +// returns when either reading from tin completes (no more objects, and is closed), or else +// abort is closed, which ever happens first. +func forwardCompletionUntil(tin <-chan *Completion, tch chan<- *Completion, discard <-chan struct{}, abort <-chan struct{}, tap func(*Completion, bool)) { + var tc *Completion + var ok bool +forwardLoop: + for { + select { + case tc, ok = <-tin: + if !ok { + return + } + if tap != nil { + tap(tc, false) + } + select { + case <-abort: + break forwardLoop + case <-discard: + case tch <- tc: + } + case <-abort: + // best effort + select { + case tc, ok = <-tin: + if ok { + if tap != nil { + tap(tc, true) + } + break forwardLoop + } + default: + } + return + } + } + // best effort + select { + case tch <- tc: + case <-discard: + default: + } +} diff --git a/contrib/mesos/pkg/minion/tasks/task.go b/contrib/mesos/pkg/minion/tasks/task.go new file mode 100644 index 00000000000..51dfcaf07f0 --- /dev/null +++ b/contrib/mesos/pkg/minion/tasks/task.go @@ -0,0 +1,351 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tasks + +import ( + "fmt" + "io" + "os/exec" + "sync" + "sync/atomic" + "syscall" + "time" + + log "github.com/golang/glog" + "k8s.io/kubernetes/contrib/mesos/pkg/runtime" +) + +const defaultTaskRestartDelay = 5 * time.Second + +// Completion represents the termination of a Task process. Each process execution should +// yield (barring drops because of an abort signal) exactly one Completion. +type Completion struct { + name string // name of the task + code int // exit code that the task process completed with + err error // process management errors are reported here +} + +// systemProcess is a useful abstraction for testing +type systemProcess interface { + // Wait works like exec.Cmd.Wait() + Wait() error + + // Kill returns the pid of the process that was killed + Kill() (int, error) +} + +type cmdProcess struct { + delegate *exec.Cmd +} + +func (cp *cmdProcess) Wait() error { + return cp.delegate.Wait() +} + +func (cp *cmdProcess) Kill() (int, error) { + // kill the entire process group, not just the one process + pid := cp.delegate.Process.Pid + processGroup := 0 - pid + + // we send a SIGTERM here for a graceful stop. users of this package should + // wait for tasks to complete normally. as a fallback/safeguard, child procs + // are spawned in notStartedTask to receive a SIGKILL when this process dies. + rc := syscall.Kill(processGroup, syscall.SIGTERM) + return pid, rc +} + +// task is a specification for running a system process; it provides hooks for customizing +// logging and restart handling as well as provides event channels for communicating process +// termination and errors related to process management. +type Task struct { + Finished func(restarting bool) bool // callback invoked when a task process has completed; if `restarting` then it will be restarted if it returns true + RestartDelay time.Duration // interval between repeated task restarts + + name string // required: unique name for this task + bin string // required: path to executable + args []string // optional: process arguments + env []string // optional: process environment override + createLogger func() io.WriteCloser // factory func that builds a log writer + cmd systemProcess // process that we started + completedCh chan *Completion // reports exit codes encountered when task processes exit, or errors during process management + shouldQuit chan struct{} // shouldQuit is closed to indicate that the task should stop its running process, if any + done chan struct{} // done closes when all processes related to the task have terminated + initialState taskStateFn // prepare and start a new live process, defaults to notStartedTask; should be set by run() + runLatch int32 // guard against multiple Task.run calls +} + +// New builds a newly initialized task object but does not start any processes for it. callers +// are expected to invoke task.run(...) on their own. +func New(name, bin string, args, env []string, cl func() io.WriteCloser) *Task { + return &Task{ + name: name, + bin: bin, + args: args, + env: env, + createLogger: cl, + completedCh: make(chan *Completion), + shouldQuit: make(chan struct{}), + done: make(chan struct{}), + RestartDelay: defaultTaskRestartDelay, + Finished: func(restarting bool) bool { return restarting }, + } +} + +// Start spawns a goroutine to execute the Task. Panics if invoked more than once. 
+func (t *Task) Start() { + go t.run(notStartedTask) +} + +// run executes the state machine responsible for starting, monitoring, and possibly restarting +// a system process for the task. The initialState func is the entry point of the state machine. +// Upon returning the done and completedCh chans are all closed. +func (t *Task) run(initialState taskStateFn) { + if !atomic.CompareAndSwapInt32(&t.runLatch, 0, 1) { + panic("Task.run() may only be invoked once") + } + t.initialState = initialState + + defer close(t.done) + defer close(t.completedCh) + + state := initialState + for state != nil { + next := state(t) + state = next + } +} + +func (t *Task) tryComplete(tc *Completion) { + select { + case <-t.shouldQuit: + // best effort + select { + case t.completedCh <- tc: + default: + } + case t.completedCh <- tc: + } +} + +// tryError is a convenience func that invokes tryComplete with a completion error +func (t *Task) tryError(err error) { + t.tryComplete(&Completion{err: err}) +} + +type taskStateFn func(*Task) taskStateFn + +func taskShouldRestart(t *Task) taskStateFn { + // make our best effort to stop here if signalled (shouldQuit). not doing so here + // could add cost later (a process might be launched). + + // sleep for a bit; then return t.initialState + tm := time.NewTimer(t.RestartDelay) + defer tm.Stop() + select { + case <-tm.C: + select { + case <-t.shouldQuit: + default: + if t.Finished(true) { + select { + case <-t.shouldQuit: + // the world has changed, die + return nil + default: + } + return t.initialState + } + // finish call decided not to respawn, so die + return nil + } + case <-t.shouldQuit: + } + + // we're quitting, tell the Finished callback and then die + t.Finished(false) + return nil +} + +func (t *Task) initLogging(r io.Reader) { + writer := t.createLogger() + go func() { + defer writer.Close() + _, err := io.Copy(writer, r) + if err != nil && err != io.EOF { + // using tryComplete is racy because the state machine closes completedCh and + // so we don't want to attempt to write to a closed/closing chan. so + // just log this for now. + log.Errorf("logger for task %q crashed: %v", t.bin, err) + } + }() +} + +// notStartedTask spawns the given task and transitions to a startedTask state +func notStartedTask(t *Task) taskStateFn { + log.Infof("starting task process %q with args '%+v'", t.bin, t.args) + + // create command + cmd := exec.Command(t.bin, t.args...) 
+ if _, err := cmd.StdoutPipe(); err != nil { + t.tryError(fmt.Errorf("error getting stdout of %v: %v", t.name, err)) + return taskShouldRestart + } + stderrLogs, err := cmd.StderrPipe() + if err != nil { + t.tryError(fmt.Errorf("error getting stderr of %v: %v", t.name, err)) + return taskShouldRestart + } + + t.initLogging(stderrLogs) + if len(t.env) > 0 { + cmd.Env = t.env + } + cmd.SysProcAttr = &syscall.SysProcAttr{ + Setpgid: true, + Pdeathsig: syscall.SIGKILL, // see cmdProcess.Kill + } + + // last min check for shouldQuit here + select { + case <-t.shouldQuit: + t.tryError(fmt.Errorf("task execution canceled, aborting process launch")) + return taskShouldRestart + default: + } + + if err := cmd.Start(); err != nil { + t.tryError(fmt.Errorf("failed to start task process %q: %v", t.bin, err)) + return taskShouldRestart + } + log.Infoln("task started", t.name) + t.cmd = &cmdProcess{delegate: cmd} + return taskRunning +} + +type exitError interface { + error + + // see os.ProcessState.Sys: returned value can be converted to something like syscall.WaitStatus + Sys() interface{} +} + +func taskRunning(t *Task) taskStateFn { + waiter := t.cmd.Wait + var sendOnce sync.Once + trySend := func(wr *Completion) { + // guarded with once because we're only allowed to send a single "result" for each + // process termination. for example, if Kill() results in an error because Wait() + // already completed we only want to return a single result for the process. + sendOnce.Do(func() { + t.tryComplete(wr) + }) + } + // listen for normal process completion in a goroutine; don't block because we need to listen for shouldQuit + waitCh := make(chan *Completion, 1) + go func() { + wr := &Completion{} + defer func() { + waitCh <- wr + close(waitCh) + }() + + if err := waiter(); err != nil { + if exitError, ok := err.(exitError); ok { + if waitStatus, ok := exitError.Sys().(syscall.WaitStatus); ok { + wr.name = t.name + wr.code = waitStatus.ExitStatus() + return + } + } + wr.err = fmt.Errorf("task wait ended strangely for %q: %v", t.bin, err) + } else { + wr.name = t.name + } + }() + + // wait for the process to complete, or else for a "quit" signal on the task (at which point we'll attempt to kill manually) + select { + case <-t.shouldQuit: + // check for tie + select { + case wr := <-waitCh: + // we got a signal to quit, but we're already finished; attempt best effort delvery + trySend(wr) + default: + // Wait() has not exited yet, kill the process + log.Infof("killing %s : %s", t.name, t.bin) + pid, err := t.cmd.Kill() + if err != nil { + trySend(&Completion{err: fmt.Errorf("failed to kill process: %q pid %d: %v", t.bin, pid, err)}) + } + // else, Wait() should complete and send a completion event + } + case wr := <-waitCh: + // task has completed before we were told to quit, pass along completion and error information + trySend(wr) + } + return taskShouldRestart +} + +// forwardUntil forwards task process completion status and errors to the given output +// chans until either the task terminates or abort is closed. +func (t *Task) forwardUntil(tch chan<- *Completion, abort <-chan struct{}) { + // merge task completion and error until we're told to die, then + // tell the task to stop + defer close(t.shouldQuit) + forwardCompletionUntil(t.completedCh, tch, nil, abort, nil) +} + +// MergeOutput waits for the given tasks to complete. meanwhile it logs each time a task +// process completes or generates an error. 
when shouldQuit closes, tasks are canceled and this +// func eventually returns once all ongoing event handlers have completed running. +func MergeOutput(tasks []*Task, shouldQuit <-chan struct{}) Events { + tc := make(chan *Completion) + + var waitForTasks sync.WaitGroup + waitForTasks.Add(len(tasks)) + + for _, t := range tasks { + t := t + // translate task dead signal into Done + go func() { + <-t.done + waitForTasks.Done() + }() + // fan-in task completion and error events to tc, ec + go t.forwardUntil(tc, shouldQuit) + } + + tclistener := make(chan *Completion) + done := runtime.After(func() { + completionFinished := runtime.After(func() { + defer close(tclistener) + forwardCompletionUntil(tc, tclistener, nil, shouldQuit, func(tt *Completion, shutdown bool) { + prefix := "" + if shutdown { + prefix = "(shutdown) " + } + log.Infof(prefix+"task %q exited with status %d", tt.name, tt.code) + }) + }) + waitForTasks.Wait() + close(tc) + <-completionFinished + }) + ei := newEventsImpl(tclistener, done) + return ei +} diff --git a/contrib/mesos/pkg/minion/tasks/task_test.go b/contrib/mesos/pkg/minion/tasks/task_test.go new file mode 100644 index 00000000000..255e63489b6 --- /dev/null +++ b/contrib/mesos/pkg/minion/tasks/task_test.go @@ -0,0 +1,222 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tasks + +import ( + "bytes" + "errors" + "fmt" + "io" + "sync" + "syscall" + "testing" + + log "github.com/golang/glog" +) + +type badWriteCloser struct { + err error +} + +func (b *badWriteCloser) Write(_ []byte) (int, error) { return 0, b.err } +func (b *badWriteCloser) Close() error { return b.err } + +type discardCloser int + +func (d discardCloser) Write(b []byte) (int, error) { return len(b), nil } +func (d discardCloser) Close() error { return nil } + +var devNull = func() io.WriteCloser { return discardCloser(0) } + +type fakeExitError uint32 + +func (f fakeExitError) Sys() interface{} { return syscall.WaitStatus(f << 8) } +func (f fakeExitError) Error() string { return fmt.Sprintf("fake-exit-error: %d", f) } + +type fakeProcess struct { + done chan struct{} + pid int + err error +} + +func (f *fakeProcess) Wait() error { + <-f.done + return f.err +} +func (f *fakeProcess) Kill() (int, error) { + close(f.done) + return f.pid, f.err +} +func (f *fakeProcess) exit(code int) { + f.err = fakeExitError(code) + close(f.done) +} + +func newFakeProcess() *fakeProcess { + return &fakeProcess{ + done: make(chan struct{}), + } +} + +func TestBadLogger(t *testing.T) { + err := errors.New("qux") + fp := newFakeProcess() + tt := New("foo", "bar", nil, nil, func() io.WriteCloser { + defer func() { + fp.pid = 123 // sanity check + fp.Kill() // this causes Wait() to return + }() + return &badWriteCloser{err} + }) + + tt.RestartDelay = 0 // don't slow the test down for no good reason + + finishCalled := make(chan struct{}) + tt.Finished = func(ok bool) bool { + log.Infof("tt.Finished: ok %t", ok) + if ok { + close(finishCalled) + } + return false // never respawn, this causes t.done to close + } + + // abuse eventsImpl: we're not going to listen on the task completion or event chans, + // and we don't want to block the state machine, so discard all task events as they happen + ei := newEventsImpl(tt.completedCh, tt.done) + ei.Close() + + go tt.run(func(_ *Task) taskStateFn { + log.Infof("tt initialized") + tt.initLogging(bytes.NewBuffer(([]byte)("unlogged bytes"))) + tt.cmd = fp + return taskRunning + }) + + // if the logger fails the task will be killed + // badWriteLogger generates an error immediately and results in a task kill + <-finishCalled + <-tt.done + + // this should never data race since the state machine is dead at this point + if fp.pid != 123 { + t.Fatalf("incorrect pid, expected 123 not %d", fp.pid) + } + + // TODO(jdef) would be nice to check for a specific error that indicates the logger died +} + +func TestMergeOutput(t *testing.T) { + var tasksStarted, tasksDone sync.WaitGroup + tasksDone.Add(2) + tasksStarted.Add(2) + + t1 := New("foo", "", nil, nil, devNull) + t1exited := make(chan struct{}) + t1.RestartDelay = 0 // don't slow the test down for no good reason + t1.Finished = func(ok bool) bool { + // we expect each of these cases to happen exactly once + if !ok { + tasksDone.Done() + } else { + close(t1exited) + } + return ok + } + go t1.run(func(t *Task) taskStateFn { + defer tasksStarted.Done() + t.initLogging(bytes.NewBuffer([]byte{})) + t.cmd = newFakeProcess() + return taskRunning + }) + + t2 := New("bar", "", nil, nil, devNull) + t2exited := make(chan struct{}) + t2.RestartDelay = 0 // don't slow the test down for no good reason + t2.Finished = func(ok bool) bool { + // we expect each of these cases to happen exactly once + if !ok { + tasksDone.Done() + } else { + close(t2exited) + } + return ok + } + go t2.run(func(t *Task) taskStateFn { + defer 
tasksStarted.Done() + t.initLogging(bytes.NewBuffer([]byte{})) + t.cmd = newFakeProcess() + return taskRunning + }) + + shouldQuit := make(chan struct{}) + te := MergeOutput([]*Task{t1, t2}, shouldQuit) + + tasksStarted.Wait() + tasksStarted.Add(2) // recycle the barrier + + // kill each task once, let it restart; make sure that we get the completion status? + t1.cmd.(*fakeProcess).exit(1) + t2.cmd.(*fakeProcess).exit(2) + + codes := map[int]struct{}{} + for i := 0; i < 2; i++ { + switch tc := <-te.Completion(); tc.code { + case 1, 2: + codes[tc.code] = struct{}{} + default: + if tc.err != nil { + t.Errorf("unexpected task completion error: %v", tc.err) + } else { + t.Errorf("unexpected task completion code: %d", tc.code) + } + } + } + + te.Close() // we're not going to read any other completion or error events + + if len(codes) != 2 { + t.Fatalf("expected each task process to exit once") + } + + // each task invokes Finished() once + <-t1exited + <-t2exited + + log.Infoln("each task process has completed one round") + tasksStarted.Wait() // tasks will auto-restart their exited procs + + // assert that the tasks are not dead; TODO(jdef) not sure that these checks are useful + select { + case <-t1.done: + t.Fatalf("t1 is unexpectedly dead") + default: + } + select { + case <-t2.done: + t.Fatalf("t2 is unexpectedly dead") + default: + } + + log.Infoln("firing quit signal") + close(shouldQuit) // fire shouldQuit, and everything should terminate gracefully + + log.Infoln("waiting for tasks to die") + tasksDone.Wait() // our tasks should die + + log.Infoln("waiting for merge to complete") + <-te.Done() // wait for the merge to complete +} From f0d0e2a089cf8104114c48f07597ab7e43851308 Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Tue, 1 Sep 2015 14:32:38 -0400 Subject: [PATCH 039/101] Remove unused api-rate and api-burst params. --- cmd/kube-apiserver/app/server.go | 6 ------ docs/admin/kube-apiserver.md | 2 -- hack/verify-flags/known-flags.txt | 2 -- 3 files changed, 10 deletions(-) diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 954d11364d9..8dd7c0631a2 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -68,8 +68,6 @@ type APIServer struct { AdvertiseAddress net.IP SecurePort int ExternalHost string - APIRate float32 - APIBurst int TLSCertFile string TLSPrivateKeyFile string CertDirectory string @@ -122,8 +120,6 @@ func NewAPIServer() *APIServer { InsecureBindAddress: net.ParseIP("127.0.0.1"), BindAddress: net.ParseIP("0.0.0.0"), SecurePort: 6443, - APIRate: 10.0, - APIBurst: 200, APIPrefix: "/api", ExpAPIPrefix: "/experimental", EventTTL: 1 * time.Hour, @@ -176,8 +172,6 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) { fs.IntVar(&s.SecurePort, "secure-port", s.SecurePort, ""+ "The port on which to serve HTTPS with authentication and authorization. If 0, "+ "don't serve HTTPS at all.") - fs.Float32Var(&s.APIRate, "api-rate", s.APIRate, "API rate limit as QPS for the read only port") - fs.IntVar(&s.APIBurst, "api-burst", s.APIBurst, "API burst amount for the read only port") fs.StringVar(&s.TLSCertFile, "tls-cert-file", s.TLSCertFile, ""+ "File containing x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). 
"+ "If HTTPS serving is enabled, and --tls-cert-file and --tls-private-key-file are not provided, "+ diff --git a/docs/admin/kube-apiserver.md b/docs/admin/kube-apiserver.md index 4396f23e671..342de9490b6 100644 --- a/docs/admin/kube-apiserver.md +++ b/docs/admin/kube-apiserver.md @@ -52,9 +52,7 @@ cluster's shared state through which all other components interact. --admission-control-config-file="": File with admission control configuration. --advertise-address=: The IP address on which to advertise the apiserver to members of the cluster. This address must be reachable by the rest of the cluster. If blank, the --bind-address will be used. If --bind-address is unspecified, the host's default interface will be used. --allow-privileged=false: If true, allow privileged containers. - --api-burst=0: API burst amount for the read only port --api-prefix="": The prefix for API requests on the server. Default '/api'. - --api-rate=0: API rate limit as QPS for the read only port --authorization-mode="": Selects how to do authorization on the secure port. One of: AlwaysAllow,AlwaysDeny,ABAC --authorization-policy-file="": File with authorization policy in csv format, used with --authorization-mode=ABAC, on the secure port. --basic-auth-file="": If set, the file that will be used to admit requests to the secure port of the API server via http basic authentication. diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt index 71a9127a3ce..8061a257baa 100644 --- a/hack/verify-flags/known-flags.txt +++ b/hack/verify-flags/known-flags.txt @@ -8,9 +8,7 @@ algorithm-provider all-namespaces allocate-node-cidrs allow-privileged -api-burst api-prefix -api-rate api-servers api-token api-version From ea6238ddc3212ce1941d2fea75b89b9ef660a094 Mon Sep 17 00:00:00 2001 From: Quinton Hoole Date: Wed, 2 Sep 2015 15:01:04 -0700 Subject: [PATCH 040/101] Remove redundant filter from e2e-gce-parallel test job. --- hack/jenkins/e2e.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index 3fb1d1eeb86..47403915057 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -119,7 +119,6 @@ GCE_SLOW_TESTS=( # Tests which are not able to be run in parallel. GCE_PARALLEL_SKIP_TESTS=( - ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} "Etcd" "NetworkingNew" "Nodes\sNetwork" @@ -229,9 +228,10 @@ case ${JOB_NAME} in : ${GINKGO_PARALLEL:="y"} # This list should match the list in kubernetes-e2e-gce-parallel. 
: ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \ + ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \ ${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \ - ${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \ ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \ + ${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \ )"} : ${KUBE_GCE_INSTANCE_PREFIX:="pull-e2e-${EXECUTOR_NUMBER}"} : ${KUBE_GCS_STAGING_PATH_SUFFIX:="-${EXECUTOR_NUMBER}"} @@ -248,8 +248,8 @@ case ${JOB_NAME} in : ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \ ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \ ${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \ - ${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \ ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \ + ${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \ )"} : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-test-parallel"} : ${PROJECT:="kubernetes-jenkins"} @@ -265,6 +265,7 @@ case ${JOB_NAME} in : ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \ ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \ ${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \ + ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \ ${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \ )"} # Override AWS defaults. @@ -280,6 +281,7 @@ case ${JOB_NAME} in ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \ ${GCE_PARALLEL_SKIP_TESTS[@]:+${GCE_PARALLEL_SKIP_TESTS[@]}} \ ) --ginkgo.focus=$(join_regex_no_empty \ + ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \ ${GCE_PARALLEL_FLAKY_TESTS[@]:+${GCE_PARALLEL_FLAKY_TESTS[@]}} \ )"} : ${KUBE_GCE_INSTANCE_PREFIX:="parallel-flaky"} From 4d8eedd13d66009c94e1fa47c97303b8bfc92cef Mon Sep 17 00:00:00 2001 From: Zichang Lin Date: Thu, 3 Sep 2015 11:47:25 +0800 Subject: [PATCH 041/101] Fix rmdir error when libvirt-coreos kupe-up --- cluster/libvirt-coreos/util.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/libvirt-coreos/util.sh b/cluster/libvirt-coreos/util.sh index 3a520b346f7..0bfeff06099 100644 --- a/cluster/libvirt-coreos/util.sh +++ b/cluster/libvirt-coreos/util.sh @@ -294,7 +294,7 @@ function upload-server-tars { tar -x -C "$POOL_PATH/kubernetes" -f "$SERVER_BINARY_TAR" kubernetes rm -rf "$POOL_PATH/kubernetes/bin" mv "$POOL_PATH/kubernetes/kubernetes/server/bin" "$POOL_PATH/kubernetes/bin" - rmdir "$POOL_PATH/kubernetes/kubernetes/server" "$POOL_PATH/kubernetes/kubernetes" + rm -fr "$POOL_PATH/kubernetes/kubernetes" } # Update a kubernetes cluster with latest source From da0ab43b08c0108c13e9ddcf1f9f7be6d7fe0ccc Mon Sep 17 00:00:00 2001 From: Jeff Grafton Date: Wed, 2 Sep 2015 14:45:12 -0700 Subject: [PATCH 042/101] Save integration test results in junit-style XML report on Shippable --- shippable.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/shippable.yml b/shippable.yml index 5614cb8cb93..5686b3e3cad 100644 --- a/shippable.yml +++ b/shippable.yml @@ -7,6 +7,7 @@ matrix: - go: 1.4 env: - KUBE_TEST_API_VERSIONS=v1 KUBE_TEST_ETCD_PREFIXES=registry + - KUBE_JUNIT_REPORT_DIR="${SHIPPABLE_REPO_DIR}/shippable/testresults" - CI_NAME="shippable" - CI_BUILD_NUMBER="$BUILD_NUMBER" - CI_BUILD_URL="$BUILD_URL" @@ -17,6 +18,7 @@ matrix: - go: 1.3 env: - KUBE_TEST_API_VERSIONS=v1 KUBE_TEST_ETCD_PREFIXES=kubernetes.io/registry + - KUBE_JUNIT_REPORT_DIR="${SHIPPABLE_REPO_DIR}/shippable/testresults" - CI_NAME="shippable" - CI_BUILD_NUMBER="$BUILD_NUMBER" - 
CI_BUILD_URL="$BUILD_URL" @@ -48,7 +50,7 @@ install: script: # Disable coverage collection on pull requests - - KUBE_RACE="-race" KUBE_COVER=$([[ "$PULL_REQUEST" =~ ^[0-9]+$ ]] && echo "n" || echo "y") KUBE_GOVERALLS_BIN="$GOPATH/bin/goveralls" KUBE_TIMEOUT='-timeout 300s' KUBE_COVERPROCS=8 KUBE_TEST_ETCD_PREFIXES="${KUBE_TEST_ETCD_PREFIXES}" KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS}" KUBE_JUNIT_REPORT_DIR="$(pwd)/shippable/testresults" ./hack/test-go.sh -- -p=2 + - KUBE_RACE="-race" KUBE_COVER=$([[ "$PULL_REQUEST" =~ ^[0-9]+$ ]] && echo "n" || echo "y") KUBE_GOVERALLS_BIN="$GOPATH/bin/goveralls" KUBE_TIMEOUT='-timeout 300s' KUBE_COVERPROCS=8 KUBE_TEST_ETCD_PREFIXES="${KUBE_TEST_ETCD_PREFIXES}" KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS}" ./hack/test-go.sh -- -p=2 - ./hack/test-cmd.sh - KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS}" KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=4 LOG_LEVEL=4 ./hack/test-integration.sh - ./hack/test-update-storage-objects.sh From a1cea8dd8795d5173d4a0d0dd07bc9a41b0aafd5 Mon Sep 17 00:00:00 2001 From: James DeFelice Date: Mon, 31 Aug 2015 23:06:59 +0000 Subject: [PATCH 043/101] Flexible resource accounting and pod resource containment: - new: introduce AllocationStrategy, Predicate, and Procurement to scheduler pkg - new: --contain-pod-resources flag (workaround for docker+systemd+mesos problems) - new: --account-for-pod-resources flag (for testing overcommitment) - bugfix: forward -v flag from minion controller to executor --- contrib/mesos/pkg/minion/server.go | 31 +++- contrib/mesos/pkg/scheduler/fcfs.go | 38 ++++- contrib/mesos/pkg/scheduler/mock_test.go | 4 +- contrib/mesos/pkg/scheduler/plugin.go | 36 ++--- contrib/mesos/pkg/scheduler/plugin_test.go | 13 +- .../mesos/pkg/scheduler/podtask/minimal.go | 73 +++++++++ .../mesos/pkg/scheduler/podtask/pod_task.go | 114 ------------- .../pkg/scheduler/podtask/pod_task_test.go | 22 +-- .../mesos/pkg/scheduler/podtask/predicate.go | 110 +++++++++++++ .../pkg/scheduler/podtask/procurement.go | 152 ++++++++++++++++++ contrib/mesos/pkg/scheduler/scheduler.go | 57 +++---- .../mesos/pkg/scheduler/service/service.go | 62 ++++--- contrib/mesos/pkg/scheduler/types.go | 34 ++-- hack/verify-flags/known-flags.txt | 4 +- 14 files changed, 517 insertions(+), 233 deletions(-) create mode 100644 contrib/mesos/pkg/scheduler/podtask/minimal.go create mode 100644 contrib/mesos/pkg/scheduler/podtask/predicate.go create mode 100644 contrib/mesos/pkg/scheduler/podtask/procurement.go diff --git a/contrib/mesos/pkg/minion/server.go b/contrib/mesos/pkg/minion/server.go index 1ec81f7cb44..e7b73eeb831 100644 --- a/contrib/mesos/pkg/minion/server.go +++ b/contrib/mesos/pkg/minion/server.go @@ -55,13 +55,16 @@ type MinionServer struct { kmBinary string tasks []*tasks.Task - pathOverride string // the PATH environment for the sub-processes - cgroupPrefix string // e.g. mesos - cgroupRoot string // e.g. /mesos/{container-id}, determined at runtime + pathOverride string // the PATH environment for the sub-processes + cgroupPrefix string // e.g. mesos + cgroupRoot string // the cgroupRoot that we pass to the kubelet-executor, depends on containPodResources + mesosCgroup string // discovered mesos cgroup root, e.g. 
/mesos/{container-id} + containPodResources bool logMaxSize resource.Quantity logMaxBackups int logMaxAgeInDays int + logVerbosity int32 // see glog.Level runProxy bool proxyLogV int @@ -74,6 +77,7 @@ func NewMinionServer() *MinionServer { KubeletExecutorServer: exservice.NewKubeletExecutorServer(), privateMountNS: false, // disabled until Docker supports customization of the parent mount namespace cgroupPrefix: config.DefaultCgroupPrefix, + containPodResources: true, logMaxSize: config.DefaultLogMaxSize(), logMaxBackups: config.DefaultLogMaxBackups, logMaxAgeInDays: config.DefaultLogMaxAgeInDays, @@ -131,7 +135,7 @@ func (ms *MinionServer) launchProxyServer() { fmt.Sprintf("--bind-address=%s", bindAddress), fmt.Sprintf("--v=%d", ms.proxyLogV), "--logtostderr=true", - "--resource-container=" + path.Join("/", ms.cgroupRoot, "kube-proxy"), + "--resource-container=" + path.Join("/", ms.mesosCgroup, "kube-proxy"), } if ms.clientConfig.Host != "" { @@ -156,7 +160,7 @@ func (ms *MinionServer) launchExecutorServer() <-chan struct{} { ms.AddExecutorFlags(executorFlags) executorArgs, _ := filterArgsByFlagSet(allArgs, executorFlags) - executorArgs = append(executorArgs, "--resource-container="+path.Join("/", ms.cgroupRoot, "kubelet")) + executorArgs = append(executorArgs, "--resource-container="+path.Join("/", ms.mesosCgroup, "kubelet")) if ms.cgroupRoot != "" { executorArgs = append(executorArgs, "--cgroup-root="+ms.cgroupRoot) } @@ -241,14 +245,23 @@ func (ms *MinionServer) Run(hks hyperkube.Interface, _ []string) error { ms.clientConfig = clientConfig // derive the executor cgroup and use it as: - // - pod container cgroup root (e.g. docker cgroup-parent) + // - pod container cgroup root (e.g. docker cgroup-parent, optionally; see comments below) // - parent of kubelet container // - parent of kube-proxy container - ms.cgroupRoot = findMesosCgroup(ms.cgroupPrefix) + ms.mesosCgroup = findMesosCgroup(ms.cgroupPrefix) + log.Infof("discovered mesos cgroup at %q", ms.mesosCgroup) + + // hack alert, this helps to work around systemd+docker+mesos integration problems + // when docker's cgroup-parent flag is used (!containPodResources = don't use the docker flag) + if ms.containPodResources { + ms.cgroupRoot = ms.mesosCgroup + } + cgroupLogger := log.Infof if ms.cgroupRoot == "" { cgroupLogger = log.Warningf } + cgroupLogger("using cgroup-root %q", ms.cgroupRoot) // run subprocesses until ms.done is closed on return of this function @@ -302,6 +315,9 @@ func termSignalListener(abort <-chan struct{}) <-chan struct{} { func (ms *MinionServer) AddExecutorFlags(fs *pflag.FlagSet) { ms.KubeletExecutorServer.AddFlags(fs) + + // hack to forward log verbosity flag to the executor + fs.Int32Var(&ms.logVerbosity, "v", ms.logVerbosity, "log level for V logs") } func (ms *MinionServer) AddMinionFlags(fs *pflag.FlagSet) { @@ -309,6 +325,7 @@ func (ms *MinionServer) AddMinionFlags(fs *pflag.FlagSet) { fs.StringVar(&ms.cgroupPrefix, "mesos-cgroup-prefix", ms.cgroupPrefix, "The cgroup prefix concatenated with MESOS_DIRECTORY must give the executor cgroup set by Mesos") fs.BoolVar(&ms.privateMountNS, "private-mountns", ms.privateMountNS, "Enter a private mount NS before spawning procs (linux only). 
Experimental, not yet compatible with k8s volumes.") fs.StringVar(&ms.pathOverride, "path-override", ms.pathOverride, "Override the PATH in the environment of the sub-processes.") + fs.BoolVar(&ms.containPodResources, "contain-pod-resources", ms.containPodResources, "Allocate pod CPU and memory resources from offers and reparent pod containers into mesos cgroups; disable if you're having strange mesos/docker/systemd interactions.") // log file flags fs.Var(resource.NewQuantityFlagValue(&ms.logMaxSize), "max-log-size", "Maximum log file size for the executor and proxy before rotation") diff --git a/contrib/mesos/pkg/scheduler/fcfs.go b/contrib/mesos/pkg/scheduler/fcfs.go index 76553615a8a..db84ebd96d6 100644 --- a/contrib/mesos/pkg/scheduler/fcfs.go +++ b/contrib/mesos/pkg/scheduler/fcfs.go @@ -24,8 +24,42 @@ import ( "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask" ) +type allocationStrategy struct { + fitPredicate podtask.FitPredicate + procurement podtask.Procurement +} + +func (a *allocationStrategy) FitPredicate() podtask.FitPredicate { + return a.fitPredicate +} + +func (a *allocationStrategy) Procurement() podtask.Procurement { + return a.procurement +} + +func NewAllocationStrategy(fitPredicate podtask.FitPredicate, procurement podtask.Procurement) AllocationStrategy { + if fitPredicate == nil { + panic("fitPredicate is required") + } + if procurement == nil { + panic("procurement is required") + } + return &allocationStrategy{ + fitPredicate: fitPredicate, + procurement: procurement, + } +} + +type fcfsPodScheduler struct { + AllocationStrategy +} + +func NewFCFSPodScheduler(as AllocationStrategy) PodScheduler { + return &fcfsPodScheduler{as} +} + // A first-come-first-serve scheduler: acquires the first offer that can support the task -func FCFSScheduleFunc(r offers.Registry, unused SlaveIndex, task *podtask.T) (offers.Perishable, error) { +func (fps *fcfsPodScheduler) SchedulePod(r offers.Registry, unused SlaveIndex, task *podtask.T) (offers.Perishable, error) { podName := fmt.Sprintf("%s/%s", task.Pod.Namespace, task.Pod.Name) var acceptedOffer offers.Perishable err := r.Walk(func(p offers.Perishable) (bool, error) { @@ -33,7 +67,7 @@ func FCFSScheduleFunc(r offers.Registry, unused SlaveIndex, task *podtask.T) (of if offer == nil { return false, fmt.Errorf("nil offer while scheduling task %v", task.ID) } - if task.AcceptOffer(offer) { + if fps.FitPredicate()(task, offer) { if p.Acquire() { acceptedOffer = p log.V(3).Infof("Pod %s accepted offer %v", podName, offer.Id.GetValue()) diff --git a/contrib/mesos/pkg/scheduler/mock_test.go b/contrib/mesos/pkg/scheduler/mock_test.go index e53024ec2cc..17930a86813 100644 --- a/contrib/mesos/pkg/scheduler/mock_test.go +++ b/contrib/mesos/pkg/scheduler/mock_test.go @@ -42,11 +42,11 @@ func (m *MockScheduler) slaveFor(id string) (slave *Slave, ok bool) { ok = args.Bool(1) return } -func (m *MockScheduler) algorithm() (f PodScheduleFunc) { +func (m *MockScheduler) algorithm() (f PodScheduler) { args := m.Called() x := args.Get(0) if x != nil { - f = x.(PodScheduleFunc) + f = x.(PodScheduler) } return } diff --git a/contrib/mesos/pkg/scheduler/plugin.go b/contrib/mesos/pkg/scheduler/plugin.go index 56cc419abf6..e7f39cff4ed 100644 --- a/contrib/mesos/pkg/scheduler/plugin.go +++ b/contrib/mesos/pkg/scheduler/plugin.go @@ -33,7 +33,6 @@ import ( "k8s.io/kubernetes/contrib/mesos/pkg/runtime" annotation "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta" "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask" - mresource 
"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" client "k8s.io/kubernetes/pkg/client/unversioned" @@ -56,8 +55,9 @@ const ( // scheduler abstraction to allow for easier unit testing type schedulerInterface interface { sync.Locker // synchronize scheduler plugin operations + SlaveIndex - algorithm() PodScheduleFunc // see types.go + algorithm() PodScheduler offers() offers.Registry tasks() podtask.Registry @@ -76,8 +76,8 @@ type k8smScheduler struct { internal *KubernetesScheduler } -func (k *k8smScheduler) algorithm() PodScheduleFunc { - return k.internal.scheduleFunc +func (k *k8smScheduler) algorithm() PodScheduler { + return k.internal } func (k *k8smScheduler) offers() offers.Registry { @@ -231,10 +231,8 @@ func (b *binder) prepareTaskForLaunch(ctx api.Context, machine string, task *pod } type kubeScheduler struct { - api schedulerInterface - podUpdates queue.FIFO - defaultContainerCPULimit mresource.CPUShares - defaultContainerMemLimit mresource.MegaBytes + api schedulerInterface + podUpdates queue.FIFO } // recoverAssignedSlave recovers the assigned Mesos slave from a pod by searching @@ -318,7 +316,7 @@ func (k *kubeScheduler) doSchedule(task *podtask.T, err error) (string, error) { } } if err == nil && offer == nil { - offer, err = k.api.algorithm()(k.api.offers(), k.api, task) + offer, err = k.api.algorithm().SchedulePod(k.api.offers(), k.api, task) } if err != nil { return "", err @@ -338,18 +336,8 @@ func (k *kubeScheduler) doSchedule(task *podtask.T, err error) (string, error) { return "", fmt.Errorf("task.offer assignment must be idempotent, task %+v: offer %+v", task, offer) } - // write resource limits into the pod spec which is transferred to the executor. From here - // on we can expect that the pod spec of a task has proper limits for CPU and memory. - // TODO(sttts): For a later separation of the kubelet and the executor also patch the pod on the apiserver - if unlimitedCPU := mresource.LimitPodCPU(&task.Pod, k.defaultContainerCPULimit); unlimitedCPU { - log.Warningf("Pod %s/%s without cpu limits is admitted %.2f cpu shares", task.Pod.Namespace, task.Pod.Name, mresource.PodCPULimit(&task.Pod)) - } - if unlimitedMem := mresource.LimitPodMem(&task.Pod, k.defaultContainerMemLimit); unlimitedMem { - log.Warningf("Pod %s/%s without memory limits is admitted %.2f MB", task.Pod.Namespace, task.Pod.Name, mresource.PodMemLimit(&task.Pod)) - } - task.Offer = offer - task.FillFromDetails(details) + k.api.algorithm().Procurement()(task, details) // TODO(jdef) why is nothing checking the error returned here? 
if err := k.api.tasks().Update(task); err != nil { offer.Release() @@ -556,7 +544,7 @@ func (k *errorHandler) handleSchedulingError(pod *api.Pod, schedulingErr error) defer k.api.Unlock() switch task, state := k.api.tasks().Get(task.ID); state { case podtask.StatePending: - return !task.Has(podtask.Launched) && task.AcceptOffer(offer) + return !task.Has(podtask.Launched) && k.api.algorithm().FitPredicate()(task, offer) default: // no point in continuing to check for matching offers return true @@ -698,10 +686,8 @@ func (k *KubernetesScheduler) NewPluginConfig(terminate <-chan struct{}, mux *ht Config: &plugin.Config{ MinionLister: nil, Algorithm: &kubeScheduler{ - api: kapi, - podUpdates: podUpdates, - defaultContainerCPULimit: k.defaultContainerCPULimit, - defaultContainerMemLimit: k.defaultContainerMemLimit, + api: kapi, + podUpdates: podUpdates, }, Binder: &binder{api: kapi}, NextPod: q.yield, diff --git a/contrib/mesos/pkg/scheduler/plugin_test.go b/contrib/mesos/pkg/scheduler/plugin_test.go index 4522a633c55..06da09d5c55 100644 --- a/contrib/mesos/pkg/scheduler/plugin_test.go +++ b/contrib/mesos/pkg/scheduler/plugin_test.go @@ -393,13 +393,14 @@ func TestPlugin_LifeCycle(t *testing.T) { executor.Data = []byte{0, 1, 2} // create scheduler + as := NewAllocationStrategy( + podtask.DefaultPredicate, + podtask.NewDefaultProcurement(mresource.DefaultDefaultContainerCPULimit, mresource.DefaultDefaultContainerMemLimit)) testScheduler := New(Config{ - Executor: executor, - Client: client.NewOrDie(&client.Config{Host: testApiServer.server.URL, Version: testapi.Version()}), - ScheduleFunc: FCFSScheduleFunc, - Schedcfg: *schedcfg.CreateDefaultConfig(), - DefaultContainerCPULimit: mresource.DefaultDefaultContainerCPULimit, - DefaultContainerMemLimit: mresource.DefaultDefaultContainerMemLimit, + Executor: executor, + Client: client.NewOrDie(&client.Config{Host: testApiServer.server.URL, Version: testapi.Version()}), + Scheduler: NewFCFSPodScheduler(as), + Schedcfg: *schedcfg.CreateDefaultConfig(), }) assert.NotNil(testScheduler.client, "client is nil") diff --git a/contrib/mesos/pkg/scheduler/podtask/minimal.go b/contrib/mesos/pkg/scheduler/podtask/minimal.go new file mode 100644 index 00000000000..842455e5919 --- /dev/null +++ b/contrib/mesos/pkg/scheduler/podtask/minimal.go @@ -0,0 +1,73 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package podtask + +import ( + log "github.com/golang/glog" + mesos "github.com/mesos/mesos-go/mesosproto" +) + +// bogus numbers that we use to make sure that there's some set of minimal offered resources on the slave +const ( + minimalCpus = 0.01 + minimalMem = 0.25 +) + +var ( + DefaultMinimalPredicate = RequireAllPredicate([]FitPredicate{ + ValidationPredicate, + NodeSelectorPredicate, + MinimalPodResourcesPredicate, + PortsPredicate, + }).Fit + + DefaultMinimalProcurement = AllOrNothingProcurement([]Procurement{ + ValidateProcurement, + NodeProcurement, + MinimalPodResourcesProcurement, + PortsProcurement, + }).Procure +) + +func MinimalPodResourcesPredicate(t *T, offer *mesos.Offer) bool { + var ( + offeredCpus float64 + offeredMem float64 + ) + for _, resource := range offer.Resources { + if resource.GetName() == "cpus" { + offeredCpus = resource.GetScalar().GetValue() + } + + if resource.GetName() == "mem" { + offeredMem = resource.GetScalar().GetValue() + } + } + log.V(4).Infof("trying to match offer with pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, minimalCpus, minimalMem) + if (minimalCpus > offeredCpus) || (minimalMem > offeredMem) { + log.V(3).Infof("not enough resources for pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, minimalCpus, minimalMem) + return false + } + return true +} + +func MinimalPodResourcesProcurement(t *T, details *mesos.Offer) error { + log.V(3).Infof("Recording offer(s) %s/%s against pod %v: cpu: %.2f, mem: %.2f MB", details.Id, t.Pod.Namespace, t.Pod.Name, minimalCpus, minimalMem) + t.Spec.CPU = minimalCpus + t.Spec.Memory = minimalMem + return nil +} diff --git a/contrib/mesos/pkg/scheduler/podtask/pod_task.go b/contrib/mesos/pkg/scheduler/podtask/pod_task.go index 1c08abd7632..ad8a77d42fe 100644 --- a/contrib/mesos/pkg/scheduler/podtask/pod_task.go +++ b/contrib/mesos/pkg/scheduler/podtask/pod_task.go @@ -18,7 +18,6 @@ package podtask import ( "fmt" - "strings" "time" "github.com/gogo/protobuf/proto" @@ -28,7 +27,6 @@ import ( "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/metrics" mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/labels" log "github.com/golang/glog" mesos "github.com/mesos/mesos-go/mesosproto" @@ -150,59 +148,6 @@ func (t *T) BuildTaskInfo() *mesos.TaskInfo { return info } -// Fill the Spec in the T, should be called during k8s scheduling, before binding. 
-func (t *T) FillFromDetails(details *mesos.Offer) error { - if details == nil { - //programming error - panic("offer details are nil") - } - - // compute used resources - cpu := mresource.PodCPULimit(&t.Pod) - mem := mresource.PodMemLimit(&t.Pod) - log.V(3).Infof("Recording offer(s) %s/%s against pod %v: cpu: %.2f, mem: %.2f MB", details.Id, t.Pod.Namespace, t.Pod.Name, cpu, mem) - - t.Spec = Spec{ - SlaveID: details.GetSlaveId().GetValue(), - AssignedSlave: details.GetHostname(), - CPU: cpu, - Memory: mem, - } - - // fill in port mapping - if mapping, err := t.mapper.Generate(t, details); err != nil { - t.Reset() - return err - } else { - ports := []uint64{} - for _, entry := range mapping { - ports = append(ports, entry.OfferPort) - } - t.Spec.PortMap = mapping - t.Spec.Ports = ports - } - - // hostname needs of the executor needs to match that of the offer, otherwise - // the kubelet node status checker/updater is very unhappy - const HOSTNAME_OVERRIDE_FLAG = "--hostname-override=" - hostname := details.GetHostname() // required field, non-empty - hostnameOverride := HOSTNAME_OVERRIDE_FLAG + hostname - - argv := t.executor.Command.Arguments - overwrite := false - for i, arg := range argv { - if strings.HasPrefix(arg, HOSTNAME_OVERRIDE_FLAG) { - overwrite = true - argv[i] = hostnameOverride - break - } - } - if !overwrite { - t.executor.Command.Arguments = append(argv, hostnameOverride) - } - return nil -} - // Clear offer-related details from the task, should be called if/when an offer // has already been assigned to a task but for some reason is no longer valid. func (t *T) Reset() { @@ -211,65 +156,6 @@ func (t *T) Reset() { t.Spec = Spec{} } -func (t *T) AcceptOffer(offer *mesos.Offer) bool { - if offer == nil { - return false - } - - // if the user has specified a target host, make sure this offer is for that host - if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName { - return false - } - - // check the NodeSelector - if len(t.Pod.Spec.NodeSelector) > 0 { - slaveLabels := map[string]string{} - for _, a := range offer.Attributes { - if a.GetType() == mesos.Value_TEXT { - slaveLabels[a.GetName()] = a.GetText().GetValue() - } - } - selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector) - if !selector.Matches(labels.Set(slaveLabels)) { - return false - } - } - - // check ports - if _, err := t.mapper.Generate(t, offer); err != nil { - log.V(3).Info(err) - return false - } - - // find offered cpu and mem - var ( - offeredCpus mresource.CPUShares - offeredMem mresource.MegaBytes - ) - for _, resource := range offer.Resources { - if resource.GetName() == "cpus" { - offeredCpus = mresource.CPUShares(*resource.GetScalar().Value) - } - - if resource.GetName() == "mem" { - offeredMem = mresource.MegaBytes(*resource.GetScalar().Value) - } - } - - // calculate cpu and mem sum over all containers of the pod - // TODO (@sttts): also support pod.spec.resources.limit.request - // TODO (@sttts): take into account the executor resources - cpu := mresource.PodCPULimit(&t.Pod) - mem := mresource.PodMemLimit(&t.Pod) - log.V(4).Infof("trying to match offer with pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem) - if (cpu > offeredCpus) || (mem > offeredMem) { - log.V(3).Infof("not enough resources for pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem) - return false - } - - return true -} - func (t *T) Set(f FlagType) { t.Flags[f] = struct{}{} if Launched == f { diff --git a/contrib/mesos/pkg/scheduler/podtask/pod_task_test.go 
b/contrib/mesos/pkg/scheduler/podtask/pod_task_test.go index 8bdeb5817ec..9b18e283fae 100644 --- a/contrib/mesos/pkg/scheduler/podtask/pod_task_test.go +++ b/contrib/mesos/pkg/scheduler/podtask/pod_task_test.go @@ -146,10 +146,10 @@ func TestEmptyOffer(t *testing.T) { mresource.LimitPodCPU(&task.Pod, mresource.DefaultDefaultContainerCPULimit) mresource.LimitPodMem(&task.Pod, mresource.DefaultDefaultContainerMemLimit) - if ok := task.AcceptOffer(nil); ok { + if ok := DefaultPredicate(task, nil); ok { t.Fatalf("accepted nil offer") } - if ok := task.AcceptOffer(&mesos.Offer{}); ok { + if ok := DefaultPredicate(task, &mesos.Offer{}); ok { t.Fatalf("accepted empty offer") } } @@ -176,7 +176,7 @@ func TestNoPortsInPodOrOffer(t *testing.T) { mutil.NewScalarResource("mem", 0.001), }, } - if ok := task.AcceptOffer(offer); ok { + if ok := DefaultPredicate(task, offer); ok { t.Fatalf("accepted offer %v:", offer) } @@ -186,7 +186,7 @@ func TestNoPortsInPodOrOffer(t *testing.T) { mutil.NewScalarResource("mem", t_min_mem), }, } - if ok := task.AcceptOffer(offer); !ok { + if ok := DefaultPredicate(task, offer); !ok { t.Fatalf("did not accepted offer %v:", offer) } } @@ -203,7 +203,7 @@ func TestAcceptOfferPorts(t *testing.T) { rangeResource("ports", []uint64{1, 1}), }, } - if ok := task.AcceptOffer(offer); !ok { + if ok := DefaultPredicate(task, offer); !ok { t.Fatalf("did not accepted offer %v:", offer) } @@ -218,17 +218,17 @@ func TestAcceptOfferPorts(t *testing.T) { mresource.LimitPodCPU(&task.Pod, mresource.DefaultDefaultContainerCPULimit) mresource.LimitPodMem(&task.Pod, mresource.DefaultDefaultContainerMemLimit) - if ok := task.AcceptOffer(offer); ok { + if ok := DefaultPredicate(task, offer); ok { t.Fatalf("accepted offer %v:", offer) } pod.Spec.Containers[0].Ports[0].HostPort = 1 - if ok := task.AcceptOffer(offer); !ok { + if ok := DefaultPredicate(task, offer); !ok { t.Fatalf("did not accepted offer %v:", offer) } pod.Spec.Containers[0].Ports[0].HostPort = 0 - if ok := task.AcceptOffer(offer); !ok { + if ok := DefaultPredicate(task, offer); !ok { t.Fatalf("did not accepted offer %v:", offer) } @@ -236,12 +236,12 @@ func TestAcceptOfferPorts(t *testing.T) { mutil.NewScalarResource("cpus", t_min_cpu), mutil.NewScalarResource("mem", t_min_mem), } - if ok := task.AcceptOffer(offer); ok { + if ok := DefaultPredicate(task, offer); ok { t.Fatalf("accepted offer %v:", offer) } pod.Spec.Containers[0].Ports[0].HostPort = 1 - if ok := task.AcceptOffer(offer); ok { + if ok := DefaultPredicate(task, offer); ok { t.Fatalf("accepted offer %v:", offer) } } @@ -297,7 +297,7 @@ func TestNodeSelector(t *testing.T) { }, Attributes: ts.attrs, } - if got, want := task.AcceptOffer(offer), ts.ok; got != want { + if got, want := DefaultPredicate(task, offer), ts.ok; got != want { t.Fatalf("expected acceptance of offer %v for selector %v to be %v, got %v:", want, got, ts.attrs, ts.selector) } } diff --git a/contrib/mesos/pkg/scheduler/podtask/predicate.go b/contrib/mesos/pkg/scheduler/podtask/predicate.go new file mode 100644 index 00000000000..4b46cbb4900 --- /dev/null +++ b/contrib/mesos/pkg/scheduler/podtask/predicate.go @@ -0,0 +1,110 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podtask + +import ( + log "github.com/golang/glog" + mesos "github.com/mesos/mesos-go/mesosproto" + mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource" + "k8s.io/kubernetes/pkg/labels" +) + +var DefaultPredicate = RequireAllPredicate([]FitPredicate{ + ValidationPredicate, + NodeSelectorPredicate, + PodFitsResourcesPredicate, + PortsPredicate, +}).Fit + +// FitPredicate implementations determine if the given task "fits" into offered Mesos resources. +// Neither the task or offer should be modified. +type FitPredicate func(*T, *mesos.Offer) bool + +type RequireAllPredicate []FitPredicate + +func (f RequireAllPredicate) Fit(t *T, offer *mesos.Offer) bool { + for _, p := range f { + if !p(t, offer) { + return false + } + } + return true +} + +func ValidationPredicate(t *T, offer *mesos.Offer) bool { + return t != nil && offer != nil +} + +func NodeSelectorPredicate(t *T, offer *mesos.Offer) bool { + // if the user has specified a target host, make sure this offer is for that host + if t.Pod.Spec.NodeName != "" && offer.GetHostname() != t.Pod.Spec.NodeName { + return false + } + + // check the NodeSelector + if len(t.Pod.Spec.NodeSelector) > 0 { + slaveLabels := map[string]string{} + for _, a := range offer.Attributes { + if a.GetType() == mesos.Value_TEXT { + slaveLabels[a.GetName()] = a.GetText().GetValue() + } + } + selector := labels.SelectorFromSet(t.Pod.Spec.NodeSelector) + if !selector.Matches(labels.Set(slaveLabels)) { + return false + } + } + return true +} + +func PortsPredicate(t *T, offer *mesos.Offer) bool { + // check ports + if _, err := t.mapper.Generate(t, offer); err != nil { + log.V(3).Info(err) + return false + } + return true +} + +func PodFitsResourcesPredicate(t *T, offer *mesos.Offer) bool { + // find offered cpu and mem + var ( + offeredCpus mresource.CPUShares + offeredMem mresource.MegaBytes + ) + for _, resource := range offer.Resources { + if resource.GetName() == "cpus" { + offeredCpus = mresource.CPUShares(*resource.GetScalar().Value) + } + + if resource.GetName() == "mem" { + offeredMem = mresource.MegaBytes(*resource.GetScalar().Value) + } + } + + // calculate cpu and mem sum over all containers of the pod + // TODO (@sttts): also support pod.spec.resources.limit.request + // TODO (@sttts): take into account the executor resources + cpu := mresource.PodCPULimit(&t.Pod) + mem := mresource.PodMemLimit(&t.Pod) + log.V(4).Infof("trying to match offer with pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem) + if (cpu > offeredCpus) || (mem > offeredMem) { + log.V(3).Infof("not enough resources for pod %v/%v: cpus: %.2f mem: %.2f MB", t.Pod.Namespace, t.Pod.Name, cpu, mem) + return false + } + return true +} diff --git a/contrib/mesos/pkg/scheduler/podtask/procurement.go b/contrib/mesos/pkg/scheduler/podtask/procurement.go new file mode 100644 index 00000000000..566be7b5c6e --- /dev/null +++ b/contrib/mesos/pkg/scheduler/podtask/procurement.go @@ -0,0 +1,152 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package podtask + +import ( + "strings" + + log "github.com/golang/glog" + mesos "github.com/mesos/mesos-go/mesosproto" + mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource" +) + +// NewDefaultProcurement returns the default procurement strategy that combines validation +// and responsible Mesos resource procurement. c and m are resource quantities written into +// k8s api.Pod.Spec's that don't declare resources (all containers in k8s-mesos require cpu +// and memory limits). +func NewDefaultProcurement(c mresource.CPUShares, m mresource.MegaBytes) Procurement { + requireSome := &RequireSomePodResources{ + defaultContainerCPULimit: c, + defaultContainerMemLimit: m, + } + return AllOrNothingProcurement([]Procurement{ + ValidateProcurement, + NodeProcurement, + requireSome.Procure, + PodResourcesProcurement, + PortsProcurement, + }).Procure +} + +// Procurement funcs allocate resources for a task from an offer. +// Both the task and/or offer may be modified. +type Procurement func(*T, *mesos.Offer) error + +// AllOrNothingProcurement provides a convenient wrapper around multiple Procurement +// objectives: the failure of any Procurement in the set results in Procure failing. +// see AllOrNothingProcurement.Procure +type AllOrNothingProcurement []Procurement + +// Procure runs each Procurement in the receiver list. The first Procurement func that +// fails triggers T.Reset() and the error is returned, otherwise returns nil. +func (a AllOrNothingProcurement) Procure(t *T, offer *mesos.Offer) error { + for _, p := range a { + if err := p(t, offer); err != nil { + t.Reset() + return err + } + } + return nil +} + +// ValidateProcurement checks that the offered resources are kosher, and if not panics. +// If things check out ok, t.Spec is cleared and nil is returned. +func ValidateProcurement(t *T, offer *mesos.Offer) error { + if offer == nil { + //programming error + panic("offer details are nil") + } + t.Spec = Spec{} + return nil +} + +// NodeProcurement updates t.Spec in preparation for the task to be launched on the +// slave associated with the offer. 
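The predicate.go file added above replaces the monolithic AcceptOffer with small, composable checks. As a rough sketch (not part of this patch series), a deployment-specific build could chain one more check onto the stock predicates; the rackLocalPredicate name and the "rack" attribute below are purely hypothetical:

```go
package podtask

import (
	mesos "github.com/mesos/mesos-go/mesosproto"
)

// rackLocalPredicate is a hypothetical extra FitPredicate: it accepts an
// offer only when the slave advertises a TEXT attribute named "rack" whose
// value is "rack-1". The attribute name and value are illustrative only.
func rackLocalPredicate(t *T, offer *mesos.Offer) bool {
	for _, attr := range offer.Attributes {
		if attr.GetName() == "rack" && attr.GetType() == mesos.Value_TEXT {
			return attr.GetText().GetValue() == "rack-1"
		}
	}
	return false
}

// customPredicate chains the stock checks with the hypothetical one.
// RequireAllPredicate.Fit rejects the offer as soon as any predicate in the
// list returns false, so cheap checks belong near the front.
var customPredicate = RequireAllPredicate([]FitPredicate{
	ValidationPredicate,
	NodeSelectorPredicate,
	PodFitsResourcesPredicate,
	PortsPredicate,
	rackLocalPredicate,
}).Fit
```

Because predicates are expected not to modify the task or the offer, the order only determines how early a mismatch is detected. NodeProcurement, described immediately above, is the first of the concrete procurement steps.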
+func NodeProcurement(t *T, offer *mesos.Offer) error { + t.Spec.SlaveID = offer.GetSlaveId().GetValue() + t.Spec.AssignedSlave = offer.GetHostname() + + // hostname needs of the executor needs to match that of the offer, otherwise + // the kubelet node status checker/updater is very unhappy + const HOSTNAME_OVERRIDE_FLAG = "--hostname-override=" + hostname := offer.GetHostname() // required field, non-empty + hostnameOverride := HOSTNAME_OVERRIDE_FLAG + hostname + + argv := t.executor.Command.Arguments + overwrite := false + for i, arg := range argv { + if strings.HasPrefix(arg, HOSTNAME_OVERRIDE_FLAG) { + overwrite = true + argv[i] = hostnameOverride + break + } + } + if !overwrite { + t.executor.Command.Arguments = append(argv, hostnameOverride) + } + return nil +} + +type RequireSomePodResources struct { + defaultContainerCPULimit mresource.CPUShares + defaultContainerMemLimit mresource.MegaBytes +} + +func (r *RequireSomePodResources) Procure(t *T, offer *mesos.Offer) error { + // write resource limits into the pod spec which is transferred to the executor. From here + // on we can expect that the pod spec of a task has proper limits for CPU and memory. + // TODO(sttts): For a later separation of the kubelet and the executor also patch the pod on the apiserver + // TODO(jdef): changing the state of t.Pod here feels dirty, especially since we don't use a kosher + // method to clone the api.Pod state in T.Clone(). This needs some love. + if unlimitedCPU := mresource.LimitPodCPU(&t.Pod, r.defaultContainerCPULimit); unlimitedCPU { + log.Warningf("Pod %s/%s without cpu limits is admitted %.2f cpu shares", t.Pod.Namespace, t.Pod.Name, mresource.PodCPULimit(&t.Pod)) + } + if unlimitedMem := mresource.LimitPodMem(&t.Pod, r.defaultContainerMemLimit); unlimitedMem { + log.Warningf("Pod %s/%s without memory limits is admitted %.2f MB", t.Pod.Namespace, t.Pod.Name, mresource.PodMemLimit(&t.Pod)) + } + return nil +} + +// PodResourcesProcurement converts k8s pod cpu and memory resource requirements into +// mesos resource allocations. +func PodResourcesProcurement(t *T, offer *mesos.Offer) error { + // compute used resources + cpu := mresource.PodCPULimit(&t.Pod) + mem := mresource.PodMemLimit(&t.Pod) + + log.V(3).Infof("Recording offer(s) %s/%s against pod %v: cpu: %.2f, mem: %.2f MB", offer.Id, t.Pod.Namespace, t.Pod.Name, cpu, mem) + + t.Spec.CPU = cpu + t.Spec.Memory = mem + return nil +} + +// PortsProcurement convert host port mappings into mesos port resource allocations. 
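Procurement steps compose the same way, with rollback on failure: AllOrNothingProcurement.Procure calls t.Reset() as soon as any step errors. A minimal sketch, again not part of the patch, with a hypothetical requireSlaveIDProcurement step added to the chain:

```go
package podtask

import (
	"fmt"

	mesos "github.com/mesos/mesos-go/mesosproto"
)

// requireSlaveIDProcurement is a hypothetical extra step that rejects offers
// without a slave ID before anything is recorded against the task. Real
// steps, such as NodeProcurement above, write the procured values into t.Spec.
func requireSlaveIDProcurement(t *T, offer *mesos.Offer) error {
	if offer.GetSlaveId().GetValue() == "" {
		return fmt.Errorf("offer %v carries no slave id", offer.Id)
	}
	return nil
}

// customProcurement runs the steps in order. The first error aborts the
// chain, AllOrNothingProcurement.Procure calls t.Reset(), and the error is
// returned, so a task never keeps a half-filled Spec. (The default chain in
// NewDefaultProcurement also injects default cpu/mem limits through
// RequireSomePodResources; that step is omitted here for brevity.)
var customProcurement = AllOrNothingProcurement([]Procurement{
	ValidateProcurement,
	requireSlaveIDProcurement,
	NodeProcurement,
	PodResourcesProcurement,
	PortsProcurement,
}).Procure
```

PortsProcurement, described immediately above, handles the host-port mapping step.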
+func PortsProcurement(t *T, offer *mesos.Offer) error { + // fill in port mapping + if mapping, err := t.mapper.Generate(t, offer); err != nil { + return err + } else { + ports := []uint64{} + for _, entry := range mapping { + ports = append(ports, entry.OfferPort) + } + t.Spec.PortMap = mapping + t.Spec.Ports = ports + } + return nil +} diff --git a/contrib/mesos/pkg/scheduler/scheduler.go b/contrib/mesos/pkg/scheduler/scheduler.go index 7a2ead5cf6d..0148e0d9368 100644 --- a/contrib/mesos/pkg/scheduler/scheduler.go +++ b/contrib/mesos/pkg/scheduler/scheduler.go @@ -39,7 +39,6 @@ import ( "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta" "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/metrics" "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask" - mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource" "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/uid" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" @@ -118,19 +117,17 @@ type KubernetesScheduler struct { // and the invoking the pod registry interfaces. // In particular, changes to podtask.T objects are currently guarded by this lock. *sync.RWMutex + PodScheduler // Config related, write-once - schedcfg *schedcfg.Config - executor *mesos.ExecutorInfo - executorGroup uint64 - scheduleFunc PodScheduleFunc - client *client.Client - etcdClient tools.EtcdClient - failoverTimeout float64 // in seconds - reconcileInterval int64 - defaultContainerCPULimit mresource.CPUShares - defaultContainerMemLimit mresource.MegaBytes + schedcfg *schedcfg.Config + executor *mesos.ExecutorInfo + executorGroup uint64 + client *client.Client + etcdClient tools.EtcdClient + failoverTimeout float64 // in seconds + reconcileInterval int64 // Mesos context. @@ -157,33 +154,29 @@ type KubernetesScheduler struct { } type Config struct { - Schedcfg schedcfg.Config - Executor *mesos.ExecutorInfo - ScheduleFunc PodScheduleFunc - Client *client.Client - EtcdClient tools.EtcdClient - FailoverTimeout float64 - ReconcileInterval int64 - ReconcileCooldown time.Duration - DefaultContainerCPULimit mresource.CPUShares - DefaultContainerMemLimit mresource.MegaBytes + Schedcfg schedcfg.Config + Executor *mesos.ExecutorInfo + Scheduler PodScheduler + Client *client.Client + EtcdClient tools.EtcdClient + FailoverTimeout float64 + ReconcileInterval int64 + ReconcileCooldown time.Duration } // New creates a new KubernetesScheduler func New(config Config) *KubernetesScheduler { var k *KubernetesScheduler k = &KubernetesScheduler{ - schedcfg: &config.Schedcfg, - RWMutex: new(sync.RWMutex), - executor: config.Executor, - executorGroup: uid.Parse(config.Executor.ExecutorId.GetValue()).Group(), - scheduleFunc: config.ScheduleFunc, - client: config.Client, - etcdClient: config.EtcdClient, - failoverTimeout: config.FailoverTimeout, - reconcileInterval: config.ReconcileInterval, - defaultContainerCPULimit: config.DefaultContainerCPULimit, - defaultContainerMemLimit: config.DefaultContainerMemLimit, + schedcfg: &config.Schedcfg, + RWMutex: new(sync.RWMutex), + executor: config.Executor, + executorGroup: uid.Parse(config.Executor.ExecutorId.GetValue()).Group(), + PodScheduler: config.Scheduler, + client: config.Client, + etcdClient: config.EtcdClient, + failoverTimeout: config.FailoverTimeout, + reconcileInterval: config.ReconcileInterval, offers: offers.CreateRegistry(offers.RegistryConfig{ Compat: func(o *mesos.Offer) bool { // filter the offers: the executor IDs must not identify a kubelet- diff --git a/contrib/mesos/pkg/scheduler/service/service.go 
b/contrib/mesos/pkg/scheduler/service/service.go index 541fe47a159..e898fcf72cb 100644 --- a/contrib/mesos/pkg/scheduler/service/service.go +++ b/contrib/mesos/pkg/scheduler/service/service.go @@ -57,6 +57,7 @@ import ( "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/ha" "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/meta" "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/metrics" + "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask" mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource" "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/uid" "k8s.io/kubernetes/pkg/api" @@ -139,6 +140,8 @@ type SchedulerServer struct { KubeletNetworkPluginName string StaticPodsConfigPath string DockerCfgPath string + ContainPodResources bool + AccountForPodResources bool executable string // path to the binary running this service client *client.Client @@ -170,18 +173,20 @@ func NewSchedulerServer() *SchedulerServer { MinionLogMaxBackups: minioncfg.DefaultLogMaxBackups, MinionLogMaxAgeInDays: minioncfg.DefaultLogMaxAgeInDays, - MesosAuthProvider: sasl.ProviderName, - MesosCgroupPrefix: minioncfg.DefaultCgroupPrefix, - MesosMaster: defaultMesosMaster, - MesosUser: defaultMesosUser, - ReconcileInterval: defaultReconcileInterval, - ReconcileCooldown: defaultReconcileCooldown, - Checkpoint: true, - FrameworkName: defaultFrameworkName, - HA: false, - mux: http.NewServeMux(), - KubeletCadvisorPort: 4194, // copied from github.com/GoogleCloudPlatform/kubernetes/blob/release-0.14/cmd/kubelet/app/server.go - KubeletSyncFrequency: 10 * time.Second, + MesosAuthProvider: sasl.ProviderName, + MesosCgroupPrefix: minioncfg.DefaultCgroupPrefix, + MesosMaster: defaultMesosMaster, + MesosUser: defaultMesosUser, + ReconcileInterval: defaultReconcileInterval, + ReconcileCooldown: defaultReconcileCooldown, + Checkpoint: true, + FrameworkName: defaultFrameworkName, + HA: false, + mux: http.NewServeMux(), + KubeletCadvisorPort: 4194, // copied from github.com/GoogleCloudPlatform/kubernetes/blob/release-0.14/cmd/kubelet/app/server.go + KubeletSyncFrequency: 10 * time.Second, + ContainPodResources: true, + AccountForPodResources: true, } // cache this for later use. also useful in case the original binary gets deleted, e.g. // during upgrades, development deployments, etc. 
@@ -231,6 +236,8 @@ func (s *SchedulerServer) addCoreFlags(fs *pflag.FlagSet) { fs.IPVar(&s.ServiceAddress, "service-address", s.ServiceAddress, "The service portal IP address that the scheduler should register with (if unset, chooses randomly)") fs.Var(&s.DefaultContainerCPULimit, "default-container-cpu-limit", "Containers without a CPU resource limit are admitted this much CPU shares") fs.Var(&s.DefaultContainerMemLimit, "default-container-mem-limit", "Containers without a memory resource limit are admitted this much amount of memory in MB") + fs.BoolVar(&s.ContainPodResources, "contain-pod-resources", s.ContainPodResources, "Reparent pod containers into mesos cgroups; disable if you're having strange mesos/docker/systemd interactions.") + fs.BoolVar(&s.AccountForPodResources, "account-for-pod-resources", s.AccountForPodResources, "Allocate pod CPU and memory resources from offers (Default: true)") fs.IntVar(&s.ExecutorLogV, "executor-logv", s.ExecutorLogV, "Logging verbosity of spawned minion and executor processes.") fs.BoolVar(&s.ExecutorBindall, "executor-bindall", s.ExecutorBindall, "When true will set -address of the executor to 0.0.0.0.") @@ -367,6 +374,7 @@ func (s *SchedulerServer) prepareExecutorInfo(hks hyperkube.Interface) (*mesos.E ci.Arguments = append(ci.Arguments, fmt.Sprintf("--mesos-cgroup-prefix=%v", s.MesosCgroupPrefix)) ci.Arguments = append(ci.Arguments, fmt.Sprintf("--cadvisor-port=%v", s.KubeletCadvisorPort)) ci.Arguments = append(ci.Arguments, fmt.Sprintf("--sync-frequency=%v", s.KubeletSyncFrequency)) + ci.Arguments = append(ci.Arguments, fmt.Sprintf("--contain-pod-resources=%t", s.ContainPodResources)) if s.AuthPath != "" { //TODO(jdef) should probably support non-local files, e.g. hdfs:///some/config/file @@ -651,17 +659,27 @@ func (s *SchedulerServer) bootstrap(hks hyperkube.Interface, sc *schedcfg.Config log.Fatalf("misconfigured etcd: %v", err) } + as := scheduler.NewAllocationStrategy( + podtask.DefaultPredicate, + podtask.NewDefaultProcurement(s.DefaultContainerCPULimit, s.DefaultContainerMemLimit)) + + // downgrade allocation strategy if user disables "account-for-pod-resources" + if !s.AccountForPodResources { + as = scheduler.NewAllocationStrategy( + podtask.DefaultMinimalPredicate, + podtask.DefaultMinimalProcurement) + } + + fcfs := scheduler.NewFCFSPodScheduler(as) mesosPodScheduler := scheduler.New(scheduler.Config{ - Schedcfg: *sc, - Executor: executor, - ScheduleFunc: scheduler.FCFSScheduleFunc, - Client: client, - EtcdClient: etcdClient, - FailoverTimeout: s.FailoverTimeout, - ReconcileInterval: s.ReconcileInterval, - ReconcileCooldown: s.ReconcileCooldown, - DefaultContainerCPULimit: s.DefaultContainerCPULimit, - DefaultContainerMemLimit: s.DefaultContainerMemLimit, + Schedcfg: *sc, + Executor: executor, + Scheduler: fcfs, + Client: client, + EtcdClient: etcdClient, + FailoverTimeout: s.FailoverTimeout, + ReconcileInterval: s.ReconcileInterval, + ReconcileCooldown: s.ReconcileCooldown, }) masterUri := s.MesosMaster diff --git a/contrib/mesos/pkg/scheduler/types.go b/contrib/mesos/pkg/scheduler/types.go index 0b9ba9de017..b529eec318a 100644 --- a/contrib/mesos/pkg/scheduler/types.go +++ b/contrib/mesos/pkg/scheduler/types.go @@ -23,17 +23,29 @@ import ( "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask" ) -// PodScheduleFunc implements how to schedule pods among slaves. -// We can have different implementation for different scheduling policy. 
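Putting the pieces together, the bootstrap hunk above builds an AllocationStrategy, downgrades it when --account-for-pod-resources is disabled, and hands an FCFS pod scheduler to scheduler.New. Below is a condensed sketch of that wiring; the newPodScheduler helper, its placement in package service, and the .../pkg/scheduler import path are assumptions made for illustration:

```go
package service

import (
	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler"
	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
	mresource "k8s.io/kubernetes/contrib/mesos/pkg/scheduler/resource"
)

// newPodScheduler mirrors the wiring now done in bootstrap: choose an
// allocation strategy, then wrap it in the FCFS pod scheduler that
// scheduler.New consumes via Config.Scheduler. The cpu and mem arguments
// stand in for the --default-container-cpu-limit and
// --default-container-mem-limit flag values.
func newPodScheduler(cpu mresource.CPUShares, mem mresource.MegaBytes, accountForPodResources bool) scheduler.PodScheduler {
	as := scheduler.NewAllocationStrategy(
		podtask.DefaultPredicate,
		podtask.NewDefaultProcurement(cpu, mem),
	)
	if !accountForPodResources {
		// --account-for-pod-resources=false: skip cpu/mem accounting entirely
		as = scheduler.NewAllocationStrategy(
			podtask.DefaultMinimalPredicate,
			podtask.DefaultMinimalProcurement,
		)
	}
	return scheduler.NewFCFSPodScheduler(as)
}
```

The AllocationStrategy and PodScheduler interfaces backing this are defined in the types.go hunk that follows.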
-// -// The Schedule function accepts a group of slaves (each contains offers from -// that slave) and a single pod, which aligns well with the k8s scheduling -// algorithm. It returns an offerId that is acceptable for the pod, otherwise -// nil. The caller is responsible for filling in task state w/ relevant offer -// details. -// -// See the FCFSScheduleFunc for example. -type PodScheduleFunc func(r offers.Registry, slaves SlaveIndex, task *podtask.T) (offers.Perishable, error) +type AllocationStrategy interface { + // FitPredicate returns the selector used to determine pod fitness w/ respect to a given offer + FitPredicate() podtask.FitPredicate + + // Procurement returns a func that obtains resources for a task from resource offer + Procurement() podtask.Procurement +} + +type PodScheduler interface { + AllocationStrategy + + // SchedulePod implements how to schedule pods among slaves. + // We can have different implementation for different scheduling policy. + // + // The function accepts a group of slaves (each contains offers from + // that slave) and a single pod, which aligns well with the k8s scheduling + // algorithm. It returns an offerId that is acceptable for the pod, otherwise + // nil. The caller is responsible for filling in task state w/ relevant offer + // details. + // + // See the FCFSPodScheduler for example. + SchedulePod(r offers.Registry, slaves SlaveIndex, task *podtask.T) (offers.Perishable, error) +} // A minimal placeholder type empty struct{} diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt index a746b633c99..a25cf09afb6 100644 --- a/hack/verify-flags/known-flags.txt +++ b/hack/verify-flags/known-flags.txt @@ -1,5 +1,6 @@ accept-hosts accept-paths +account-for-pod-resources admission-control admission-control-config-file advertise-address @@ -43,6 +44,7 @@ cluster-name cluster-tag concurrent-endpoint-syncs configure-cbr0 +contain-pod-resources container-port container-runtime cors-allowed-origins @@ -263,4 +265,4 @@ whitelist-override-label www-prefix retry_time file_content_in_loop -cpu-cfs-quota \ No newline at end of file +cpu-cfs-quota From 3574999fa34d54c47f43efd9eaff7e1c571c7910 Mon Sep 17 00:00:00 2001 From: Amy Unruh Date: Mon, 10 Aug 2015 11:00:44 -0700 Subject: [PATCH 044/101] Use GCR images from 'google-samples' project; allow switch on whether dns service is supported, or to use env vars to get service host info. Test change to reflect php filename change. --- examples/guestbook/README.md | 56 ++++++++++++++++--- examples/guestbook/frontend-controller.yaml | 10 +++- examples/guestbook/php-redis/Dockerfile | 13 +++-- examples/guestbook/php-redis/controllers.js | 4 +- .../php-redis/{index.php => guestbook.php} | 20 +++++-- examples/guestbook/php-redis/index.html | 2 +- .../guestbook/redis-slave-controller.yaml | 10 +++- examples/guestbook/redis-slave/run.sh | 6 +- test/e2e/kubectl.go | 2 +- 9 files changed, 97 insertions(+), 26 deletions(-) rename examples/guestbook/php-redis/{index.php => guestbook.php} (58%) diff --git a/examples/guestbook/README.md b/examples/guestbook/README.md index c93cbece6b4..f6e71023705 100644 --- a/examples/guestbook/README.md +++ b/examples/guestbook/README.md @@ -136,6 +136,12 @@ redis-master-dz33o 1/1 Running 0 2h (Note that an initial `docker pull` to grab a container image may take a few minutes, depending on network conditions. A pod will be reported as `Pending` while its image is being downloaded.) 
+`kubectl get pods` will show only the pods in the default [namespace](../../docs/user-guide/namespaces.md). To see pods in all namespaces, run: + +``` +kubectl get pods -o wide --all-namespaces=true +``` + #### Optional Interlude You can get information about a pod, including the machine that it is running on, via `kubectl describe pods/`. E.g., for the redis master, you should see something like the following (your pod name will be different): @@ -256,9 +262,15 @@ Kubernetes supports two primary modes of finding a service— environment variab The services in a Kubernetes cluster are discoverable inside other containers [via environment variables](../../docs/user-guide/services.md#environment-variables). An alternative is to use the [cluster's DNS service](../../docs/user-guide/services.md#dns), if it has been enabled for the cluster. This lets all pods do name resolution of services automatically, based on the service name. -We'll use the DNS service for this example. E.g., you can see the service name, `redis-master`, accessed as a `host` value in the PHP script in [Step 5](#step-five-create-the-frontend-replicated-pods). -**Note**: **If your cluster does not have the DNS service enabled, then this example will not work out of the box.** You will need to edit `examples/guestbook/php-redis/index.php` to use environment variables for service discovery instead, then rebuild the container image from the `Dockerfile` in that directory. (However, this is unlikely to be necessary. You can check for the DNS service in the list of the clusters' services.) +This example has been configured to use the DNS service by default. + +If your cluster does not have the DNS service enabled, then you can use environment variables by setting the +`GET_HOSTS_FROM` env value in both +`examples/guestbook/redis-slave-controller.yaml` and `examples/guestbook/frontend-controller.yaml` +from `dns` to `env` before you start up the app. +(However, this is unlikely to be necessary. You can check for the DNS service in the list of the clusters' services by +running `kubectl --namespace=kube-system get rc`, and looking for a controller prefixed `kube-dns`.) ### Step Three: Fire up the replicated slave pods @@ -291,7 +303,15 @@ spec: spec: containers: - name: worker - image: kubernetes/redis-slave:v2 + image: gcr.io/google_samples/gb-redisslave:v1 + env: + - name: GET_HOSTS_FROM + value: dns + # If your cluster config does not include a dns service, then to + # instead access an environment variable to find the master + # service's host, comment out the 'value: dns' line above, and + # uncomment the line below. + # value: env ports: - containerPort: 6379 ``` @@ -393,7 +413,15 @@ spec: spec: containers: - name: php-redis - image: gcr.io/google_containers/example-guestbook-php-redis:v3 + image: gcr.io/google_samples/gb-frontend:v2 + env: + - name: GET_HOSTS_FROM + value: dns + # If your cluster config does not include a dns service, then to + # instead access environment variables to find service host + # info, comment out the 'value: dns' line above, and uncomment the + # line below. + # value: env ports: - containerPort: 80 ``` @@ -435,33 +463,43 @@ redis-slave-iqkhy 1/1 Running 0 2h You should see a single redis master pod, two redis slaves, and three frontend pods. -The code for the PHP server that the frontends are running looks like this: +The code for the PHP server that the frontends are running is in `guestbook/php-redis/guestbook.php`. 
It looks like this: ```php 'tcp', - 'host' => 'redis-master', + 'host' => $host, 'port' => 6379, ]); $client->set($_GET['key'], $_GET['value']); print('{"message": "Updated"}'); } else { + $host = 'redis-slave'; + if (getenv('GET_HOSTS_FROM') == 'env') { + $host = getenv('REDIS_SLAVE_SERVICE_HOST'); + } $client = new Predis\Client([ 'scheme' => 'tcp', - 'host' => 'redis-slave', + 'host' => $host, 'port' => 6379, ]); diff --git a/examples/guestbook/frontend-controller.yaml b/examples/guestbook/frontend-controller.yaml index c5e666f058a..ae8d24986bc 100644 --- a/examples/guestbook/frontend-controller.yaml +++ b/examples/guestbook/frontend-controller.yaml @@ -15,6 +15,14 @@ spec: spec: containers: - name: php-redis - image: gcr.io/google_containers/example-guestbook-php-redis:v3 + image: gcr.io/google_samples/gb-frontend:v2 + env: + - name: GET_HOSTS_FROM + value: dns + # If your cluster config does not include a dns service, then to + # instead access environment variables to find service host + # info, comment out the 'value: dns' line above, and uncomment the + # line below. + # value: env ports: - containerPort: 80 diff --git a/examples/guestbook/php-redis/Dockerfile b/examples/guestbook/php-redis/Dockerfile index 3cf7c2cfa20..093cd7cbdc0 100644 --- a/examples/guestbook/php-redis/Dockerfile +++ b/examples/guestbook/php-redis/Dockerfile @@ -1,7 +1,10 @@ -FROM brendanburns/php +FROM php:5-apache -ADD index.php /var/www/index.php -ADD controllers.js /var/www/controllers.js -ADD index.html /var/www/index.html +RUN apt-get update +RUN apt-get install -y php-pear +RUN pear channel-discover pear.nrk.io +RUN pear install nrk/Predis -CMD /run.sh +ADD guestbook.php /var/www/html/guestbook.php +ADD controllers.js /var/www/html/controllers.js +ADD index.html /var/www/html/index.html diff --git a/examples/guestbook/php-redis/controllers.js b/examples/guestbook/php-redis/controllers.js index 48481fd5d46..1e4b5504246 100644 --- a/examples/guestbook/php-redis/controllers.js +++ b/examples/guestbook/php-redis/controllers.js @@ -9,7 +9,7 @@ RedisController.prototype.onRedis = function() { this.scope_.messages.push(this.scope_.msg); this.scope_.msg = ""; var value = this.scope_.messages.join(); - this.http_.get("index.php?cmd=set&key=messages&value=" + value) + this.http_.get("guestbook.php?cmd=set&key=messages&value=" + value) .success(angular.bind(this, function(data) { this.scope_.redisResponse = "Updated."; })); @@ -21,7 +21,7 @@ redisApp.controller('RedisCtrl', function ($scope, $http, $location) { $scope.controller.location_ = $location; $scope.controller.http_ = $http; - $scope.controller.http_.get("index.php?cmd=get&key=messages") + $scope.controller.http_.get("guestbook.php?cmd=get&key=messages") .success(function(data) { console.log(data); $scope.messages = data.data.split(","); diff --git a/examples/guestbook/php-redis/index.php b/examples/guestbook/php-redis/guestbook.php similarity index 58% rename from examples/guestbook/php-redis/index.php rename to examples/guestbook/php-redis/guestbook.php index 18bff077579..2ea63f0a0b0 100644 --- a/examples/guestbook/php-redis/index.php +++ b/examples/guestbook/php-redis/guestbook.php @@ -1,27 +1,37 @@ 'tcp', - 'host' => 'redis-master', + 'host' => $host, 'port' => 6379, ]); - + $client->set($_GET['key'], $_GET['value']); print('{"message": "Updated"}'); } else { + $host = 'redis-slave'; + if (getenv('GET_HOSTS_FROM') == 'env') { + $host = getenv('REDIS_SLAVE_SERVICE_HOST'); + } $client = new Predis\Client([ 'scheme' => 'tcp', - 'host' => 'redis-slave', + 
'host' => $host, 'port' => 6379, ]); diff --git a/examples/guestbook/php-redis/index.html b/examples/guestbook/php-redis/index.html index fe457984874..4ffb4ed2ab5 100644 --- a/examples/guestbook/php-redis/index.html +++ b/examples/guestbook/php-redis/index.html @@ -4,7 +4,7 @@ - +
diff --git a/examples/guestbook/redis-slave-controller.yaml b/examples/guestbook/redis-slave-controller.yaml index 74fec7c0250..6e5dde18aa7 100644 --- a/examples/guestbook/redis-slave-controller.yaml +++ b/examples/guestbook/redis-slave-controller.yaml @@ -15,6 +15,14 @@ spec: spec: containers: - name: worker - image: kubernetes/redis-slave:v2 + image: gcr.io/google_samples/gb-redisslave:v1 + env: + - name: GET_HOSTS_FROM + value: dns + # If your cluster config does not include a dns service, then to + # instead access an environment variable to find the master + # service's host, comment out the 'value: dns' line above, and + # uncomment the line below. + # value: env ports: - containerPort: 6379 diff --git a/examples/guestbook/redis-slave/run.sh b/examples/guestbook/redis-slave/run.sh index bf48f27c015..9f79ccef17a 100755 --- a/examples/guestbook/redis-slave/run.sh +++ b/examples/guestbook/redis-slave/run.sh @@ -14,4 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -redis-server --slaveof redis-master 6379 +if [[ ${GET_HOSTS_FROM:-dns} == "env" ]]; then + redis-server --slaveof ${REDIS_MASTER_SERVICE_HOST} 6379 +else + redis-server --slaveof redis-master 6379 +fi diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go index 82c649e7d3b..26dd7d8f9d0 100644 --- a/test/e2e/kubectl.go +++ b/test/e2e/kubectl.go @@ -746,7 +746,7 @@ func makeRequestToGuestbook(c *client.Client, cmd, value string, ns string) (str Namespace(ns). Resource("services"). Name("frontend"). - Suffix("/index.php"). + Suffix("/guestbook.php"). Param("cmd", cmd). Param("key", "messages"). Param("value", value). From 8a2e430b1da4d831c978fad6b62e9a45f2998982 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Thu, 3 Sep 2015 19:17:36 -0700 Subject: [PATCH 045/101] remove some descritpion tags from expapi --- pkg/expapi/v1/types.go | 7 +++++-- pkg/expapi/v1/types_swagger_doc_generated.go | 9 +++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/pkg/expapi/v1/types.go b/pkg/expapi/v1/types.go index 4996b5d2aaf..4d4af72a088 100644 --- a/pkg/expapi/v1/types.go +++ b/pkg/expapi/v1/types.go @@ -343,7 +343,10 @@ type DaemonList struct { type ThirdPartyResourceDataList struct { v1.TypeMeta `json:",inline"` - v1.ListMeta `json:"metadata,omitempty" description:"standard list metadata; see http://docs.k8s.io/api-conventions.md#metadata"` + // Standard list metadata + // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata + v1.ListMeta `json:"metadata,omitempty"` - Items []ThirdPartyResourceData `json:"items" description:"items is a list of third party objects"` + // Items is a list of third party objects + Items []ThirdPartyResourceData `json:"items"` } diff --git a/pkg/expapi/v1/types_swagger_doc_generated.go b/pkg/expapi/v1/types_swagger_doc_generated.go index 5bede15e55a..40497a50a9c 100644 --- a/pkg/expapi/v1/types_swagger_doc_generated.go +++ b/pkg/expapi/v1/types_swagger_doc_generated.go @@ -260,6 +260,15 @@ func (ThirdPartyResourceData) SwaggerDoc() map[string]string { return map_ThirdPartyResourceData } +var map_ThirdPartyResourceDataList = map[string]string{ + "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "items": "Items is a list of third party objects", +} + +func (ThirdPartyResourceDataList) SwaggerDoc() map[string]string { + return map_ThirdPartyResourceDataList +} + var map_ThirdPartyResourceList = map[string]string{ "metadata": "Standard list 
metadata.", "items": "Items is the list of horizontal pod autoscalers.", From 2b7e758c3c279e7f9efc17e17b4c565de5b648f0 Mon Sep 17 00:00:00 2001 From: "Timothy St. Clair" Date: Mon, 31 Aug 2015 12:29:18 -0400 Subject: [PATCH 046/101] Fixes the experimental api, which appeared to be completely broken. Fix for rebase with nikhiljindal/deploymentController --- pkg/client/unversioned/client.go | 6 ++ pkg/client/unversioned/client_test.go | 11 ---- pkg/client/unversioned/daemon_test.go | 12 ++-- pkg/client/unversioned/experimental.go | 4 +- pkg/client/unversioned/helper.go | 7 ++- .../horizontalpodautoscaler_test.go | 12 ++-- .../unversioned/testclient/fake_daemons.go | 2 +- .../unversioned/testclient/testclient.go | 28 +++++++-- pkg/kubectl/cmd/apiversions.go | 8 +-- pkg/kubectl/cmd/util/clientcache.go | 25 -------- pkg/kubectl/cmd/util/factory.go | 59 +++---------------- pkg/kubectl/describe.go | 23 ++++---- pkg/kubectl/stop.go | 2 +- pkg/kubectl/stop_test.go | 2 +- test/integration/framework/master_utils.go | 2 +- 15 files changed, 73 insertions(+), 130 deletions(-) diff --git a/pkg/client/unversioned/client.go b/pkg/client/unversioned/client.go index 778ac013c78..53bf188ede8 100644 --- a/pkg/client/unversioned/client.go +++ b/pkg/client/unversioned/client.go @@ -46,6 +46,7 @@ type Interface interface { PersistentVolumesInterface PersistentVolumeClaimsNamespacer ComponentStatusesInterface + Experimental() ExperimentalInterface } func (c *Client) ReplicationControllers(namespace string) ReplicationControllerInterface { @@ -122,6 +123,7 @@ type APIStatus interface { // Client is the implementation of a Kubernetes client. type Client struct { *RESTClient + *ExperimentalClient } // ServerVersion retrieves and parses the server's version. @@ -192,3 +194,7 @@ func IsTimeout(err error) bool { } return false } + +func (c *Client) Experimental() ExperimentalInterface { + return c.ExperimentalClient +} diff --git a/pkg/client/unversioned/client_test.go b/pkg/client/unversioned/client_test.go index d4100cc9296..5671518704f 100644 --- a/pkg/client/unversioned/client_test.go +++ b/pkg/client/unversioned/client_test.go @@ -54,7 +54,6 @@ type Response struct { type testClient struct { *Client - *ExperimentalClient Request testRequest Response Response Error bool @@ -87,16 +86,6 @@ func (c *testClient) Setup() *testClient { Version: version, }) } - if c.ExperimentalClient == nil { - version := c.Version - if len(version) == 0 { - version = testapi.Version() - } - c.ExperimentalClient = NewExperimentalOrDie(&Config{ - Host: c.server.URL, - Version: version, - }) - } c.QueryValidator = map[string]func(string, string) bool{} return c } diff --git a/pkg/client/unversioned/daemon_test.go b/pkg/client/unversioned/daemon_test.go index 1a30c1b3725..42ee7a75ce1 100644 --- a/pkg/client/unversioned/daemon_test.go +++ b/pkg/client/unversioned/daemon_test.go @@ -55,7 +55,7 @@ func TestListDaemons(t *testing.T) { }, }, } - receivedControllerList, err := c.Setup().Daemons(ns).List(labels.Everything()) + receivedControllerList, err := c.Setup().Experimental().Daemons(ns).List(labels.Everything()) c.Validate(t, receivedControllerList, err) } @@ -80,14 +80,14 @@ func TestGetDaemon(t *testing.T) { }, }, } - receivedController, err := c.Setup().Daemons(ns).Get("foo") + receivedController, err := c.Setup().Experimental().Daemons(ns).Get("foo") c.Validate(t, receivedController, err) } func TestGetDaemonWithNoName(t *testing.T) { ns := api.NamespaceDefault c := &testClient{Error: true} - receivedPod, err := 
c.Setup().Daemons(ns).Get("") + receivedPod, err := c.Setup().Experimental().Daemons(ns).Get("") if (err != nil) && (err.Error() != nameRequiredError) { t.Errorf("Expected error: %v, but got %v", nameRequiredError, err) } @@ -118,7 +118,7 @@ func TestUpdateDaemon(t *testing.T) { }, }, } - receivedController, err := c.Setup().Daemons(ns).Update(requestController) + receivedController, err := c.Setup().Experimental().Daemons(ns).Update(requestController) c.Validate(t, receivedController, err) } @@ -128,7 +128,7 @@ func TestDeleteDaemon(t *testing.T) { Request: testRequest{Method: "DELETE", Path: testapi.ResourcePath(getDCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200}, } - err := c.Setup().Daemons(ns).Delete("foo") + err := c.Setup().Experimental().Daemons(ns).Delete("foo") c.Validate(t, nil, err) } @@ -155,6 +155,6 @@ func TestCreateDaemon(t *testing.T) { }, }, } - receivedController, err := c.Setup().Daemons(ns).Create(requestController) + receivedController, err := c.Setup().Experimental().Daemons(ns).Create(requestController) c.Validate(t, receivedController, err) } diff --git a/pkg/client/unversioned/experimental.go b/pkg/client/unversioned/experimental.go index d2fff69f985..bd30ace7d8b 100644 --- a/pkg/client/unversioned/experimental.go +++ b/pkg/client/unversioned/experimental.go @@ -119,9 +119,7 @@ func NewExperimentalOrDie(c *Config) *ExperimentalClient { } func setExperimentalDefaults(config *Config) error { - if config.Prefix == "" { - config.Prefix = "/experimental" - } + config.Prefix = "/experimental" if config.UserAgent == "" { config.UserAgent = DefaultKubernetesUserAgent() } diff --git a/pkg/client/unversioned/helper.go b/pkg/client/unversioned/helper.go index ac09a0924c9..cae4fafc38e 100644 --- a/pkg/client/unversioned/helper.go +++ b/pkg/client/unversioned/helper.go @@ -139,7 +139,12 @@ func New(c *Config) (*Client, error) { if err != nil { return nil, err } - return &Client{client}, nil + experimentalConfig := *c + experimentalClient, err := NewExperimental(&experimentalConfig) + if err != nil { + return nil, err + } + return &Client{RESTClient: client, ExperimentalClient: experimentalClient}, nil } // MatchesServerVersion queries the server to compares the build version diff --git a/pkg/client/unversioned/horizontalpodautoscaler_test.go b/pkg/client/unversioned/horizontalpodautoscaler_test.go index 23ff02d8eed..d1fdd5aa16b 100644 --- a/pkg/client/unversioned/horizontalpodautoscaler_test.go +++ b/pkg/client/unversioned/horizontalpodautoscaler_test.go @@ -49,7 +49,7 @@ func TestHorizontalPodAutoscalerCreate(t *testing.T) { Response: Response{StatusCode: 200, Body: &horizontalPodAutoscaler}, } - response, err := c.Setup().HorizontalPodAutoscalers(ns).Create(&horizontalPodAutoscaler) + response, err := c.Setup().Experimental().HorizontalPodAutoscalers(ns).Create(&horizontalPodAutoscaler) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -74,7 +74,7 @@ func TestHorizontalPodAutoscalerGet(t *testing.T) { Response: Response{StatusCode: 200, Body: horizontalPodAutoscaler}, } - response, err := c.Setup().HorizontalPodAutoscalers(ns).Get("abc") + response, err := c.Setup().Experimental().HorizontalPodAutoscalers(ns).Get("abc") c.Validate(t, response, err) } @@ -99,7 +99,7 @@ func TestHorizontalPodAutoscalerList(t *testing.T) { }, Response: Response{StatusCode: 200, Body: horizontalPodAutoscalerList}, } - response, err := c.Setup().HorizontalPodAutoscalers(ns).List(labels.Everything(), fields.Everything()) + response, err := 
c.Setup().Experimental().HorizontalPodAutoscalers(ns).List(labels.Everything(), fields.Everything()) c.Validate(t, response, err) } @@ -116,7 +116,7 @@ func TestHorizontalPodAutoscalerUpdate(t *testing.T) { Request: testRequest{Method: "PUT", Path: testapi.ResourcePath(getHorizontalPodAutoscalersResoureName(), ns, "abc"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: horizontalPodAutoscaler}, } - response, err := c.Setup().HorizontalPodAutoscalers(ns).Update(horizontalPodAutoscaler) + response, err := c.Setup().Experimental().HorizontalPodAutoscalers(ns).Update(horizontalPodAutoscaler) c.Validate(t, response, err) } @@ -126,7 +126,7 @@ func TestHorizontalPodAutoscalerDelete(t *testing.T) { Request: testRequest{Method: "DELETE", Path: testapi.ResourcePath(getHorizontalPodAutoscalersResoureName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200}, } - err := c.Setup().HorizontalPodAutoscalers(ns).Delete("foo", nil) + err := c.Setup().Experimental().HorizontalPodAutoscalers(ns).Delete("foo", nil) c.Validate(t, nil, err) } @@ -138,6 +138,6 @@ func TestHorizontalPodAutoscalerWatch(t *testing.T) { Query: url.Values{"resourceVersion": []string{}}}, Response: Response{StatusCode: 200}, } - _, err := c.Setup().HorizontalPodAutoscalers(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") + _, err := c.Setup().Experimental().HorizontalPodAutoscalers(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") c.Validate(t, nil, err) } diff --git a/pkg/client/unversioned/testclient/fake_daemons.go b/pkg/client/unversioned/testclient/fake_daemons.go index 382d62f3922..8b0b3bc8014 100644 --- a/pkg/client/unversioned/testclient/fake_daemons.go +++ b/pkg/client/unversioned/testclient/fake_daemons.go @@ -27,7 +27,7 @@ import ( // FakeDaemons implements DaemonInterface. Meant to be embedded into a struct to get a default // implementation. This makes faking out just the method you want to test easier. 
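All of the client test changes above funnel through the new accessor, and ordinary callers use the same pattern. A minimal sketch, assuming import paths that match this tree's package layout (pkg/client/unversioned, pkg/labels) and a placeholder autoscaler name:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/labels"
)

// listExperimental shows the call pattern the tests above rely on:
// experimental resources now hang off c.Experimental() rather than a
// separately constructed ExperimentalClient.
func listExperimental(c *client.Client) error {
	ns := api.NamespaceDefault

	daemons, err := c.Experimental().Daemons(ns).List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("daemons in %s: %d\n", ns, len(daemons.Items))

	hpa, err := c.Experimental().HorizontalPodAutoscalers(ns).Get("my-autoscaler")
	if err != nil {
		return err
	}
	fmt.Printf("autoscaler: %s\n", hpa.ObjectMeta.Name)
	return nil
}
```

The fake counterparts used by these tests, starting with FakeDaemons described just above, are wired the same way through FakeExperimental.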
type FakeDaemons struct { - Fake *Fake + Fake *FakeExperimental Namespace string } diff --git a/pkg/client/unversioned/testclient/testclient.go b/pkg/client/unversioned/testclient/testclient.go index 5814fb5cb1e..2773890482b 100644 --- a/pkg/client/unversioned/testclient/testclient.go +++ b/pkg/client/unversioned/testclient/testclient.go @@ -171,10 +171,6 @@ func (c *Fake) ReplicationControllers(namespace string) client.ReplicationContro return &FakeReplicationControllers{Fake: c, Namespace: namespace} } -func (c *Fake) Daemons(namespace string) client.DaemonInterface { - return &FakeDaemons{Fake: c, Namespace: namespace} -} - func (c *Fake) Nodes() client.NodeInterface { return &FakeNodes{Fake: c} } @@ -219,6 +215,10 @@ func (c *Fake) Namespaces() client.NamespaceInterface { return &FakeNamespaces{Fake: c} } +func (c *Fake) Experimental() client.ExperimentalInterface { + return &FakeExperimental{c} +} + func (c *Fake) ServerVersion() (*version.Info, error) { action := ActionImpl{} action.Verb = "get" @@ -241,3 +241,23 @@ func (c *Fake) ServerAPIVersions() (*api.APIVersions, error) { func (c *Fake) ComponentStatuses() client.ComponentStatusInterface { return &FakeComponentStatuses{Fake: c} } + +type FakeExperimental struct { + *Fake +} + +func (c *FakeExperimental) Daemons(namespace string) client.DaemonInterface { + return &FakeDaemons{Fake: c, Namespace: namespace} +} + +func (c *FakeExperimental) HorizontalPodAutoscalers(namespace string) client.HorizontalPodAutoscalerInterface { + panic("unimplemented") +} + +func (c *FakeExperimental) Scales(namespace string) client.ScaleInterface { + panic("unimplemented") +} + +func (c *FakeExperimental) Deployments(namespace string) client.DeploymentInterface { + panic("unimplemented") +} diff --git a/pkg/kubectl/cmd/apiversions.go b/pkg/kubectl/cmd/apiversions.go index 032c3b597bb..9694f0e4d7b 100644 --- a/pkg/kubectl/cmd/apiversions.go +++ b/pkg/kubectl/cmd/apiversions.go @@ -58,12 +58,8 @@ func RunApiVersions(f *cmdutil.Factory, w io.Writer) error { } var expAPIVersions *api.APIVersions - showExpVersions := false - expClient, err := f.ExperimentalClient() - if err == nil { - expAPIVersions, err = expClient.ServerAPIVersions() - showExpVersions = err == nil - } + expAPIVersions, err = client.Experimental().ServerAPIVersions() + showExpVersions := (err == nil) fmt.Fprintf(w, "Available Server Api Versions: %#v\n", *apiVersions) if showExpVersions { diff --git a/pkg/kubectl/cmd/util/clientcache.go b/pkg/kubectl/cmd/util/clientcache.go index 49253b7e3da..c43fcca264e 100644 --- a/pkg/kubectl/cmd/util/clientcache.go +++ b/pkg/kubectl/cmd/util/clientcache.go @@ -89,28 +89,3 @@ func (c *ClientCache) ClientForVersion(version string) (*client.Client, error) { c.clients[config.Version] = client return client, nil } - -type ExperimentalClientCache struct { - loader clientcmd.ClientConfig - client *client.ExperimentalClient - err error - init bool -} - -func NewExperimentalClientCache(loader clientcmd.ClientConfig) *ExperimentalClientCache { - return &ExperimentalClientCache{loader: loader} -} - -func (cc *ExperimentalClientCache) Client() (*client.ExperimentalClient, error) { - if cc.init { - return cc.client, cc.err - } - cfg, err := cc.loader.ClientConfig() - if err != nil { - cc.err = err - } else { - cc.client, cc.err = client.NewExperimental(cfg) - } - cc.init = true - return cc.client, cc.err -} diff --git a/pkg/kubectl/cmd/util/factory.go b/pkg/kubectl/cmd/util/factory.go index 4b4c6dea9bc..3c16f49f4a5 100644 --- a/pkg/kubectl/cmd/util/factory.go +++ 
b/pkg/kubectl/cmd/util/factory.go @@ -17,7 +17,6 @@ limitations under the License. package util import ( - "errors" "flag" "fmt" "io" @@ -57,8 +56,6 @@ type Factory struct { Object func() (meta.RESTMapper, runtime.ObjectTyper) // Returns a client for accessing Kubernetes resources or an error. Client func() (*client.Client, error) - // Returns a client for accessing experimental Kubernetes resources or an error. - ExperimentalClient func() (*client.ExperimentalClient, error) // Returns a client.Config for accessing the Kubernetes server. ClientConfig func() (*client.Config, error) // Returns a RESTClient for working with the specified RESTMapping or an error. This is intended @@ -110,28 +107,7 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { } clients := NewClientCache(clientConfig) - expClients := NewExperimentalClientCache(clientConfig) - noClientErr := errors.New("could not get client") - getBothClients := func(group string, version string) (*client.Client, *client.ExperimentalClient, error) { - switch group { - case "api": - client, err := clients.ClientForVersion(version) - return client, nil, err - - case "experimental": - client, err := clients.ClientForVersion(version) - if err != nil { - return nil, nil, err - } - expClient, err := expClients.Client() - if err != nil { - return nil, nil, err - } - return client, expClient, err - } - return nil, nil, noClientErr - } return &Factory{ clients: clients, flags: flags, @@ -147,30 +123,20 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { Client: func() (*client.Client, error) { return clients.ClientForVersion("") }, - ExperimentalClient: func() (*client.ExperimentalClient, error) { - return expClients.Client() - }, ClientConfig: func() (*client.Config, error) { return clients.ClientConfigForVersion("") }, RESTClient: func(mapping *meta.RESTMapping) (resource.RESTClient, error) { group, err := api.RESTMapper.GroupForResource(mapping.Resource) + client, err := clients.ClientForVersion(mapping.APIVersion) if err != nil { return nil, err } switch group { case "api": - client, err := clients.ClientForVersion(mapping.APIVersion) - if err != nil { - return nil, err - } return client.RESTClient, nil case "experimental": - client, err := expClients.Client() - if err != nil { - return nil, err - } - return client.RESTClient, nil + return client.ExperimentalClient.RESTClient, nil } return nil, fmt.Errorf("unable to get RESTClient for resource '%s'", mapping.Resource) }, @@ -179,11 +145,11 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { if err != nil { return nil, err } - client, expClient, err := getBothClients(group, mapping.APIVersion) + client, err := clients.ClientForVersion(mapping.APIVersion) if err != nil { return nil, err } - if describer, ok := kubectl.DescriberFor(mapping.Kind, client, expClient); ok { + if describer, ok := kubectl.DescriberFor(group, mapping.Kind, client); ok { return describer, nil } return nil, fmt.Errorf("no description has been implemented for %q", mapping.Kind) @@ -235,26 +201,18 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { return meta.NewAccessor().Labels(object) }, Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) { - group, err := api.RESTMapper.GroupForResource(mapping.Resource) - if err != nil { - return nil, err - } - client, _, err := getBothClients(group, mapping.APIVersion) + client, err := clients.ClientForVersion(mapping.APIVersion) if err != nil { return nil, err } return 
kubectl.ScalerFor(mapping.Kind, kubectl.NewScalerClient(client)) }, Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) { - group, err := api.RESTMapper.GroupForResource(mapping.Resource) + client, err := clients.ClientForVersion(mapping.APIVersion) if err != nil { return nil, err } - client, expClient, err := getBothClients(group, mapping.APIVersion) - if err != nil { - return nil, err - } - return kubectl.ReaperFor(mapping.Kind, client, expClient) + return kubectl.ReaperFor(mapping.Kind, client) }, Validator: func(validate bool) (validation.Schema, error) { if validate { @@ -262,8 +220,7 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory { if err != nil { return nil, err } - expClient, _ := expClients.Client() - return &clientSwaggerSchema{client, expClient, api.Scheme}, nil + return &clientSwaggerSchema{client, client.ExperimentalClient, api.Scheme}, nil } return validation.NullSchema{}, nil }, diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index b1a5ccf9680..d47c2f4fb4b 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -82,12 +82,9 @@ func describerMap(c *client.Client) map[string]Describer { return m } -func expDescriberMap(c *client.Client, exp *client.ExperimentalClient) map[string]Describer { +func expDescriberMap(c *client.Client) map[string]Describer { return map[string]Describer{ - "HorizontalPodAutoscaler": &HorizontalPodAutoscalerDescriber{ - client: c, - experimental: exp, - }, + "HorizontalPodAutoscaler": &HorizontalPodAutoscalerDescriber{c}, } } @@ -104,16 +101,17 @@ func DescribableResources() []string { // Describer returns the default describe functions for each of the standard // Kubernetes types. -func DescriberFor(kind string, c *client.Client, ec *client.ExperimentalClient) (Describer, bool) { +func DescriberFor(group string, kind string, c *client.Client) (Describer, bool) { var f Describer var ok bool - if c != nil { + switch group { + case "api": f, ok = describerMap(c)[kind] + case "experimental": + f, ok = expDescriberMap(c)[kind] } - if !ok && c != nil && ec != nil { - f, ok = expDescriberMap(c, ec)[kind] - } + return f, ok } @@ -1153,12 +1151,11 @@ func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (strin // HorizontalPodAutoscalerDescriber generates information about a horizontal pod autoscaler. 
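On the kubectl side, describers are now looked up by API group as well as kind. A short sketch of the call pattern, with a hypothetical describeHPA helper and simplified error handling:

```go
package main

import (
	"fmt"
	"os"

	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/kubectl"
)

// describeHPA illustrates the new two-argument lookup: callers pass the API
// group ("api" or "experimental") along with the kind, and describers for
// experimental kinds reach the server through c.Experimental() internally.
func describeHPA(c *client.Client, namespace, name string) {
	describer, ok := kubectl.DescriberFor("experimental", "HorizontalPodAutoscaler", c)
	if !ok {
		fmt.Fprintln(os.Stderr, "no describer registered for HorizontalPodAutoscaler")
		return
	}
	out, err := describer.Describe(namespace, name)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println(out)
}
```

HorizontalPodAutoscalerDescriber, introduced just above, is the describer this lookup returns for the experimental group.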
type HorizontalPodAutoscalerDescriber struct { - client *client.Client - experimental *client.ExperimentalClient + client *client.Client } func (d *HorizontalPodAutoscalerDescriber) Describe(namespace, name string) (string, error) { - hpa, err := d.experimental.HorizontalPodAutoscalers(namespace).Get(name) + hpa, err := d.client.Experimental().HorizontalPodAutoscalers(namespace).Get(name) if err != nil { return "", err } diff --git a/pkg/kubectl/stop.go b/pkg/kubectl/stop.go index 7cf92bf20d4..bca8ff277d9 100644 --- a/pkg/kubectl/stop.go +++ b/pkg/kubectl/stop.go @@ -52,7 +52,7 @@ func IsNoSuchReaperError(err error) bool { return ok } -func ReaperFor(kind string, c client.Interface, ec *client.ExperimentalClient) (Reaper, error) { +func ReaperFor(kind string, c client.Interface) (Reaper, error) { switch kind { case "ReplicationController": return &ReplicationControllerReaper{c, Interval, Timeout}, nil diff --git a/pkg/kubectl/stop_test.go b/pkg/kubectl/stop_test.go index e4acce66140..705d7def237 100644 --- a/pkg/kubectl/stop_test.go +++ b/pkg/kubectl/stop_test.go @@ -365,7 +365,7 @@ func TestSimpleStop(t *testing.T) { } for _, test := range tests { fake := test.fake - reaper, err := ReaperFor(test.kind, fake, nil) + reaper, err := ReaperFor(test.kind, fake) if err != nil { t.Errorf("unexpected error: %v (%s)", err, test.test) } diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go index d2d89b9ca95..31b7dc8275b 100644 --- a/test/integration/framework/master_utils.go +++ b/test/integration/framework/master_utils.go @@ -190,7 +190,7 @@ func RCFromManifest(fileName string) *api.ReplicationController { // StopRC stops the rc via kubectl's stop library func StopRC(rc *api.ReplicationController, restClient *client.Client) error { - reaper, err := kubectl.ReaperFor("ReplicationController", restClient, nil) + reaper, err := kubectl.ReaperFor("ReplicationController", restClient) if err != nil || reaper == nil { return err } From 2684018bb5f754a88df5e5eec514a1fb33d043a2 Mon Sep 17 00:00:00 2001 From: Jerzy Szczepkowski Date: Tue, 1 Sep 2015 13:47:17 +0200 Subject: [PATCH 047/101] Configurations of jenkins e2e cluster and default private e2e cluster made consistent. Configurations of jenkins e2e cluster and default private e2e cluster made consistent. --- cluster/gce/config-test.sh | 6 +++--- hack/jenkins/e2e.sh | 3 --- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 4f3930caf34..5bb1902e2bf 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -18,9 +18,9 @@ # gcloud multiplexing for shared GCE/GKE tests. 
GCLOUD=gcloud ZONE=${KUBE_GCE_ZONE:-us-central1-b} -MASTER_SIZE=${MASTER_SIZE:-n1-standard-1} -MINION_SIZE=${MINION_SIZE:-n1-standard-1} -NUM_MINIONS=${NUM_MINIONS:-2} +MASTER_SIZE=${MASTER_SIZE:-n1-standard-2} +MINION_SIZE=${MINION_SIZE:-n1-standard-2} +NUM_MINIONS=${NUM_MINIONS:-3} MASTER_DISK_TYPE=pd-ssd MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB} MINION_DISK_TYPE=pd-standard diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index 47403915057..c39195f7c85 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -69,9 +69,6 @@ if [[ ${JOB_NAME} =~ ^kubernetes-.*-gce ]]; then KUBERNETES_PROVIDER="gce" : ${E2E_MIN_STARTUP_PODS:="1"} : ${E2E_ZONE:="us-central1-f"} - : ${MASTER_SIZE:="n1-standard-2"} - : ${MINION_SIZE:="n1-standard-2"} - : ${NUM_MINIONS:="3"} : ${NUM_MINIONS_PARALLEL:="6"} # Number of nodes required to run all of the tests in parallel fi From 2b2d6b677d3f2bdb95bafeb2802268a671f82330 Mon Sep 17 00:00:00 2001 From: Wojciech Tyczynski Date: Thu, 3 Sep 2015 15:27:01 +0200 Subject: [PATCH 048/101] Refactor registry tests to reduce dependency on go-etcd. --- pkg/api/rest/resttest/resttest.go | 3 +- pkg/registry/event/etcd/etcd_test.go | 245 +++---------- .../experimental/controller/etcd/etcd_test.go | 64 ++-- pkg/registry/namespace/etcd/etcd_test.go | 76 ++-- pkg/registry/pod/etcd/etcd_test.go | 85 +---- .../service/allocator/etcd/etcd_test.go | 55 +-- .../service/ipallocator/etcd/etcd_test.go | 57 ++- pkg/registry/service/registry_test.go | 335 ------------------ .../thirdpartyresourcedata/etcd/etcd_test.go | 131 +++---- 9 files changed, 188 insertions(+), 863 deletions(-) delete mode 100644 pkg/registry/service/registry_test.go diff --git a/pkg/api/rest/resttest/resttest.go b/pkg/api/rest/resttest/resttest.go index adf2d7e3e9d..1c1ab3a4759 100644 --- a/pkg/api/rest/resttest/resttest.go +++ b/pkg/api/rest/resttest/resttest.go @@ -23,7 +23,6 @@ import ( "testing" "time" - "github.com/coreos/go-etcd/etcd" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/rest" @@ -544,7 +543,7 @@ func (t *Tester) testDeleteNoGraceful(obj runtime.Object, setFn SetFunc, getFn G func (t *Tester) testDeleteNonExist(obj runtime.Object) { objectMeta := t.getObjectMetaOrFail(obj) - t.withStorageError(&etcd.EtcdError{ErrorCode: tools.EtcdErrorCodeNotFound}, func() { + t.withStorageError(tools.EtcdErrorNotFound, func() { _, err := t.storage.(rest.GracefulDeleter).Delete(t.TestContext(), objectMeta.Name, nil) if err == nil || !errors.IsNotFound(err) { t.Errorf("unexpected error: %v", err) diff --git a/pkg/registry/event/etcd/etcd_test.go b/pkg/registry/event/etcd/etcd_test.go index a3cd46ff2bd..fcc46e8e18d 100644 --- a/pkg/registry/event/etcd/etcd_test.go +++ b/pkg/registry/event/etcd/etcd_test.go @@ -17,20 +17,12 @@ limitations under the License. 
package etcd import ( - "reflect" "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/testapi" - etcdgeneric "k8s.io/kubernetes/pkg/registry/generic/etcd" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/tools" - "k8s.io/kubernetes/pkg/tools/etcdtest" - "k8s.io/kubernetes/pkg/util" - - "github.com/coreos/go-etcd/etcd" ) var testTTL uint64 = 60 @@ -41,199 +33,56 @@ func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { return NewREST(etcdStorage, testTTL), fakeClient } -func TestEventCreate(t *testing.T) { - eventA := &api.Event{ - ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, - Reason: "forTesting", - InvolvedObject: api.ObjectReference{Name: "bar", Namespace: api.NamespaceDefault}, - } - eventB := &api.Event{ - ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, - Reason: "forTesting", - InvolvedObject: api.ObjectReference{Name: "bar", Namespace: api.NamespaceDefault}, - } - - nodeWithEventA := tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), eventA), - ModifiedIndex: 1, - CreatedIndex: 1, - TTL: int64(testTTL), - }, +func validNewEvent(namespace string) *api.Event { + return &api.Event{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: namespace, }, - E: nil, - } - - emptyNode := tools.EtcdResponseWithError{ - R: &etcd.Response{}, - E: tools.EtcdErrorNotFound, - } - - ctx := api.NewDefaultContext() - key := "foo" - path, err := etcdgeneric.NamespaceKeyFunc(ctx, "/events", key) - path = etcdtest.AddPrefix(path) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - table := map[string]struct { - existing tools.EtcdResponseWithError - expect tools.EtcdResponseWithError - toCreate runtime.Object - errOK func(error) bool - }{ - "normal": { - existing: emptyNode, - expect: nodeWithEventA, - toCreate: eventA, - errOK: func(err error) bool { return err == nil }, + Reason: "forTesting", + InvolvedObject: api.ObjectReference{ + Name: "bar", + Namespace: namespace, }, - "preExisting": { - existing: nodeWithEventA, - expect: nodeWithEventA, - toCreate: eventB, - errOK: errors.IsAlreadyExists, - }, - } - - for name, item := range table { - storage, fakeClient := newStorage(t) - fakeClient.Data[path] = item.existing - _, err := storage.Create(ctx, item.toCreate) - if !item.errOK(err) { - t.Errorf("%v: unexpected error: %v", name, err) - } - - // nullify fields set by infrastructure - received := fakeClient.Data[path] - var event api.Event - if err := testapi.Codec().DecodeInto([]byte(received.R.Node.Value), &event); err != nil { - t.Errorf("unexpected error: %v", err) - } - event.ObjectMeta.CreationTimestamp = util.Time{} - event.ObjectMeta.UID = "" - received.R.Node.Value = runtime.EncodeOrDie(testapi.Codec(), &event) - - if e, a := item.expect, received; !reflect.DeepEqual(e, a) { - t.Errorf("%v:\n%s", name, util.ObjectDiff(e, a)) - } } } -func TestEventUpdate(t *testing.T) { - eventA := &api.Event{ - ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, - Reason: "forTesting", - InvolvedObject: api.ObjectReference{Name: "foo", Namespace: api.NamespaceDefault}, - } - eventB := &api.Event{ - ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, - Reason: "for testing again", - InvolvedObject: api.ObjectReference{Name: "foo", Namespace: api.NamespaceDefault}, - } - eventC := &api.Event{ - ObjectMeta: 
api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault, ResourceVersion: "1"}, - Reason: "for testing again something else", - InvolvedObject: api.ObjectReference{Name: "foo", Namespace: api.NamespaceDefault}, - } - - nodeWithEventA := tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), eventA), - ModifiedIndex: 1, - CreatedIndex: 1, - TTL: int64(testTTL), - }, - }, - E: nil, - } - - nodeWithEventB := tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), eventB), - ModifiedIndex: 1, - CreatedIndex: 1, - TTL: int64(testTTL), - }, - }, - E: nil, - } - - nodeWithEventC := tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), eventC), - ModifiedIndex: 1, - CreatedIndex: 1, - TTL: int64(testTTL), - }, - }, - E: nil, - } - - emptyNode := tools.EtcdResponseWithError{ - R: &etcd.Response{}, - E: tools.EtcdErrorNotFound, - } - - ctx := api.NewDefaultContext() - key := "foo" - path, err := etcdgeneric.NamespaceKeyFunc(ctx, "/events", key) - path = etcdtest.AddPrefix(path) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - - table := map[string]struct { - existing tools.EtcdResponseWithError - expect tools.EtcdResponseWithError - toUpdate runtime.Object - errOK func(error) bool - }{ - "doesNotExist": { - existing: emptyNode, - expect: nodeWithEventA, - toUpdate: eventA, - errOK: func(err error) bool { return err == nil }, - }, - "doesNotExist2": { - existing: emptyNode, - expect: nodeWithEventB, - toUpdate: eventB, - errOK: func(err error) bool { return err == nil }, - }, - "replaceExisting": { - existing: nodeWithEventA, - expect: nodeWithEventC, - toUpdate: eventC, - errOK: func(err error) bool { return err == nil }, - }, - } - - for name, item := range table { - storage, fakeClient := newStorage(t) - fakeClient.Data[path] = item.existing - _, _, err := storage.Update(ctx, item.toUpdate) - if !item.errOK(err) { - t.Errorf("%v: unexpected error: %v", name, err) - } - - // nullify fields set by infrastructure - received := fakeClient.Data[path] - var event api.Event - if err := testapi.Codec().DecodeInto([]byte(received.R.Node.Value), &event); err != nil { - t.Errorf("unexpected error: %v", err) - } - event.ObjectMeta.CreationTimestamp = util.Time{} - event.ObjectMeta.UID = "" - received.R.Node.Value = runtime.EncodeOrDie(testapi.Codec(), &event) - - if e, a := item.expect, received; !reflect.DeepEqual(e, a) { - t.Errorf("%v:\n%s", name, util.ObjectGoPrintDiff(e, a)) - } - } +func TestCreate(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + event := validNewEvent(test.TestNamespace()) + event.ObjectMeta = api.ObjectMeta{} + test.TestCreate( + // valid + event, + // invalid + &api.Event{}, + ) +} + +func TestUpdate(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd).AllowCreateOnUpdate() + test.TestUpdate( + // valid + validNewEvent(test.TestNamespace()), + // valid updateFunc + func(obj runtime.Object) runtime.Object { + object := obj.(*api.Event) + object.Reason = "forDifferentTesting" + return object + }, + // invalid updateFunc + func(obj runtime.Object) runtime.Object { + object := obj.(*api.Event) + object.InvolvedObject.Namespace = "different-namespace" + return object + }, + ) +} + +func TestDelete(t *testing.T) { + storage, fakeClient := newStorage(t) + test := 
registrytest.New(t, fakeClient, storage.Etcd) + test.TestDelete(validNewEvent(test.TestNamespace())) } diff --git a/pkg/registry/experimental/controller/etcd/etcd_test.go b/pkg/registry/experimental/controller/etcd/etcd_test.go index 3bcb277a315..fa09096ae6f 100644 --- a/pkg/registry/experimental/controller/etcd/etcd_test.go +++ b/pkg/registry/experimental/controller/etcd/etcd_test.go @@ -21,29 +21,17 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/storage" - etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/tools/etcdtest" "k8s.io/kubernetes/pkg/util" - - "k8s.io/kubernetes/pkg/expapi" - - "github.com/coreos/go-etcd/etcd" ) -func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { - fakeEtcdClient := tools.NewFakeEtcdClient(t) - fakeEtcdClient.TestIndex = true - etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) - return fakeEtcdClient, etcdStorage -} - -func newStorage(t *testing.T) (*RcREST, *ScaleREST, *tools.FakeEtcdClient, storage.Interface) { - fakeEtcdClient, etcdStorage := newEtcdStorage(t) - storage := NewStorage(etcdStorage) - return storage.ReplicationController, storage.Scale, fakeEtcdClient, etcdStorage +func newStorage(t *testing.T) (*ScaleREST, *tools.FakeEtcdClient) { + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + return NewStorage(etcdStorage).Scale, fakeClient } var validPodTemplate = api.PodTemplate{ @@ -90,43 +78,32 @@ var validScale = expapi.Scale{ } func TestGet(t *testing.T) { - expect := &validScale - - fakeEtcdClient, etcdStorage := newEtcdStorage(t) + storage, fakeClient := newStorage(t) + ctx := api.WithNamespace(api.NewContext(), "test") key := etcdtest.AddPrefix("/controllers/test/foo") - fakeEtcdClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), &validController), - ModifiedIndex: 1, - }, - }, + if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), &validController), 0); err != nil { + t.Fatalf("unexpected error: %v", err) } - storage := NewStorage(etcdStorage).Scale - obj, err := storage.Get(api.WithNamespace(api.NewContext(), "test"), "foo") + expect := &validScale + obj, err := storage.Get(ctx, "foo") scale := obj.(*expapi.Scale) if err != nil { t.Fatalf("unexpected error: %v", err) } if e, a := expect, scale; !api.Semantic.DeepEqual(e, a) { - t.Errorf("Unexpected scale: %s", util.ObjectDiff(e, a)) + t.Errorf("unexpected scale: %s", util.ObjectDiff(e, a)) } } func TestUpdate(t *testing.T) { - fakeEtcdClient, etcdStorage := newEtcdStorage(t) - storage := NewStorage(etcdStorage).Scale + storage, fakeClient := newStorage(t) + ctx := api.WithNamespace(api.NewContext(), "test") key := etcdtest.AddPrefix("/controllers/test/foo") - fakeEtcdClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), &validController), - ModifiedIndex: 1, - }, - }, + if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), &validController), 0); err != nil { + t.Fatalf("unexpected error: %v", err) } replicas := 12 update := expapi.Scale{ @@ -136,13 +113,12 @@ func TestUpdate(t *testing.T) { }, } - _, _, err := storage.Update(api.WithNamespace(api.NewContext(), "test"), &update) - if err != nil { - 
t.Fatalf("Unexpected error: %v", err) + if _, _, err := storage.Update(ctx, &update); err != nil { + t.Fatalf("unexpected error: %v", err) } - response, err := fakeEtcdClient.Get(key, false, false) + response, err := fakeClient.Get(key, false, false) if err != nil { - t.Fatalf("Unexpected error: %v", err) + t.Fatalf("unexpected error: %v", err) } var controller api.ReplicationController diff --git a/pkg/registry/namespace/etcd/etcd_test.go b/pkg/registry/namespace/etcd/etcd_test.go index 4112a20e34c..dfe9ed8714d 100644 --- a/pkg/registry/namespace/etcd/etcd_test.go +++ b/pkg/registry/namespace/etcd/etcd_test.go @@ -28,8 +28,6 @@ import ( "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/tools/etcdtest" "k8s.io/kubernetes/pkg/util" - - "github.com/coreos/go-etcd/etcd" ) func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { @@ -62,11 +60,11 @@ func TestCreate(t *testing.T) { } func TestCreateSetsFields(t *testing.T) { - storage, fakeClient := newStorage(t) + storage, _ := newStorage(t) namespace := validNewNamespace() ctx := api.NewContext() _, err := storage.Create(ctx, namespace) - if err != fakeClient.Err { + if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -129,58 +127,46 @@ func TestWatch(t *testing.T) { func TestDeleteNamespaceWithIncompleteFinalizers(t *testing.T) { storage, fakeClient := newStorage(t) - fakeClient.ChangeIndex = 1 - key := etcdtest.AddPrefix("/namespaces/foo") + key := etcdtest.AddPrefix("namespaces/foo") + ctx := api.NewContext() now := util.Now() - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), &api.Namespace{ - ObjectMeta: api.ObjectMeta{ - Name: "foo", - DeletionTimestamp: &now, - }, - Spec: api.NamespaceSpec{ - Finalizers: []api.FinalizerName{api.FinalizerKubernetes}, - }, - Status: api.NamespaceStatus{Phase: api.NamespaceActive}, - }), - ModifiedIndex: 1, - CreatedIndex: 1, - }, + namespace := &api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + DeletionTimestamp: &now, }, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{api.FinalizerKubernetes}, + }, + Status: api.NamespaceStatus{Phase: api.NamespaceActive}, } - _, err := storage.Delete(api.NewContext(), "foo", nil) - if err == nil { - t.Fatalf("expected error: %v", err) + if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), namespace), 0); err != nil { + t.Fatalf("unexpected error: %v", err) + } + if _, err := storage.Delete(ctx, "foo", nil); err == nil { + t.Errorf("unexpected error: %v", err) } } func TestDeleteNamespaceWithCompleteFinalizers(t *testing.T) { storage, fakeClient := newStorage(t) - fakeClient.ChangeIndex = 1 - key := etcdtest.AddPrefix("/namespaces/foo") + key := etcdtest.AddPrefix("namespaces/foo") + ctx := api.NewContext() now := util.Now() - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), &api.Namespace{ - ObjectMeta: api.ObjectMeta{ - Name: "foo", - DeletionTimestamp: &now, - }, - Spec: api.NamespaceSpec{ - Finalizers: []api.FinalizerName{}, - }, - Status: api.NamespaceStatus{Phase: api.NamespaceActive}, - }), - ModifiedIndex: 1, - CreatedIndex: 1, - }, + namespace := &api.Namespace{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + DeletionTimestamp: &now, }, + Spec: api.NamespaceSpec{ + Finalizers: []api.FinalizerName{}, + }, + Status: api.NamespaceStatus{Phase: api.NamespaceActive}, } - _, err := storage.Delete(api.NewContext(), "foo", nil) - if err 
!= nil { + if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), namespace), 0); err != nil { t.Fatalf("unexpected error: %v", err) } + if _, err := storage.Delete(ctx, "foo", nil); err != nil { + t.Errorf("unexpected error: %v", err) + } } diff --git a/pkg/registry/pod/etcd/etcd_test.go b/pkg/registry/pod/etcd/etcd_test.go index 2559a0e38c8..ee737d6ce1b 100644 --- a/pkg/registry/pod/etcd/etcd_test.go +++ b/pkg/registry/pod/etcd/etcd_test.go @@ -34,8 +34,6 @@ import ( "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/tools/etcdtest" "k8s.io/kubernetes/pkg/util" - - "github.com/coreos/go-etcd/etcd" ) func newStorage(t *testing.T) (*REST, *BindingREST, *StatusREST, *tools.FakeEtcdClient) { @@ -256,14 +254,10 @@ func TestResourceLocation(t *testing.T) { ctx := api.NewDefaultContext() for _, tc := range testCases { storage, _, _, fakeClient := newStorage(t) - key, _ := storage.Etcd.KeyFunc(ctx, "foo") + key, _ := storage.KeyFunc(ctx, tc.pod.Name) key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), &tc.pod), - }, - }, + if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), &tc.pod), 0); err != nil { + t.Fatalf("unexpected error: %v", err) } redirector := rest.Redirector(storage) @@ -284,33 +278,6 @@ func TestResourceLocation(t *testing.T) { } } -func TestDeletePod(t *testing.T) { - storage, _, _, fakeClient := newStorage(t) - fakeClient.ChangeIndex = 1 - ctx := api.NewDefaultContext() - key, _ := storage.Etcd.KeyFunc(ctx, "foo") - key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), &api.Pod{ - ObjectMeta: api.ObjectMeta{ - Name: "foo", - Namespace: api.NamespaceDefault, - }, - Spec: api.PodSpec{NodeName: "machine"}, - }), - ModifiedIndex: 1, - CreatedIndex: 1, - }, - }, - } - _, err := storage.Delete(api.NewDefaultContext(), "foo", nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } -} - func TestGet(t *testing.T) { storage, _, _, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) @@ -351,12 +318,7 @@ func TestEtcdCreate(t *testing.T) { fakeClient.TestIndex = true key, _ := storage.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: nil, - }, - E: tools.EtcdErrorNotFound, - } + fakeClient.ExpectNotFoundGet(key) _, err := storage.Create(ctx, validNewPod()) if err != nil { t.Fatalf("unexpected error: %v", err) @@ -395,12 +357,7 @@ func TestEtcdCreateBindingNoPod(t *testing.T) { key, _ := storage.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: nil, - }, - E: tools.EtcdErrorNotFound, - } + fakeClient.ExpectNotFoundGet(key) // Assume that a pod has undergone the following: // - Create (apiserver) // - Schedule (scheduler) @@ -443,12 +400,7 @@ func TestEtcdCreateWithContainersNotFound(t *testing.T) { fakeClient.TestIndex = true key, _ := storage.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: nil, - }, - E: tools.EtcdErrorNotFound, - } + fakeClient.ExpectNotFoundGet(key) _, err := storage.Create(ctx, validNewPod()) if err != nil { t.Fatalf("unexpected error: %v", err) @@ -490,12 +442,7 @@ func TestEtcdCreateWithConflict(t *testing.T) { ctx 
:= api.NewDefaultContext() fakeClient.TestIndex = true key, _ := storage.KeyFunc(ctx, "foo") - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: nil, - }, - E: tools.EtcdErrorNotFound, - } + fakeClient.ExpectNotFoundGet(key) _, err := storage.Create(ctx, validNewPod()) if err != nil { @@ -528,12 +475,7 @@ func TestEtcdCreateWithExistingContainers(t *testing.T) { fakeClient.TestIndex = true key, _ := storage.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: nil, - }, - E: tools.EtcdErrorNotFound, - } + fakeClient.ExpectNotFoundGet(key) _, err := storage.Create(ctx, validNewPod()) if err != nil { t.Fatalf("unexpected error: %v", err) @@ -564,9 +506,7 @@ func TestEtcdCreateWithExistingContainers(t *testing.T) { } func TestEtcdCreateBinding(t *testing.T) { - storage, bindingStorage, _, fakeClient := newStorage(t) ctx := api.NewDefaultContext() - fakeClient.TestIndex = true testCases := map[string]struct { binding api.Binding @@ -609,14 +549,11 @@ func TestEtcdCreateBinding(t *testing.T) { }, } for k, test := range testCases { + storage, bindingStorage, _, fakeClient := newStorage(t) key, _ := storage.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: nil, - }, - E: tools.EtcdErrorNotFound, - } + fakeClient.ExpectNotFoundGet(key) + if _, err := storage.Create(ctx, validNewPod()); err != nil { t.Fatalf("%s: unexpected error: %v", k, err) } diff --git a/pkg/registry/service/allocator/etcd/etcd_test.go b/pkg/registry/service/allocator/etcd/etcd_test.go index 8eaf5d08ad7..e1efedea0b3 100644 --- a/pkg/registry/service/allocator/etcd/etcd_test.go +++ b/pkg/registry/service/allocator/etcd/etcd_test.go @@ -20,32 +20,26 @@ import ( "strings" "testing" - "github.com/coreos/go-etcd/etcd" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/registry/service/allocator" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/storage" - etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/tools/etcdtest" ) -func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { - fakeEtcdClient := tools.NewFakeEtcdClient(t) - fakeEtcdClient.TestIndex = true - etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) - return fakeEtcdClient, etcdStorage +func newStorage(t *testing.T) (*Etcd, *tools.FakeEtcdClient, allocator.Interface) { + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + mem := allocator.NewAllocationMap(100, "rangeSpecValue") + etcd := NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", etcdStorage) + return etcd, fakeClient, mem } -func newStorage(t *testing.T) (*Etcd, allocator.Interface, *tools.FakeEtcdClient) { - fakeEtcdClient, s := newEtcdStorage(t) - - mem := allocator.NewAllocationMap(100, "rangeSpecValue") - etcd := NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", s) - - return etcd, mem, fakeEtcdClient +func validNewRangeAllocation() *api.RangeAllocation { + return &api.RangeAllocation{ + Range: "rangeSpecValue", + } } func key() string { @@ -54,31 +48,18 @@ func key() string { } func TestEmpty(t *testing.T) { - storage, _, ecli := newStorage(t) - ecli.ExpectNotFoundGet(key()) + storage, fakeClient, _ := newStorage(t) + fakeClient.ExpectNotFoundGet(key()) if _, err := storage.Allocate(1); 
!strings.Contains(err.Error(), "cannot allocate resources of type serviceipallocation at this time") { t.Fatal(err) } } -func initialObject(ecli *tools.FakeEtcdClient) { - ecli.Data[key()] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - CreatedIndex: 1, - ModifiedIndex: 2, - Value: runtime.EncodeOrDie(testapi.Codec(), &api.RangeAllocation{ - Range: "rangeSpecValue", - }), - }, - }, - E: nil, - } -} - func TestStore(t *testing.T) { - storage, backing, ecli := newStorage(t) - initialObject(ecli) + storage, fakeClient, backing := newStorage(t) + if _, err := fakeClient.Set(key(), runtime.EncodeOrDie(testapi.Codec(), validNewRangeAllocation()), 0); err != nil { + t.Fatalf("unexpected error: %v", err) + } if _, err := storage.Allocate(2); err != nil { t.Fatal(err) @@ -94,7 +75,7 @@ func TestStore(t *testing.T) { t.Fatal("Expected allocation to fail") } - obj := ecli.Data[key()] + obj := fakeClient.Data[key()] if obj.R == nil || obj.R.Node == nil { t.Fatalf("%s is empty: %#v", key(), obj) } @@ -106,7 +87,7 @@ func TestStore(t *testing.T) { if err := storage.storage.Get(key(), allocation, false); err != nil { t.Fatal(err) } - if allocation.ResourceVersion != "1" { + if allocation.ResourceVersion != "2" { t.Fatalf("%#v", allocation) } if allocation.Range != "rangeSpecValue" { diff --git a/pkg/registry/service/ipallocator/etcd/etcd_test.go b/pkg/registry/service/ipallocator/etcd/etcd_test.go index d0cd0c865ae..59e73b999dc 100644 --- a/pkg/registry/service/ipallocator/etcd/etcd_test.go +++ b/pkg/registry/service/ipallocator/etcd/etcd_test.go @@ -21,29 +21,19 @@ import ( "strings" "testing" - "github.com/coreos/go-etcd/etcd" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/registry/service/allocator" allocator_etcd "k8s.io/kubernetes/pkg/registry/service/allocator/etcd" "k8s.io/kubernetes/pkg/registry/service/ipallocator" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/storage" - etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/tools/etcdtest" ) -func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { - fakeEtcdClient := tools.NewFakeEtcdClient(t) - fakeEtcdClient.TestIndex = true - etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) - return fakeEtcdClient, etcdStorage -} - -func newStorage(t *testing.T) (ipallocator.Interface, allocator.Interface, *tools.FakeEtcdClient) { - fakeEtcdClient, etcdStorage := newEtcdStorage(t) +func newStorage(t *testing.T) (*tools.FakeEtcdClient, ipallocator.Interface, allocator.Interface) { + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) _, cidr, err := net.ParseCIDR("192.168.1.0/24") if err != nil { t.Fatal(err) @@ -57,7 +47,14 @@ func newStorage(t *testing.T) (ipallocator.Interface, allocator.Interface, *tool return etcd }) - return storage, backing, fakeEtcdClient + return fakeClient, storage, backing +} + +func validNewRangeAllocation() *api.RangeAllocation { + _, cidr, _ := net.ParseCIDR("192.168.1.0/24") + return &api.RangeAllocation{ + Range: cidr.String(), + } } func key() string { @@ -66,44 +63,30 @@ func key() string { } func TestEmpty(t *testing.T) { - storage, _, ecli := newStorage(t) - ecli.ExpectNotFoundGet(key()) + fakeClient, storage, _ := newStorage(t) + fakeClient.ExpectNotFoundGet(key()) if err := storage.Allocate(net.ParseIP("192.168.1.2")); !strings.Contains(err.Error(), "cannot allocate 
resources of type serviceipallocation at this time") { t.Fatal(err) } } func TestErrors(t *testing.T) { - storage, _, _ := newStorage(t) + _, storage, _ := newStorage(t) if err := storage.Allocate(net.ParseIP("192.168.0.0")); err != ipallocator.ErrNotInRange { t.Fatal(err) } } -func initialObject(ecli *tools.FakeEtcdClient) { - _, cidr, _ := net.ParseCIDR("192.168.1.0/24") - ecli.Data[key()] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - CreatedIndex: 1, - ModifiedIndex: 2, - Value: runtime.EncodeOrDie(testapi.Codec(), &api.RangeAllocation{ - Range: cidr.String(), - }), - }, - }, - E: nil, - } -} - func TestStore(t *testing.T) { - storage, r, ecli := newStorage(t) - initialObject(ecli) + fakeClient, storage, backing := newStorage(t) + if _, err := fakeClient.Set(key(), runtime.EncodeOrDie(testapi.Codec(), validNewRangeAllocation()), 0); err != nil { + t.Fatalf("unexpected error: %v", err) + } if err := storage.Allocate(net.ParseIP("192.168.1.2")); err != nil { t.Fatal(err) } - ok, err := r.Allocate(1) + ok, err := backing.Allocate(1) if err != nil { t.Fatal(err) } @@ -114,7 +97,7 @@ func TestStore(t *testing.T) { t.Fatal(err) } - obj := ecli.Data[key()] + obj := fakeClient.Data[key()] if obj.R == nil || obj.R.Node == nil { t.Fatalf("%s is empty: %#v", key(), obj) } diff --git a/pkg/registry/service/registry_test.go b/pkg/registry/service/registry_test.go deleted file mode 100644 index 7453757008f..00000000000 --- a/pkg/registry/service/registry_test.go +++ /dev/null @@ -1,335 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package service - -import ( - "strconv" - "testing" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - etcdgeneric "k8s.io/kubernetes/pkg/registry/generic/etcd" - etcdservice "k8s.io/kubernetes/pkg/registry/service/etcd" - "k8s.io/kubernetes/pkg/runtime" - etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" - "k8s.io/kubernetes/pkg/tools" - "k8s.io/kubernetes/pkg/tools/etcdtest" - "k8s.io/kubernetes/pkg/util" - - "github.com/coreos/go-etcd/etcd" -) - -func NewTestEtcdRegistry(client tools.EtcdClient) (Registry, *etcdservice.REST) { - storage := etcdstorage.NewEtcdStorage(client, testapi.Codec(), etcdtest.PathPrefix()) - rest := etcdservice.NewREST(storage) - registry := NewRegistry(rest) - return registry, rest -} - -func makeTestService(name string) *api.Service { - return &api.Service{ - ObjectMeta: api.ObjectMeta{Name: name, Namespace: "default"}, - Spec: api.ServiceSpec{ - Ports: []api.ServicePort{ - {Name: "port", Protocol: api.ProtocolTCP, Port: 12345, TargetPort: util.NewIntOrStringFromInt(12345)}, - }, - Type: api.ServiceTypeClusterIP, - SessionAffinity: api.ServiceAffinityNone, - }, - } -} - -func TestEtcdListServicesNotFound(t *testing.T) { - fakeClient := tools.NewFakeEtcdClient(t) - registry, rest := NewTestEtcdRegistry(fakeClient) - ctx := api.NewDefaultContext() - key := rest.KeyRootFunc(ctx) - key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{}, - E: tools.EtcdErrorNotFound, - } - services, err := registry.ListServices(ctx, labels.Everything(), fields.Everything()) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - if len(services.Items) != 0 { - t.Errorf("Unexpected services list: %#v", services) - } -} - -func TestEtcdListServices(t *testing.T) { - ctx := api.NewDefaultContext() - fakeClient := tools.NewFakeEtcdClient(t) - registry, rest := NewTestEtcdRegistry(fakeClient) - key := rest.KeyRootFunc(ctx) - key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Nodes: []*etcd.Node{ - { - Value: runtime.EncodeOrDie(testapi.Codec(), makeTestService("foo")), - }, - { - Value: runtime.EncodeOrDie(testapi.Codec(), makeTestService("bar")), - }, - }, - }, - }, - E: nil, - } - services, err := registry.ListServices(ctx, labels.Everything(), fields.Everything()) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - if len(services.Items) != 2 || services.Items[0].Name != "foo" || services.Items[1].Name != "bar" { - t.Errorf("Unexpected service list: %#v", services) - } -} - -func TestEtcdCreateService(t *testing.T) { - ctx := api.NewDefaultContext() - fakeClient := tools.NewFakeEtcdClient(t) - registry, rest := NewTestEtcdRegistry(fakeClient) - _, err := registry.CreateService(ctx, makeTestService("foo")) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - key, _ := rest.KeyFunc(ctx, "foo") - key = etcdtest.AddPrefix(key) - resp, err := fakeClient.Get(key, false, false) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - var service api.Service - err = testapi.Codec().DecodeInto([]byte(resp.Node.Value), &service) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - if service.Name != "foo" { - t.Errorf("Unexpected service: %#v %s", service, resp.Node.Value) - } -} - -func TestEtcdCreateServiceAlreadyExisting(t *testing.T) { - ctx := api.NewDefaultContext() - fakeClient 
:= tools.NewFakeEtcdClient(t) - registry, rest := NewTestEtcdRegistry(fakeClient) - key, _ := rest.KeyFunc(ctx, "foo") - key = etcdtest.AddPrefix(key) - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), makeTestService("foo")), 0) - _, err := registry.CreateService(ctx, makeTestService("foo")) - if !errors.IsAlreadyExists(err) { - t.Errorf("expected already exists err, got %#v", err) - } -} - -// TestEtcdGetServiceDifferentNamespace ensures same-name services in different namespaces do not clash -func TestEtcdGetServiceDifferentNamespace(t *testing.T) { - fakeClient := tools.NewFakeEtcdClient(t) - registry, rest := NewTestEtcdRegistry(fakeClient) - - ctx1 := api.NewDefaultContext() - ctx2 := api.WithNamespace(api.NewContext(), "other") - - key1, _ := rest.KeyFunc(ctx1, "foo") - key2, _ := rest.KeyFunc(ctx2, "foo") - - key1 = etcdtest.AddPrefix(key1) - key2 = etcdtest.AddPrefix(key2) - - fakeClient.Set(key1, runtime.EncodeOrDie(testapi.Codec(), &api.Service{ObjectMeta: api.ObjectMeta{Namespace: "default", Name: "foo"}}), 0) - fakeClient.Set(key2, runtime.EncodeOrDie(testapi.Codec(), &api.Service{ObjectMeta: api.ObjectMeta{Namespace: "other", Name: "foo"}}), 0) - - service1, err := registry.GetService(ctx1, "foo") - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if service1.Name != "foo" { - t.Errorf("Unexpected service: %#v", service1) - } - if service1.Namespace != "default" { - t.Errorf("Unexpected service: %#v", service1) - } - - service2, err := registry.GetService(ctx2, "foo") - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if service2.Name != "foo" { - t.Errorf("Unexpected service: %#v", service2) - } - if service2.Namespace != "other" { - t.Errorf("Unexpected service: %#v", service2) - } - -} - -func TestEtcdGetService(t *testing.T) { - ctx := api.NewDefaultContext() - fakeClient := tools.NewFakeEtcdClient(t) - registry, rest := NewTestEtcdRegistry(fakeClient) - key, _ := rest.KeyFunc(ctx, "foo") - key = etcdtest.AddPrefix(key) - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), makeTestService("foo")), 0) - service, err := registry.GetService(ctx, "foo") - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - if service.Name != "foo" { - t.Errorf("Unexpected service: %#v", service) - } -} - -func TestEtcdGetServiceNotFound(t *testing.T) { - ctx := api.NewDefaultContext() - fakeClient := tools.NewFakeEtcdClient(t) - registry, rest := NewTestEtcdRegistry(fakeClient) - key, _ := rest.KeyFunc(ctx, "foo") - key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: nil, - }, - E: tools.EtcdErrorNotFound, - } - _, err := registry.GetService(ctx, "foo") - if !errors.IsNotFound(err) { - t.Errorf("Unexpected error returned: %#v", err) - } -} - -func TestEtcdDeleteService(t *testing.T) { - ctx := api.NewDefaultContext() - fakeClient := tools.NewFakeEtcdClient(t) - registry, _ := NewTestEtcdRegistry(fakeClient) - key, _ := etcdgeneric.NamespaceKeyFunc(ctx, "/services/specs", "foo") - key = etcdtest.AddPrefix(key) - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), makeTestService("foo")), 0) - path, _ := etcdgeneric.NamespaceKeyFunc(ctx, "/services/endpoints", "foo") - endpointsKey := etcdtest.AddPrefix(path) - fakeClient.Set(endpointsKey, runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{ObjectMeta: api.ObjectMeta{Name: "foo"}}), 0) - - err := registry.DeleteService(ctx, "foo") - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - if len(fakeClient.DeletedKeys) != 
1 { - t.Errorf("Expected 2 delete, found %#v", fakeClient.DeletedKeys) - } - if fakeClient.DeletedKeys[0] != key { - t.Errorf("Unexpected key: %s, expected %s", fakeClient.DeletedKeys[0], key) - } -} - -func TestEtcdUpdateService(t *testing.T) { - ctx := api.NewDefaultContext() - fakeClient := tools.NewFakeEtcdClient(t) - fakeClient.TestIndex = true - registry, rest := NewTestEtcdRegistry(fakeClient) - key, _ := rest.KeyFunc(ctx, "uniquefoo") - key = etcdtest.AddPrefix(key) - resp, _ := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), makeTestService("uniquefoo")), 0) - testService := api.Service{ - ObjectMeta: api.ObjectMeta{ - Name: "uniquefoo", - ResourceVersion: strconv.FormatUint(resp.Node.ModifiedIndex, 10), - Labels: map[string]string{ - "baz": "bar", - }, - }, - Spec: api.ServiceSpec{ - Ports: []api.ServicePort{ - {Name: "port", Protocol: api.ProtocolTCP, Port: 12345, TargetPort: util.NewIntOrStringFromInt(12345)}, - }, - Selector: map[string]string{ - "baz": "bar", - }, - SessionAffinity: "None", - Type: api.ServiceTypeClusterIP, - }, - } - _, err := registry.UpdateService(ctx, &testService) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - svc, err := registry.GetService(ctx, "uniquefoo") - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - // Clear modified indices before the equality test. - svc.ResourceVersion = "" - testService.ResourceVersion = "" - if !api.Semantic.DeepEqual(*svc, testService) { - t.Errorf("Unexpected service: got\n %#v\n, wanted\n %#v", svc, testService) - } -} - -func TestEtcdWatchServices(t *testing.T) { - ctx := api.NewDefaultContext() - fakeClient := tools.NewFakeEtcdClient(t) - registry, _ := NewTestEtcdRegistry(fakeClient) - watching, err := registry.WatchServices(ctx, - labels.Everything(), - fields.SelectorFromSet(fields.Set{"name": "foo"}), - "1", - ) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - fakeClient.WaitForWatchCompletion() - - select { - case _, ok := <-watching.ResultChan(): - if !ok { - t.Errorf("watching channel should be open") - } - default: - } - fakeClient.WatchInjectError <- nil - if _, ok := <-watching.ResultChan(); ok { - t.Errorf("watching channel should be closed") - } - watching.Stop() -} - -// TODO We need a test for the compare and swap behavior. This basically requires two things: -// 1) Add a per-operation synchronization channel to the fake etcd client, such that any operation waits on that -// channel, this will enable us to orchestrate the flow of etcd requests in the test. -// 2) We need to make the map from key to (response, error) actually be a [](response, error) and pop -// our way through the responses. That will enable us to hand back multiple different responses for -// the same key. -// Once that infrastructure is in place, the test looks something like: -// Routine #1 Routine #2 -// Read -// Wait for sync on update Read -// Update -// Update -// In the buggy case, this will result in lost data. In the correct case, the second update should fail -// and be retried. 
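The hand-rolled go-etcd fixtures in the deleted service registry test above are exactly the kind of plumbing the new `registrytest` helpers absorb. As a hedged illustration (not part of this patch), adding Get and List coverage to the rewritten event tests would now take only a few lines, reusing the `newStorage` and `validNewEvent` helpers defined earlier in this patch:

```go
// Illustrative sketch only — assumes the newStorage and validNewEvent helpers
// from the rewritten pkg/registry/event/etcd/etcd_test.go above.
func TestGet(t *testing.T) {
	storage, fakeClient := newStorage(t)
	test := registrytest.New(t, fakeClient, storage.Etcd)
	// The shared helper handles seeding the fake etcd client and checking the result.
	test.TestGet(validNewEvent(test.TestNamespace()))
}

func TestList(t *testing.T) {
	storage, fakeClient := newStorage(t)
	test := registrytest.New(t, fakeClient, storage.Etcd)
	// Listing is exercised against the same fake client; no go-etcd types are needed.
	test.TestList(validNewEvent(test.TestNamespace()))
}
```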
diff --git a/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go b/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go index 41d6b98c971..6dc2bd81643 100644 --- a/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go +++ b/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go @@ -20,36 +20,19 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/rest/resttest" - "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/expapi" - "k8s.io/kubernetes/pkg/expapi/v1" + // Ensure that expapi/v1 package is initialized. + _ "k8s.io/kubernetes/pkg/expapi/v1" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/storage" - etcdstorage "k8s.io/kubernetes/pkg/storage/etcd" "k8s.io/kubernetes/pkg/tools" - "k8s.io/kubernetes/pkg/tools/etcdtest" - - "github.com/coreos/go-etcd/etcd" ) -var scheme *runtime.Scheme -var codec runtime.Codec - -func init() { - // Ensure that expapi/v1 packege is used, so that it will get initialized and register HorizontalPodAutoscaler object. - _ = v1.ThirdPartyResourceData{} -} - -func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient, storage.Interface) { - fakeEtcdClient := tools.NewFakeEtcdClient(t) - fakeEtcdClient.TestIndex = true - etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) - storage := NewREST(etcdStorage, "foo", "bar") - return storage, fakeEtcdClient, etcdStorage +func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + return NewREST(etcdStorage, "foo", "bar"), fakeClient } func validNewThirdPartyResourceData(name string) *expapi.ThirdPartyResourceData { @@ -63,8 +46,8 @@ func validNewThirdPartyResourceData(name string) *expapi.ThirdPartyResourceData } func TestCreate(t *testing.T) { - storage, fakeEtcdClient, _ := newStorage(t) - test := registrytest.New(t, fakeEtcdClient, storage.Etcd) + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) rsrc := validNewThirdPartyResourceData("foo") rsrc.ObjectMeta = api.ObjectMeta{} test.TestCreate( @@ -76,7 +59,7 @@ func TestCreate(t *testing.T) { } func TestUpdate(t *testing.T) { - storage, fakeClient, _ := newStorage(t) + storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) test.TestUpdate( // valid @@ -90,75 +73,41 @@ func TestUpdate(t *testing.T) { ) } -func TestGet(t *testing.T) { - storage, fakeEtcdClient, _ := newStorage(t) - test := resttest.New(t, storage, fakeEtcdClient.SetError) - rsrc := validNewThirdPartyResourceData("foo") - test.TestGet(rsrc) +func TestDelete(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestDelete(validNewThirdPartyResourceData("foo")) } -func TestEmptyList(t *testing.T) { - ctx := api.NewDefaultContext() - registry, fakeClient, _ := newStorage(t) - fakeClient.ChangeIndex = 1 - key := registry.KeyRootFunc(ctx) - key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{}, - E: fakeClient.NewError(tools.EtcdErrorCodeNotFound), - } - rsrcList, err := registry.List(ctx, labels.Everything(), fields.Everything()) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if len(rsrcList.(*expapi.ThirdPartyResourceDataList).Items) != 0 { - t.Errorf("Unexpected non-zero autoscaler list: %#v", rsrcList) - } - if 
rsrcList.(*expapi.ThirdPartyResourceDataList).ResourceVersion != "1" { - t.Errorf("Unexpected resource version: %#v", rsrcList) - } +func TestGet(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestGet(validNewThirdPartyResourceData("foo")) } func TestList(t *testing.T) { - ctx := api.NewDefaultContext() - registry, fakeClient, _ := newStorage(t) - fakeClient.ChangeIndex = 1 - key := registry.KeyRootFunc(ctx) - key = etcdtest.AddPrefix(key) - fakeClient.Data[key] = tools.EtcdResponseWithError{ - R: &etcd.Response{ - Node: &etcd.Node{ - Nodes: []*etcd.Node{ - { - Value: runtime.EncodeOrDie(testapi.Codec(), &expapi.ThirdPartyResourceData{ - ObjectMeta: api.ObjectMeta{Name: "foo"}, - }), - }, - { - Value: runtime.EncodeOrDie(testapi.Codec(), &expapi.ThirdPartyResourceData{ - ObjectMeta: api.ObjectMeta{Name: "bar"}, - }), - }, - }, - }, - }, - } - obj, err := registry.List(ctx, labels.Everything(), fields.Everything()) - if err != nil { - t.Fatalf("Unexpected error %v", err) - } - rsrcList := obj.(*expapi.ThirdPartyResourceDataList) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if len(rsrcList.Items) != 2 { - t.Errorf("Unexpected ThirdPartyResourceData list: %#v", rsrcList) - } - if rsrcList.Items[0].Name != "foo" { - t.Errorf("Unexpected ThirdPartyResourceData: %#v", rsrcList.Items[0]) - } - if rsrcList.Items[1].Name != "bar" { - t.Errorf("Unexpected ThirdPartyResourceData: %#v", rsrcList.Items[1]) - } + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestList(validNewThirdPartyResourceData("foo")) +} + +func TestWatch(t *testing.T) { + storage, fakeClient := newStorage(t) + test := registrytest.New(t, fakeClient, storage.Etcd) + test.TestWatch( + validNewThirdPartyResourceData("foo"), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"foo": "bar"}, + }, + // matching fields + []fields.Set{}, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + {"name": "foo"}, + }, + ) } From b813ebadee3e4fdafed515df2c6f298ff13d1ef1 Mon Sep 17 00:00:00 2001 From: Piotr Szczesniak Date: Fri, 4 Sep 2015 10:26:19 +0200 Subject: [PATCH 049/101] Revert "GCE tokens behavior to new format" --- cluster/gce/configure-vm.sh | 3 +-- pkg/cloudprovider/providers/gce/gce.go | 3 +-- pkg/cloudprovider/providers/gce/token_source.go | 13 +++++-------- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh index c199a6643c5..6e41453649f 100644 --- a/cluster/gce/configure-vm.sh +++ b/cluster/gce/configure-vm.sh @@ -540,11 +540,10 @@ grains: - kubernetes-master cloud: gce EOF - if ! [[ -z "${PROJECT_ID:-}" ]] && ! [[ -z "${TOKEN_URL:-}" ]] && ! [[ -z "${TOKEN_BODY:-}" ]] && ! [[ -z "${NODE_NETWORK:-}" ]] ; then + if ! [[ -z "${PROJECT_ID:-}" ]] && ! [[ -z "${TOKEN_URL:-}" ]] && ! 
[[ -z "${NODE_NETWORK:-}" ]] ; then cat </etc/gce.conf [global] token-url = ${TOKEN_URL} -token-body = ${TOKEN_BODY} project-id = ${PROJECT_ID} network-name = ${NODE_NETWORK} EOF diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index 3829956b98e..fbe172ab23f 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -61,7 +61,6 @@ type GCECloud struct { type Config struct { Global struct { TokenURL string `gcfg:"token-url"` - TokenBody string `gcfg:"token-body"` ProjectID string `gcfg:"project-id"` NetworkName string `gcfg:"network-name"` } @@ -160,7 +159,7 @@ func newGCECloud(config io.Reader) (*GCECloud, error) { } } if cfg.Global.TokenURL != "" { - tokenSource = newAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody) + tokenSource = newAltTokenSource(cfg.Global.TokenURL) } } client := oauth2.NewClient(oauth2.NoContext, tokenSource) diff --git a/pkg/cloudprovider/providers/gce/token_source.go b/pkg/cloudprovider/providers/gce/token_source.go index e5e327d03c8..4bf33246ca0 100644 --- a/pkg/cloudprovider/providers/gce/token_source.go +++ b/pkg/cloudprovider/providers/gce/token_source.go @@ -19,7 +19,6 @@ package gce_cloud import ( "encoding/json" "net/http" - "strings" "time" "k8s.io/kubernetes/pkg/util" @@ -60,7 +59,6 @@ func init() { type altTokenSource struct { oauthClient *http.Client tokenURL string - tokenBody string throttle util.RateLimiter } @@ -75,7 +73,7 @@ func (a *altTokenSource) Token() (*oauth2.Token, error) { } func (a *altTokenSource) token() (*oauth2.Token, error) { - req, err := http.NewRequest("POST", a.tokenURL, strings.NewReader(a.tokenBody)) + req, err := http.NewRequest("GET", a.tokenURL, nil) if err != nil { return nil, err } @@ -88,24 +86,23 @@ func (a *altTokenSource) token() (*oauth2.Token, error) { return nil, err } var tok struct { - AccessToken string `json:"accessToken"` - ExpireTime time.Time `json:"expireTime"` + AccessToken string `json:"accessToken"` + ExpiryTimeSeconds int64 `json:"expiryTimeSeconds,string"` } if err := json.NewDecoder(res.Body).Decode(&tok); err != nil { return nil, err } return &oauth2.Token{ AccessToken: tok.AccessToken, - Expiry: tok.ExpireTime, + Expiry: time.Unix(tok.ExpiryTimeSeconds, 0), }, nil } -func newAltTokenSource(tokenURL, tokenBody string) oauth2.TokenSource { +func newAltTokenSource(tokenURL string) oauth2.TokenSource { client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource("")) a := &altTokenSource{ oauthClient: client, tokenURL: tokenURL, - tokenBody: tokenBody, throttle: util.NewTokenBucketRateLimiter(tokenURLQPS, tokenURLBurst), } return oauth2.ReuseTokenSource(nil, a) From 0a64995b7b6cb2e21610743c4ee599b40e7c9971 Mon Sep 17 00:00:00 2001 From: Paulo Pires Date: Fri, 21 Aug 2015 12:30:11 +0100 Subject: [PATCH 050/101] Revamped Elasticsearch example that now uses an Alpine Linux container with JRE 8u51 and Elasticsearch 1.7.1. Replaced Go discovery mechanism for Elasticsearch discovery plug-in that supports Kubernetes. 
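Before moving on to the Elasticsearch changes below, note the wire format that the reverted token source in PATCH 049 expects again: a plain GET whose JSON body carries the access token and a string-encoded epoch expiry (`expiryTimeSeconds`) rather than an RFC 3339 `expireTime`. A minimal, self-contained sketch of just the decoding step, using a made-up payload:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Hypothetical response body; real values come from the configured token-url.
	body := []byte(`{"accessToken": "ya29.example", "expiryTimeSeconds": "1441380000"}`)

	// Same shape as the anonymous struct in token_source.go; the ",string"
	// tag is needed because the endpoint returns epoch seconds as a JSON string.
	var tok struct {
		AccessToken       string `json:"accessToken"`
		ExpiryTimeSeconds int64  `json:"expiryTimeSeconds,string"`
	}
	if err := json.Unmarshal(body, &tok); err != nil {
		panic(err)
	}

	expiry := time.Unix(tok.ExpiryTimeSeconds, 0)
	fmt.Printf("token %q expires at %s\n", tok.AccessToken, expiry.UTC())
}
```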
--- examples/elasticsearch/Dockerfile | 18 - examples/elasticsearch/Makefile | 14 - examples/elasticsearch/README.md | 415 +++++------------- examples/elasticsearch/elasticsearch.yml | 385 ---------------- .../elasticsearch/elasticsearch_discovery.go | 79 ---- examples/elasticsearch/es-rc.yaml | 54 +++ examples/elasticsearch/es-svc.yaml | 17 + examples/elasticsearch/music-rc.yaml | 32 -- examples/elasticsearch/music-service.yaml | 15 - examples/elasticsearch/mytunes-namespace.yaml | 6 - .../production_cluster/README.md | 222 ++++++++++ .../production_cluster/es-client-rc.yaml | 55 +++ .../production_cluster/es-data-rc.yaml | 50 +++ .../production_cluster/es-discovery-svc.yaml | 15 + .../production_cluster/es-master-rc.yaml | 52 +++ .../production_cluster/es-svc.yaml | 16 + .../production_cluster/service-account.yaml | 4 + examples/elasticsearch/run.sh | 24 - examples/elasticsearch/service-account.yaml | 4 + examples/examples_test.go | 6 +- hack/verify-flags/exceptions.txt | 8 +- 21 files changed, 618 insertions(+), 873 deletions(-) delete mode 100644 examples/elasticsearch/Dockerfile delete mode 100644 examples/elasticsearch/Makefile delete mode 100644 examples/elasticsearch/elasticsearch.yml delete mode 100644 examples/elasticsearch/elasticsearch_discovery.go create mode 100644 examples/elasticsearch/es-rc.yaml create mode 100644 examples/elasticsearch/es-svc.yaml delete mode 100644 examples/elasticsearch/music-rc.yaml delete mode 100644 examples/elasticsearch/music-service.yaml delete mode 100644 examples/elasticsearch/mytunes-namespace.yaml create mode 100644 examples/elasticsearch/production_cluster/README.md create mode 100644 examples/elasticsearch/production_cluster/es-client-rc.yaml create mode 100644 examples/elasticsearch/production_cluster/es-data-rc.yaml create mode 100644 examples/elasticsearch/production_cluster/es-discovery-svc.yaml create mode 100644 examples/elasticsearch/production_cluster/es-master-rc.yaml create mode 100644 examples/elasticsearch/production_cluster/es-svc.yaml create mode 100644 examples/elasticsearch/production_cluster/service-account.yaml delete mode 100755 examples/elasticsearch/run.sh create mode 100644 examples/elasticsearch/service-account.yaml diff --git a/examples/elasticsearch/Dockerfile b/examples/elasticsearch/Dockerfile deleted file mode 100644 index fd47488abcc..00000000000 --- a/examples/elasticsearch/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM java:7-jre - -RUN apt-get update && \ - apt-get install -y curl && \ - apt-get clean - -RUN cd / && \ - curl -O https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.5.2.tar.gz && \ - tar xf elasticsearch-1.5.2.tar.gz && \ - rm elasticsearch-1.5.2.tar.gz - -COPY elasticsearch.yml /elasticsearch-1.5.2/config/elasticsearch.yml -COPY run.sh / -COPY elasticsearch_discovery / - -EXPOSE 9200 9300 - -CMD ["/run.sh"] \ No newline at end of file diff --git a/examples/elasticsearch/Makefile b/examples/elasticsearch/Makefile deleted file mode 100644 index 6da1855353e..00000000000 --- a/examples/elasticsearch/Makefile +++ /dev/null @@ -1,14 +0,0 @@ -.PHONY: elasticsearch_discovery build push all - -TAG = 1.2 - -build: elasticsearch_discovery - docker build -t kubernetes/elasticsearch:$(TAG) . 
- -push: - docker push kubernetes/elasticsearch:$(TAG) - -elasticsearch_discovery: - go build elasticsearch_discovery.go - -all: elasticsearch_discovery build push diff --git a/examples/elasticsearch/README.md b/examples/elasticsearch/README.md index 6da92054de5..c15c3f763cb 100644 --- a/examples/elasticsearch/README.md +++ b/examples/elasticsearch/README.md @@ -33,206 +33,130 @@ Documentation for other releases can be found at # Elasticsearch for Kubernetes -This directory contains the source for a Docker image that creates an instance -of [Elasticsearch](https://www.elastic.co/products/elasticsearch) 1.5.2 which can -be used to automatically form clusters when used -with [replication controllers](../../docs/user-guide/replication-controller.md). This will not work with the library Elasticsearch image -because multicast discovery will not find the other pod IPs needed to form a cluster. This -image detects other Elasticsearch [pods](../../docs/user-guide/pods.md) running in a specified [namespace](../../docs/user-guide/namespaces.md) with a given -label selector. The detected instances are used to form a list of peer hosts which -are used as part of the unicast discovery mechanism for Elasticsearch. The detection -of the peer nodes is done by a program which communicates with the Kubernetes API -server to get a list of matching Elasticsearch pods. +Kubernetes makes it trivial for anyone to easily build and scale [Elasticsearch](http://www.elasticsearch.org/) clusters. Here, you'll find how to do so. +Current Elasticsearch version is `1.7.1`. -Here is an example replication controller specification that creates 4 instances of Elasticsearch. +[A more robust example that follows Elasticsearch best-practices of separating nodes concern is also available](production_cluster/README.md). - +WARNING Current pod descriptors use an `emptyDir` for storing data in each data node container. This is meant to be for the sake of simplicity and [should be adapted according to your storage needs](../../docs/design/persistent-storage.md). -```yaml -apiVersion: v1 -kind: ReplicationController -metadata: - labels: - name: music-db - namespace: mytunes - name: music-db -spec: - replicas: 4 - selector: - name: music-db - template: - metadata: - labels: - name: music-db - spec: - containers: - - name: es - image: kubernetes/elasticsearch:1.2 - env: - - name: "CLUSTER_NAME" - value: "mytunes-db" - - name: "SELECTOR" - value: "name=music-db" - - name: "NAMESPACE" - value: "mytunes" - ports: - - name: es - containerPort: 9200 - - name: es-transport - containerPort: 9300 +## Docker image + +This example uses [this pre-built image](https://github.com/pires/docker-elasticsearch-kubernetes) will not be supported. Feel free to fork to fit your own needs, but mind yourself that you will need to change Kubernetes descriptors accordingly. + +## Deploy + +Let's kickstart our cluster with 1 instance of Elasticsearch. + +``` +kubectl create -f examples/elasticsearch/service-account.yaml +kubectl create -f examples/elasticsearch/es-svc.yaml +kubectl create -f examples/elasticsearch/es-rc.yaml ``` -[Download example](music-rc.yaml) - +Let's see if it worked: -The `CLUSTER_NAME` variable gives a name to the cluster and allows multiple separate clusters to -exist in the same namespace. -The `SELECTOR` variable should be set to a label query that identifies the Elasticsearch -nodes that should participate in this cluster. 
For our example we specify `name=music-db` to -match all pods that have the label `name` set to the value `music-db`. -The `NAMESPACE` variable identifies the namespace -to be used to search for Elasticsearch pods and this should be the same as the namespace specified -for the replication controller (in this case `mytunes`). - - -Replace `NAMESPACE` with the actual namespace to be used. In this example we shall use -the namespace `mytunes`. - -```yaml -kind: Namespace -apiVersion: v1 -metadata: - name: mytunes - labels: - name: mytunes ``` - -First, let's create the namespace: - -```console -$ kubectl create -f examples/elasticsearch/mytunes-namespace.yaml -namespaces/mytunes -``` - -Now you are ready to create the replication controller which will then create the pods: - -```console -$ kubectl create -f examples/elasticsearch/music-rc.yaml --namespace=mytunes -replicationcontrollers/music-db -``` - -Let's check to see if the replication controller and pods are running: - -```console -$ kubectl get rc,pods --namespace=mytunes -CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS -music-db es kubernetes/elasticsearch:1.2 name=music-db 4 +$ kubectl get pods NAME READY STATUS RESTARTS AGE -music-db-5p46b 1/1 Running 0 34s -music-db-8re0f 1/1 Running 0 34s -music-db-eq8j0 1/1 Running 0 34s -music-db-uq5px 1/1 Running 0 34s +es-kfymw 1/1 Running 0 7m +kube-dns-p3v1u 3/3 Running 0 19m ``` -It's also useful to have a [service](../../docs/user-guide/services.md) with an load balancer for accessing the Elasticsearch -cluster. - - - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: music-server - namespace: mytunes - labels: - name: music-db -spec: - selector: - name: music-db - ports: - - name: db - port: 9200 - targetPort: es - type: LoadBalancer +``` +$ kubectl logs es-kfymw +log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. +log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. +log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. +[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] version[1.7.1], pid[7], build[b88f43f/2015-07-29T09:54:16Z] +[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] initializing ... +[2015-08-30 10:01:32,110][INFO ][plugins ] [Hammerhead] loaded [cloud-kubernetes], sites [] +[2015-08-30 10:01:32,153][INFO ][env ] [Hammerhead] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4] +[2015-08-30 10:01:37,188][INFO ][node ] [Hammerhead] initialized +[2015-08-30 10:01:37,189][INFO ][node ] [Hammerhead] starting ... +[2015-08-30 10:01:37,499][INFO ][transport ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.48.2:9300]} +[2015-08-30 10:01:37,550][INFO ][discovery ] [Hammerhead] myesdb/n2-6uu_UT3W5XNrjyqBPiA +[2015-08-30 10:01:43,966][INFO ][cluster.service ] [Hammerhead] new_master [Hammerhead][n2-6uu_UT3W5XNrjyqBPiA][es-kfymw][inet[/10.244.48.2:9300]]{master=true}, reason: zen-disco-join (elected_as_master) +[2015-08-30 10:01:44,010][INFO ][http ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.244.48.2:9200]} +[2015-08-30 10:01:44,011][INFO ][node ] [Hammerhead] started +[2015-08-30 10:01:44,042][INFO ][gateway ] [Hammerhead] recovered [0] indices into cluster_state ``` -[Download example](music-service.yaml) - +So we have a 1-node Elasticsearch cluster ready to handle some work. 
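If you would rather verify the node from code than from `curl`, a small standalone Go sketch that hits the same HTTP endpoint and reads the banner fields shown in the examples below could look like this (the address is a placeholder for the service IP reported by `kubectl get service elasticsearch`, and, like the curl examples, it has to run from somewhere that can reach the cluster network):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholder address: substitute your Elasticsearch service ClusterIP.
	resp, err := http.Get("http://10.100.108.94:9200/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the banner fields Elasticsearch returns on "/".
	var banner struct {
		Status      int    `json:"status"`
		Name        string `json:"name"`
		ClusterName string `json:"cluster_name"`
		Version     struct {
			Number string `json:"number"`
		} `json:"version"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&banner); err != nil {
		panic(err)
	}
	fmt.Printf("node %s in cluster %s, Elasticsearch %s (HTTP %d)\n",
		banner.Name, banner.ClusterName, banner.Version.Number, banner.Status)
}
```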
-Let's create the service with an external load balancer: +## Scale -```console -$ kubectl create -f examples/elasticsearch/music-service.yaml --namespace=mytunes -services/music-server -``` - -Let's check the status of the service: - -```console -$ kubectl get service --namespace=mytunes -NAME LABELS SELECTOR IP(S) PORT(S) -music-server name=music-db name=music-db 10.0.185.179 9200/TCP +Scaling is as easy as: ``` - -Although this service has an IP address `10.0.185.179` internal to the cluster we don't yet have -an external IP address provisioned. Let's wait a bit and try again... - -```console -$ kubectl get service --namespace=mytunes -NAME LABELS SELECTOR IP(S) PORT(S) -music-server name=music-db name=music-db 10.0.185.179 9200/TCP - 104.197.114.130 +kubectl scale --replicas=3 rc es ``` -Now we have an external IP address `104.197.114.130` available for accessing the service -from outside the cluster. +Did it work? -Let's see what we've got: - -```console -$ kubectl get pods,rc,services --namespace=mytunes +``` +$ kubectl get pods NAME READY STATUS RESTARTS AGE -music-db-5p46b 1/1 Running 0 7m -music-db-8re0f 1/1 Running 0 7m -music-db-eq8j0 1/1 Running 0 7m -music-db-uq5px 1/1 Running 0 7m -CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS -music-db es kubernetes/elasticsearch:1.2 name=music-db 4 -NAME LABELS SELECTOR IP(S) PORT(S) -music-server name=music-db name=music-db 10.0.185.179 9200/TCP - 104.197.114.130 -NAME TYPE DATA -default-token-gcilu kubernetes.io/service-account-token 2 +es-78e0s 1/1 Running 0 8m +es-kfymw 1/1 Running 0 17m +es-rjmer 1/1 Running 0 8m +kube-dns-p3v1u 3/3 Running 0 30m ``` -This shows 4 instances of Elasticsearch running. After making sure that port 9200 is accessible for this cluster (e.g. using a firewall rule for Google Compute Engine) we can make queries via the service which will be fielded by the matching Elasticsearch pods. +Let's take a look at logs: -```console -$ curl 104.197.114.130:9200 +``` +$ kubectl logs es-kfymw +log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. +log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. +log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. +[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] version[1.7.1], pid[7], build[b88f43f/2015-07-29T09:54:16Z] +[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] initializing ... +[2015-08-30 10:01:32,110][INFO ][plugins ] [Hammerhead] loaded [cloud-kubernetes], sites [] +[2015-08-30 10:01:32,153][INFO ][env ] [Hammerhead] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4] +[2015-08-30 10:01:37,188][INFO ][node ] [Hammerhead] initialized +[2015-08-30 10:01:37,189][INFO ][node ] [Hammerhead] starting ... 
+[2015-08-30 10:01:37,499][INFO ][transport ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.48.2:9300]} +[2015-08-30 10:01:37,550][INFO ][discovery ] [Hammerhead] myesdb/n2-6uu_UT3W5XNrjyqBPiA +[2015-08-30 10:01:43,966][INFO ][cluster.service ] [Hammerhead] new_master [Hammerhead][n2-6uu_UT3W5XNrjyqBPiA][es-kfymw][inet[/10.244.48.2:9300]]{master=true}, reason: zen-disco-join (elected_as_master) +[2015-08-30 10:01:44,010][INFO ][http ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.244.48.2:9200]} +[2015-08-30 10:01:44,011][INFO ][node ] [Hammerhead] started +[2015-08-30 10:01:44,042][INFO ][gateway ] [Hammerhead] recovered [0] indices into cluster_state +[2015-08-30 10:08:02,517][INFO ][cluster.service ] [Hammerhead] added {[Tenpin][2gv5MiwhRiOSsrTOF3DhuA][es-78e0s][inet[/10.244.54.4:9300]]{master=true},}, reason: zen-disco-receive(join from node[[Tenpin][2gv5MiwhRiOSsrTOF3DhuA][es-78e0s][inet[/10.244.54.4:9300]]{master=true}]) +[2015-08-30 10:10:10,645][INFO ][cluster.service ] [Hammerhead] added {[Evilhawk][ziTq2PzYRJys43rNL2tbyg][es-rjmer][inet[/10.244.33.3:9300]]{master=true},}, reason: zen-disco-receive(join from node[[Evilhawk][ziTq2PzYRJys43rNL2tbyg][es-rjmer][inet[/10.244.33.3:9300]]{master=true}]) +``` + +So we have a 3-node Elasticsearch cluster ready to handle more work. + +## Access the service + +*Don't forget* that services in Kubernetes are only acessible from containers in the cluster. For different behavior you should [configure the creation of an external load-balancer](http://kubernetes.io/v1.0/docs/user-guide/services.html#type-loadbalancer). While it's supported within this example service descriptor, its usage is out of scope of this document, for now. + +``` +$ kubectl get service elasticsearch +NAME LABELS SELECTOR IP(S) PORT(S) +elasticsearch component=elasticsearch component=elasticsearch 10.100.108.94 9200/TCP + 9300/TCP +``` + +From any host on your cluster (that's running `kube-proxy`), run: + +``` +$ curl 10.100.108.94:9200 +``` + +You should see something similar to the following: + + +```json { "status" : 200, - "name" : "Warpath", - "cluster_name" : "mytunes-db", + "name" : "Hammerhead", + "cluster_name" : "myesdb", "version" : { - "number" : "1.5.2", - "build_hash" : "62ff9868b4c8a0c45860bebb259e21980778ab1c", - "build_timestamp" : "2015-04-27T09:21:06Z", - "build_snapshot" : false, - "lucene_version" : "4.10.4" - }, - "tagline" : "You Know, for Search" -} -$ curl 104.197.114.130:9200 -{ - "status" : 200, - "name" : "Callisto", - "cluster_name" : "mytunes-db", - "version" : { - "number" : "1.5.2", - "build_hash" : "62ff9868b4c8a0c45860bebb259e21980778ab1c", - "build_timestamp" : "2015-04-27T09:21:06Z", + "number" : "1.7.1", + "build_hash" : "b88f43fc40b0bcd7f173a1f9ee2e97816de80b19", + "build_timestamp" : "2015-07-29T09:54:16Z", "build_snapshot" : false, "lucene_version" : "4.10.4" }, @@ -240,128 +164,33 @@ $ curl 104.197.114.130:9200 } ``` -We can query the nodes to confirm that an Elasticsearch cluster has been formed. +Or if you want to check cluster information: -```console -$ curl 104.197.114.130:9200/_nodes?pretty=true + +``` +curl 10.100.108.94:9200/_cluster/health?pretty +``` + +You should see something similar to the following: + +```json { - "cluster_name" : "mytunes-db", - "nodes" : { - "u-KrvywFQmyaH5BulSclsA" : { - "name" : "Jonas Harrow", -... 
- "discovery" : { - "zen" : { - "ping" : { - "unicast" : { - "hosts" : [ "10.244.2.48", "10.244.0.24", "10.244.3.31", "10.244.1.37" ] - }, -... - "name" : "Warpath", -... - "discovery" : { - "zen" : { - "ping" : { - "unicast" : { - "hosts" : [ "10.244.2.48", "10.244.0.24", "10.244.3.31", "10.244.1.37" ] - }, -... - "name" : "Callisto", -... - "discovery" : { - "zen" : { - "ping" : { - "unicast" : { - "hosts" : [ "10.244.2.48", "10.244.0.24", "10.244.3.31", "10.244.1.37" ] - }, -... - "name" : "Vapor", -... - "discovery" : { - "zen" : { - "ping" : { - "unicast" : { - "hosts" : [ "10.244.2.48", "10.244.0.24", "10.244.3.31", "10.244.1.37" ] -... + "cluster_name" : "myesdb", + "status" : "green", + "timed_out" : false, + "number_of_nodes" : 3, + "number_of_data_nodes" : 3, + "active_primary_shards" : 0, + "active_shards" : 0, + "relocating_shards" : 0, + "initializing_shards" : 0, + "unassigned_shards" : 0, + "delayed_unassigned_shards" : 0, + "number_of_pending_tasks" : 0, + "number_of_in_flight_fetch" : 0 +} ``` -Let's ramp up the number of Elasticsearch nodes from 4 to 10: - -```console -$ kubectl scale --replicas=10 replicationcontrollers music-db --namespace=mytunes -scaled -$ kubectl get pods --namespace=mytunes -NAME READY STATUS RESTARTS AGE -music-db-0n8rm 0/1 Running 0 9s -music-db-4izba 1/1 Running 0 9s -music-db-5dqes 0/1 Running 0 9s -music-db-5p46b 1/1 Running 0 10m -music-db-8re0f 1/1 Running 0 10m -music-db-eq8j0 1/1 Running 0 10m -music-db-p9ajw 0/1 Running 0 9s -music-db-p9u1k 1/1 Running 0 9s -music-db-rav1q 0/1 Running 0 9s -music-db-uq5px 1/1 Running 0 10m -``` - -Let's check to make sure that these 10 nodes are part of the same Elasticsearch cluster: - -```console -$ curl 104.197.114.130:9200/_nodes?pretty=true | grep name -"cluster_name" : "mytunes-db", - "name" : "Killraven", - "name" : "Killraven", - "name" : "mytunes-db" - "vm_name" : "OpenJDK 64-Bit Server VM", - "name" : "eth0", - "name" : "Tefral the Surveyor", - "name" : "Tefral the Surveyor", - "name" : "mytunes-db" - "vm_name" : "OpenJDK 64-Bit Server VM", - "name" : "eth0", - "name" : "Jonas Harrow", - "name" : "Jonas Harrow", - "name" : "mytunes-db" - "vm_name" : "OpenJDK 64-Bit Server VM", - "name" : "eth0", - "name" : "Warpath", - "name" : "Warpath", - "name" : "mytunes-db" - "vm_name" : "OpenJDK 64-Bit Server VM", - "name" : "eth0", - "name" : "Brute I", - "name" : "Brute I", - "name" : "mytunes-db" - "vm_name" : "OpenJDK 64-Bit Server VM", - "name" : "eth0", - "name" : "Callisto", - "name" : "Callisto", - "name" : "mytunes-db" - "vm_name" : "OpenJDK 64-Bit Server VM", - "name" : "eth0", - "name" : "Vapor", - "name" : "Vapor", - "name" : "mytunes-db" - "vm_name" : "OpenJDK 64-Bit Server VM", - "name" : "eth0", - "name" : "Timeslip", - "name" : "Timeslip", - "name" : "mytunes-db" - "vm_name" : "OpenJDK 64-Bit Server VM", - "name" : "eth0", - "name" : "Magik", - "name" : "Magik", - "name" : "mytunes-db" - "vm_name" : "OpenJDK 64-Bit Server VM", - "name" : "eth0", - "name" : "Brother Voodoo", - "name" : "Brother Voodoo", - "name" : "mytunes-db" - "vm_name" : "OpenJDK 64-Bit Server VM", - "name" : "eth0", -``` - - [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/elasticsearch/README.md?pixel)]() - \ No newline at end of file + diff --git a/examples/elasticsearch/elasticsearch.yml b/examples/elasticsearch/elasticsearch.yml deleted file mode 100644 index ff0237a2eb2..00000000000 --- a/examples/elasticsearch/elasticsearch.yml +++ /dev/null @@ -1,385 +0,0 @@ -##################### 
Elasticsearch Configuration Example ##################### - -# This file contains an overview of various configuration settings, -# targeted at operations staff. Application developers should -# consult the guide at . -# -# The installation procedure is covered at -# . -# -# Elasticsearch comes with reasonable defaults for most settings, -# so you can try it out without bothering with configuration. -# -# Most of the time, these defaults are just fine for running a production -# cluster. If you're fine-tuning your cluster, or wondering about the -# effect of certain configuration option, please _do ask_ on the -# mailing list or IRC channel [http://elasticsearch.org/community]. - -# Any element in the configuration can be replaced with environment variables -# by placing them in ${...} notation. For example: -# -#node.rack: ${RACK_ENV_VAR} - -# For information on supported formats and syntax for the config file, see -# - - -################################### Cluster ################################### - -# Cluster name identifies your cluster for auto-discovery. If you're running -# multiple clusters on the same network, make sure you're using unique names. -# -cluster.name: ${CLUSTER_NAME} - - -#################################### Node ##################################### - -# Node names are generated dynamically on startup, so you're relieved -# from configuring them manually. You can tie this node to a specific name: -# -#node.name: "Franz Kafka" - -# Every node can be configured to allow or deny being eligible as the master, -# and to allow or deny to store the data. -# -# Allow this node to be eligible as a master node (enabled by default): -# -node.master: ${NODE_MASTER} -# -# Allow this node to store data (enabled by default): -# -node.data: ${NODE_DATA} - -# You can exploit these settings to design advanced cluster topologies. -# -# 1. You want this node to never become a master node, only to hold data. -# This will be the "workhorse" of your cluster. -# -#node.master: false -#node.data: true -# -# 2. You want this node to only serve as a master: to not store any data and -# to have free resources. This will be the "coordinator" of your cluster. -# -#node.master: true -#node.data: false -# -# 3. You want this node to be neither master nor data node, but -# to act as a "search load balancer" (fetching data from nodes, -# aggregating results, etc.) -# -#node.master: false -#node.data: false - -# Use the Cluster Health API [http://localhost:9200/_cluster/health], the -# Node Info API [http://localhost:9200/_nodes] or GUI tools -# such as , -# , -# and -# to inspect the cluster state. - -# A node can have generic attributes associated with it, which can later be used -# for customized shard allocation filtering, or allocation awareness. An attribute -# is a simple key value pair, similar to node.key: value, here is an example: -# -#node.rack: rack314 - -# By default, multiple nodes are allowed to start from the same installation location -# to disable it, set the following: -#node.max_local_storage_nodes: 1 - - -#################################### Index #################################### - -# You can set a number of options (such as shard/replica options, mapping -# or analyzer definitions, translog settings, ...) for indices globally, -# in this file. -# -# Note, that it makes more sense to configure index settings specifically for -# a certain index, either when creating it or by using the index templates API. -# -# See and -# -# for more information. 
- -# Set the number of shards (splits) of an index (5 by default): -# -#index.number_of_shards: 5 - -# Set the number of replicas (additional copies) of an index (1 by default): -# -#index.number_of_replicas: 1 - -# Note, that for development on a local machine, with small indices, it usually -# makes sense to "disable" the distributed features: -# -#index.number_of_shards: 1 -#index.number_of_replicas: 0 - -# These settings directly affect the performance of index and search operations -# in your cluster. Assuming you have enough machines to hold shards and -# replicas, the rule of thumb is: -# -# 1. Having more *shards* enhances the _indexing_ performance and allows to -# _distribute_ a big index across machines. -# 2. Having more *replicas* enhances the _search_ performance and improves the -# cluster _availability_. -# -# The "number_of_shards" is a one-time setting for an index. -# -# The "number_of_replicas" can be increased or decreased anytime, -# by using the Index Update Settings API. -# -# Elasticsearch takes care about load balancing, relocating, gathering the -# results from nodes, etc. Experiment with different settings to fine-tune -# your setup. - -# Use the Index Status API () to inspect -# the index status. - - -#################################### Paths #################################### - -# Path to directory containing configuration (this file and logging.yml): -# -#path.conf: /path/to/conf - -# Path to directory where to store index data allocated for this node. -# -#path.data: /path/to/data -# -# Can optionally include more than one location, causing data to be striped across -# the locations (a la RAID 0) on a file level, favouring locations with most free -# space on creation. For example: -# -#path.data: /path/to/data1,/path/to/data2 - -# Path to temporary files: -# -#path.work: /path/to/work - -# Path to log files: -# -#path.logs: /path/to/logs - -# Path to where plugins are installed: -# -#path.plugins: /path/to/plugins - - -#################################### Plugin ################################### - -# If a plugin listed here is not installed for current node, the node will not start. -# -#plugin.mandatory: mapper-attachments,lang-groovy - - -################################### Memory #################################### - -# Elasticsearch performs poorly when JVM starts swapping: you should ensure that -# it _never_ swaps. -# -# Set this property to true to lock the memory: -# -#bootstrap.mlockall: true - -# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set -# to the same value, and that the machine has enough memory to allocate -# for Elasticsearch, leaving enough memory for the operating system itself. -# -# You should also make sure that the Elasticsearch process is allowed to lock -# the memory, eg. by using `ulimit -l unlimited`. - - -############################## Network And HTTP ############################### - -# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens -# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node -# communication. (the range means that if the port is busy, it will automatically -# try the next port). - -# Set the bind address specifically (IPv4 or IPv6): -# -#network.bind_host: 192.168.0.1 - -# Set the address other nodes will use to communicate with this node. If not -# set, it is automatically derived. It must point to an actual IP address. 
-# -#network.publish_host: 192.168.0.1 - -# Set both 'bind_host' and 'publish_host': -# -#network.host: 192.168.0.1 - -# Set a custom port for the node to node communication (9300 by default): -# -transport.tcp.port: ${TRANSPORT_PORT} - -# Enable compression for all communication between nodes (disabled by default): -# -#transport.tcp.compress: true - -# Set a custom port to listen for HTTP traffic: -# -http.port: ${HTTP_PORT} - -# Set a custom allowed content length: -# -#http.max_content_length: 100mb - -# Disable HTTP completely: -# -#http.enabled: false - - -################################### Gateway ################################### - -# The gateway allows for persisting the cluster state between full cluster -# restarts. Every change to the state (such as adding an index) will be stored -# in the gateway, and when the cluster starts up for the first time, -# it will read its state from the gateway. - -# There are several types of gateway implementations. For more information, see -# . - -# The default gateway type is the "local" gateway (recommended): -# -#gateway.type: local - -# Settings below control how and when to start the initial recovery process on -# a full cluster restart (to reuse as much local data as possible when using shared -# gateway). - -# Allow recovery process after N nodes in a cluster are up: -# -#gateway.recover_after_nodes: 1 - -# Set the timeout to initiate the recovery process, once the N nodes -# from previous setting are up (accepts time value): -# -#gateway.recover_after_time: 5m - -# Set how many nodes are expected in this cluster. Once these N nodes -# are up (and recover_after_nodes is met), begin recovery process immediately -# (without waiting for recover_after_time to expire): -# -#gateway.expected_nodes: 2 - - -############################# Recovery Throttling ############################# - -# These settings allow to control the process of shards allocation between -# nodes during initial recovery, replica allocation, rebalancing, -# or when adding and removing nodes. - -# Set the number of concurrent recoveries happening on a node: -# -# 1. During the initial recovery -# -#cluster.routing.allocation.node_initial_primaries_recoveries: 4 -# -# 2. During adding/removing nodes, rebalancing, etc -# -#cluster.routing.allocation.node_concurrent_recoveries: 2 - -# Set to throttle throughput when recovering (eg. 100mb, by default 20mb): -# -#indices.recovery.max_bytes_per_sec: 20mb - -# Set to limit the number of open concurrent streams when -# recovering a shard from a peer: -# -#indices.recovery.concurrent_streams: 5 - - -################################## Discovery ################################## - -# Discovery infrastructure ensures nodes can be found within a cluster -# and master node is elected. Multicast discovery is the default. - -# Set to ensure a node sees N other master eligible nodes to be considered -# operational within the cluster. This should be set to a quorum/majority of -# the master-eligible nodes in the cluster. -# -#discovery.zen.minimum_master_nodes: 1 - -# Set the time to wait for ping responses from other nodes when discovering. -# Set this option to a higher value on a slow or congested network -# to minimize discovery failures: -# -#discovery.zen.ping.timeout: 3s - -# For more information, see -# - -# Unicast discovery allows to explicitly control which nodes will be used -# to discover the cluster. It can be used when multicast is not present, -# or to restrict the cluster communication-wise. -# -# 1. 
Disable multicast discovery (enabled by default): -# -discovery.zen.ping.multicast.enabled: ${MULTICAST} -# -# 2. Configure an initial list of master nodes in the cluster -# to perform discovery when new nodes (master or data) are started: -# -#discovery.zen.ping.unicast.hosts: ${UNICAST_HOSTS} - -# EC2 discovery allows to use AWS EC2 API in order to perform discovery. -# -# You have to install the cloud-aws plugin for enabling the EC2 discovery. -# -# For more information, see -# -# -# See -# for a step-by-step tutorial. - -# GCE discovery allows to use Google Compute Engine API in order to perform discovery. -# -# You have to install the cloud-gce plugin for enabling the GCE discovery. -# -# For more information, see . - -# Azure discovery allows to use Azure API in order to perform discovery. -# -# You have to install the cloud-azure plugin for enabling the Azure discovery. -# -# For more information, see . - -################################## Slow Log ################################## - -# Shard level query and fetch threshold logging. - -#index.search.slowlog.threshold.query.warn: 10s -#index.search.slowlog.threshold.query.info: 5s -#index.search.slowlog.threshold.query.debug: 2s -#index.search.slowlog.threshold.query.trace: 500ms - -#index.search.slowlog.threshold.fetch.warn: 1s -#index.search.slowlog.threshold.fetch.info: 800ms -#index.search.slowlog.threshold.fetch.debug: 500ms -#index.search.slowlog.threshold.fetch.trace: 200ms - -#index.indexing.slowlog.threshold.index.warn: 10s -#index.indexing.slowlog.threshold.index.info: 5s -#index.indexing.slowlog.threshold.index.debug: 2s -#index.indexing.slowlog.threshold.index.trace: 500ms - -################################## GC Logging ################################ - -#monitor.jvm.gc.young.warn: 1000ms -#monitor.jvm.gc.young.info: 700ms -#monitor.jvm.gc.young.debug: 400ms - -#monitor.jvm.gc.old.warn: 10s -#monitor.jvm.gc.old.info: 5s -#monitor.jvm.gc.old.debug: 2s - -################################## Security ################################ - -# Uncomment if you want to enable JSONP as a valid return transport on the -# http server. With this enabled, it may pose a security risk, so disabling -# it unless you need it is recommended (it is disabled by default). -# -#http.jsonp.enable: true diff --git a/examples/elasticsearch/elasticsearch_discovery.go b/examples/elasticsearch/elasticsearch_discovery.go deleted file mode 100644 index 80823fd2dc3..00000000000 --- a/examples/elasticsearch/elasticsearch_discovery.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package main - -import ( - "flag" - "fmt" - "strings" - "time" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" - client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" -) - -var ( - namespace = flag.String("namespace", api.NamespaceDefault, "The namespace containing Elasticsearch pods") - selector = flag.String("selector", "", "Selector (label query) for selecting Elasticsearch pods") -) - -func main() { - flag.Parse() - glog.Info("Elasticsearch discovery") - - glog.Infof("Namespace: %q", *namespace) - glog.Infof("selector: %q", *selector) - - c, err := client.NewInCluster() - if err != nil { - glog.Fatalf("Failed to make client: %v", err) - } - - l, err := labels.Parse(*selector) - if err != nil { - glog.Fatalf("Failed to parse selector %q: %v", *selector, err) - } - pods, err := c.Pods(*namespace).List(l, fields.Everything()) - if err != nil { - glog.Fatalf("Failed to list pods: %v", err) - } - - glog.Infof("Elasticsearch pods in namespace %s with selector %q", *namespace, *selector) - podIPs := []string{} - for i := range pods.Items { - p := &pods.Items[i] - for attempt := 0; attempt < 10; attempt++ { - glog.Infof("%d: %s PodIP: %s", i, p.Name, p.Status.PodIP) - if p.Status.PodIP != "" { - podIPs = append(podIPs, fmt.Sprintf(`"%s"`, p.Status.PodIP)) - break - } - time.Sleep(1 * time.Second) - p, err = c.Pods(*namespace).Get(p.Name) - if err != nil { - glog.Warningf("Failed to get pod %s: %v", p.Name, err) - } - } - if p.Status.PodIP == "" { - glog.Warningf("Failed to obtain PodIP for %s", p.Name) - } - } - fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(podIPs, ", ")) -} diff --git a/examples/elasticsearch/es-rc.yaml b/examples/elasticsearch/es-rc.yaml new file mode 100644 index 00000000000..25b9652d0a0 --- /dev/null +++ b/examples/elasticsearch/es-rc.yaml @@ -0,0 +1,54 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: es + labels: + component: elasticsearch +spec: + replicas: 1 + selector: + component: elasticsearch + template: + metadata: + labels: + component: elasticsearch + spec: + serviceAccount: elasticsearch + containers: + - name: es + securityContext: + capabilities: + add: + - IPC_LOCK + image: quay.io/pires/docker-elasticsearch-kubernetes:1.7.1-4 + env: + - name: KUBERNETES_CA_CERTIFICATE_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: "CLUSTER_NAME" + value: "myesdb" + - name: "DISCOVERY_SERVICE" + value: "elasticsearch" + - name: NODE_MASTER + value: "true" + - name: NODE_DATA + value: "true" + - name: HTTP_ENABLE + value: "true" + ports: + - containerPort: 9200 + name: http + protocol: TCP + - containerPort: 9300 + name: transport + protocol: TCP + volumeMounts: + - mountPath: /data + name: storage + volumes: + - name: storage + source: + emptyDir: {} diff --git a/examples/elasticsearch/es-svc.yaml b/examples/elasticsearch/es-svc.yaml new file mode 100644 index 00000000000..3a5dd45649d --- /dev/null +++ b/examples/elasticsearch/es-svc.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch + labels: + component: elasticsearch +spec: + type: LoadBalancer + selector: + component: elasticsearch + ports: + - name: http + port: 9200 + protocol: TCP + - name: transport + port: 9300 + protocol: TCP diff --git a/examples/elasticsearch/music-rc.yaml b/examples/elasticsearch/music-rc.yaml deleted file mode 100644 index 
1c30d7fe9fa..00000000000 --- a/examples/elasticsearch/music-rc.yaml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: v1 -kind: ReplicationController -metadata: - labels: - name: music-db - namespace: mytunes - name: music-db -spec: - replicas: 4 - selector: - name: music-db - template: - metadata: - labels: - name: music-db - spec: - containers: - - name: es - image: kubernetes/elasticsearch:1.2 - env: - - name: "CLUSTER_NAME" - value: "mytunes-db" - - name: "SELECTOR" - value: "name=music-db" - - name: "NAMESPACE" - value: "mytunes" - ports: - - name: es - containerPort: 9200 - - name: es-transport - containerPort: 9300 - diff --git a/examples/elasticsearch/music-service.yaml b/examples/elasticsearch/music-service.yaml deleted file mode 100644 index f71a5766f76..00000000000 --- a/examples/elasticsearch/music-service.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: music-server - namespace: mytunes - labels: - name: music-db -spec: - selector: - name: music-db - ports: - - name: db - port: 9200 - targetPort: es - type: LoadBalancer diff --git a/examples/elasticsearch/mytunes-namespace.yaml b/examples/elasticsearch/mytunes-namespace.yaml deleted file mode 100644 index d69f4db6cb5..00000000000 --- a/examples/elasticsearch/mytunes-namespace.yaml +++ /dev/null @@ -1,6 +0,0 @@ -kind: Namespace -apiVersion: v1 -metadata: - name: mytunes - labels: - name: mytunes diff --git a/examples/elasticsearch/production_cluster/README.md b/examples/elasticsearch/production_cluster/README.md new file mode 100644 index 00000000000..11abd839f77 --- /dev/null +++ b/examples/elasticsearch/production_cluster/README.md @@ -0,0 +1,222 @@ + + + + +WARNING +WARNING +WARNING +WARNING +WARNING + +

PLEASE NOTE: This document applies to the HEAD of the source tree

+ +If you are using a released version of Kubernetes, you should +refer to the docs that go with that version. + + +The latest 1.0.x release of this document can be found +[here](http://releases.k8s.io/release-1.0/examples/elasticsearch/production_cluster/README.md). + +Documentation for other releases can be found at +[releases.k8s.io](http://releases.k8s.io). + +-- + + + + + +# Elasticsearch for Kubernetes + +Kubernetes makes it trivial for anyone to easily build and scale [Elasticsearch](http://www.elasticsearch.org/) clusters. Here, you'll find how to do so. +Current Elasticsearch version is `1.7.1`. + +Before we start, one needs to know that Elasticsearch best-practices recommend to separate nodes in three roles: +* `Master` nodes - intended for clustering management only, no data, no HTTP API +* `Client` nodes - intended for client usage, no data, with HTTP API +* `Data` nodes - intended for storing and indexing your data, no HTTP API + +This is enforced throughout this document. + +WARNING Current pod descriptors use an `emptyDir` for storing data in each data node container. This is meant to be for the sake of simplicity and [should be adapted according to your storage needs](../../../docs/design/persistent-storage.md). + +## Docker image + +This example uses [this pre-built image](https://github.com/pires/docker-elasticsearch-kubernetes) will not be supported. Feel free to fork to fit your own needs, but mind yourself that you will need to change Kubernetes descriptors accordingly. + +## Deploy + +``` +kubectl create -f examples/elasticsearch/production_cluster/service-account.yaml +kubectl create -f examples/elasticsearch/production_cluster/es-discovery-svc.yaml +kubectl create -f examples/elasticsearch/production_cluster/es-svc.yaml +kubectl create -f examples/elasticsearch/production_cluster/es-master-rc.yaml +``` + +Wait until `es-master` is provisioned, and + +``` +kubectl create -f examples/elasticsearch/production_cluster/es-client-rc.yaml +``` + +Wait until `es-client` is provisioned, and + +``` +kubectl create -f examples/elasticsearch/production_cluster/es-data-rc.yaml +``` + +Wait until `es-data` is provisioned. + +Now, I leave up to you how to validate the cluster, but a first step is to wait for containers to be in ```RUNNING``` state and check the Elasticsearch master logs: + +``` +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +es-client-2ep9o 1/1 Running 0 2m +es-data-r9tgv 1/1 Running 0 1m +es-master-vxl6c 1/1 Running 0 6m +``` + +``` +$ kubectl logs es-master-vxl6c +log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. +log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. +log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. +[2015-08-21 10:58:51,324][INFO ][node ] [Arc] version[1.7.1], pid[8], build[b88f43f/2015-07-29T09:54:16Z] +[2015-08-21 10:58:51,328][INFO ][node ] [Arc] initializing ... +[2015-08-21 10:58:51,542][INFO ][plugins ] [Arc] loaded [cloud-kubernetes], sites [] +[2015-08-21 10:58:51,624][INFO ][env ] [Arc] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4] +[2015-08-21 10:58:57,439][INFO ][node ] [Arc] initialized +[2015-08-21 10:58:57,439][INFO ][node ] [Arc] starting ... 
+[2015-08-21 10:58:57,782][INFO ][transport ] [Arc] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.15.2:9300]} +[2015-08-21 10:58:57,847][INFO ][discovery ] [Arc] myesdb/-x16XFUzTCC8xYqWoeEOYQ +[2015-08-21 10:59:05,167][INFO ][cluster.service ] [Arc] new_master [Arc][-x16XFUzTCC8xYqWoeEOYQ][es-master-vxl6c][inet[/10.244.15.2:9300]]{data=false, master=true}, reason: zen-disco-join (elected_as_master) +[2015-08-21 10:59:05,202][INFO ][node ] [Arc] started +[2015-08-21 10:59:05,238][INFO ][gateway ] [Arc] recovered [0] indices into cluster_state +[2015-08-21 11:02:28,797][INFO ][cluster.service ] [Arc] added {[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false},}, reason: zen-disco-receive(join from node[[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false}]) +[2015-08-21 11:03:16,822][INFO ][cluster.service ] [Arc] added {[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false},}, reason: zen-disco-receive(join from node[[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false}]) +``` + +As you can assert, the cluster is up and running. Easy, wasn't it? + +## Scale + +Scaling each type of node to handle your cluster is as easy as: + +``` +kubectl scale --replicas=3 rc es-master +kubectl scale --replicas=2 rc es-client +kubectl scale --replicas=2 rc es-data +``` + +Did it work? + +``` +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +es-client-2ep9o 1/1 Running 0 4m +es-client-ye5s1 1/1 Running 0 50s +es-data-8az22 1/1 Running 0 47s +es-data-r9tgv 1/1 Running 0 3m +es-master-57h7k 1/1 Running 0 52s +es-master-kuwse 1/1 Running 0 52s +es-master-vxl6c 1/1 Running 0 8m +``` + +Let's take another look of the Elasticsearch master logs: + +``` +$ kubectl logs es-master-vxl6c +log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. +log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. +log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender. +[2015-08-21 10:58:51,324][INFO ][node ] [Arc] version[1.7.1], pid[8], build[b88f43f/2015-07-29T09:54:16Z] +[2015-08-21 10:58:51,328][INFO ][node ] [Arc] initializing ... +[2015-08-21 10:58:51,542][INFO ][plugins ] [Arc] loaded [cloud-kubernetes], sites [] +[2015-08-21 10:58:51,624][INFO ][env ] [Arc] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4] +[2015-08-21 10:58:57,439][INFO ][node ] [Arc] initialized +[2015-08-21 10:58:57,439][INFO ][node ] [Arc] starting ... 
+[2015-08-21 10:58:57,782][INFO ][transport ] [Arc] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.15.2:9300]} +[2015-08-21 10:58:57,847][INFO ][discovery ] [Arc] myesdb/-x16XFUzTCC8xYqWoeEOYQ +[2015-08-21 10:59:05,167][INFO ][cluster.service ] [Arc] new_master [Arc][-x16XFUzTCC8xYqWoeEOYQ][es-master-vxl6c][inet[/10.244.15.2:9300]]{data=false, master=true}, reason: zen-disco-join (elected_as_master) +[2015-08-21 10:59:05,202][INFO ][node ] [Arc] started +[2015-08-21 10:59:05,238][INFO ][gateway ] [Arc] recovered [0] indices into cluster_state +[2015-08-21 11:02:28,797][INFO ][cluster.service ] [Arc] added {[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false},}, reason: zen-disco-receive(join from node[[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false}]) +[2015-08-21 11:03:16,822][INFO ][cluster.service ] [Arc] added {[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false},}, reason: zen-disco-receive(join from node[[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false}]) +[2015-08-21 11:04:40,781][INFO ][cluster.service ] [Arc] added {[Erik Josten][QUJlahfLTi-MsxzM6_Da0g][es-master-kuwse][inet[/10.244.59.5:9300]]{data=false, master=true},}, reason: zen-disco-receive(join from node[[Erik Josten][QUJlahfLTi-MsxzM6_Da0g][es-master-kuwse][inet[/10.244.59.5:9300]]{data=false, master=true}]) +[2015-08-21 11:04:41,076][INFO ][cluster.service ] [Arc] added {[Power Princess][V4qnR-6jQOS5ovXQsPgo7g][es-master-57h7k][inet[/10.244.53.3:9300]]{data=false, master=true},}, reason: zen-disco-receive(join from node[[Power Princess][V4qnR-6jQOS5ovXQsPgo7g][es-master-57h7k][inet[/10.244.53.3:9300]]{data=false, master=true}]) +[2015-08-21 11:04:53,966][INFO ][cluster.service ] [Arc] added {[Cagliostro][Wpfx5fkBRiG2qCEWd8laaQ][es-client-ye5s1][inet[/10.244.15.3:9300]]{data=false, master=false},}, reason: zen-disco-receive(join from node[[Cagliostro][Wpfx5fkBRiG2qCEWd8laaQ][es-client-ye5s1][inet[/10.244.15.3:9300]]{data=false, master=false}]) +[2015-08-21 11:04:56,803][INFO ][cluster.service ] [Arc] added {[Thog][vkdEtX3ESfWmhXXf-Wi0_Q][es-data-8az22][inet[/10.244.15.4:9300]]{master=false},}, reason: zen-disco-receive(join from node[[Thog][vkdEtX3ESfWmhXXf-Wi0_Q][es-data-8az22][inet[/10.244.15.4:9300]]{master=false}]) +``` + +## Access the service + +*Don't forget* that services in Kubernetes are only acessible from containers in the cluster. For different behavior you should [configure the creation of an external load-balancer](http://kubernetes.io/v1.0/docs/user-guide/services.html#type-loadbalancer). While it's supported within this example service descriptor, its usage is out of scope of this document, for now. 
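
Because the `elasticsearch` service selects `component=elasticsearch,role=client`, only the client pods should ever receive HTTP traffic. A quick way to confirm that wiring is the sketch below; pod names and IPs will differ in your cluster:

```
$ kubectl get pods -l component=elasticsearch,role=client
$ kubectl get endpoints elasticsearch
```

The endpoints listed for the service should match the client pod IPs only, not the master or data pods.
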
+ +``` +$ kubectl get service elasticsearch +NAME LABELS SELECTOR IP(S) PORT(S) +elasticsearch component=elasticsearch,role=client component=elasticsearch,role=client 10.100.134.2 9200/TCP +``` + +From any host on your cluster (that's running `kube-proxy`), run: + +``` +curl http://10.100.134.2:9200 +``` + +You should see something similar to the following: + + +```json +{ + "status" : 200, + "name" : "Cagliostro", + "cluster_name" : "myesdb", + "version" : { + "number" : "1.7.1", + "build_hash" : "b88f43fc40b0bcd7f173a1f9ee2e97816de80b19", + "build_timestamp" : "2015-07-29T09:54:16Z", + "build_snapshot" : false, + "lucene_version" : "4.10.4" + }, + "tagline" : "You Know, for Search" +} +``` + +Or if you want to check cluster information: + + +``` +curl http://10.100.134.2:9200/_cluster/health?pretty +``` + +You should see something similar to the following: + +```json +{ + "cluster_name" : "myesdb", + "status" : "green", + "timed_out" : false, + "number_of_nodes" : 7, + "number_of_data_nodes" : 2, + "active_primary_shards" : 0, + "active_shards" : 0, + "relocating_shards" : 0, + "initializing_shards" : 0, + "unassigned_shards" : 0, + "delayed_unassigned_shards" : 0, + "number_of_pending_tasks" : 0, + "number_of_in_flight_fetch" : 0 +} +``` + + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/elasticsearch/production_cluster/README.md?pixel)]() + diff --git a/examples/elasticsearch/production_cluster/es-client-rc.yaml b/examples/elasticsearch/production_cluster/es-client-rc.yaml new file mode 100644 index 00000000000..227dce16a1b --- /dev/null +++ b/examples/elasticsearch/production_cluster/es-client-rc.yaml @@ -0,0 +1,55 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: es-client + labels: + component: elasticsearch + role: client +spec: + replicas: 1 + selector: + component: elasticsearch + role: client + template: + metadata: + labels: + component: elasticsearch + role: client + spec: + serviceAccount: elasticsearch + containers: + - name: es-client + securityContext: + capabilities: + add: + - IPC_LOCK + image: quay.io/pires/docker-elasticsearch-kubernetes:1.7.1-4 + env: + - name: KUBERNETES_CA_CERTIFICATE_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: "CLUSTER_NAME" + value: "myesdb" + - name: NODE_MASTER + value: "false" + - name: NODE_DATA + value: "false" + - name: HTTP_ENABLE + value: "true" + ports: + - containerPort: 9200 + name: http + protocol: TCP + - containerPort: 9300 + name: transport + protocol: TCP + volumeMounts: + - mountPath: /data + name: storage + volumes: + - name: storage + source: + emptyDir: {} diff --git a/examples/elasticsearch/production_cluster/es-data-rc.yaml b/examples/elasticsearch/production_cluster/es-data-rc.yaml new file mode 100644 index 00000000000..7cd099ead6b --- /dev/null +++ b/examples/elasticsearch/production_cluster/es-data-rc.yaml @@ -0,0 +1,50 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: es-data + labels: + component: elasticsearch + role: data +spec: + replicas: 1 + selector: + component: elasticsearch + role: data + template: + metadata: + labels: + component: elasticsearch + role: data + spec: + serviceAccount: elasticsearch + containers: + - name: es-data + securityContext: + capabilities: + add: + - IPC_LOCK + image: quay.io/pires/docker-elasticsearch-kubernetes:1.7.1-4 + env: + - name: KUBERNETES_CA_CERTIFICATE_FILE + value: 
/var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: "CLUSTER_NAME" + value: "myesdb" + - name: NODE_MASTER + value: "false" + - name: HTTP_ENABLE + value: "false" + ports: + - containerPort: 9300 + name: transport + protocol: TCP + volumeMounts: + - mountPath: /data + name: storage + volumes: + - name: storage + source: + emptyDir: {} diff --git a/examples/elasticsearch/production_cluster/es-discovery-svc.yaml b/examples/elasticsearch/production_cluster/es-discovery-svc.yaml new file mode 100644 index 00000000000..cfdc5daa255 --- /dev/null +++ b/examples/elasticsearch/production_cluster/es-discovery-svc.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch-discovery + labels: + component: elasticsearch + role: master +spec: + selector: + component: elasticsearch + role: master + ports: + - name: transport + port: 9300 + protocol: TCP diff --git a/examples/elasticsearch/production_cluster/es-master-rc.yaml b/examples/elasticsearch/production_cluster/es-master-rc.yaml new file mode 100644 index 00000000000..dfa474aa760 --- /dev/null +++ b/examples/elasticsearch/production_cluster/es-master-rc.yaml @@ -0,0 +1,52 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: es-master + labels: + component: elasticsearch + role: master +spec: + replicas: 1 + selector: + component: elasticsearch + role: master + template: + metadata: + labels: + component: elasticsearch + role: master + spec: + serviceAccount: elasticsearch + containers: + - name: es-master + securityContext: + capabilities: + add: + - IPC_LOCK + image: quay.io/pires/docker-elasticsearch-kubernetes:1.7.1-4 + env: + - name: KUBERNETES_CA_CERTIFICATE_FILE + value: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: "CLUSTER_NAME" + value: "myesdb" + - name: NODE_MASTER + value: "true" + - name: NODE_DATA + value: "false" + - name: HTTP_ENABLE + value: "false" + ports: + - containerPort: 9300 + name: transport + protocol: TCP + volumeMounts: + - mountPath: /data + name: storage + volumes: + - name: storage + source: + emptyDir: {} diff --git a/examples/elasticsearch/production_cluster/es-svc.yaml b/examples/elasticsearch/production_cluster/es-svc.yaml new file mode 100644 index 00000000000..03bc4efda79 --- /dev/null +++ b/examples/elasticsearch/production_cluster/es-svc.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch + labels: + component: elasticsearch + role: client +spec: + type: LoadBalancer + selector: + component: elasticsearch + role: client + ports: + - name: http + port: 9200 + protocol: TCP diff --git a/examples/elasticsearch/production_cluster/service-account.yaml b/examples/elasticsearch/production_cluster/service-account.yaml new file mode 100644 index 00000000000..7b7b80b2009 --- /dev/null +++ b/examples/elasticsearch/production_cluster/service-account.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: elasticsearch diff --git a/examples/elasticsearch/run.sh b/examples/elasticsearch/run.sh deleted file mode 100755 index 77a9c9ccb6b..00000000000 --- a/examples/elasticsearch/run.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -# Copyright 2015 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -export CLUSTER_NAME=${CLUSTER_NAME:-elasticsearch-default} -export NODE_MASTER=${NODE_MASTER:-true} -export NODE_DATA=${NODE_DATA:-true} -export MULTICAST=${MULTICAST:-false} -/elasticsearch_discovery --namespace="${NAMESPACE}" --selector="${SELECTOR}" >> /elasticsearch-1.5.2/config/elasticsearch.yml -export HTTP_PORT=${HTTP_PORT:-9200} -export TRANSPORT_PORT=${TRANSPORT_PORT:-9300} -/elasticsearch-1.5.2/bin/elasticsearch diff --git a/examples/elasticsearch/service-account.yaml b/examples/elasticsearch/service-account.yaml new file mode 100644 index 00000000000..7b7b80b2009 --- /dev/null +++ b/examples/elasticsearch/service-account.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: elasticsearch diff --git a/examples/examples_test.go b/examples/examples_test.go index f7d957c9d58..de30f42f92a 100644 --- a/examples/examples_test.go +++ b/examples/examples_test.go @@ -237,9 +237,9 @@ func TestExampleObjectSchemas(t *testing.T) { "dapi-pod": &api.Pod{}, }, "../examples/elasticsearch": { - "mytunes-namespace": &api.Namespace{}, - "music-rc": &api.ReplicationController{}, - "music-service": &api.Service{}, + "es-rc": &api.ReplicationController{}, + "es-svc": &api.Service{}, + "service-account": nil, }, "../examples/explorer": { "pod": &api.Pod{}, diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt index 701941273c6..2389d196559 100644 --- a/hack/verify-flags/exceptions.txt +++ b/hack/verify-flags/exceptions.txt @@ -63,10 +63,10 @@ docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js:var docs/getting-started-guides/logging-elasticsearch.md: "cluster_name" : "kubernetes-logging", docs/user-guide/accessing-the-cluster.md: "cluster_name" : "kubernetes_logging", examples/cluster-dns/images/frontend/client.py: service_address = socket.gethostbyname(hostname) -examples/elasticsearch/README.md: "cluster_name" : "mytunes-db", -examples/elasticsearch/README.md: "cluster_name" : "mytunes-db", -examples/elasticsearch/README.md: "cluster_name" : "mytunes-db", -examples/elasticsearch/README.md:"cluster_name" : "mytunes-db", +examples/elasticsearch/README.md: "cluster_name" : "myesdb", +examples/elasticsearch/README.md: "cluster_name" : "myesdb", +examples/elasticsearch/production_cluster/README.md: "cluster_name" : "myesdb", +examples/elasticsearch/production_cluster/README.md: "cluster_name" : "myesdb", hack/lib/logging.sh: local source_file=${BASH_SOURCE[$frame_no]} hack/lib/logging.sh: local source_file=${BASH_SOURCE[$stack_skip]} hack/local-up-cluster.sh: runtime_config="--runtime-config=\"${RUNTIME_CONFIG}\"" From b6feefbd9f90c7d206d1f58ed78726904d7b077b Mon Sep 17 00:00:00 2001 From: markturansky Date: Fri, 4 Sep 2015 09:49:30 -0400 Subject: [PATCH 051/101] fixed empty PV/C lists by removing omitempty --- api/swagger-spec/v1.json | 6 ++++++ pkg/api/types.go | 4 ++-- pkg/api/v1/types.go | 4 ++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index 75b7819e695..2a74f5eda0c 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ 
-11879,6 +11879,9 @@ "v1.PersistentVolumeClaimList": { "id": "v1.PersistentVolumeClaimList", "description": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", + "required": [ + "items" + ], "properties": { "kind": { "type": "string", @@ -11990,6 +11993,9 @@ "v1.PersistentVolumeList": { "id": "v1.PersistentVolumeList", "description": "PersistentVolumeList is a list of PersistentVolume items.", + "required": [ + "items" + ], "properties": { "kind": { "type": "string", diff --git a/pkg/api/types.go b/pkg/api/types.go index bd05acb150c..cd36da099c3 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -326,7 +326,7 @@ type PersistentVolumeStatus struct { type PersistentVolumeList struct { TypeMeta `json:",inline"` ListMeta `json:"metadata,omitempty"` - Items []PersistentVolume `json:"items,omitempty"` + Items []PersistentVolume `json:"items"` } // PersistentVolumeClaim is a user's request for and claim to a persistent volume @@ -344,7 +344,7 @@ type PersistentVolumeClaim struct { type PersistentVolumeClaimList struct { TypeMeta `json:",inline"` ListMeta `json:"metadata,omitempty"` - Items []PersistentVolumeClaim `json:"items,omitempty"` + Items []PersistentVolumeClaim `json:"items"` } // PersistentVolumeClaimSpec describes the common attributes of storage devices diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index a09760c1b43..d70760b9e2a 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -419,7 +419,7 @@ type PersistentVolumeList struct { ListMeta `json:"metadata,omitempty"` // List of persistent volumes. // More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md - Items []PersistentVolume `json:"items,omitempty"` + Items []PersistentVolume `json:"items"` } // PersistentVolumeClaim is a user's request for and claim to a persistent volume @@ -447,7 +447,7 @@ type PersistentVolumeClaimList struct { ListMeta `json:"metadata,omitempty"` // A list of persistent volume claims. 
// More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims - Items []PersistentVolumeClaim `json:"items,omitempty"` + Items []PersistentVolumeClaim `json:"items"` } // PersistentVolumeClaimSpec describes the common attributes of storage devices From 2bf984a5abdcd552773b7919ed065da20f8e2d2a Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Fri, 28 Aug 2015 16:17:14 -0400 Subject: [PATCH 052/101] bump(fsouza/go-dockerclient): 76fd6c6 --- Godeps/Godeps.json | 2 +- .../fsouza/go-dockerclient/.travis.yml | 1 + .../github.com/fsouza/go-dockerclient/AUTHORS | 4 + .../github.com/fsouza/go-dockerclient/auth.go | 11 +- .../fsouza/go-dockerclient/auth_test.go | 2 +- .../fsouza/go-dockerclient/change.go | 2 +- .../fsouza/go-dockerclient/client.go | 31 ++-- .../fsouza/go-dockerclient/container.go | 175 ++++++++++-------- .../fsouza/go-dockerclient/container_test.go | 5 +- .../github.com/fsouza/go-dockerclient/exec.go | 122 ++++++------ .../fsouza/go-dockerclient/image.go | 111 ++++++----- .../fsouza/go-dockerclient/image_test.go | 2 + .../github.com/fsouza/go-dockerclient/misc.go | 4 +- .../fsouza/go-dockerclient/testing/server.go | 27 ++- .../go-dockerclient/testing/server_test.go | 77 +++++++- .../fsouza/go-dockerclient/volume.go | 118 ++++++++++++ .../fsouza/go-dockerclient/volume_test.go | 142 ++++++++++++++ 17 files changed, 610 insertions(+), 226 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume_test.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 1a0e19fb676..755b472e61f 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -224,7 +224,7 @@ }, { "ImportPath": "github.com/fsouza/go-dockerclient", - "Rev": "42d06e2b125654477366c320dcea99107a86e9c2" + "Rev": "76fd6c68cf24c48ee6a2b25def997182a29f940e" }, { "ImportPath": "github.com/garyburd/redigo/internal", diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml index 3926838acee..e03407b3e79 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml @@ -3,6 +3,7 @@ sudo: false go: - 1.3.1 - 1.4 + - 1.5 - tip env: - GOARCH=amd64 diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS index 2febb1f039c..a149a7723c8 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS @@ -13,6 +13,7 @@ Brian Lalor Brian Palmer Burke Libbey Carlos Diaz-Padron +Cesar Wong Cezar Sa Espinola Cheah Chu Yeow cheneydeng @@ -33,6 +34,7 @@ Fabio Rehm Fatih Arslan Flavia Missi Francisco Souza +Grégoire Delattre Guillermo Álvarez Fernández He Simei Ivan Mikushin @@ -66,12 +68,14 @@ Paul Morie Paul Weil Peter Edge Peter Jihoon Kim +Phil Lu Philippe Lafoucrière Rafe Colton Rob Miller Robert Williamson Salvador Gironès Sam Rijs +Samuel Karp Simon Eskildsen Simon Menke Skolos diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go index fccd5574019..d2af8780c2a 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go @@ -16,7 +16,8 @@ import ( 
"strings" ) -var AuthParseError error = errors.New("Failed to read authentication from dockercfg") +// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed. +var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg") // AuthConfiguration represents authentication options to use in the PushImage // method. It represents the authentication in the Docker index server. @@ -33,6 +34,10 @@ type AuthConfigurations struct { Configs map[string]AuthConfiguration `json:"configs"` } +// AuthConfigurations119 is used to serialize a set of AuthConfigurations +// for Docker API >= 1.19. +type AuthConfigurations119 map[string]AuthConfiguration + // dockerConfig represents a registry authentation configuration from the // .dockercfg file. type dockerConfig struct { @@ -103,7 +108,7 @@ func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) { } userpass := strings.Split(string(data), ":") if len(userpass) != 2 { - return nil, AuthParseError + return nil, ErrCannotParseDockercfg } c.Configs[reg] = AuthConfiguration{ Email: conf.Email, @@ -117,7 +122,7 @@ func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) { // AuthCheck validates the given credentials. It returns nil if successful. // -// See https://goo.gl/vPoEfJ for more details. +// See https://goo.gl/m2SleN for more details. func (c *Client) AuthCheck(conf *AuthConfiguration) error { if conf == nil { return fmt.Errorf("conf is nil") diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go index fc0ffab84a3..b3d4f8fc86a 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go @@ -41,7 +41,7 @@ func TestAuthBadConfig(t *testing.T) { auth := base64.StdEncoding.EncodeToString([]byte("userpass")) read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth)) ac, err := NewAuthConfigurations(read) - if err != AuthParseError { + if err != ErrCannotParseDockercfg { t.Errorf("Incorrect error returned %v\n", err) } if ac != nil { diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go index e7b056c3f88..d133594d480 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go @@ -23,7 +23,7 @@ const ( // Change represents a change in a container. // -// See http://goo.gl/QkW9sH for more details. +// See https://goo.gl/9GsTIF for more details. type Change struct { Path string Kind ChangeType diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go index 986bbb3d284..c3d86f40b8a 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go @@ -4,7 +4,7 @@ // Package docker provides a client for the Docker remote API. // -// See http://goo.gl/G3plxW for more details on the remote API. +// See https://goo.gl/G3plxW for more details on the remote API. 
package docker import ( @@ -45,6 +45,8 @@ var ( ErrConnectionRefused = errors.New("cannot connect to Docker endpoint") apiVersion112, _ = NewAPIVersion("1.12") + + apiVersion119, _ = NewAPIVersion("1.19") ) // APIVersion is an internal representation of a version of the Remote API. @@ -326,7 +328,7 @@ func (c *Client) checkAPIVersion() error { // Ping pings the docker server // -// See http://goo.gl/stJENm for more details. +// See https://goo.gl/kQCfJj for more details. func (c *Client) Ping() error { path := "/_ping" body, status, err := c.do("GET", path, doOptions{}) @@ -462,9 +464,13 @@ func (c *Client) stream(method, path string, streamOptions streamOptions) error address := c.endpointURL.Path if streamOptions.stdout == nil { streamOptions.stdout = ioutil.Discard + } else if t, ok := streamOptions.stdout.(io.Closer); ok { + defer t.Close() } if streamOptions.stderr == nil { streamOptions.stderr = ioutil.Discard + } else if t, ok := streamOptions.stderr.(io.Closer); ok { + defer t.Close() } if protocol == "unix" { dial, err := net.Dial(protocol, address) @@ -583,6 +589,8 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error return err } req.Header.Set("Content-Type", "plain/text") + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "tcp") protocol := c.endpointURL.Scheme address := c.endpointURL.Path if protocol != "unix" { @@ -612,13 +620,16 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error defer rwc.Close() errChanOut := make(chan error, 1) errChanIn := make(chan error, 1) - exit := make(chan bool) go func() { - defer close(exit) - defer close(errChanOut) + defer func() { + if hijackOptions.in != nil { + if closer, ok := hijackOptions.in.(io.Closer); ok { + closer.Close() + } + } + }() var err error if hijackOptions.setRawTerminal { - // When TTY is ON, use regular copy _, err = io.Copy(hijackOptions.stdout, br) } else { _, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br) @@ -626,17 +637,15 @@ func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error errChanOut <- err }() go func() { + var err error if hijackOptions.in != nil { - _, err := io.Copy(rwc, hijackOptions.in) - errChanIn <- err - } else { - errChanIn <- nil + _, err = io.Copy(rwc, hijackOptions.in) } + errChanIn <- err rwc.(interface { CloseWrite() error }).CloseWrite() }() - <-exit errIn := <-errChanIn errOut := <-errChanOut if errIn != nil { diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go index 89430975ba5..b74a992c0af 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go @@ -23,7 +23,7 @@ var ErrContainerAlreadyExists = errors.New("container already exists") // ListContainersOptions specify parameters to the ListContainers function. // -// See http://goo.gl/6Y4Gz7 for more details. +// See https://goo.gl/47a6tO for more details. type ListContainersOptions struct { All bool Size bool @@ -41,24 +41,24 @@ type APIPort struct { IP string `json:"IP,omitempty" yaml:"IP,omitempty"` } -// APIContainers represents a container. -// -// See http://goo.gl/QeFH7U for more details. +// APIContainers represents each container in the list returned by +// ListContainers. 
type APIContainers struct { - ID string `json:"Id" yaml:"Id"` - Image string `json:"Image,omitempty" yaml:"Image,omitempty"` - Command string `json:"Command,omitempty" yaml:"Command,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` - Status string `json:"Status,omitempty" yaml:"Status,omitempty"` - Ports []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty"` - SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty"` - SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty"` - Names []string `json:"Names,omitempty" yaml:"Names,omitempty"` + ID string `json:"Id" yaml:"Id"` + Image string `json:"Image,omitempty" yaml:"Image,omitempty"` + Command string `json:"Command,omitempty" yaml:"Command,omitempty"` + Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` + Status string `json:"Status,omitempty" yaml:"Status,omitempty"` + Ports []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty"` + SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty"` + SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty"` + Names []string `json:"Names,omitempty" yaml:"Names,omitempty"` + Labels map[string]string `json:"Labels,omitempty" yaml:"Labels, omitempty"` } // ListContainers returns a slice of containers matching the given criteria. // -// See http://goo.gl/6Y4Gz7 for more details. +// See https://goo.gl/47a6tO for more details. func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) { path := "/containers/json?" + queryString(opts) body, _, err := c.do("GET", path, doOptions{}) @@ -213,9 +213,21 @@ type Config struct { NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"` SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"` OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"` + Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"` Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` } +// Mount represents a mount point in the container. +// +// It has been added in the version 1.20 of the Docker API, available since +// Docker 1.8. +type Mount struct { + Source string + Destination string + Mode string + RW bool +} + // LogConfig defines the log driver type and the configuration for it. type LogConfig struct { Type string `json:"Type,omitempty" yaml:"Type,omitempty"` @@ -279,7 +291,7 @@ type Container struct { // RenameContainerOptions specify parameters to the RenameContainer function. // -// See http://goo.gl/L00hoj for more details. +// See https://goo.gl/laSOIy for more details. type RenameContainerOptions struct { // ID of container to rename ID string `qs:"-"` @@ -290,7 +302,7 @@ type RenameContainerOptions struct { // RenameContainer updates and existing containers name // -// See http://goo.gl/L00hoj for more details. +// See https://goo.gl/laSOIy for more details. func (c *Client) RenameContainer(opts RenameContainerOptions) error { _, _, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{}) return err @@ -298,7 +310,7 @@ func (c *Client) RenameContainer(opts RenameContainerOptions) error { // InspectContainer returns information about a container by its ID. // -// See http://goo.gl/CxVuJ5 for more details. +// See https://goo.gl/RdIq0b for more details. 
func (c *Client) InspectContainer(id string) (*Container, error) { path := "/containers/" + id + "/json" body, status, err := c.do("GET", path, doOptions{}) @@ -318,7 +330,7 @@ func (c *Client) InspectContainer(id string) (*Container, error) { // ContainerChanges returns changes in the filesystem of the given container. // -// See http://goo.gl/QkW9sH for more details. +// See https://goo.gl/9GsTIF for more details. func (c *Client) ContainerChanges(id string) ([]Change, error) { path := "/containers/" + id + "/changes" body, status, err := c.do("GET", path, doOptions{}) @@ -338,7 +350,7 @@ func (c *Client) ContainerChanges(id string) ([]Change, error) { // CreateContainerOptions specify parameters to the CreateContainer function. // -// See http://goo.gl/2xxQQK for more details. +// See https://goo.gl/WxQzrr for more details. type CreateContainerOptions struct { Name string Config *Config `qs:"-"` @@ -348,7 +360,7 @@ type CreateContainerOptions struct { // CreateContainer creates a new container, returning the container instance, // or an error in case of failure. // -// See http://goo.gl/mErxNp for more details. +// See https://goo.gl/WxQzrr for more details. func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) { path := "/containers/create?" + queryString(opts) body, status, err := c.do( @@ -434,41 +446,46 @@ type Device struct { // HostConfig contains the container options related to starting a container on // a given host type HostConfig struct { - Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty"` - CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"` - CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"` - ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"` - LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"` - Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"` - PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"` - Links []string `json:"Links,omitempty" yaml:"Links,omitempty"` - PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"` - DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only - DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"` - ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"` - VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` - NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"` - IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"` - PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"` - UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty"` - RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"` - Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"` - LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"` - ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"` - SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"` - CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` - MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` - CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` - 
CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` - CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty"` - CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty"` - Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"` + Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty"` + CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"` + CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"` + ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"` + LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"` + Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"` + PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"` + Links []string `json:"Links,omitempty" yaml:"Links,omitempty"` + PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"` + DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only + DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"` + ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"` + VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` + NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"` + IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"` + PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"` + UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty"` + RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"` + Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"` + LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"` + ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"` + SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"` + CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"` + Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` + MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty"` + OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable"` + CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` + CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty"` + CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty"` + CPUQuota int64 `json:"CpuQuota,omitempty" yaml:"CpuQuota,omitempty"` + CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty"` + BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight"` + Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"` } // StartContainer starts a container, returning an error in case of failure. // -// See http://goo.gl/iM5GYs for more details. +// See https://goo.gl/MrBAJv for more details. 
func (c *Client) StartContainer(id string, hostConfig *HostConfig) error { path := "/containers/" + id + "/start" _, status, err := c.do("POST", path, doOptions{data: hostConfig, forceJSON: true}) @@ -487,7 +504,7 @@ func (c *Client) StartContainer(id string, hostConfig *HostConfig) error { // StopContainer stops a container, killing it after the given timeout (in // seconds). // -// See http://goo.gl/EbcpXt for more details. +// See https://goo.gl/USqsFt for more details. func (c *Client) StopContainer(id string, timeout uint) error { path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout) _, status, err := c.do("POST", path, doOptions{}) @@ -506,7 +523,7 @@ func (c *Client) StopContainer(id string, timeout uint) error { // RestartContainer stops a container, killing it after the given timeout (in // seconds), during the stop process. // -// See http://goo.gl/VOzR2n for more details. +// See https://goo.gl/QzsDnz for more details. func (c *Client) RestartContainer(id string, timeout uint) error { path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout) _, status, err := c.do("POST", path, doOptions{}) @@ -521,7 +538,7 @@ func (c *Client) RestartContainer(id string, timeout uint) error { // PauseContainer pauses the given container. // -// See http://goo.gl/AM5t42 for more details. +// See https://goo.gl/OF7W9X for more details. func (c *Client) PauseContainer(id string) error { path := fmt.Sprintf("/containers/%s/pause", id) _, status, err := c.do("POST", path, doOptions{}) @@ -536,7 +553,7 @@ func (c *Client) PauseContainer(id string) error { // UnpauseContainer unpauses the given container. // -// See http://goo.gl/eBrNSL for more details. +// See https://goo.gl/7dwyPA for more details. func (c *Client) UnpauseContainer(id string) error { path := fmt.Sprintf("/containers/%s/unpause", id) _, status, err := c.do("POST", path, doOptions{}) @@ -552,7 +569,7 @@ func (c *Client) UnpauseContainer(id string) error { // TopResult represents the list of processes running in a container, as // returned by /containers//top. // -// See http://goo.gl/qu4gse for more details. +// See https://goo.gl/Rb46aY for more details. type TopResult struct { Titles []string Processes [][]string @@ -560,7 +577,7 @@ type TopResult struct { // TopContainer returns processes running inside a container // -// See http://goo.gl/qu4gse for more details. +// See https://goo.gl/Rb46aY for more details. func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) { var args string var result TopResult @@ -584,7 +601,7 @@ func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) { // Stats represents container statistics, returned by /containers//stats. // -// See http://goo.gl/DFMiYD for more details. +// See https://goo.gl/GNmLHb for more details. type Stats struct { Read time.Time `json:"read,omitempty" yaml:"read,omitempty"` Network struct { @@ -674,7 +691,7 @@ type BlkioStatsEntry struct { // StatsOptions specify parameters to the Stats function. // -// See http://goo.gl/DFMiYD for more details. +// See https://goo.gl/GNmLHb for more details. type StatsOptions struct { ID string Stats chan<- *Stats @@ -690,9 +707,10 @@ type StatsOptions struct { // This function is blocking, similar to a streaming call for logs, and should be run // on a separate goroutine from the caller. Note that this function will block until // the given container is removed, not just exited. When finished, this function -// will close the given channel. 
Alternatively, function can be stopped by signaling on the Done channel +// will close the given channel. Alternatively, function can be stopped by +// signaling on the Done channel. // -// See http://goo.gl/DFMiYD for more details. +// See https://goo.gl/GNmLHb for more details. func (c *Client) Stats(opts StatsOptions) (retErr error) { errC := make(chan error, 1) readCloser, writeCloser := io.Pipe() @@ -763,7 +781,7 @@ func (c *Client) Stats(opts StatsOptions) (retErr error) { // KillContainerOptions represents the set of options that can be used in a // call to KillContainer. // -// See http://goo.gl/TFkECx for more details. +// See https://goo.gl/hkS9i8 for more details. type KillContainerOptions struct { // The ID of the container. ID string `qs:"-"` @@ -773,9 +791,10 @@ type KillContainerOptions struct { Signal Signal } -// KillContainer kills a container, returning an error in case of failure. +// KillContainer sends a signal to a container, returning an error in case of +// failure. // -// See http://goo.gl/TFkECx for more details. +// See https://goo.gl/hkS9i8 for more details. func (c *Client) KillContainer(opts KillContainerOptions) error { path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts) _, status, err := c.do("POST", path, doOptions{}) @@ -790,7 +809,7 @@ func (c *Client) KillContainer(opts KillContainerOptions) error { // RemoveContainerOptions encapsulates options to remove a container. // -// See http://goo.gl/ZB83ji for more details. +// See https://goo.gl/RQyX62 for more details. type RemoveContainerOptions struct { // The ID of the container. ID string `qs:"-"` @@ -806,7 +825,7 @@ type RemoveContainerOptions struct { // RemoveContainer removes a container, returning an error in case of failure. // -// See http://goo.gl/ZB83ji for more details. +// See https://goo.gl/RQyX62 for more details. func (c *Client) RemoveContainer(opts RemoveContainerOptions) error { path := "/containers/" + opts.ID + "?" + queryString(opts) _, status, err := c.do("DELETE", path, doOptions{}) @@ -822,7 +841,7 @@ func (c *Client) RemoveContainer(opts RemoveContainerOptions) error { // CopyFromContainerOptions is the set of options that can be used when copying // files or folders from a container. // -// See http://goo.gl/rINMlw for more details. +// See https://goo.gl/4L7b07 for more details. type CopyFromContainerOptions struct { OutputStream io.Writer `json:"-"` Container string `json:"-"` @@ -832,7 +851,7 @@ type CopyFromContainerOptions struct { // CopyFromContainer copy files or folders from a container, using a given // resource. // -// See http://goo.gl/rINMlw for more details. +// See https://goo.gl/4L7b07 for more details. func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error { if opts.Container == "" { return &NoSuchContainer{ID: opts.Container} @@ -852,7 +871,7 @@ func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error { // WaitContainer blocks until the given container stops, return the exit code // of the container status. // -// See http://goo.gl/J88DHU for more details. +// See https://goo.gl/Gc1rge for more details. func (c *Client) WaitContainer(id string) (int, error) { body, status, err := c.do("POST", "/containers/"+id+"/wait", doOptions{}) if status == http.StatusNotFound { @@ -871,7 +890,7 @@ func (c *Client) WaitContainer(id string) (int, error) { // CommitContainerOptions aggregates parameters to the CommitContainer method. // -// See http://goo.gl/Jn8pe8 for more details. 
+// See https://goo.gl/mqfoCw for more details. type CommitContainerOptions struct { Container string Repository string `qs:"repo"` @@ -883,7 +902,7 @@ type CommitContainerOptions struct { // CommitContainer creates a new image from a container's changes. // -// See http://goo.gl/Jn8pe8 for more details. +// See https://goo.gl/mqfoCw for more details. func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) { path := "/commit?" + queryString(opts) body, status, err := c.do("POST", path, doOptions{data: opts.Run}) @@ -904,7 +923,7 @@ func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) { // AttachToContainerOptions is the set of options that can be used when // attaching to a container. // -// See http://goo.gl/RRAhws for more details. +// See https://goo.gl/NKpkFk for more details. type AttachToContainerOptions struct { Container string `qs:"-"` InputStream io.Reader `qs:"-"` @@ -939,7 +958,7 @@ type AttachToContainerOptions struct { // AttachToContainer attaches to a container, using the given options. // -// See http://goo.gl/RRAhws for more details. +// See https://goo.gl/NKpkFk for more details. func (c *Client) AttachToContainer(opts AttachToContainerOptions) error { if opts.Container == "" { return &NoSuchContainer{ID: opts.Container} @@ -957,7 +976,7 @@ func (c *Client) AttachToContainer(opts AttachToContainerOptions) error { // LogsOptions represents the set of options used when getting logs from a // container. // -// See http://goo.gl/rLhKSU for more details. +// See https://goo.gl/yl8PGm for more details. type LogsOptions struct { Container string `qs:"-"` OutputStream io.Writer `qs:"-"` @@ -975,7 +994,7 @@ type LogsOptions struct { // Logs gets stdout and stderr logs from the specified container. // -// See http://goo.gl/rLhKSU for more details. +// See https://goo.gl/yl8PGm for more details. func (c *Client) Logs(opts LogsOptions) error { if opts.Container == "" { return &NoSuchContainer{ID: opts.Container} @@ -992,6 +1011,8 @@ func (c *Client) Logs(opts LogsOptions) error { } // ResizeContainerTTY resizes the terminal to the given height and width. +// +// See https://goo.gl/xERhCc for more details. func (c *Client) ResizeContainerTTY(id string, height, width int) error { params := make(url.Values) params.Set("h", strconv.Itoa(height)) @@ -1003,7 +1024,7 @@ func (c *Client) ResizeContainerTTY(id string, height, width int) error { // ExportContainerOptions is the set of parameters to the ExportContainer // method. // -// See http://goo.gl/hnzE62 for more details. +// See https://goo.gl/dOkTyk for more details. type ExportContainerOptions struct { ID string OutputStream io.Writer @@ -1012,7 +1033,7 @@ type ExportContainerOptions struct { // ExportContainer export the contents of container id as tar archive // and prints the exported contents to stdout. // -// See http://goo.gl/hnzE62 for more details. +// See https://goo.gl/dOkTyk for more details. 
func (c *Client) ExportContainer(opts ExportContainerOptions) error { if opts.ID == "" { return &NoSuchContainer{ID: opts.ID} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go index 00966aa19ce..d9dbea86fdb 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go @@ -1122,10 +1122,7 @@ func TestAttachToContainerRawTerminalFalse(t *testing.T) { Stream: true, RawTerminal: false, } - err := client.AttachToContainer(opts) - if err != nil { - t.Fatal(err) - } + client.AttachToContainer(opts) expected := map[string][]string{ "stdin": {"1"}, "stdout": {"1"}, diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go index bc7c5cfcd80..84047f04efb 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Docs can currently be found at https://github.com/docker/docker/blob/master/docs/sources/reference/api/docker_remote_api_v1.15.md#exec-create - package docker import ( @@ -15,9 +13,15 @@ import ( "strconv" ) +// Exec is the type representing a `docker exec` instance and containing the +// instance ID +type Exec struct { + ID string `json:"Id,omitempty" yaml:"Id,omitempty"` +} + // CreateExecOptions specify parameters to the CreateExecContainer function. // -// See http://goo.gl/8izrzI for more details +// See https://goo.gl/1KSIb7 for more details type CreateExecOptions struct { AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` @@ -28,9 +32,31 @@ type CreateExecOptions struct { User string `json:"User,omitempty" yaml:"User,omitempty"` } +// CreateExec sets up an exec instance in a running container `id`, returning the exec +// instance, or an error in case of failure. +// +// See https://goo.gl/1KSIb7 for more details +func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) { + path := fmt.Sprintf("/containers/%s/exec", opts.Container) + body, status, err := c.do("POST", path, doOptions{data: opts}) + if status == http.StatusNotFound { + return nil, &NoSuchContainer{ID: opts.Container} + } + if err != nil { + return nil, err + } + var exec Exec + err = json.Unmarshal(body, &exec) + if err != nil { + return nil, err + } + + return &exec, nil +} + // StartExecOptions specify parameters to the StartExecContainer function. // -// See http://goo.gl/JW8Lxl for more details +// See https://goo.gl/iQCnto for more details type StartExecOptions struct { Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"` @@ -51,67 +77,11 @@ type StartExecOptions struct { Success chan struct{} `json:"-"` } -// Exec is the type representing a `docker exec` instance and containing the -// instance ID -type Exec struct { - ID string `json:"Id,omitempty" yaml:"Id,omitempty"` -} - -// ExecProcessConfig is a type describing the command associated to a Exec -// instance. It's used in the ExecInspect type. 
-// -// See http://goo.gl/ypQULN for more details -type ExecProcessConfig struct { - Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"` - User string `json:"user,omitempty" yaml:"user,omitempty"` - Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"` - EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"` - Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty"` -} - -// ExecInspect is a type with details about a exec instance, including the -// exit code if the command has finished running. It's returned by a api -// call to /exec/(id)/json -// -// See http://goo.gl/ypQULN for more details -type ExecInspect struct { - ID string `json:"ID,omitempty" yaml:"ID,omitempty"` - Running bool `json:"Running,omitempty" yaml:"Running,omitempty"` - ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` - OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"` - OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"` - ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"` - Container Container `json:"Container,omitempty" yaml:"Container,omitempty"` -} - -// CreateExec sets up an exec instance in a running container `id`, returning the exec -// instance, or an error in case of failure. -// -// See http://goo.gl/8izrzI for more details -func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) { - path := fmt.Sprintf("/containers/%s/exec", opts.Container) - body, status, err := c.do("POST", path, doOptions{data: opts}) - if status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: opts.Container} - } - if err != nil { - return nil, err - } - var exec Exec - err = json.Unmarshal(body, &exec) - if err != nil { - return nil, err - } - - return &exec, nil -} - // StartExec starts a previously set up exec instance id. If opts.Detach is // true, it returns after starting the exec command. Otherwise, it sets up an // interactive session with the exec command. // -// See http://goo.gl/JW8Lxl for more details +// See https://goo.gl/iQCnto for more details func (c *Client) StartExec(id string, opts StartExecOptions) error { if id == "" { return &NoSuchExec{ID: id} @@ -144,7 +114,7 @@ func (c *Client) StartExec(id string, opts StartExecOptions) error { // is valid only if Tty was specified as part of creating and starting the exec // command. // -// See http://goo.gl/YDSx1f for more details +// See https://goo.gl/e1JpsA for more details func (c *Client) ResizeExecTTY(id string, height, width int) error { params := make(url.Values) params.Set("h", strconv.Itoa(height)) @@ -155,9 +125,35 @@ func (c *Client) ResizeExecTTY(id string, height, width int) error { return err } +// ExecProcessConfig is a type describing the command associated to a Exec +// instance. It's used in the ExecInspect type. +type ExecProcessConfig struct { + Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"` + User string `json:"user,omitempty" yaml:"user,omitempty"` + Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"` + EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"` + Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty"` +} + +// ExecInspect is a type with details about a exec instance, including the +// exit code if the command has finished running. 
It's returned by a api +// call to /exec/(id)/json +// +// See https://goo.gl/gPtX9R for more details +type ExecInspect struct { + ID string `json:"ID,omitempty" yaml:"ID,omitempty"` + Running bool `json:"Running,omitempty" yaml:"Running,omitempty"` + ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` + OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"` + OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"` + ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"` + Container Container `json:"Container,omitempty" yaml:"Container,omitempty"` +} + // InspectExec returns low-level information about the exec command id. // -// See http://goo.gl/ypQULN for more details +// See https://goo.gl/gPtX9R for more details func (c *Client) InspectExec(id string) (*ExecInspect, error) { path := fmt.Sprintf("/exec/%s/json", id) body, status, err := c.do("GET", path, doOptions{}) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go index 4bceb0d3de1..f8be846901f 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go @@ -11,7 +11,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" @@ -46,16 +45,6 @@ type Image struct { VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"` } -// ImageHistory represent a layer in an image's history returned by the -// ImageHistory call. -type ImageHistory struct { - ID string `json:"Id" yaml:"Id"` - Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` - CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty"` - Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` -} - // ImagePre012 serves the same purpose as the Image type except that it is for // earlier versions of the Docker API (pre-012 to be specific) type ImagePre012 struct { @@ -72,15 +61,6 @@ type ImagePre012 struct { Size int64 `json:"size,omitempty"` } -// ListImagesOptions specify parameters to the ListImages function. -// -// See http://goo.gl/HRVN1Z for more details. -type ListImagesOptions struct { - All bool - Filters map[string][]string - Digests bool -} - var ( // ErrNoSuchImage is the error returned when the image does not exist. ErrNoSuchImage = errors.New("no such image") @@ -102,9 +82,18 @@ var ( ErrMustSpecifyNames = errors.New("must specify at least one name to export") ) +// ListImagesOptions specify parameters to the ListImages function. +// +// See https://goo.gl/xBe1u3 for more details. +type ListImagesOptions struct { + All bool + Filters map[string][]string + Digests bool +} + // ListImages returns the list of available images in the server. // -// See http://goo.gl/HRVN1Z for more details. +// See https://goo.gl/xBe1u3 for more details. func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) { path := "/images/json?" + queryString(opts) body, _, err := c.do("GET", path, doOptions{}) @@ -119,9 +108,19 @@ func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) { return images, nil } +// ImageHistory represent a layer in an image's history returned by the +// ImageHistory call. 
+type ImageHistory struct { + ID string `json:"Id" yaml:"Id"` + Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty"` + Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` + CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty"` + Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` +} + // ImageHistory returns the history of the image by its name or ID. // -// See http://goo.gl/2oJmNs for more details. +// See https://goo.gl/8bnTId for more details. func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { body, status, err := c.do("GET", "/images/"+name+"/history", doOptions{}) if status == http.StatusNotFound { @@ -140,7 +139,7 @@ func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { // RemoveImage removes an image by its name or ID. // -// See http://goo.gl/znj0wM for more details. +// See https://goo.gl/V3ZWnK for more details. func (c *Client) RemoveImage(name string) error { _, status, err := c.do("DELETE", "/images/"+name, doOptions{}) if status == http.StatusNotFound { @@ -152,7 +151,7 @@ func (c *Client) RemoveImage(name string) error { // RemoveImageOptions present the set of options available for removing an image // from a registry. // -// See http://goo.gl/6V48bF for more details. +// See https://goo.gl/V3ZWnK for more details. type RemoveImageOptions struct { Force bool `qs:"force"` NoPrune bool `qs:"noprune"` @@ -161,7 +160,7 @@ type RemoveImageOptions struct { // RemoveImageExtended removes an image by its name or ID. // Extra params can be passed, see RemoveImageOptions // -// See http://goo.gl/znj0wM for more details. +// See https://goo.gl/V3ZWnK for more details. func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error { uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts)) _, status, err := c.do("DELETE", uri, doOptions{}) @@ -173,7 +172,7 @@ func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error // InspectImage returns an image by its name or ID. // -// See http://goo.gl/Q112NY for more details. +// See https://goo.gl/jHPcg6 for more details. func (c *Client) InspectImage(name string) (*Image, error) { body, status, err := c.do("GET", "/images/"+name+"/json", doOptions{}) if status == http.StatusNotFound { @@ -216,7 +215,7 @@ func (c *Client) InspectImage(name string) (*Image, error) { // PushImageOptions represents options to use in the PushImage method. // -// See http://goo.gl/pN8A3P for more details. +// See https://goo.gl/zPtZaT for more details. type PushImageOptions struct { // Name of the image Name string @@ -236,7 +235,7 @@ type PushImageOptions struct { // An empty instance of AuthConfiguration may be used for unauthenticated // pushes. // -// See http://goo.gl/pN8A3P for more details. +// See https://goo.gl/zPtZaT for more details. func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error { if opts.Name == "" { return ErrNoSuchImage @@ -259,7 +258,7 @@ func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error // PullImageOptions present the set of options available for pulling an image // from a registry. // -// See http://goo.gl/ACyYNS for more details. +// See https://goo.gl/iJkZjD for more details. type PullImageOptions struct { Repository string `qs:"fromImage"` Registry string @@ -268,9 +267,10 @@ type PullImageOptions struct { RawJSONStream bool `qs:"-"` } -// PullImage pulls an image from a remote registry, logging progress to opts.OutputStream. 
+// PullImage pulls an image from a remote registry, logging progress to +// opts.OutputStream. // -// See http://goo.gl/ACyYNS for more details. +// See https://goo.gl/iJkZjD for more details. func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error { if opts.Repository == "" { return ErrNoSuchImage @@ -296,14 +296,14 @@ func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, // LoadImageOptions represents the options for LoadImage Docker API Call // -// See http://goo.gl/Y8NNCq for more details. +// See https://goo.gl/JyClMX for more details. type LoadImageOptions struct { InputStream io.Reader } // LoadImage imports a tarball docker image // -// See http://goo.gl/Y8NNCq for more details. +// See https://goo.gl/JyClMX for more details. func (c *Client) LoadImage(opts LoadImageOptions) error { return c.stream("POST", "/images/load", streamOptions{ setRawTerminal: true, @@ -311,17 +311,17 @@ func (c *Client) LoadImage(opts LoadImageOptions) error { }) } -// ExportImageOptions represent the options for ExportImage Docker API call +// ExportImageOptions represent the options for ExportImage Docker API call. // -// See http://goo.gl/mi6kvk for more details. +// See https://goo.gl/le7vK8 for more details. type ExportImageOptions struct { Name string OutputStream io.Writer } -// ExportImage exports an image (as a tar file) into the stream +// ExportImage exports an image (as a tar file) into the stream. // -// See http://goo.gl/mi6kvk for more details. +// See https://goo.gl/le7vK8 for more details. func (c *Client) ExportImage(opts ExportImageOptions) error { return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{ setRawTerminal: true, @@ -331,7 +331,7 @@ func (c *Client) ExportImage(opts ExportImageOptions) error { // ExportImagesOptions represent the options for ExportImages Docker API call // -// See http://goo.gl/YeZzQK for more details. +// See https://goo.gl/huC7HA for more details. type ExportImagesOptions struct { Names []string OutputStream io.Writer `qs:"-"` @@ -339,7 +339,7 @@ type ExportImagesOptions struct { // ExportImages exports one or more images (as a tar file) into the stream // -// See http://goo.gl/YeZzQK for more details. +// See https://goo.gl/huC7HA for more details. func (c *Client) ExportImages(opts ExportImagesOptions) error { if opts.Names == nil || len(opts.Names) == 0 { return ErrMustSpecifyNames @@ -353,7 +353,7 @@ func (c *Client) ExportImages(opts ExportImagesOptions) error { // ImportImageOptions present the set of informations available for importing // an image from a source file or the stdin. // -// See http://goo.gl/PhBKnS for more details. +// See https://goo.gl/iJkZjD for more details. type ImportImageOptions struct { Repository string `qs:"repo"` Source string `qs:"fromSrc"` @@ -366,7 +366,7 @@ type ImportImageOptions struct { // ImportImage imports an image from a url, a file or stdin // -// See http://goo.gl/PhBKnS for more details. +// See https://goo.gl/iJkZjD for more details. 
func (c *Client) ImportImage(opts ImportImageOptions) error { if opts.Repository == "" { return ErrNoSuchImage @@ -379,8 +379,7 @@ func (c *Client) ImportImage(opts ImportImageOptions) error { if err != nil { return err } - b, err := ioutil.ReadAll(f) - opts.InputStream = bytes.NewBuffer(b) + opts.InputStream = f opts.Source = "-" } return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream) @@ -415,12 +414,12 @@ type BuildImageOptions struct { // BuildImage builds an image from a tarball's url or a Dockerfile in the input // stream. // -// See http://goo.gl/7nuGXa for more details. +// See https://goo.gl/xySxCe for more details. func (c *Client) BuildImage(opts BuildImageOptions) error { if opts.OutputStream == nil { return ErrMissingOutputStream } - headers, err := headersWithAuth(opts.Auth, opts.AuthConfigs) + headers, err := headersWithAuth(opts.Auth, c.versionedAuthConfigs(opts.AuthConfigs)) if err != nil { return err } @@ -452,9 +451,19 @@ func (c *Client) BuildImage(opts BuildImageOptions) error { }) } +func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) interface{} { + if c.serverAPIVersion == nil { + c.checkAPIVersion() + } + if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion119) { + return AuthConfigurations119(authConfigs.Configs) + } + return authConfigs +} + // TagImageOptions present the set of options to tag an image. // -// See http://goo.gl/5g6qFy for more details. +// See https://goo.gl/98ZzkU for more details. type TagImageOptions struct { Repo string Tag string @@ -463,7 +472,7 @@ type TagImageOptions struct { // TagImage adds a tag to the image identified by the given name. // -// See http://goo.gl/5g6qFy for more details. +// See https://goo.gl/98ZzkU for more details. func (c *Client) TagImage(name string, opts TagImageOptions) error { if name == "" { return ErrNoSuchImage @@ -497,7 +506,7 @@ func headersWithAuth(auths ...interface{}) (map[string]string, error) { return nil, err } headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes()) - case AuthConfigurations: + case AuthConfigurations, AuthConfigurations119: var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(auth); err != nil { return nil, err @@ -509,9 +518,9 @@ func headersWithAuth(auths ...interface{}) (map[string]string, error) { return headers, nil } -// APIImageSearch reflect the result of a search on the dockerHub +// APIImageSearch reflect the result of a search on the Docker Hub. // -// See http://goo.gl/xI5lLZ for more details. +// See https://goo.gl/AYjyrF for more details. type APIImageSearch struct { Description string `json:"description,omitempty" yaml:"description,omitempty"` IsOfficial bool `json:"is_official,omitempty" yaml:"is_official,omitempty"` @@ -522,7 +531,7 @@ type APIImageSearch struct { // SearchImages search the docker hub with a specific given term. // -// See http://goo.gl/xI5lLZ for more details. +// See https://goo.gl/AYjyrF for more details. 
func (c *Client) SearchImages(term string) ([]APIImageSearch, error) { body, _, err := c.do("GET", "/images/search?term="+term, doOptions{}) if err != nil { diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go index d6bce64cfc8..bf010a2f994 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go @@ -21,11 +21,13 @@ import ( func newTestClient(rt *FakeRoundTripper) Client { endpoint := "http://localhost:4243" u, _ := parseEndpoint("http://localhost:4243", false) + testAPIVersion, _ := NewAPIVersion("1.17") client := Client{ HTTPClient: &http.Client{Transport: rt}, endpoint: endpoint, endpointURL: u, SkipServerVersionCheck: true, + serverAPIVersion: testAPIVersion, } return client } diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go index 42d1c7e48e3..df22cf4945b 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go @@ -11,7 +11,7 @@ import ( // Version returns version information about the docker server. // -// See http://goo.gl/BOZrF5 for more details. +// See https://goo.gl/ND9R8L for more details. func (c *Client) Version() (*Env, error) { body, _, err := c.do("GET", "/version", doOptions{}) if err != nil { @@ -26,7 +26,7 @@ func (c *Client) Version() (*Env, error) { // Info returns system-wide information about the Docker server. // -// See http://goo.gl/wmqZsW for more details. +// See https://goo.gl/ElTHi2 for more details. func (c *Client) Info() (*Env, error) { body, _, err := c.do("GET", "/info", doOptions{}) if err != nil { diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go index 41d87277159..05cd2e25bf0 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go @@ -12,6 +12,7 @@ import ( "encoding/json" "errors" "fmt" + "io/ioutil" mathrand "math/rand" "net" "net/http" @@ -532,7 +533,7 @@ func (s *DockerServer) startContainer(w http.ResponseWriter, r *http.Request) { } container.HostConfig = &hostConfig if container.State.Running { - http.Error(w, "Container already running", http.StatusBadRequest) + http.Error(w, "", http.StatusNotModified) return } container.State.Running = true @@ -610,14 +611,34 @@ func (s *DockerServer) attachContainer(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } + wg := sync.WaitGroup{} + if r.URL.Query().Get("stdin") == "1" { + wg.Add(1) + go func() { + ioutil.ReadAll(conn) + wg.Done() + }() + } outStream := stdcopy.NewStdWriter(conn, stdcopy.Stdout) if container.State.Running { - fmt.Fprintf(outStream, "Container %q is running\n", container.ID) + fmt.Fprintf(outStream, "Container is running\n") } else { - fmt.Fprintf(outStream, "Container %q is not running\n", container.ID) + fmt.Fprintf(outStream, "Container is not running\n") } fmt.Fprintln(outStream, "What happened?") fmt.Fprintln(outStream, "Something happened") + wg.Wait() + if r.URL.Query().Get("stream") == "1" { + for { + time.Sleep(1e6) + s.cMut.RLock() + if !container.State.Running { + s.cMut.RUnlock() + break + } + s.cMut.RUnlock() + 
} + } conn.Close() } diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go index 36789abb366..bb24ce56081 100644 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go @@ -5,9 +5,11 @@ package testing import ( + "bufio" "bytes" "encoding/json" "fmt" + "io/ioutil" "math/rand" "net" "net/http" @@ -624,8 +626,8 @@ func TestStartContainerAlreadyRunning(t *testing.T) { path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID) request, _ := http.NewRequest("POST", path, bytes.NewBuffer([]byte("null"))) server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusBadRequest { - t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) + if recorder.Code != http.StatusNotModified { + t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusNotModified, recorder.Code) } } @@ -845,22 +847,41 @@ func TestWaitContainerNotFound(t *testing.T) { } } +type HijackableResponseRecorder struct { + httptest.ResponseRecorder + readCh chan []byte +} + +func (r *HijackableResponseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) { + myConn, otherConn := net.Pipe() + r.readCh = make(chan []byte) + go func() { + data, _ := ioutil.ReadAll(myConn) + r.readCh <- data + }() + return otherConn, nil, nil +} + +func (r *HijackableResponseRecorder) HijackBuffer() string { + return string(<-r.readCh) +} + func TestAttachContainer(t *testing.T) { server := DockerServer{} addContainers(&server, 1) server.containers[0].State.Running = true server.buildMuxer() - recorder := httptest.NewRecorder() + recorder := &HijackableResponseRecorder{} path := fmt.Sprintf("/containers/%s/attach?logs=1", server.containers[0].ID) request, _ := http.NewRequest("POST", path, nil) server.ServeHTTP(recorder, request) lines := []string{ - fmt.Sprintf("\x01\x00\x00\x00\x03\x00\x00\x00Container %q is running", server.containers[0].ID), - "What happened?", - "Something happened", + "\x01\x00\x00\x00\x00\x00\x00\x15Container is running", + "\x01\x00\x00\x00\x00\x00\x00\x0fWhat happened?", + "\x01\x00\x00\x00\x00\x00\x00\x13Something happened", } expected := strings.Join(lines, "\n") + "\n" - if body := recorder.Body.String(); body == expected { + if body := recorder.HijackBuffer(); body != expected { t.Errorf("AttachContainer: wrong body. Want %q. 
Got %q.", expected, body) } } @@ -868,7 +889,7 @@ func TestAttachContainer(t *testing.T) { func TestAttachContainerNotFound(t *testing.T) { server := DockerServer{} server.buildMuxer() - recorder := httptest.NewRecorder() + recorder := &HijackableResponseRecorder{} path := "/containers/abc123/attach?logs=1" request, _ := http.NewRequest("POST", path, nil) server.ServeHTTP(recorder, request) @@ -877,6 +898,44 @@ func TestAttachContainerNotFound(t *testing.T) { } } +func TestAttachContainerWithStreamBlocks(t *testing.T) { + server := DockerServer{} + addContainers(&server, 1) + server.containers[0].State.Running = true + server.buildMuxer() + path := fmt.Sprintf("/containers/%s/attach?logs=1&stdout=1&stream=1", server.containers[0].ID) + request, _ := http.NewRequest("POST", path, nil) + done := make(chan string) + go func() { + recorder := &HijackableResponseRecorder{} + server.ServeHTTP(recorder, request) + done <- recorder.HijackBuffer() + }() + select { + case <-done: + t.Fatalf("attach stream returned before container is stopped") + case <-time.After(500 * time.Millisecond): + } + server.cMut.Lock() + server.containers[0].State.Running = false + server.cMut.Unlock() + var body string + select { + case body = <-done: + case <-time.After(5 * time.Second): + t.Fatalf("timed out waiting for attach to finish") + } + lines := []string{ + "\x01\x00\x00\x00\x00\x00\x00\x15Container is running", + "\x01\x00\x00\x00\x00\x00\x00\x0fWhat happened?", + "\x01\x00\x00\x00\x00\x00\x00\x13Something happened", + } + expected := strings.Join(lines, "\n") + "\n" + if body != expected { + t.Errorf("AttachContainer: wrong body. Want %q. Got %q.", expected, body) + } +} + func TestRemoveContainer(t *testing.T) { server := DockerServer{} addContainers(&server, 1) @@ -1690,7 +1749,7 @@ func addNetworks(server *DockerServer, n int) { ID: fmt.Sprintf("%x", rand.Int()%10000), Type: "bridge", Endpoints: []*docker.Endpoint{ - &docker.Endpoint{ + { Name: "blah", ID: fmt.Sprintf("%x", rand.Int()%10000), Network: netid, diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go new file mode 100644 index 00000000000..4e63272542b --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go @@ -0,0 +1,118 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "encoding/json" + "errors" + "net/http" +) + +var ( + // ErrNoSuchVolume is the error returned when the volume does not exist. + ErrNoSuchVolume = errors.New("no such volume") + + // ErrVolumeInUse is the error returned when the volume requested to be removed is still in use. + ErrVolumeInUse = errors.New("volume in use and cannot be removed") +) + +// Volume represents a volume. +// +// See https://goo.gl/FZA4BK for more details. +type Volume struct { + Name string `json:"Name" yaml:"Name"` + Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"` + Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty"` +} + +// ListVolumesOptions specify parameters to the ListVolumes function. +// +// See https://goo.gl/FZA4BK for more details. +type ListVolumesOptions struct { + Filters map[string][]string +} + +// ListVolumes returns a list of available volumes in the server. +// +// See https://goo.gl/FZA4BK for more details. 
+func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) { + body, _, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{}) + if err != nil { + return nil, err + } + m := make(map[string]interface{}) + if err := json.Unmarshal(body, &m); err != nil { + return nil, err + } + var volumes []Volume + volumesJSON, ok := m["Volumes"] + if !ok { + return volumes, nil + } + data, err := json.Marshal(volumesJSON) + if err != nil { + return nil, err + } + if err := json.Unmarshal(data, &volumes); err != nil { + return nil, err + } + return volumes, nil +} + +// CreateVolumeOptions specify parameters to the CreateVolume function. +// +// See https://goo.gl/pBUbZ9 for more details. +type CreateVolumeOptions struct { + Name string + Driver string + DriverOpts map[string]string +} + +// CreateVolume creates a volume on the server. +// +// See https://goo.gl/pBUbZ9 for more details. +func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) { + body, _, err := c.do("POST", "/volumes", doOptions{data: opts}) + if err != nil { + return nil, err + } + var volume Volume + if err := json.Unmarshal(body, &volume); err != nil { + return nil, err + } + return &volume, nil +} + +// InspectVolume returns a volume by its name. +// +// See https://goo.gl/0g9A6i for more details. +func (c *Client) InspectVolume(name string) (*Volume, error) { + body, status, err := c.do("GET", "/volumes/"+name, doOptions{}) + if status == http.StatusNotFound { + return nil, ErrNoSuchVolume + } + if err != nil { + return nil, err + } + var volume Volume + if err := json.Unmarshal(body, &volume); err != nil { + return nil, err + } + return &volume, nil +} + +// RemoveVolume removes a volume by its name. +// +// See https://goo.gl/79GNQz for more details. +func (c *Client) RemoveVolume(name string) error { + _, status, err := c.do("DELETE", "/volumes/"+name, doOptions{}) + if status == http.StatusNotFound { + return ErrNoSuchVolume + } + if status == http.StatusConflict { + return ErrVolumeInUse + } + return err +} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume_test.go new file mode 100644 index 00000000000..9707c09cdce --- /dev/null +++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume_test.go @@ -0,0 +1,142 @@ +// Copyright 2015 go-dockerclient authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package docker + +import ( + "encoding/json" + "net/http" + "net/url" + "reflect" + "testing" +) + +func TestListVolumes(t *testing.T) { + volumesData := `[ + { + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + }, + { + "Name": "foo", + "Driver": "bar", + "Mountpoint": "/var/lib/docker/volumes/bar" + } +]` + body := `{ "Volumes": ` + volumesData + ` }` + var expected []Volume + if err := json.Unmarshal([]byte(volumesData), &expected); err != nil { + t.Fatal(err) + } + client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) + volumes, err := client.ListVolumes(ListVolumesOptions{}) + if err != nil { + t.Error(err) + } + if !reflect.DeepEqual(volumes, expected) { + t.Errorf("ListVolumes: Wrong return value. Want %#v. 
Got %#v.", expected, volumes) + } +} + +func TestCreateVolume(t *testing.T) { + body := `{ + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + }` + var expected Volume + if err := json.Unmarshal([]byte(body), &expected); err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} + client := newTestClient(fakeRT) + volume, err := client.CreateVolume( + CreateVolumeOptions{ + Name: "tardis", + Driver: "local", + DriverOpts: map[string]string{ + "foo": "bar", + }, + }, + ) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(volume, &expected) { + t.Errorf("CreateVolume: Wrong return value. Want %#v. Got %#v.", expected, volume) + } + req := fakeRT.requests[0] + expectedMethod := "POST" + if req.Method != expectedMethod { + t.Errorf("CreateVolume(): Wrong HTTP method. Want %s. Got %s.", expectedMethod, req.Method) + } + u, _ := url.Parse(client.getURL("/volumes")) + if req.URL.Path != u.Path { + t.Errorf("CreateVolume(): Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) + } +} + +func TestInspectVolume(t *testing.T) { + body := `{ + "Name": "tardis", + "Driver": "local", + "Mountpoint": "/var/lib/docker/volumes/tardis" + }` + var expected Volume + if err := json.Unmarshal([]byte(body), &expected); err != nil { + t.Fatal(err) + } + fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} + client := newTestClient(fakeRT) + name := "tardis" + volume, err := client.InspectVolume(name) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(volume, &expected) { + t.Errorf("InspectVolume: Wrong return value. Want %#v. Got %#v.", expected, volume) + } + req := fakeRT.requests[0] + expectedMethod := "GET" + if req.Method != expectedMethod { + t.Errorf("InspectVolume(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) + } + u, _ := url.Parse(client.getURL("/volumes/" + name)) + if req.URL.Path != u.Path { + t.Errorf("CreateVolume(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) + } +} + +func TestRemoveVolume(t *testing.T) { + name := "test" + fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} + client := newTestClient(fakeRT) + if err := client.RemoveVolume(name); err != nil { + t.Fatal(err) + } + req := fakeRT.requests[0] + expectedMethod := "DELETE" + if req.Method != expectedMethod { + t.Errorf("RemoveVolume(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) + } + u, _ := url.Parse(client.getURL("/volumes/" + name)) + if req.URL.Path != u.Path { + t.Errorf("RemoveVolume(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) + } +} + +func TestRemoveVolumeNotFound(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "no such volume", status: http.StatusNotFound}) + if err := client.RemoveVolume("test:"); err != ErrNoSuchVolume { + t.Errorf("RemoveVolume: wrong error. Want %#v. Got %#v.", ErrNoSuchVolume, err) + } +} + +func TestRemoveVolumeInUse(t *testing.T) { + client := newTestClient(&FakeRoundTripper{message: "volume in use and cannot be removed", status: http.StatusConflict}) + if err := client.RemoveVolume("test:"); err != ErrVolumeInUse { + t.Errorf("RemoveVolume: wrong error. Want %#v. 
Got %#v.", ErrVolumeInUse, err) + } +} From c83786979c6fa000aa8d3e35914acdd32643b0ef Mon Sep 17 00:00:00 2001 From: Andy Goldstein Date: Mon, 31 Aug 2015 13:23:47 -0400 Subject: [PATCH 053/101] Various exec fixes If stdin is noninteractive, the io.Copy from stdin to remoteStdin will unblock when it finishes reading from stdin. In this case, make sure to close remoteStdin so the server knows the client won't be sending any more data. This ensures that the remote process terminates. For example: echo foo | kubectl exec -i -- cat Without this change, the `cat` process never terminates and `kubectl exec` hangs. Fix interactive exec sessions hanging after you type 'exit'. Add e2e test to cover noninteractive stdin: `echo a | kubectl exec -i cat` Add e2e test to cover psuedo-interactive stdin: `kubectl exec -i bash` Prep for sending multiple data frames over multiple streams in remote command test, which is more likely to find flakes (requires bump of spdystream once an issue with the frame worker queues not being fully drained when a goaway frame is received). --- .../remotecommand/remotecommand.go | 121 ++++++++++-------- .../remotecommand/remotecommand_test.go | 86 +++++++++---- pkg/kubelet/server.go | 6 - pkg/util/httpstream/spdy/connection.go | 2 +- test/e2e/kubectl.go | 48 ++++++- test/e2e/util.go | 5 + 6 files changed, 182 insertions(+), 86 deletions(-) diff --git a/pkg/client/unversioned/remotecommand/remotecommand.go b/pkg/client/unversioned/remotecommand/remotecommand.go index 277532e6767..e63af79aa0a 100644 --- a/pkg/client/unversioned/remotecommand/remotecommand.go +++ b/pkg/client/unversioned/remotecommand/remotecommand.go @@ -21,12 +21,13 @@ import ( "io" "io/ioutil" "net/http" + "sync" - "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/conversion/queryparams" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/httpstream" "k8s.io/kubernetes/pkg/util/httpstream/spdy" ) @@ -155,90 +156,110 @@ func (e *Streamer) doStream() error { } defer conn.Close() - doneChan := make(chan struct{}, 2) - errorChan := make(chan error) - - cp := func(s string, dst io.Writer, src io.Reader) { - glog.V(4).Infof("Copying %s", s) - defer glog.V(4).Infof("Done copying %s", s) - if _, err := io.Copy(dst, src); err != nil && err != io.EOF { - glog.Errorf("Error copying %s: %v", s, err) - } - if s == api.StreamTypeStdout || s == api.StreamTypeStderr { - doneChan <- struct{}{} - } - } - headers := http.Header{} + + // set up error stream + errorChan := make(chan error) headers.Set(api.StreamType, api.StreamTypeError) errorStream, err := conn.CreateStream(headers) if err != nil { return err } + go func() { message, err := ioutil.ReadAll(errorStream) - if err != nil && err != io.EOF { - errorChan <- fmt.Errorf("Error reading from error stream: %s", err) - return - } - if len(message) > 0 { - errorChan <- fmt.Errorf("Error executing remote command: %s", message) - return + switch { + case err != nil && err != io.EOF: + errorChan <- fmt.Errorf("error reading from error stream: %s", err) + case len(message) > 0: + errorChan <- fmt.Errorf("error executing remote command: %s", message) + default: + errorChan <- nil } + close(errorChan) }() - defer errorStream.Reset() + var wg sync.WaitGroup + var once sync.Once + + // set up stdin stream if e.stdin != nil { headers.Set(api.StreamType, api.StreamTypeStdin) remoteStdin, err := conn.CreateStream(headers) if err != nil { return err } - defer 
remoteStdin.Reset() - // TODO this goroutine will never exit cleanly (the io.Copy never unblocks) - // because stdin is not closed until the process exits. If we try to call - // stdin.Close(), it returns no error but doesn't unblock the copy. It will - // exit when the process exits, instead. - go cp(api.StreamTypeStdin, remoteStdin, e.stdin) + + // copy from client's stdin to container's stdin + go func() { + // if e.stdin is noninteractive, e.g. `echo abc | kubectl exec -i -- cat`, make sure + // we close remoteStdin as soon as the copy from e.stdin to remoteStdin finishes. Otherwise + // the executed command will remain running. + defer once.Do(func() { remoteStdin.Close() }) + + if _, err := io.Copy(remoteStdin, e.stdin); err != nil { + util.HandleError(err) + } + }() + + // read from remoteStdin until the stream is closed. this is essential to + // be able to exit interactive sessions cleanly and not leak goroutines or + // hang the client's terminal. + // + // go-dockerclient's current hijack implementation + // (https://github.com/fsouza/go-dockerclient/blob/89f3d56d93788dfe85f864a44f85d9738fca0670/client.go#L564) + // waits for all three streams (stdin/stdout/stderr) to finish copying + // before returning. When hijack finishes copying stdout/stderr, it calls + // Close() on its side of remoteStdin, which allows this copy to complete. + // When that happens, we must Close() on our side of remoteStdin, to + // allow the copy in hijack to complete, and hijack to return. + go func() { + defer once.Do(func() { remoteStdin.Close() }) + // this "copy" doesn't actually read anything - it's just here to wait for + // the server to close remoteStdin. + if _, err := io.Copy(ioutil.Discard, remoteStdin); err != nil { + util.HandleError(err) + } + }() } - waitCount := 0 - completedStreams := 0 - + // set up stdout stream if e.stdout != nil { - waitCount++ headers.Set(api.StreamType, api.StreamTypeStdout) remoteStdout, err := conn.CreateStream(headers) if err != nil { return err } - defer remoteStdout.Reset() - go cp(api.StreamTypeStdout, e.stdout, remoteStdout) + + wg.Add(1) + go func() { + defer wg.Done() + if _, err := io.Copy(e.stdout, remoteStdout); err != nil { + util.HandleError(err) + } + }() } + // set up stderr stream if e.stderr != nil && !e.tty { - waitCount++ headers.Set(api.StreamType, api.StreamTypeStderr) remoteStderr, err := conn.CreateStream(headers) if err != nil { return err } - defer remoteStderr.Reset() - go cp(api.StreamTypeStderr, e.stderr, remoteStderr) - } -Loop: - for { - select { - case <-doneChan: - completedStreams++ - if completedStreams == waitCount { - break Loop + wg.Add(1) + go func() { + defer wg.Done() + if _, err := io.Copy(e.stderr, remoteStderr); err != nil { + util.HandleError(err) } - case err := <-errorChan: - return err - } + }() } - return nil + // we're waiting for stdout/stderr to finish copying + wg.Wait() + + // waits for errorStream to finish reading with an error or nil + return <-errorChan } diff --git a/pkg/client/unversioned/remotecommand/remotecommand_test.go b/pkg/client/unversioned/remotecommand/remotecommand_test.go index a07a27a29a2..9870ee259bc 100644 --- a/pkg/client/unversioned/remotecommand/remotecommand_test.go +++ b/pkg/client/unversioned/remotecommand/remotecommand_test.go @@ -19,7 +19,7 @@ package remotecommand import ( "bytes" "fmt" - "io/ioutil" + "io" "net/http" "net/http/httptest" "net/url" @@ -32,7 +32,7 @@ import ( "k8s.io/kubernetes/pkg/util/httpstream/spdy" ) -func fakeExecServer(t *testing.T, i int, stdinData, 
stdoutData, stderrData, errorData string, tty bool) http.HandlerFunc { +func fakeExecServer(t *testing.T, i int, stdinData, stdoutData, stderrData, errorData string, tty bool, messageCount int) http.HandlerFunc { // error + stdin + stdout expectedStreams := 3 if !tty { @@ -70,7 +70,6 @@ func fakeExecServer(t *testing.T, i int, stdinData, stdoutData, stderrData, erro receivedStreams++ case api.StreamTypeStdin: stdinStream = stream - stdinStream.Close() receivedStreams++ case api.StreamTypeStdout: stdoutStream = stream @@ -82,8 +81,6 @@ func fakeExecServer(t *testing.T, i int, stdinData, stdoutData, stderrData, erro t.Errorf("%d: unexpected stream type: %q", i, streamType) } - defer stream.Reset() - if receivedStreams == expectedStreams { break WaitForStreams } @@ -91,37 +88,67 @@ func fakeExecServer(t *testing.T, i int, stdinData, stdoutData, stderrData, erro } if len(errorData) > 0 { - fmt.Fprint(errorStream, errorData) + n, err := fmt.Fprint(errorStream, errorData) + if err != nil { + t.Errorf("%d: error writing to errorStream: %v", i, err) + } + if e, a := len(errorData), n; e != a { + t.Errorf("%d: expected to write %d bytes to errorStream, but only wrote %d", i, e, a) + } errorStream.Close() } if len(stdoutData) > 0 { - fmt.Fprint(stdoutStream, stdoutData) + for j := 0; j < messageCount; j++ { + n, err := fmt.Fprint(stdoutStream, stdoutData) + if err != nil { + t.Errorf("%d: error writing to stdoutStream: %v", i, err) + } + if e, a := len(stdoutData), n; e != a { + t.Errorf("%d: expected to write %d bytes to stdoutStream, but only wrote %d", i, e, a) + } + } stdoutStream.Close() } if len(stderrData) > 0 { - fmt.Fprint(stderrStream, stderrData) + for j := 0; j < messageCount; j++ { + n, err := fmt.Fprint(stderrStream, stderrData) + if err != nil { + t.Errorf("%d: error writing to stderrStream: %v", i, err) + } + if e, a := len(stderrData), n; e != a { + t.Errorf("%d: expected to write %d bytes to stderrStream, but only wrote %d", i, e, a) + } + } stderrStream.Close() } if len(stdinData) > 0 { - data, err := ioutil.ReadAll(stdinStream) - if err != nil { - t.Errorf("%d: error reading stdin stream: %v", i, err) - } - if e, a := stdinData, string(data); e != a { - t.Errorf("%d: stdin: expected %q, got %q", i, e, a) + data := make([]byte, len(stdinData)) + for j := 0; j < messageCount; j++ { + n, err := io.ReadFull(stdinStream, data) + if err != nil { + t.Errorf("%d: error reading stdin stream: %v", i, err) + } + if e, a := len(stdinData), n; e != a { + t.Errorf("%d: expected to read %d bytes from stdinStream, but only read %d", i, e, a) + } + if e, a := stdinData, string(data); e != a { + t.Errorf("%d: stdin: expected %q, got %q", i, e, a) + } } + stdinStream.Close() } }) } func TestRequestExecuteRemoteCommand(t *testing.T) { testCases := []struct { - Stdin string - Stdout string - Stderr string - Error string - Tty bool + Stdin string + Stdout string + Stderr string + Error string + Tty bool + MessageCount int }{ { Error: "bail", @@ -130,6 +157,15 @@ func TestRequestExecuteRemoteCommand(t *testing.T) { Stdin: "a", Stdout: "b", Stderr: "c", + // TODO bump this to a larger number such as 100 once + // https://github.com/docker/spdystream/issues/55 is fixed and the Godep + // is bumped. Sending multiple messages over stdin/stdout/stderr results + // in more frames being spread across multiple spdystream frame workers. 
+ // This makes it more likely that the spdystream bug will be encountered, + // where streams are closed as soon as a goaway frame is received, and + // any pending frames that haven't been processed yet may not be + // delivered (it's a race). + MessageCount: 1, }, { Stdin: "a", @@ -142,7 +178,7 @@ func TestRequestExecuteRemoteCommand(t *testing.T) { localOut := &bytes.Buffer{} localErr := &bytes.Buffer{} - server := httptest.NewServer(fakeExecServer(t, i, testCase.Stdin, testCase.Stdout, testCase.Stderr, testCase.Error, testCase.Tty)) + server := httptest.NewServer(fakeExecServer(t, i, testCase.Stdin, testCase.Stdout, testCase.Stderr, testCase.Error, testCase.Tty, testCase.MessageCount)) url, _ := url.ParseRequestURI(server.URL) c := client.NewRESTClient(url, "x", nil, -1, -1) @@ -151,8 +187,7 @@ func TestRequestExecuteRemoteCommand(t *testing.T) { conf := &client.Config{ Host: server.URL, } - e := New(req, conf, []string{"ls", "/"}, strings.NewReader(testCase.Stdin), localOut, localErr, testCase.Tty) - //e.upgrader = testCase.Upgrader + e := New(req, conf, []string{"ls", "/"}, strings.NewReader(strings.Repeat(testCase.Stdin, testCase.MessageCount)), localOut, localErr, testCase.Tty) err := e.Execute() hasErr := err != nil @@ -176,13 +211,13 @@ func TestRequestExecuteRemoteCommand(t *testing.T) { } if len(testCase.Stdout) > 0 { - if e, a := testCase.Stdout, localOut; e != a.String() { + if e, a := strings.Repeat(testCase.Stdout, testCase.MessageCount), localOut; e != a.String() { t.Errorf("%d: expected stdout data '%s', got '%s'", i, e, a) } } if testCase.Stderr != "" { - if e, a := testCase.Stderr, localErr; e != a.String() { + if e, a := strings.Repeat(testCase.Stderr, testCase.MessageCount), localErr; e != a.String() { t.Errorf("%d: expected stderr data '%s', got '%s'", i, e, a) } } @@ -219,7 +254,7 @@ func TestRequestAttachRemoteCommand(t *testing.T) { localOut := &bytes.Buffer{} localErr := &bytes.Buffer{} - server := httptest.NewServer(fakeExecServer(t, i, testCase.Stdin, testCase.Stdout, testCase.Stderr, testCase.Error, testCase.Tty)) + server := httptest.NewServer(fakeExecServer(t, i, testCase.Stdin, testCase.Stdout, testCase.Stderr, testCase.Error, testCase.Tty, 1)) url, _ := url.ParseRequestURI(server.URL) c := client.NewRESTClient(url, "x", nil, -1, -1) @@ -229,7 +264,6 @@ func TestRequestAttachRemoteCommand(t *testing.T) { Host: server.URL, } e := NewAttach(req, conf, strings.NewReader(testCase.Stdin), localOut, localErr, testCase.Tty) - //e.upgrader = testCase.Upgrader err := e.Execute() hasErr := err != nil diff --git a/pkg/kubelet/server.go b/pkg/kubelet/server.go index d60376caeb1..41f7dd90ab8 100644 --- a/pkg/kubelet/server.go +++ b/pkg/kubelet/server.go @@ -543,7 +543,6 @@ WaitForStreams: switch streamType { case api.StreamTypeError: errorStream = stream - defer errorStream.Reset() receivedStreams++ case api.StreamTypeStdin: stdinStream = stream @@ -568,11 +567,6 @@ WaitForStreams: } } - if stdinStream != nil { - // close our half of the input stream, since we won't be writing to it - stdinStream.Close() - } - return stdinStream, stdoutStream, stderrStream, errorStream, conn, tty, true } diff --git a/pkg/util/httpstream/spdy/connection.go b/pkg/util/httpstream/spdy/connection.go index 7c2227917d8..6d4855d195f 100644 --- a/pkg/util/httpstream/spdy/connection.go +++ b/pkg/util/httpstream/spdy/connection.go @@ -78,7 +78,7 @@ const createStreamResponseTimeout = 30 * time.Second func (c *connection) Close() error { c.streamLock.Lock() for _, s := range c.streams { - 
s.Reset() + s.Close() } c.streams = make([]httpstream.Stream, 0) c.streamLock.Unlock() diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go index 82c649e7d3b..1bd0b04ea6e 100644 --- a/test/e2e/kubectl.go +++ b/test/e2e/kubectl.go @@ -20,6 +20,7 @@ import ( "encoding/json" "errors" "fmt" + "io" "io/ioutil" "net" "net/http" @@ -158,11 +159,35 @@ var _ = Describe("Kubectl client", func() { It("should support exec", func() { By("executing a command in the container") execOutput := runKubectl("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container") - expectedExecOutput := "running in container" - if execOutput != expectedExecOutput { - Failf("Unexpected kubectl exec output. Wanted '%s', got '%s'", execOutput, expectedExecOutput) + if e, a := "running in container", execOutput; e != a { + Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) + } + + By("executing a command in the container with noninteractive stdin") + execOutput = newKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "cat"). + withStdinData("abcd1234"). + exec() + if e, a := "abcd1234", execOutput; e != a { + Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) + } + + // pretend that we're a user in an interactive shell + r, c, err := newBlockingReader("echo hi\nexit\n") + if err != nil { + Failf("Error creating blocking reader: %v", err) + } + // NOTE this is solely for test cleanup! + defer c.Close() + + By("executing a command in the container with pseudo-interactive stdin") + execOutput = newKubectlCommand("exec", fmt.Sprintf("--namespace=%v", ns), "-i", simplePodName, "bash"). + withStdinReader(r). + exec() + if e, a := "hi", execOutput; e != a { + Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) } }) + It("should support port-forward", func() { By("forwarding the container port to a local port") cmd := kubectlCmd("port-forward", fmt.Sprintf("--namespace=%v", ns), simplePodName, fmt.Sprintf(":%d", simplePodPort)) @@ -791,3 +816,20 @@ func getUDData(jpgExpected string, ns string) func(*client.Client, string) error } } } + +// newBlockingReader returns a reader that allows reading the given string, +// then blocks until Close() is called on the returned closer. +// +// We're explicitly returning the reader and closer separately, because +// the closer needs to be the *os.File we get from os.Pipe(). This is required +// so the exec of kubectl can pass the underlying file descriptor to the exec +// syscall, instead of creating another os.Pipe and blocking on the io.Copy +// between the source (e.g. stdin) and the write half of the pipe. 
+func newBlockingReader(s string) (io.Reader, io.Closer, error) { + r, w, err := os.Pipe() + if err != nil { + return nil, nil, err + } + w.Write([]byte(s)) + return r, w, nil +} diff --git a/test/e2e/util.go b/test/e2e/util.go index c92dfe0190e..02ffdb4c513 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -977,6 +977,11 @@ func (b kubectlBuilder) withStdinData(data string) *kubectlBuilder { return &b } +func (b kubectlBuilder) withStdinReader(reader io.Reader) *kubectlBuilder { + b.cmd.Stdin = reader + return &b +} + func (b kubectlBuilder) exec() string { var stdout, stderr bytes.Buffer cmd := b.cmd From 219a1fae621bdac14619c925917d88b546cbd5bb Mon Sep 17 00:00:00 2001 From: Isaac Hollander McCreery Date: Fri, 4 Sep 2015 08:36:55 -0700 Subject: [PATCH 054/101] Second attempt at GCE tokens behavior to new format --- cluster/gce/configure-vm.sh | 3 ++- pkg/cloudprovider/providers/gce/gce.go | 3 ++- pkg/cloudprovider/providers/gce/token_source.go | 13 ++++++++----- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh index 6e41453649f..c199a6643c5 100644 --- a/cluster/gce/configure-vm.sh +++ b/cluster/gce/configure-vm.sh @@ -540,10 +540,11 @@ grains: - kubernetes-master cloud: gce EOF - if ! [[ -z "${PROJECT_ID:-}" ]] && ! [[ -z "${TOKEN_URL:-}" ]] && ! [[ -z "${NODE_NETWORK:-}" ]] ; then + if ! [[ -z "${PROJECT_ID:-}" ]] && ! [[ -z "${TOKEN_URL:-}" ]] && ! [[ -z "${TOKEN_BODY:-}" ]] && ! [[ -z "${NODE_NETWORK:-}" ]] ; then cat </etc/gce.conf [global] token-url = ${TOKEN_URL} +token-body = ${TOKEN_BODY} project-id = ${PROJECT_ID} network-name = ${NODE_NETWORK} EOF diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index fbe172ab23f..3829956b98e 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -61,6 +61,7 @@ type GCECloud struct { type Config struct { Global struct { TokenURL string `gcfg:"token-url"` + TokenBody string `gcfg:"token-body"` ProjectID string `gcfg:"project-id"` NetworkName string `gcfg:"network-name"` } @@ -159,7 +160,7 @@ func newGCECloud(config io.Reader) (*GCECloud, error) { } } if cfg.Global.TokenURL != "" { - tokenSource = newAltTokenSource(cfg.Global.TokenURL) + tokenSource = newAltTokenSource(cfg.Global.TokenURL, cfg.Global.TokenBody) } } client := oauth2.NewClient(oauth2.NoContext, tokenSource) diff --git a/pkg/cloudprovider/providers/gce/token_source.go b/pkg/cloudprovider/providers/gce/token_source.go index 4bf33246ca0..e5e327d03c8 100644 --- a/pkg/cloudprovider/providers/gce/token_source.go +++ b/pkg/cloudprovider/providers/gce/token_source.go @@ -19,6 +19,7 @@ package gce_cloud import ( "encoding/json" "net/http" + "strings" "time" "k8s.io/kubernetes/pkg/util" @@ -59,6 +60,7 @@ func init() { type altTokenSource struct { oauthClient *http.Client tokenURL string + tokenBody string throttle util.RateLimiter } @@ -73,7 +75,7 @@ func (a *altTokenSource) Token() (*oauth2.Token, error) { } func (a *altTokenSource) token() (*oauth2.Token, error) { - req, err := http.NewRequest("GET", a.tokenURL, nil) + req, err := http.NewRequest("POST", a.tokenURL, strings.NewReader(a.tokenBody)) if err != nil { return nil, err } @@ -86,23 +88,24 @@ func (a *altTokenSource) token() (*oauth2.Token, error) { return nil, err } var tok struct { - AccessToken string `json:"accessToken"` - ExpiryTimeSeconds int64 `json:"expiryTimeSeconds,string"` + AccessToken string `json:"accessToken"` + ExpireTime time.Time `json:"expireTime"` 
} if err := json.NewDecoder(res.Body).Decode(&tok); err != nil { return nil, err } return &oauth2.Token{ AccessToken: tok.AccessToken, - Expiry: time.Unix(tok.ExpiryTimeSeconds, 0), + Expiry: tok.ExpireTime, }, nil } -func newAltTokenSource(tokenURL string) oauth2.TokenSource { +func newAltTokenSource(tokenURL, tokenBody string) oauth2.TokenSource { client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource("")) a := &altTokenSource{ oauthClient: client, tokenURL: tokenURL, + tokenBody: tokenBody, throttle: util.NewTokenBucketRateLimiter(tokenURLQPS, tokenURLBurst), } return oauth2.ReuseTokenSource(nil, a) From ab1f4c5c2c5705a0a3629df230ef447bb8b11da5 Mon Sep 17 00:00:00 2001 From: derekwaynecarr Date: Fri, 4 Sep 2015 11:38:36 -0400 Subject: [PATCH 055/101] Fix typo in api server flag --- cmd/kube-apiserver/app/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index ff478fab969..b1adec83b26 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -162,7 +162,7 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) { "The IP address on which to serve the --insecure-port (set to 0.0.0.0 for all interfaces). "+ "Defaults to localhost.") fs.IPVar(&s.InsecureBindAddress, "address", s.InsecureBindAddress, "DEPRECATED: see --insecure-bind-address instead") - fs.MarkDeprecated("address", "see --insecure-bind-address instread") + fs.MarkDeprecated("address", "see --insecure-bind-address instead") fs.IPVar(&s.BindAddress, "bind-address", s.BindAddress, ""+ "The IP address on which to serve the --read-only-port and --secure-port ports. The "+ "associated interface(s) must be reachable by the rest of the cluster, and by CLI/web "+ From eff8ea24325ebaa947870186c011b87729527880 Mon Sep 17 00:00:00 2001 From: Quinton Hoole Date: Fri, 4 Sep 2015 08:44:34 -0700 Subject: [PATCH 056/101] Banish some more Services e2e tests to flaky in parallel. --- hack/jenkins/e2e.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index c39195f7c85..1830f769254 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -138,6 +138,7 @@ GCE_PARALLEL_FLAKY_TESTS=( "Services.*functioning\sexternal\sload\sbalancer" "Services.*identically\snamed" "Services.*release.*load\sbalancer" + "Services.*endpoint" ) # Tests that should not run on soak cluster. From ada14a524d0640c24037a47f540dfeb7f3fe0b0f Mon Sep 17 00:00:00 2001 From: "Dr. 
Stefan Schimanski" Date: Fri, 4 Sep 2015 17:38:10 +0200 Subject: [PATCH 057/101] Fix compilation of Mesos minion on Mac --- contrib/mesos/pkg/minion/tasks/task.go | 5 +-- contrib/mesos/pkg/minion/tasks/task_linux.go | 28 +++++++++++++++ contrib/mesos/pkg/minion/tasks/task_other.go | 38 ++++++++++++++++++++ 3 files changed, 67 insertions(+), 4 deletions(-) create mode 100644 contrib/mesos/pkg/minion/tasks/task_linux.go create mode 100644 contrib/mesos/pkg/minion/tasks/task_other.go diff --git a/contrib/mesos/pkg/minion/tasks/task.go b/contrib/mesos/pkg/minion/tasks/task.go index 51dfcaf07f0..f6ddb52a9e6 100644 --- a/contrib/mesos/pkg/minion/tasks/task.go +++ b/contrib/mesos/pkg/minion/tasks/task.go @@ -214,10 +214,7 @@ func notStartedTask(t *Task) taskStateFn { if len(t.env) > 0 { cmd.Env = t.env } - cmd.SysProcAttr = &syscall.SysProcAttr{ - Setpgid: true, - Pdeathsig: syscall.SIGKILL, // see cmdProcess.Kill - } + cmd.SysProcAttr = sysProcAttr() // last min check for shouldQuit here select { diff --git a/contrib/mesos/pkg/minion/tasks/task_linux.go b/contrib/mesos/pkg/minion/tasks/task_linux.go new file mode 100644 index 00000000000..a570f85370c --- /dev/null +++ b/contrib/mesos/pkg/minion/tasks/task_linux.go @@ -0,0 +1,28 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tasks + +import ( + "syscall" +) + +func sysProcAttr() *syscall.SysProcAttr { + return &syscall.SysProcAttr{ + Setpgid: true, + Pdeathsig: syscall.SIGKILL, // see cmdProcess.Kill + } +} diff --git a/contrib/mesos/pkg/minion/tasks/task_other.go b/contrib/mesos/pkg/minion/tasks/task_other.go new file mode 100644 index 00000000000..a83c28a8536 --- /dev/null +++ b/contrib/mesos/pkg/minion/tasks/task_other.go @@ -0,0 +1,38 @@ +// +build !linux + +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tasks + +import ( + "syscall" +) + +func sysProcAttr() *syscall.SysProcAttr { + // TODO(jdef) + // Consequence of not having Pdeathdig is that on non-Linux systems, + // if SIGTERM doesn't stop child procs then they may "leak" and be + // reparented 'up the chain' somewhere when the minion process + // terminates. For example, such child procs end up living indefinitely + // as children of the mesos slave process (I think the slave could handle + // this case, but currently doesn't do it very well). Pdeathsig on Linux + // was a fallback/failsafe mechanism implemented to guard against this. 
I + // don't know if OS X has any syscalls that do something similar. + return &syscall.SysProcAttr{ + Setpgid: true, + } +} From de064f4254f23321f388dbd69d501c9e00f36864 Mon Sep 17 00:00:00 2001 From: James DeFelice Date: Sun, 30 Aug 2015 19:47:24 +0000 Subject: [PATCH 058/101] fix kubelet syncLoop busy loop upon close of updates chan --- pkg/kubelet/kubelet.go | 13 ++++++++----- pkg/kubelet/kubelet_test.go | 20 ++++++++++++++++++++ 2 files changed, 28 insertions(+), 5 deletions(-) diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 6eb6a3222c3..630967784c7 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -1896,17 +1896,19 @@ func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) { } housekeepingTimestamp = time.Now() } - kl.syncLoopIteration(updates, handler) + if !kl.syncLoopIteration(updates, handler) { + break + } } } -func (kl *Kubelet) syncLoopIteration(updates <-chan PodUpdate, handler SyncHandler) { +func (kl *Kubelet) syncLoopIteration(updates <-chan PodUpdate, handler SyncHandler) bool { kl.syncLoopMonitor.Store(time.Now()) select { - case u, ok := <-updates: - if !ok { + case u, open := <-updates: + if !open { glog.Errorf("Update channel is closed. Exiting the sync loop.") - return + return false } switch u.Op { case ADD: @@ -1928,6 +1930,7 @@ func (kl *Kubelet) syncLoopIteration(updates <-chan PodUpdate, handler SyncHandl handler.HandlePodSyncs(kl.podManager.GetPods()) } kl.syncLoopMonitor.Store(time.Now()) + return true } func (kl *Kubelet) dispatchWork(pod *api.Pod, syncType SyncPodType, mirrorPod *api.Pod, start time.Time) { diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 816fed19923..b1cd78c7bc0 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -329,6 +329,26 @@ func TestSyncLoopTimeUpdate(t *testing.T) { } } +func TestSyncLoopAbort(t *testing.T) { + testKubelet := newTestKubelet(t) + testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) + kubelet := testKubelet.kubelet + kubelet.lastTimestampRuntimeUp = time.Now() + kubelet.networkConfigured = true + + ch := make(chan PodUpdate) + close(ch) + + // sanity check (also prevent this test from hanging in the next step) + ok := kubelet.syncLoopIteration(ch, kubelet) + if ok { + t.Fatalf("expected syncLoopIteration to return !ok since update chan was closed") + } + + // this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly + kubelet.syncLoop(ch, kubelet) +} + func TestSyncPodsStartPod(t *testing.T) { testKubelet := newTestKubelet(t) testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) From f8d6f13f7c25150cbb76eb7423021060a3f3705a Mon Sep 17 00:00:00 2001 From: "Ruddarraju, Uday Kumar Raju" Date: Thu, 27 Aug 2015 16:18:26 -0700 Subject: [PATCH 059/101] Union of authorizers --- cmd/kube-apiserver/app/server.go | 5 +- pkg/apiserver/authz.go | 58 ++++++++++++++------ pkg/apiserver/authz_test.go | 23 ++++---- pkg/auth/authorizer/union/union.go | 45 +++++++++++++++ pkg/auth/authorizer/union/union_test.go | 73 +++++++++++++++++++++++++ 5 files changed, 176 insertions(+), 28 deletions(-) create mode 100644 pkg/auth/authorizer/union/union.go create mode 100644 pkg/auth/authorizer/union/union_test.go diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index b1adec83b26..158ddde7a12 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -204,7 +204,7 @@ func (s 
*APIServer) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.ServiceAccountKeyFile, "service-account-key-file", s.ServiceAccountKeyFile, "File containing PEM-encoded x509 RSA private or public key, used to verify ServiceAccount tokens. If unspecified, --tls-private-key-file is used.") fs.BoolVar(&s.ServiceAccountLookup, "service-account-lookup", s.ServiceAccountLookup, "If true, validate ServiceAccount tokens exist in etcd as part of authentication.") fs.StringVar(&s.KeystoneURL, "experimental-keystone-url", s.KeystoneURL, "If passed, activates the keystone authentication plugin") - fs.StringVar(&s.AuthorizationMode, "authorization-mode", s.AuthorizationMode, "Selects how to do authorization on the secure port. One of: "+strings.Join(apiserver.AuthorizationModeChoices, ",")) + fs.StringVar(&s.AuthorizationMode, "authorization-mode", s.AuthorizationMode, "Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: "+strings.Join(apiserver.AuthorizationModeChoices, ",")) fs.StringVar(&s.AuthorizationPolicyFile, "authorization-policy-file", s.AuthorizationPolicyFile, "File with authorization policy in csv format, used with --authorization-mode=ABAC, on the secure port.") fs.StringVar(&s.AdmissionControl, "admission-control", s.AdmissionControl, "Ordered list of plug-ins to do admission control of resources into cluster. Comma-delimited list of: "+strings.Join(admission.GetPlugins(), ", ")) fs.StringVar(&s.AdmissionControlConfigFile, "admission-control-config-file", s.AdmissionControlConfigFile, "File with admission control configuration.") @@ -383,7 +383,8 @@ func (s *APIServer) Run(_ []string) error { glog.Fatalf("Invalid Authentication Config: %v", err) } - authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(s.AuthorizationMode, s.AuthorizationPolicyFile) + authorizationModeNames := strings.Split(s.AuthorizationMode, ",") + authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(authorizationModeNames, s.AuthorizationPolicyFile) if err != nil { glog.Fatalf("Invalid Authorization Config: %v", err) } diff --git a/pkg/apiserver/authz.go b/pkg/apiserver/authz.go index 0c0bb77a674..8da50c33313 100644 --- a/pkg/apiserver/authz.go +++ b/pkg/apiserver/authz.go @@ -18,9 +18,10 @@ package apiserver import ( "errors" - + "fmt" "k8s.io/kubernetes/pkg/auth/authorizer" "k8s.io/kubernetes/pkg/auth/authorizer/abac" + "k8s.io/kubernetes/pkg/auth/authorizer/union" ) // Attributes implements authorizer.Attributes interface. @@ -63,21 +64,46 @@ const ( // Keep this list in sync with constant list above. var AuthorizationModeChoices = []string{ModeAlwaysAllow, ModeAlwaysDeny, ModeABAC} -// NewAuthorizerFromAuthorizationConfig returns the right sort of authorizer.Authorizer -// based on the authorizationMode xor an error. authorizationMode should be one of AuthorizationModeChoices. -func NewAuthorizerFromAuthorizationConfig(authorizationMode string, authorizationPolicyFile string) (authorizer.Authorizer, error) { - if authorizationPolicyFile != "" && authorizationMode != "ABAC" { +// NewAuthorizerFromAuthorizationConfig returns the right sort of union of multiple authorizer.Authorizer objects +// based on the authorizationMode or an error. authorizationMode should be a comma separated values +// of AuthorizationModeChoices. 
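As a rough illustration of the new calling convention (not part of the patch itself; the policy path and the package wiring are assumed, and the error-handling is only a sketch), the comma-separated --authorization-mode flag value flows into this constructor roughly like so:

package main

import (
	"fmt"
	"strings"

	"k8s.io/kubernetes/pkg/apiserver"
)

func main() {
	// The raw --authorization-mode value is split on commas before being
	// handed to the constructor, mirroring the server.go change above.
	modes := strings.Split("AlwaysDeny,ABAC", ",")

	// The policy file path is purely illustrative; per the new checks, ABAC
	// requires one and the other modes reject a policy file without ABAC.
	authz, err := apiserver.NewAuthorizerFromAuthorizationConfig(modes, "/path/to/abac_policy.jsonl")
	if err != nil {
		fmt.Println("invalid authorization config:", err)
		return
	}

	// Each request's attributes are then checked with authz.Authorize(attrs);
	// the resulting union succeeds as soon as any configured authorizer
	// allows the request, and aggregates the errors otherwise.
	_ = authz
}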
+func NewAuthorizerFromAuthorizationConfig(authorizationModes []string, authorizationPolicyFile string) (authorizer.Authorizer, error) { + + if len(authorizationModes) == 0 { + return nil, errors.New("Atleast one authorization mode should be passed") + } + + var authorizers []authorizer.Authorizer + authorizerMap := make(map[string]bool) + + for _, authorizationMode := range authorizationModes { + if authorizerMap[authorizationMode] { + return nil, fmt.Errorf("Authorization mode %s specified more than once", authorizationMode) + } + // Keep cases in sync with constant list above. + switch authorizationMode { + case ModeAlwaysAllow: + authorizers = append(authorizers, NewAlwaysAllowAuthorizer()) + case ModeAlwaysDeny: + authorizers = append(authorizers, NewAlwaysDenyAuthorizer()) + case ModeABAC: + if authorizationPolicyFile == "" { + return nil, errors.New("ABAC's authorization policy file not passed") + } + abacAuthorizer, err := abac.NewFromFile(authorizationPolicyFile) + if err != nil { + return nil, err + } + authorizers = append(authorizers, abacAuthorizer) + default: + return nil, fmt.Errorf("Unknown authorization mode %s specified", authorizationMode) + } + authorizerMap[authorizationMode] = true + } + + if !authorizerMap[ModeABAC] && authorizationPolicyFile != "" { return nil, errors.New("Cannot specify --authorization-policy-file without mode ABAC") } - // Keep cases in sync with constant list above. - switch authorizationMode { - case ModeAlwaysAllow: - return NewAlwaysAllowAuthorizer(), nil - case ModeAlwaysDeny: - return NewAlwaysDenyAuthorizer(), nil - case ModeABAC: - return abac.NewFromFile(authorizationPolicyFile) - default: - return nil, errors.New("Unknown authorization mode") - } + + return union.New(authorizers...), nil } diff --git a/pkg/apiserver/authz_test.go b/pkg/apiserver/authz_test.go index 53bcbf1d6c0..72d33802ae1 100644 --- a/pkg/apiserver/authz_test.go +++ b/pkg/apiserver/authz_test.go @@ -42,27 +42,30 @@ func TestNewAlwaysDenyAuthorizer(t *testing.T) { // validates that errors are returned only when proper. 
func TestNewAuthorizerFromAuthorizationConfig(t *testing.T) { // Unknown modes should return errors - if _, err := NewAuthorizerFromAuthorizationConfig("DoesNotExist", ""); err == nil { + if _, err := NewAuthorizerFromAuthorizationConfig([]string{"DoesNotExist"}, ""); err == nil { t.Errorf("NewAuthorizerFromAuthorizationConfig using a fake mode should have returned an error") } // ModeAlwaysAllow and ModeAlwaysDeny should return without authorizationPolicyFile // but error if one is given - for _, config := range []string{ModeAlwaysAllow, ModeAlwaysDeny} { - if _, err := NewAuthorizerFromAuthorizationConfig(config, ""); err != nil { - t.Errorf("NewAuthorizerFromAuthorizationConfig with %s returned an error: %s", err, config) - } - if _, err := NewAuthorizerFromAuthorizationConfig(config, "shoulderror"); err == nil { - t.Errorf("NewAuthorizerFromAuthorizationConfig with %s should have returned an error", config) - } + if _, err := NewAuthorizerFromAuthorizationConfig([]string{ModeAlwaysAllow, ModeAlwaysDeny}, ""); err != nil { + t.Errorf("NewAuthorizerFromAuthorizationConfig returned an error: %s", err) } // ModeABAC requires a policy file - if _, err := NewAuthorizerFromAuthorizationConfig(ModeABAC, ""); err == nil { + if _, err := NewAuthorizerFromAuthorizationConfig([]string{ModeAlwaysAllow, ModeAlwaysDeny, ModeABAC}, ""); err == nil { t.Errorf("NewAuthorizerFromAuthorizationConfig using a fake mode should have returned an error") } // ModeABAC should not error if a valid policy path is provided - if _, err := NewAuthorizerFromAuthorizationConfig(ModeABAC, "../auth/authorizer/abac/example_policy_file.jsonl"); err != nil { + if _, err := NewAuthorizerFromAuthorizationConfig([]string{ModeAlwaysAllow, ModeAlwaysDeny, ModeABAC}, "../auth/authorizer/abac/example_policy_file.jsonl"); err != nil { t.Errorf("NewAuthorizerFromAuthorizationConfig errored while using a valid policy file: %s", err) } + // Authorization Policy file cannot be used without ModeABAC + if _, err := NewAuthorizerFromAuthorizationConfig([]string{ModeAlwaysAllow, ModeAlwaysDeny}, "../auth/authorizer/abac/example_policy_file.jsonl"); err == nil { + t.Errorf("NewAuthorizerFromAuthorizationConfig should have errored when Authorization Policy File is used without ModeABAC") + } + // Atleast one authorizationMode is necessary + if _, err := NewAuthorizerFromAuthorizationConfig([]string{}, "../auth/authorizer/abac/example_policy_file.jsonl"); err == nil { + t.Errorf("NewAuthorizerFromAuthorizationConfig should have errored when no authorization modes are passed") + } } diff --git a/pkg/auth/authorizer/union/union.go b/pkg/auth/authorizer/union/union.go new file mode 100644 index 00000000000..9b5a8c3aeea --- /dev/null +++ b/pkg/auth/authorizer/union/union.go @@ -0,0 +1,45 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package union + +import ( + "k8s.io/kubernetes/pkg/auth/authorizer" + "k8s.io/kubernetes/pkg/util/errors" +) + +// unionAuthzHandler authorizer against a chain of authorizer.Authorizer +type unionAuthzHandler []authorizer.Authorizer + +// New returns an authorizer that authorizes against a chain of authorizer.Authorizer objects +func New(authorizationHandlers ...authorizer.Authorizer) authorizer.Authorizer { + return unionAuthzHandler(authorizationHandlers) +} + +// Authorizes against a chain of authorizer.Authorizer objects and returns nil if successful and returns error if unsuccessful +func (authzHandler unionAuthzHandler) Authorize(a authorizer.Attributes) error { + var errlist []error + for _, currAuthzHandler := range authzHandler { + err := currAuthzHandler.Authorize(a) + if err != nil { + errlist = append(errlist, err) + continue + } + return nil + } + + return errors.NewAggregate(errlist) +} diff --git a/pkg/auth/authorizer/union/union_test.go b/pkg/auth/authorizer/union/union_test.go new file mode 100644 index 00000000000..1a01676af6c --- /dev/null +++ b/pkg/auth/authorizer/union/union_test.go @@ -0,0 +1,73 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package union + +import ( + "errors" + "testing" + + "k8s.io/kubernetes/pkg/auth/authorizer" +) + +type mockAuthzHandler struct { + isAuthorized bool + err error +} + +func (mock *mockAuthzHandler) Authorize(a authorizer.Attributes) error { + if mock.err != nil { + return mock.err + } + if !mock.isAuthorized { + return errors.New("Request unauthorized") + } else { + return nil + } +} + +func TestAuthorizationSecondPasses(t *testing.T) { + handler1 := &mockAuthzHandler{isAuthorized: false} + handler2 := &mockAuthzHandler{isAuthorized: true} + authzHandler := New(handler1, handler2) + + err := authzHandler.Authorize(nil) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestAuthorizationFirstPasses(t *testing.T) { + handler1 := &mockAuthzHandler{isAuthorized: true} + handler2 := &mockAuthzHandler{isAuthorized: false} + authzHandler := New(handler1, handler2) + + err := authzHandler.Authorize(nil) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestAuthorizationNonePasses(t *testing.T) { + handler1 := &mockAuthzHandler{isAuthorized: false} + handler2 := &mockAuthzHandler{isAuthorized: false} + authzHandler := New(handler1, handler2) + + err := authzHandler.Authorize(nil) + if err == nil { + t.Errorf("Expected error: %v", err) + } +} From 4f5fdfb08bdadd23b015070857b8fef8d79240fe Mon Sep 17 00:00:00 2001 From: Quinton Hoole Date: Fri, 4 Sep 2015 11:45:52 -0700 Subject: [PATCH 060/101] Increase service latency 99th percentile limit from 40s to 50s --- test/e2e/service_latency.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/service_latency.go b/test/e2e/service_latency.go index eff9267c64e..bcbb24096e5 100644 --- a/test/e2e/service_latency.go +++ b/test/e2e/service_latency.go @@ -49,7 +49,7 @@ var _ = 
Describe("Service endpoints latency", func() { // get this much lower in the future. See issue // #10436. limitMedian = time.Second * 20 - limitTail = time.Second * 40 + limitTail = time.Second * 50 // Numbers chosen to make the test complete in a short amount // of time. This sample size is not actually large enough to From a66087c83f04ca398cdd2c79587e2090b89a7233 Mon Sep 17 00:00:00 2001 From: Quinton Hoole Date: Fri, 4 Sep 2015 11:52:59 -0700 Subject: [PATCH 061/101] Remove service latency e2e test from the flaky bucket. --- hack/jenkins/e2e.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index 1830f769254..56f8b8e6728 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -133,7 +133,6 @@ GCE_PARALLEL_FLAKY_TESTS=( "Elasticsearch" "PD" "ServiceAccounts" - "Service\sendpoints\slatency" "Services.*change\sthe\stype" "Services.*functioning\sexternal\sload\sbalancer" "Services.*identically\snamed" From c6ff44cf276c5b0068fbbcca834750d2d779ede6 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 3 Sep 2015 17:05:50 -0400 Subject: [PATCH 062/101] Godep: update github.com/spf13/cobra --- Godeps/Godeps.json | 2 +- .../src/github.com/spf13/cobra/README.md | 4 + .../spf13/cobra/bash_completions.go | 21 ++- .../src/github.com/spf13/cobra/cobra.go | 28 ++- .../src/github.com/spf13/cobra/cobra_test.go | 18 ++ .../src/github.com/spf13/cobra/command.go | 127 +++++++++----- .../src/github.com/spf13/cobra/doc_util.go | 36 ++++ .../src/github.com/spf13/cobra/man_docs.go | 164 ++++++++++++++++++ .../src/github.com/spf13/cobra/man_docs.md | 21 +++ .../github.com/spf13/cobra/man_docs_test.go | 71 ++++++++ .../src/github.com/spf13/cobra/md_docs.go | 31 +++- 11 files changed, 465 insertions(+), 58 deletions(-) create mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/doc_util.go create mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/man_docs.go create mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/man_docs.md create mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/man_docs_test.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 3f48d15175e..4bb159e3fd5 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -512,7 +512,7 @@ }, { "ImportPath": "github.com/spf13/cobra", - "Rev": "db0518444643a7b170abb78164bbeaf5a2bb816f" + "Rev": "68f5a81a722d56241bd70faf6860ceb05eb27d64" }, { "ImportPath": "github.com/spf13/pflag", diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/README.md b/Godeps/_workspace/src/github.com/spf13/cobra/README.md index b1fb0889ec1..5b97e021c25 100644 --- a/Godeps/_workspace/src/github.com/spf13/cobra/README.md +++ b/Godeps/_workspace/src/github.com/spf13/cobra/README.md @@ -422,6 +422,10 @@ func main() { Cobra can generate a markdown formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](md_docs.md) +## Generating man pages for your command + +Cobra can generate a man page based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Man Docs](man_docs.md) + ## Generating bash completions for your command Cobra can generate a bash completions file. If you add more information to your command these completions can be amazingly powerful and flexible. 
Read more about [Bash Completions](bash_completions.md) diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/bash_completions.go b/Godeps/_workspace/src/github.com/spf13/cobra/bash_completions.go index 82c4274a8e1..66efd719ab7 100644 --- a/Godeps/_workspace/src/github.com/spf13/cobra/bash_completions.go +++ b/Godeps/_workspace/src/github.com/spf13/cobra/bash_completions.go @@ -19,7 +19,6 @@ const ( func preamble(out *bytes.Buffer) { fmt.Fprintf(out, `#!/bin/bash - __debug() { if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then @@ -27,6 +26,14 @@ __debug() fi } +# Homebrew on Macs have version 1.3 of bash-completion which doesn't include +# _init_completion. This is a very minimal version of that function. +__my_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref cur prev words cword +} + __index_of_word() { local w word=$1 @@ -188,7 +195,11 @@ func postscript(out *bytes.Buffer, name string) { fmt.Fprintf(out, "__start_%s()\n", name) fmt.Fprintf(out, `{ local cur prev words cword - _init_completion -s || return + if declare -F _init_completions >/dev/null 2>&1; then + _init_completion -s || return + else + __my_init_completion || return + fi local c=0 local flags=() @@ -212,7 +223,7 @@ func postscript(out *bytes.Buffer, name string) { func writeCommands(cmd *Command, out *bytes.Buffer) { fmt.Fprintf(out, " commands=()\n") for _, c := range cmd.Commands() { - if len(c.Deprecated) > 0 { + if len(c.Deprecated) > 0 || c == cmd.helpCommand { continue } fmt.Fprintf(out, " commands+=(%q)\n", c.Name()) @@ -292,7 +303,7 @@ func writeRequiredFlag(cmd *Command, out *bytes.Buffer) { fmt.Fprintf(out, " must_have_one_flag=()\n") flags := cmd.NonInheritedFlags() flags.VisitAll(func(flag *pflag.Flag) { - for key, _ := range flag.Annotations { + for key := range flag.Annotations { switch key { case BashCompOneRequiredFlag: format := " must_have_one_flag+=(\"--%s" @@ -321,7 +332,7 @@ func writeRequiredNoun(cmd *Command, out *bytes.Buffer) { func gen(cmd *Command, out *bytes.Buffer) { for _, c := range cmd.Commands() { - if len(c.Deprecated) > 0 { + if len(c.Deprecated) > 0 || c == cmd.helpCommand { continue } gen(c, out) diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/cobra.go b/Godeps/_workspace/src/github.com/spf13/cobra/cobra.go index 78b92b0af3b..1d66a70eb56 100644 --- a/Godeps/_workspace/src/github.com/spf13/cobra/cobra.go +++ b/Godeps/_workspace/src/github.com/spf13/cobra/cobra.go @@ -25,6 +25,13 @@ import ( "text/template" ) +var templateFuncs template.FuncMap = template.FuncMap{ + "trim": strings.TrimSpace, + "rpad": rpad, + "gt": Gt, + "eq": Eq, +} + var initializers []func() // automatic prefix matching can be a dangerous thing to automatically enable in CLI tools. @@ -39,6 +46,20 @@ var MousetrapHelpText string = `This is a command line tool You need to open cmd.exe and run it from there. ` +//AddTemplateFunc adds a template function that's available to Usage and Help +//template generation. +func AddTemplateFunc(name string, tmplFunc interface{}) { + templateFuncs[name] = tmplFunc +} + +//AddTemplateFuncs adds multiple template functions availalble to Usage and +//Help template generation. +func AddTemplateFuncs(tmplFuncs template.FuncMap) { + for k, v := range tmplFuncs { + templateFuncs[k] = v + } +} + //OnInitialize takes a series of func() arguments and appends them to a slice of func(). func OnInitialize(y ...func()) { for _, x := range y { @@ -101,12 +122,7 @@ func rpad(s string, padding int) string { // tmpl executes the given template text on data, writing the result to w. 
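A rough usage sketch for the new template-function hooks (illustrative only, mirroring the TestAddTemplateFunctions case added below; the helper name and template text are assumptions):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Register a helper that usage and help templates can call by name.
	cobra.AddTemplateFunc("programName", func() string { return "app" })

	c := &cobra.Command{Use: "app"}
	c.SetUsageTemplate(`Usage of {{programName}}: {{.Use}}`)

	// Prints "Usage of app: app".
	fmt.Println(c.UsageString())
}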
func tmpl(w io.Writer, text string, data interface{}) error { t := template.New("top") - t.Funcs(template.FuncMap{ - "trim": strings.TrimSpace, - "rpad": rpad, - "gt": Gt, - "eq": Eq, - }) + t.Funcs(templateFuncs) template.Must(t.Parse(text)) return t.Execute(w, data) } diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/cobra_test.go b/Godeps/_workspace/src/github.com/spf13/cobra/cobra_test.go index 4fc3b88b845..3aed7dd6466 100644 --- a/Godeps/_workspace/src/github.com/spf13/cobra/cobra_test.go +++ b/Godeps/_workspace/src/github.com/spf13/cobra/cobra_test.go @@ -8,6 +8,7 @@ import ( "runtime" "strings" "testing" + "text/template" "github.com/spf13/pflag" ) @@ -971,3 +972,20 @@ func TestFlagOnPflagCommandLine(t *testing.T) { checkResultContains(t, r, flagName) } + +func TestAddTemplateFunctions(t *testing.T) { + AddTemplateFunc("t", func() bool { return true }) + AddTemplateFuncs(template.FuncMap{ + "f": func() bool { return false }, + "h": func() string { return "Hello," }, + "w": func() string { return "world." }}) + + const usage = "Hello, world." + + c := &Command{} + c.SetUsageTemplate(`{{if t}}{{h}}{{end}}{{if f}}{{h}}{{end}} {{w}}`) + + if us := c.UsageString(); us != usage { + t.Errorf("c.UsageString() != \"%s\", is \"%s\"", usage, us) + } +} diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/command.go b/Godeps/_workspace/src/github.com/spf13/cobra/command.go index cbbc3264f28..bf642b536ac 100644 --- a/Godeps/_workspace/src/github.com/spf13/cobra/command.go +++ b/Godeps/_workspace/src/github.com/spf13/cobra/command.go @@ -66,14 +66,24 @@ type Command struct { // All functions get the same args, the arguments after the command name // PersistentPreRun: children of this command will inherit and execute PersistentPreRun func(cmd *Command, args []string) + // PersistentPreRunE: PersistentPreRun but returns an error + PersistentPreRunE func(cmd *Command, args []string) error // PreRun: children of this command will not inherit. PreRun func(cmd *Command, args []string) + // PreRunE: PreRun but returns an error + PreRunE func(cmd *Command, args []string) error // Run: Typically the actual work function. Most commands will only implement this Run func(cmd *Command, args []string) + // RunE: Run but returns an error + RunE func(cmd *Command, args []string) error // PostRun: run after the Run command. PostRun func(cmd *Command, args []string) + // PostRunE: PostRun but returns an error + PostRunE func(cmd *Command, args []string) error // PersistentPostRun: children of this command will inherit and execute after PostRun PersistentPostRun func(cmd *Command, args []string) + // PersistentPostRunE: PersistentPostRun but returns an error + PersistentPostRunE func(cmd *Command, args []string) error // Commands is the list of commands supported by this program. 
commands []*Command // Parent Command for this command @@ -92,7 +102,6 @@ type Command struct { helpTemplate string // Can be defined by Application helpFunc func(*Command, []string) // Help can be defined by application helpCommand *Command // The help command - helpFlagVal bool // The global normalization function that we can use on every pFlag set and children commands globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName } @@ -179,32 +188,21 @@ func (c *Command) UsageFunc() (f func(*Command) error) { } } } + +// HelpFunc returns either the function set by SetHelpFunc for this command +// or a parent, or it returns a function which calls c.Help() func (c *Command) HelpFunc() func(*Command, []string) { - if c.helpFunc != nil { - return c.helpFunc + cmd := c + for cmd != nil { + if cmd.helpFunc != nil { + return cmd.helpFunc + } + cmd = cmd.parent } - - if c.HasParent() { - return c.parent.HelpFunc() - } else { - return func(c *Command, args []string) { - if len(args) == 0 { - // Help called without any topic, calling on root - c.Root().Help() - return - } - - cmd, _, e := c.Root().Find(args) - if cmd == nil || e != nil { - c.Printf("Unknown help topic %#q.", args) - - c.Root().Usage() - } else { - err := cmd.Help() - if err != nil { - c.Println(err) - } - } + return func(*Command, []string) { + err := c.Help() + if err != nil { + c.Println(err) } } } @@ -270,7 +268,7 @@ Global Flags: {{.InheritedFlags.FlagUsages}}{{end}}{{if .HasHelpSubCommands}} Additional help topics: {{range .Commands}}{{if .IsHelpCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}}{{end}}{{end}}{{ if .HasSubCommands }} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasSubCommands }} Use "{{.CommandPath}} [command] --help" for more information about a command. {{end}}` @@ -450,13 +448,24 @@ func (c *Command) execute(a []string) (err error) { c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) } + // initialize help flag as the last point possible to allow for user + // overriding + c.initHelpFlag() + err = c.ParseFlags(a) if err != nil { return err } // If help is called, regardless of other flags, return we want help - // Also say we need help if c.Run is nil. - if c.helpFlagVal || !c.Runnable() { + // Also say we need help if the command isn't runnable. + helpVal, err := c.Flags().GetBool("help") + if err != nil { + // should be impossible to get here as we always declare a help + // flag in initHelpFlag() + c.Println("\"help\" flag declared as non-bool. 
Please correct your code") + return err + } + if helpVal || !c.Runnable() { return flag.ErrHelp } @@ -464,22 +473,45 @@ func (c *Command) execute(a []string) (err error) { argWoFlags := c.Flags().Args() for p := c; p != nil; p = p.Parent() { - if p.PersistentPreRun != nil { + if p.PersistentPreRunE != nil { + if err := p.PersistentPostRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPreRun != nil { p.PersistentPreRun(c, argWoFlags) break } } - if c.PreRun != nil { + if c.PreRunE != nil { + if err := c.PreRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PreRun != nil { c.PreRun(c, argWoFlags) } - c.Run(c, argWoFlags) - - if c.PostRun != nil { + if c.RunE != nil { + if err := c.RunE(c, argWoFlags); err != nil { + return err + } + } else { + c.Run(c, argWoFlags) + } + if c.PostRunE != nil { + if err := c.PostRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PostRun != nil { c.PostRun(c, argWoFlags) } for p := c; p != nil; p = p.Parent() { - if p.PersistentPostRun != nil { + if p.PersistentPostRunE != nil { + if err := p.PersistentPostRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPostRun != nil { p.PersistentPostRun(c, argWoFlags) break } @@ -526,7 +558,7 @@ func (c *Command) Execute() (err error) { // initialize help as the last point possible to allow for user // overriding - c.initHelp() + c.initHelpCmd() var args []string @@ -550,7 +582,7 @@ func (c *Command) Execute() (err error) { err = cmd.execute(flags) if err != nil { if err == flag.ErrHelp { - cmd.Help() + cmd.HelpFunc()(cmd, args) return nil } c.Println(cmd.UsageString()) @@ -560,7 +592,13 @@ func (c *Command) Execute() (err error) { return } -func (c *Command) initHelp() { +func (c *Command) initHelpFlag() { + if c.Flags().Lookup("help") == nil { + c.Flags().BoolP("help", "h", false, "help for "+c.Name()) + } +} + +func (c *Command) initHelpCmd() { if c.helpCommand == nil { if !c.HasSubCommands() { return @@ -571,9 +609,19 @@ func (c *Command) initHelp() { Short: "Help about any command", Long: `Help provides help for any command in the application. Simply type ` + c.Name() + ` help [path to command] for full details.`, - Run: c.HelpFunc(), PersistentPreRun: func(cmd *Command, args []string) {}, PersistentPostRun: func(cmd *Command, args []string) {}, + + Run: func(c *Command, args []string) { + cmd, _, e := c.Root().Find(args) + if cmd == nil || e != nil { + c.Printf("Unknown help topic %#q.", args) + c.Root().Usage() + } else { + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + } + }, } } c.AddCommand(c.helpCommand) @@ -794,7 +842,7 @@ func (c *Command) HasExample() bool { // Determine if the command is itself runnable func (c *Command) Runnable() bool { - return c.Run != nil + return c.Run != nil || c.RunE != nil } // Determine if the command has children commands @@ -859,7 +907,6 @@ func (c *Command) Flags() *flag.FlagSet { c.flagErrorBuf = new(bytes.Buffer) } c.flags.SetOutput(c.flagErrorBuf) - c.PersistentFlags().BoolVarP(&c.helpFlagVal, "help", "h", false, "help for "+c.Name()) } return c.flags } diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/doc_util.go b/Godeps/_workspace/src/github.com/spf13/cobra/doc_util.go new file mode 100644 index 00000000000..9c20bca84a6 --- /dev/null +++ b/Godeps/_workspace/src/github.com/spf13/cobra/doc_util.go @@ -0,0 +1,36 @@ +// Copyright 2015 Red Hat Inc. All rights reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cobra
+
+import ()
+
+// Test to see if we have a reason to print See Also information in docs
+// Basically this is a test for a parent command or a subcommand which is
+// both not deprecated and not the autogenerated help command.
+func (cmd *Command) hasSeeAlso() bool {
+	if cmd.HasParent() {
+		return true
+	}
+	children := cmd.Commands()
+	if len(children) == 0 {
+		return false
+	}
+	for _, c := range children {
+		if len(c.Deprecated) != 0 || c == cmd.helpCommand {
+			continue
+		}
+		return true
+	}
+	return false
+}
diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/man_docs.go b/Godeps/_workspace/src/github.com/spf13/cobra/man_docs.go
new file mode 100644
index 00000000000..f260990e938
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/spf13/cobra/man_docs.go
@@ -0,0 +1,164 @@
+// Copyright 2015 Red Hat Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package cobra + +import ( + "bytes" + "fmt" + "os" + "sort" + "strings" + "time" + + mangen "github.com/cpuguy83/go-md2man/md2man" + "github.com/spf13/pflag" +) + +func GenManTree(cmd *Command, projectName, dir string) { + cmd.GenManTree(projectName, dir) +} + +func (cmd *Command) GenManTree(projectName, dir string) { + for _, c := range cmd.Commands() { + if len(c.Deprecated) != 0 || c == cmd.helpCommand { + continue + } + GenManTree(c, projectName, dir) + } + out := new(bytes.Buffer) + + cmd.GenMan(projectName, out) + + filename := cmd.CommandPath() + filename = dir + strings.Replace(filename, " ", "-", -1) + ".1" + outFile, err := os.Create(filename) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + defer outFile.Close() + _, err = outFile.Write(out.Bytes()) + if err != nil { + fmt.Println(err) + os.Exit(1) + } +} + +func GenMan(cmd *Command, projectName string, out *bytes.Buffer) { + cmd.GenMan(projectName, out) +} + +func (cmd *Command) GenMan(projectName string, out *bytes.Buffer) { + + buf := genMarkdown(cmd, projectName) + final := mangen.Render(buf) + out.Write(final) +} + +func manPreamble(out *bytes.Buffer, projectName, name, short, long string) { + fmt.Fprintf(out, `%% %s(1) +# NAME +`, projectName) + fmt.Fprintf(out, "%s \\- %s\n\n", name, short) + fmt.Fprintf(out, "# SYNOPSIS\n") + fmt.Fprintf(out, "**%s** [OPTIONS]\n\n", name) + fmt.Fprintf(out, "# DESCRIPTION\n") + fmt.Fprintf(out, "%s\n\n", long) +} + +func manPrintFlags(out *bytes.Buffer, flags *pflag.FlagSet) { + flags.VisitAll(func(flag *pflag.Flag) { + if len(flag.Deprecated) > 0 { + return + } + format := "" + if len(flag.Shorthand) > 0 { + format = "**-%s**, **--%s**" + } else { + format = "%s**--%s**" + } + if len(flag.NoOptDefVal) > 0 { + format = format + "[" + } + if flag.Value.Type() == "string" { + // put quotes on the value + format = format + "=%q" + } else { + format = format + "=%s" + } + if len(flag.NoOptDefVal) > 0 { + format = format + "]" + } + format = format + "\n\t%s\n\n" + fmt.Fprintf(out, format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage) + }) +} + +func manPrintOptions(out *bytes.Buffer, command *Command) { + flags := command.NonInheritedFlags() + if flags.HasFlags() { + fmt.Fprintf(out, "# OPTIONS\n") + manPrintFlags(out, flags) + fmt.Fprintf(out, "\n") + } + flags = command.InheritedFlags() + if flags.HasFlags() { + fmt.Fprintf(out, "# OPTIONS INHERITED FROM PARENT COMMANDS\n") + manPrintFlags(out, flags) + fmt.Fprintf(out, "\n") + } +} + +func genMarkdown(cmd *Command, projectName string) []byte { + // something like `rootcmd subcmd1 subcmd2` + commandName := cmd.CommandPath() + // something like `rootcmd-subcmd1-subcmd2` + dashCommandName := strings.Replace(commandName, " ", "-", -1) + + buf := new(bytes.Buffer) + + short := cmd.Short + long := cmd.Long + if len(long) == 0 { + long = short + } + + manPreamble(buf, projectName, commandName, short, long) + manPrintOptions(buf, cmd) + + if len(cmd.Example) > 0 { + fmt.Fprintf(buf, "# EXAMPLE\n") + fmt.Fprintf(buf, "```\n%s\n```\n", cmd.Example) + } + + if cmd.hasSeeAlso() { + fmt.Fprintf(buf, "# SEE ALSO\n") + if cmd.HasParent() { + fmt.Fprintf(buf, "**%s(1)**, ", cmd.Parent().CommandPath()) + } + + children := cmd.Commands() + sort.Sort(byName(children)) + for _, c := range children { + if len(c.Deprecated) != 0 || c == cmd.helpCommand { + continue + } + fmt.Fprintf(buf, "**%s-%s(1)**, ", dashCommandName, c.Name()) + } + fmt.Fprintf(buf, "\n") + } + + fmt.Fprintf(buf, "# HISTORY\n%s Auto generated by spf13/cobra\n", 
time.Now().UTC()) + return buf.Bytes() +} diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/man_docs.md b/Godeps/_workspace/src/github.com/spf13/cobra/man_docs.md new file mode 100644 index 00000000000..3516d37cef4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/spf13/cobra/man_docs.md @@ -0,0 +1,21 @@ +# Generating Man Pages For Your Own cobra.Command + +Generating bash completions from a cobra command is incredibly easy. An example is as follows: + +```go +package main + +import ( + "github.com/spf13/cobra" +) + +func main() { + cmd := &cobra.Command{ + Use: "test", + Short: "my test program", + } + cmd.GenManTree("/tmp") +} +``` + +That will get you a man page `/tmp/test.1` diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/man_docs_test.go b/Godeps/_workspace/src/github.com/spf13/cobra/man_docs_test.go new file mode 100644 index 00000000000..6e31ab3b33b --- /dev/null +++ b/Godeps/_workspace/src/github.com/spf13/cobra/man_docs_test.go @@ -0,0 +1,71 @@ +package cobra + +import ( + "bytes" + "fmt" + "os" + "strings" + "testing" +) + +var _ = fmt.Println +var _ = os.Stderr + +func translate(in string) string { + return strings.Replace(in, "-", "\\-", -1) +} + +func TestGenManDoc(t *testing.T) { + c := initializeWithRootCmd() + // Need two commands to run the command alphabetical sort + cmdEcho.AddCommand(cmdTimes, cmdEchoSub, cmdDeprecated) + c.AddCommand(cmdPrint, cmdEcho) + cmdRootWithRun.PersistentFlags().StringVarP(&flags2a, "rootflag", "r", "two", strtwoParentHelp) + + out := new(bytes.Buffer) + + // We generate on a subcommand so we have both subcommands and parents + cmdEcho.GenMan("PROJECT", out) + found := out.String() + + // Our description + expected := translate(cmdEcho.Name()) + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } + + // Better have our example + expected = translate(cmdEcho.Name()) + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } + + // A local flag + expected = "boolone" + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } + + // persistent flag on parent + expected = "rootflag" + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } + + // We better output info about our parent + expected = translate(cmdRootWithRun.Name()) + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } + + // And about subcommands + expected = translate(cmdEchoSub.Name()) + if !strings.Contains(found, expected) { + t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) + } + + unexpected := translate(cmdDeprecated.Name()) + if strings.Contains(found, unexpected) { + t.Errorf("Unexpected response.\nFound: %v\nBut should not have!!\n", unexpected) + } +} diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/md_docs.go b/Godeps/_workspace/src/github.com/spf13/cobra/md_docs.go index 6092c85af68..dde5b114037 100644 --- a/Godeps/_workspace/src/github.com/spf13/cobra/md_docs.go +++ b/Godeps/_workspace/src/github.com/spf13/cobra/md_docs.go @@ -47,10 +47,18 @@ func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } func 
GenMarkdown(cmd *Command, out *bytes.Buffer) { - GenMarkdownCustom(cmd, out, func(s string) string { return s }) + cmd.GenMarkdown(out) +} + +func (cmd *Command) GenMarkdown(out *bytes.Buffer) { + cmd.GenMarkdownCustom(out, func(s string) string { return s }) } func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) { + cmd.GenMarkdownCustom(out, linkHandler) +} + +func (cmd *Command) GenMarkdownCustom(out *bytes.Buffer, linkHandler func(string) string) { name := cmd.CommandPath() short := cmd.Short @@ -75,7 +83,7 @@ func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) printOptions(out, cmd, name) - if len(cmd.Commands()) > 0 || cmd.HasParent() { + if cmd.hasSeeAlso() { fmt.Fprintf(out, "### SEE ALSO\n") if cmd.HasParent() { parent := cmd.Parent() @@ -89,7 +97,7 @@ func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) sort.Sort(byName(children)) for _, child := range children { - if len(child.Deprecated) > 0 { + if len(child.Deprecated) > 0 || child == cmd.helpCommand { continue } cname := name + " " + child.Name() @@ -104,18 +112,29 @@ func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) } func GenMarkdownTree(cmd *Command, dir string) { + cmd.GenMarkdownTree(dir) +} + +func (cmd *Command) GenMarkdownTree(dir string) { identity := func(s string) string { return s } emptyStr := func(s string) string { return "" } - GenMarkdownTreeCustom(cmd, dir, emptyStr, identity) + cmd.GenMarkdownTreeCustom(dir, emptyStr, identity) } func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string) string) { + cmd.GenMarkdownTreeCustom(dir, filePrepender, linkHandler) +} + +func (cmd *Command) GenMarkdownTreeCustom(dir string, filePrepender func(string) string, linkHandler func(string) string) { for _, c := range cmd.Commands() { - GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler) + if len(c.Deprecated) != 0 || c == cmd.helpCommand { + continue + } + c.GenMarkdownTreeCustom(dir, filePrepender, linkHandler) } out := new(bytes.Buffer) - GenMarkdownCustom(cmd, out, linkHandler) + cmd.GenMarkdownCustom(out, linkHandler) filename := cmd.CommandPath() filename = dir + strings.Replace(filename, " ", "_", -1) + ".md" From bf7646bd9eeda1f841b17b1f128d0c8eab719a98 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 3 Sep 2015 17:07:24 -0400 Subject: [PATCH 063/101] Update docs based on new spf13/cobra --- contrib/completions/bash/kubectl | 77 ++++--------------- docs/man/man1/kubectl-annotate.1 | 4 - docs/man/man1/kubectl-api-versions.1 | 6 -- docs/man/man1/kubectl-attach.1 | 4 - docs/man/man1/kubectl-cluster-info.1 | 6 -- docs/man/man1/kubectl-config-set-cluster.1 | 4 - docs/man/man1/kubectl-config-set-context.1 | 4 - .../man/man1/kubectl-config-set-credentials.1 | 4 - docs/man/man1/kubectl-config-set.1 | 6 -- docs/man/man1/kubectl-config-unset.1 | 6 -- docs/man/man1/kubectl-config-use-context.1 | 6 -- docs/man/man1/kubectl-config-view.1 | 4 - docs/man/man1/kubectl-config.1 | 4 - docs/man/man1/kubectl-create.1 | 4 - docs/man/man1/kubectl-delete.1 | 4 - docs/man/man1/kubectl-describe.1 | 4 - docs/man/man1/kubectl-exec.1 | 4 - docs/man/man1/kubectl-expose.1 | 4 - docs/man/man1/kubectl-get.1 | 4 - docs/man/man1/kubectl-label.1 | 4 - docs/man/man1/kubectl-logs.1 | 4 - docs/man/man1/kubectl-namespace.1 | 6 -- docs/man/man1/kubectl-patch.1 | 4 - docs/man/man1/kubectl-port-forward.1 | 4 - docs/man/man1/kubectl-proxy.1 | 4 - 
docs/man/man1/kubectl-replace.1 | 4 - docs/man/man1/kubectl-rolling-update.1 | 4 - docs/man/man1/kubectl-run.1 | 4 - docs/man/man1/kubectl-scale.1 | 4 - docs/man/man1/kubectl-stop.1 | 4 - docs/man/man1/kubectl-version.1 | 4 - docs/man/man1/kubectl.1 | 4 - docs/user-guide/kubectl/kubectl.md | 3 +- docs/user-guide/kubectl/kubectl_annotate.md | 3 +- .../kubectl/kubectl_api-versions.md | 8 +- docs/user-guide/kubectl/kubectl_attach.md | 3 +- .../kubectl/kubectl_cluster-info.md | 8 +- docs/user-guide/kubectl/kubectl_config.md | 3 +- .../kubectl/kubectl_config_set-cluster.md | 3 +- .../kubectl/kubectl_config_set-context.md | 3 +- .../kubectl/kubectl_config_set-credentials.md | 3 +- docs/user-guide/kubectl/kubectl_config_set.md | 8 +- .../kubectl/kubectl_config_unset.md | 8 +- .../kubectl/kubectl_config_use-context.md | 8 +- .../user-guide/kubectl/kubectl_config_view.md | 3 +- docs/user-guide/kubectl/kubectl_create.md | 3 +- docs/user-guide/kubectl/kubectl_delete.md | 3 +- docs/user-guide/kubectl/kubectl_describe.md | 3 +- docs/user-guide/kubectl/kubectl_exec.md | 3 +- docs/user-guide/kubectl/kubectl_expose.md | 3 +- docs/user-guide/kubectl/kubectl_get.md | 3 +- docs/user-guide/kubectl/kubectl_label.md | 3 +- docs/user-guide/kubectl/kubectl_logs.md | 3 +- docs/user-guide/kubectl/kubectl_namespace.md | 8 +- docs/user-guide/kubectl/kubectl_patch.md | 3 +- .../kubectl/kubectl_port-forward.md | 3 +- docs/user-guide/kubectl/kubectl_proxy.md | 3 +- docs/user-guide/kubectl/kubectl_replace.md | 3 +- .../kubectl/kubectl_rolling-update.md | 3 +- docs/user-guide/kubectl/kubectl_run.md | 3 +- docs/user-guide/kubectl/kubectl_scale.md | 3 +- docs/user-guide/kubectl/kubectl_stop.md | 3 +- docs/user-guide/kubectl/kubectl_version.md | 3 +- 63 files changed, 44 insertions(+), 292 deletions(-) diff --git a/contrib/completions/bash/kubectl b/contrib/completions/bash/kubectl index 84ce513a09e..34bd8d3c7a0 100644 --- a/contrib/completions/bash/kubectl +++ b/contrib/completions/bash/kubectl @@ -1,6 +1,5 @@ #!/bin/bash - __debug() { if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then @@ -8,6 +7,14 @@ __debug() fi } +# Homebrew on Macs have version 1.3 of bash-completion which doesn't include +# _init_completion. This is a very minimal version of that function. 
+__my_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref cur prev words cword +} + __index_of_word() { local w word=$1 @@ -260,8 +267,6 @@ _kubectl_get() two_word_flags+=("-f") flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") - flags+=("--help") - flags+=("-h") flags+=("--label-columns=") two_word_flags+=("-L") flags+=("--no-headers") @@ -317,8 +322,6 @@ _kubectl_describe() two_word_flags+=("-f") flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") - flags+=("--help") - flags+=("-h") flags+=("--selector=") two_word_flags+=("-l") @@ -354,8 +357,6 @@ _kubectl_create() two_word_flags+=("-f") flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") - flags+=("--help") - flags+=("-h") flags+=("--output=") two_word_flags+=("-o") flags+=("--validate") @@ -385,8 +386,6 @@ _kubectl_replace() flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--force") flags+=("--grace-period=") - flags+=("--help") - flags+=("-h") flags+=("--output=") two_word_flags+=("-o") flags+=("--timeout=") @@ -414,8 +413,6 @@ _kubectl_patch() two_word_flags+=("-f") flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") - flags+=("--help") - flags+=("-h") flags+=("--output=") two_word_flags+=("-o") flags+=("--patch=") @@ -446,8 +443,6 @@ _kubectl_delete() flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--grace-period=") - flags+=("--help") - flags+=("-h") flags+=("--ignore-not-found") flags+=("--output=") two_word_flags+=("-o") @@ -487,8 +482,6 @@ _kubectl_namespace() flags_with_completion=() flags_completion=() - flags+=("--help") - flags+=("-h") must_have_one_flag=() must_have_one_noun=() @@ -508,8 +501,6 @@ _kubectl_logs() two_word_flags+=("-c") flags+=("--follow") flags+=("-f") - flags+=("--help") - flags+=("-h") flags+=("--interactive") flags+=("--previous") flags+=("-p") @@ -536,8 +527,6 @@ _kubectl_rolling-update() two_word_flags+=("-f") flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") - flags+=("--help") - flags+=("-h") flags+=("--image=") flags+=("--no-headers") flags+=("--output=") @@ -578,8 +567,6 @@ _kubectl_scale() two_word_flags+=("-f") flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") - flags+=("--help") - flags+=("-h") flags+=("--output=") two_word_flags+=("-o") flags+=("--replicas=") @@ -603,8 +590,6 @@ _kubectl_attach() flags+=("--container=") two_word_flags+=("-c") - flags+=("--help") - flags+=("-h") flags+=("--stdin") flags+=("-i") flags+=("--tty") @@ -626,8 +611,6 @@ _kubectl_exec() flags+=("--container=") two_word_flags+=("-c") - flags+=("--help") - flags+=("-h") flags+=("--pod=") two_word_flags+=("-p") flags+=("--stdin") @@ -649,8 +632,6 @@ _kubectl_port-forward() flags_with_completion=() flags_completion=() - flags+=("--help") - flags+=("-h") flags+=("--pod=") two_word_flags+=("-p") @@ -672,8 +653,6 @@ _kubectl_proxy() flags+=("--accept-paths=") flags+=("--api-prefix=") flags+=("--disable-filter") - flags+=("--help") - flags+=("-h") flags+=("--port=") two_word_flags+=("-p") flags+=("--reject-methods=") @@ -703,8 +682,6 @@ _kubectl_run() flags+=("--command") flags+=("--dry-run") flags+=("--generator=") - flags+=("--help") - flags+=("-h") flags+=("--hostport=") flags+=("--image=") 
flags+=("--labels=") @@ -750,8 +727,6 @@ _kubectl_stop() flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--grace-period=") - flags+=("--help") - flags+=("-h") flags+=("--ignore-not-found") flags+=("--output=") two_word_flags+=("-o") @@ -784,8 +759,6 @@ _kubectl_expose() flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") flags+=("--generator=") - flags+=("--help") - flags+=("-h") flags+=("--labels=") two_word_flags+=("-l") flags+=("--name=") @@ -829,8 +802,6 @@ _kubectl_label() two_word_flags+=("-f") flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") - flags+=("--help") - flags+=("-h") flags+=("--no-headers") flags+=("--output=") two_word_flags+=("-o") @@ -884,8 +855,6 @@ _kubectl_annotate() two_word_flags+=("-f") flags_with_completion+=("-f") flags_completion+=("__handle_filename_extension_flag json|stdin|yaml|yml") - flags+=("--help") - flags+=("-h") flags+=("--overwrite") flags+=("--resource-version=") @@ -904,8 +873,6 @@ _kubectl_config_view() flags_completion=() flags+=("--flatten") - flags+=("--help") - flags+=("-h") flags+=("--merge") flags+=("--minify") flags+=("--no-headers") @@ -936,8 +903,6 @@ _kubectl_config_set-cluster() flags+=("--api-version=") flags+=("--certificate-authority=") flags+=("--embed-certs") - flags+=("--help") - flags+=("-h") flags+=("--insecure-skip-tls-verify") flags+=("--server=") @@ -958,8 +923,6 @@ _kubectl_config_set-credentials() flags+=("--client-certificate=") flags+=("--client-key=") flags+=("--embed-certs") - flags+=("--help") - flags+=("-h") flags+=("--password=") flags+=("--token=") flags+=("--username=") @@ -979,8 +942,6 @@ _kubectl_config_set-context() flags_completion=() flags+=("--cluster=") - flags+=("--help") - flags+=("-h") flags+=("--namespace=") flags+=("--user=") @@ -998,8 +959,6 @@ _kubectl_config_set() flags_with_completion=() flags_completion=() - flags+=("--help") - flags+=("-h") must_have_one_flag=() must_have_one_noun=() @@ -1015,8 +974,6 @@ _kubectl_config_unset() flags_with_completion=() flags_completion=() - flags+=("--help") - flags+=("-h") must_have_one_flag=() must_have_one_noun=() @@ -1032,8 +989,6 @@ _kubectl_config_use-context() flags_with_completion=() flags_completion=() - flags+=("--help") - flags+=("-h") must_have_one_flag=() must_have_one_noun=() @@ -1056,8 +1011,6 @@ _kubectl_config() flags_with_completion=() flags_completion=() - flags+=("--help") - flags+=("-h") flags+=("--kubeconfig=") must_have_one_flag=() @@ -1074,8 +1027,6 @@ _kubectl_cluster-info() flags_with_completion=() flags_completion=() - flags+=("--help") - flags+=("-h") must_have_one_flag=() must_have_one_noun=() @@ -1091,8 +1042,6 @@ _kubectl_api-versions() flags_with_completion=() flags_completion=() - flags+=("--help") - flags+=("-h") must_have_one_flag=() must_have_one_noun=() @@ -1110,8 +1059,6 @@ _kubectl_version() flags+=("--client") flags+=("-c") - flags+=("--help") - flags+=("-h") must_have_one_flag=() must_have_one_noun=() @@ -1157,8 +1104,6 @@ _kubectl() flags+=("--client-key=") flags+=("--cluster=") flags+=("--context=") - flags+=("--help") - flags+=("-h") flags+=("--insecure-skip-tls-verify") flags+=("--kubeconfig=") flags+=("--log-backtrace-at=") @@ -1184,7 +1129,11 @@ _kubectl() __start_kubectl() { local cur prev words cword - _init_completion -s || return + if declare -F _init_completions >/dev/null 2>&1; then + _init_completion -s || return + else + 
__my_init_completion || return + fi local c=0 local flags=() diff --git a/docs/man/man1/kubectl-annotate.1 b/docs/man/man1/kubectl-annotate.1 index 6b2666c87bc..15396b5ae77 100644 --- a/docs/man/man1/kubectl-annotate.1 +++ b/docs/man/man1/kubectl-annotate.1 @@ -37,10 +37,6 @@ resourcequotas (quota) or secrets. \fB\-f\fP, \fB\-\-filename\fP=[] Filename, directory, or URL to a file identifying the resource to update the annotation -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for annotate - .PP \fB\-\-overwrite\fP=false If true, allow annotations to be overwritten, otherwise reject annotation updates that overwrite existing annotations. diff --git a/docs/man/man1/kubectl-api-versions.1 b/docs/man/man1/kubectl-api-versions.1 index 7ee68b2c2bd..a0c08afe9d1 100644 --- a/docs/man/man1/kubectl-api-versions.1 +++ b/docs/man/man1/kubectl-api-versions.1 @@ -16,12 +16,6 @@ kubectl api\-versions \- Print available API versions. Print available API versions. -.SH OPTIONS -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for api\-versions - - .SH OPTIONS INHERITED FROM PARENT COMMANDS .PP \fB\-\-alsologtostderr\fP=false diff --git a/docs/man/man1/kubectl-attach.1 b/docs/man/man1/kubectl-attach.1 index 55de8f34243..a5d499d20d2 100644 --- a/docs/man/man1/kubectl-attach.1 +++ b/docs/man/man1/kubectl-attach.1 @@ -21,10 +21,6 @@ Attach to a a process that is already running inside an existing container. \fB\-c\fP, \fB\-\-container\fP="" Container name -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for attach - .PP \fB\-i\fP, \fB\-\-stdin\fP=false Pass stdin to the container diff --git a/docs/man/man1/kubectl-cluster-info.1 b/docs/man/man1/kubectl-cluster-info.1 index 8d2c19a9a5f..5be571dc06f 100644 --- a/docs/man/man1/kubectl-cluster-info.1 +++ b/docs/man/man1/kubectl-cluster-info.1 @@ -16,12 +16,6 @@ kubectl cluster\-info \- Display cluster info Display addresses of the master and services with label kubernetes.io/cluster\-service=true -.SH OPTIONS -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for cluster\-info - - .SH OPTIONS INHERITED FROM PARENT COMMANDS .PP \fB\-\-alsologtostderr\fP=false diff --git a/docs/man/man1/kubectl-config-set-cluster.1 b/docs/man/man1/kubectl-config-set-cluster.1 index f89e66e4971..39dedd8d014 100644 --- a/docs/man/man1/kubectl-config-set-cluster.1 +++ b/docs/man/man1/kubectl-config-set-cluster.1 @@ -30,10 +30,6 @@ Specifying a name that already exists will merge new fields on top of existing v \fB\-\-embed\-certs\fP=false embed\-certs for the cluster entry in kubeconfig -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for set\-cluster - .PP \fB\-\-insecure\-skip\-tls\-verify\fP=false insecure\-skip\-tls\-verify for the cluster entry in kubeconfig diff --git a/docs/man/man1/kubectl-config-set-context.1 b/docs/man/man1/kubectl-config-set-context.1 index 389c0f0e311..9cfc976ba84 100644 --- a/docs/man/man1/kubectl-config-set-context.1 +++ b/docs/man/man1/kubectl-config-set-context.1 @@ -22,10 +22,6 @@ Specifying a name that already exists will merge new fields on top of existing v \fB\-\-cluster\fP="" cluster for the context entry in kubeconfig -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for set\-context - .PP \fB\-\-namespace\fP="" namespace for the context entry in kubeconfig diff --git a/docs/man/man1/kubectl-config-set-credentials.1 b/docs/man/man1/kubectl-config-set-credentials.1 index 9e752ce55cc..749eabc107b 100644 --- a/docs/man/man1/kubectl-config-set-credentials.1 +++ b/docs/man/man1/kubectl-config-set-credentials.1 @@ -45,10 +45,6 @@ Bearer token and basic auth are mutually 
exclusive. \fB\-\-embed\-certs\fP=false embed client cert/key for the user entry in kubeconfig -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for set\-credentials - .PP \fB\-\-password\fP="" password for the user entry in kubeconfig diff --git a/docs/man/man1/kubectl-config-set.1 b/docs/man/man1/kubectl-config-set.1 index ccbd2645f55..18f1c9ebdbb 100644 --- a/docs/man/man1/kubectl-config-set.1 +++ b/docs/man/man1/kubectl-config-set.1 @@ -18,12 +18,6 @@ PROPERTY\_NAME is a dot delimited name where each token represents either a attr PROPERTY\_VALUE is the new value you wish to set. -.SH OPTIONS -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for set - - .SH OPTIONS INHERITED FROM PARENT COMMANDS .PP \fB\-\-alsologtostderr\fP=false diff --git a/docs/man/man1/kubectl-config-unset.1 b/docs/man/man1/kubectl-config-unset.1 index 2e676d52901..99a5a53f282 100644 --- a/docs/man/man1/kubectl-config-unset.1 +++ b/docs/man/man1/kubectl-config-unset.1 @@ -17,12 +17,6 @@ Unsets an individual value in a kubeconfig file PROPERTY\_NAME is a dot delimited name where each token represents either a attribute name or a map key. Map keys may not contain dots. -.SH OPTIONS -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for unset - - .SH OPTIONS INHERITED FROM PARENT COMMANDS .PP \fB\-\-alsologtostderr\fP=false diff --git a/docs/man/man1/kubectl-config-use-context.1 b/docs/man/man1/kubectl-config-use-context.1 index 35d7cc4a10b..e0758fc8944 100644 --- a/docs/man/man1/kubectl-config-use-context.1 +++ b/docs/man/man1/kubectl-config-use-context.1 @@ -16,12 +16,6 @@ kubectl config use\-context \- Sets the current\-context in a kubeconfig file Sets the current\-context in a kubeconfig file -.SH OPTIONS -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for use\-context - - .SH OPTIONS INHERITED FROM PARENT COMMANDS .PP \fB\-\-alsologtostderr\fP=false diff --git a/docs/man/man1/kubectl-config-view.1 b/docs/man/man1/kubectl-config-view.1 index e679fc132c2..25ab0462cac 100644 --- a/docs/man/man1/kubectl-config-view.1 +++ b/docs/man/man1/kubectl-config-view.1 @@ -24,10 +24,6 @@ You can use \-\-output=template \-\-template=TEMPLATE to extract specific values \fB\-\-flatten\fP=false flatten the resulting kubeconfig file into self contained output (useful for creating portable kubeconfig files) -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for view - .PP \fB\-\-merge\fP=true merge together the full hierarchy of kubeconfig files diff --git a/docs/man/man1/kubectl-config.1 b/docs/man/man1/kubectl-config.1 index 88568368156..6568d96bb67 100644 --- a/docs/man/man1/kubectl-config.1 +++ b/docs/man/man1/kubectl-config.1 @@ -23,10 +23,6 @@ The loading order follows these rules: .SH OPTIONS -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for config - .PP \fB\-\-kubeconfig\fP="" use a particular kubeconfig file diff --git a/docs/man/man1/kubectl-create.1 b/docs/man/man1/kubectl-create.1 index 7d991f9088e..d41c1073703 100644 --- a/docs/man/man1/kubectl-create.1 +++ b/docs/man/man1/kubectl-create.1 @@ -24,10 +24,6 @@ JSON and YAML formats are accepted. \fB\-f\fP, \fB\-\-filename\fP=[] Filename, directory, or URL to file to use to create the resource -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for create - .PP \fB\-o\fP, \fB\-\-output\fP="" Output mode. Use "\-o name" for shorter output (resource/name). 
diff --git a/docs/man/man1/kubectl-delete.1 b/docs/man/man1/kubectl-delete.1 index f4d2fbe7568..feefc5ba68a 100644 --- a/docs/man/man1/kubectl-delete.1 +++ b/docs/man/man1/kubectl-delete.1 @@ -44,10 +44,6 @@ will be lost along with the rest of the resource. \fB\-\-grace\-period\fP=\-1 Period of time in seconds given to the resource to terminate gracefully. Ignored if negative. -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for delete - .PP \fB\-\-ignore\-not\-found\fP=false Treat "resource not found" as a successful delete. Defaults to "true" when \-\-all is specified. diff --git a/docs/man/man1/kubectl-describe.1 b/docs/man/man1/kubectl-describe.1 index bc4f4a1bbbf..9633eb1da5f 100644 --- a/docs/man/man1/kubectl-describe.1 +++ b/docs/man/man1/kubectl-describe.1 @@ -38,10 +38,6 @@ namespaces (ns) or secrets. \fB\-f\fP, \fB\-\-filename\fP=[] Filename, directory, or URL to a file containing the resource to describe -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for describe - .PP \fB\-l\fP, \fB\-\-selector\fP="" Selector (label query) to filter on diff --git a/docs/man/man1/kubectl-exec.1 b/docs/man/man1/kubectl-exec.1 index 942f9a21bf4..65c5bf8f9c7 100644 --- a/docs/man/man1/kubectl-exec.1 +++ b/docs/man/man1/kubectl-exec.1 @@ -21,10 +21,6 @@ Execute a command in a container. \fB\-c\fP, \fB\-\-container\fP="" Container name. If omitted, the first container in the pod will be chosen -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for exec - .PP \fB\-p\fP, \fB\-\-pod\fP="" Pod name diff --git a/docs/man/man1/kubectl-expose.1 b/docs/man/man1/kubectl-expose.1 index cb48a3ad21f..d148ffcaf0d 100644 --- a/docs/man/man1/kubectl-expose.1 +++ b/docs/man/man1/kubectl-expose.1 @@ -46,10 +46,6 @@ re\-use the labels from the resource it exposes. \fB\-\-generator\fP="service/v2" The name of the API generator to use. There are 2 generators: 'service/v1' and 'service/v2'. The only difference between them is that service port in v1 is named 'default', while it is left unnamed in v2. Default is 'service/v2'. -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for expose - .PP \fB\-l\fP, \fB\-\-labels\fP="" Labels to apply to the service created by this call. diff --git a/docs/man/man1/kubectl-get.1 b/docs/man/man1/kubectl-get.1 index 85f302ba21b..3c94adeb246 100644 --- a/docs/man/man1/kubectl-get.1 +++ b/docs/man/man1/kubectl-get.1 @@ -35,10 +35,6 @@ of the \-\-template flag, you can filter the attributes of the fetched resource( \fB\-f\fP, \fB\-\-filename\fP=[] Filename, directory, or URL to a file identifying the resource to get from a server. -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for get - .PP \fB\-L\fP, \fB\-\-label\-columns\fP=[] Accepts a comma separated list of labels that are going to be presented as columns. Names are case\-sensitive. You can also use multiple flag statements like \-L label1 \-L label2... diff --git a/docs/man/man1/kubectl-label.1 b/docs/man/man1/kubectl-label.1 index 22eb98fce49..98cf53ae948 100644 --- a/docs/man/man1/kubectl-label.1 +++ b/docs/man/man1/kubectl-label.1 @@ -34,10 +34,6 @@ If \-\-resource\-version is specified, then updates will use this resource versi \fB\-f\fP, \fB\-\-filename\fP=[] Filename, directory, or URL to a file identifying the resource to update the labels -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for label - .PP \fB\-\-no\-headers\fP=false When using the default output, don't print headers. 
diff --git a/docs/man/man1/kubectl-logs.1 b/docs/man/man1/kubectl-logs.1 index 83b2f7b0a26..ca4c7783538 100644 --- a/docs/man/man1/kubectl-logs.1 +++ b/docs/man/man1/kubectl-logs.1 @@ -25,10 +25,6 @@ Print the logs for a container in a pod. If the pod has only one container, the \fB\-f\fP, \fB\-\-follow\fP=false Specify if the logs should be streamed. -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for logs - .PP \fB\-\-interactive\fP=true If true, prompt the user for input when required. Default true. diff --git a/docs/man/man1/kubectl-namespace.1 b/docs/man/man1/kubectl-namespace.1 index c2d0ab18efc..98af4283c49 100644 --- a/docs/man/man1/kubectl-namespace.1 +++ b/docs/man/man1/kubectl-namespace.1 @@ -19,12 +19,6 @@ SUPERSEDED: Set and view the current Kubernetes namespace scope for command lin namespace has been superseded by the context.namespace field of .kubeconfig files. See 'kubectl config set\-context \-\-help' for more details. -.SH OPTIONS -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for namespace - - .SH OPTIONS INHERITED FROM PARENT COMMANDS .PP \fB\-\-alsologtostderr\fP=false diff --git a/docs/man/man1/kubectl-patch.1 b/docs/man/man1/kubectl-patch.1 index ff692ad024d..db3c8a520f7 100644 --- a/docs/man/man1/kubectl-patch.1 +++ b/docs/man/man1/kubectl-patch.1 @@ -28,10 +28,6 @@ Please refer to the models in \fB\-f\fP, \fB\-\-filename\fP=[] Filename, directory, or URL to a file identifying the resource to update -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for patch - .PP \fB\-o\fP, \fB\-\-output\fP="" Output mode. Use "\-o name" for shorter output (resource/name). diff --git a/docs/man/man1/kubectl-port-forward.1 b/docs/man/man1/kubectl-port-forward.1 index 4f158b237c0..883c9c04388 100644 --- a/docs/man/man1/kubectl-port-forward.1 +++ b/docs/man/man1/kubectl-port-forward.1 @@ -17,10 +17,6 @@ Forward one or more local ports to a pod. .SH OPTIONS -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for port\-forward - .PP \fB\-p\fP, \fB\-\-pod\fP="" Pod name diff --git a/docs/man/man1/kubectl-proxy.1 b/docs/man/man1/kubectl-proxy.1 index 8144e15e5df..b2e232da800 100644 --- a/docs/man/man1/kubectl-proxy.1 +++ b/docs/man/man1/kubectl-proxy.1 @@ -54,10 +54,6 @@ The above lets you 'curl localhost:8001/custom/api/v1/pods' \fB\-\-disable\-filter\fP=false If true, disable request filtering in the proxy. This is dangerous, and can leave you vulnerable to XSRF attacks, when used with an accessible port. -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for proxy - .PP \fB\-p\fP, \fB\-\-port\fP=8001 The port on which to run the proxy. Set to 0 to pick a random port. diff --git a/docs/man/man1/kubectl-replace.1 b/docs/man/man1/kubectl-replace.1 index a60ddcd2e1e..b76175b1aa2 100644 --- a/docs/man/man1/kubectl-replace.1 +++ b/docs/man/man1/kubectl-replace.1 @@ -42,10 +42,6 @@ Please refer to the models in \fB\-\-grace\-period\fP=\-1 Only relevant during a force replace. Period of time in seconds given to the old resource to terminate gracefully. Ignored if negative. -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for replace - .PP \fB\-o\fP, \fB\-\-output\fP="" Output mode. Use "\-o name" for shorter output (resource/name). 
diff --git a/docs/man/man1/kubectl-rolling-update.1 b/docs/man/man1/kubectl-rolling-update.1 index ad28bb51d5d..32ae729db93 100644 --- a/docs/man/man1/kubectl-rolling-update.1 +++ b/docs/man/man1/kubectl-rolling-update.1 @@ -34,10 +34,6 @@ existing replication controller and overwrite at least one (common) label in its \fB\-f\fP, \fB\-\-filename\fP=[] Filename or URL to file to use to create the new replication controller. -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for rolling\-update - .PP \fB\-\-image\fP="" Image to use for upgrading the replication controller. Can not be used with \-\-filename/\-f diff --git a/docs/man/man1/kubectl-run.1 b/docs/man/man1/kubectl-run.1 index 88dc6300517..5df0bb6c189 100644 --- a/docs/man/man1/kubectl-run.1 +++ b/docs/man/man1/kubectl-run.1 @@ -34,10 +34,6 @@ Creates a replication controller to manage the created container(s). \fB\-\-generator\fP="" The name of the API generator to use. Default is 'run/v1' if \-\-restart=Always, otherwise the default is 'run\-pod/v1'. -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for run - .PP \fB\-\-hostport\fP=\-1 The host port mapping for the container port. To demonstrate a single\-machine container. diff --git a/docs/man/man1/kubectl-scale.1 b/docs/man/man1/kubectl-scale.1 index c0dea101f9b..800c53c9468 100644 --- a/docs/man/man1/kubectl-scale.1 +++ b/docs/man/man1/kubectl-scale.1 @@ -31,10 +31,6 @@ scale is sent to the server. \fB\-f\fP, \fB\-\-filename\fP=[] Filename, directory, or URL to a file identifying the replication controller to set a new size -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for scale - .PP \fB\-o\fP, \fB\-\-output\fP="" Output mode. Use "\-o name" for shorter output (resource/name). diff --git a/docs/man/man1/kubectl-stop.1 b/docs/man/man1/kubectl-stop.1 index efb409c5ee7..0bd2ea8cb1f 100644 --- a/docs/man/man1/kubectl-stop.1 +++ b/docs/man/man1/kubectl-stop.1 @@ -37,10 +37,6 @@ If the resource is scalable it will be scaled to 0 before deletion. \fB\-\-grace\-period\fP=\-1 Period of time in seconds given to the resource to terminate gracefully. Ignored if negative. -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for stop - .PP \fB\-\-ignore\-not\-found\fP=false Treat "resource not found" as a successful stop. diff --git a/docs/man/man1/kubectl-version.1 b/docs/man/man1/kubectl-version.1 index 557f7687f9f..916c43f3858 100644 --- a/docs/man/man1/kubectl-version.1 +++ b/docs/man/man1/kubectl-version.1 @@ -21,10 +21,6 @@ Print the client and server version information. \fB\-c\fP, \fB\-\-client\fP=false Client version only (no server required). -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for version - .SH OPTIONS INHERITED FROM PARENT COMMANDS .PP diff --git a/docs/man/man1/kubectl.1 b/docs/man/man1/kubectl.1 index 26db3cab4a7..a95623c3aca 100644 --- a/docs/man/man1/kubectl.1 +++ b/docs/man/man1/kubectl.1 @@ -49,10 +49,6 @@ Find more information at \fB\-\-context\fP="" The name of the kubeconfig context to use -.PP -\fB\-h\fP, \fB\-\-help\fP=false - help for kubectl - .PP \fB\-\-insecure\-skip\-tls\-verify\fP=false If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure. diff --git a/docs/user-guide/kubectl/kubectl.md b/docs/user-guide/kubectl/kubectl.md index 7b5e9f04f5a..cf8c2085cfb 100644 --- a/docs/user-guide/kubectl/kubectl.md +++ b/docs/user-guide/kubectl/kubectl.md @@ -56,7 +56,6 @@ kubectl --client-key="": Path to a client key file for TLS. 
--cluster="": The name of the kubeconfig cluster to use --context="": The name of the kubeconfig context to use - -h, --help[=false]: help for kubectl --insecure-skip-tls-verify[=false]: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure. --kubeconfig="": Path to the kubeconfig file to use for CLI requests. --log-backtrace-at=:0: when logging hits line file:N, emit a stack trace @@ -101,7 +100,7 @@ kubectl * [kubectl stop](kubectl_stop.md) - Deprecated: Gracefully shut down a resource by name or filename. * [kubectl version](kubectl_version.md) - Print the client and server version information. -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.169032754 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.476725335 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_annotate.md b/docs/user-guide/kubectl/kubectl_annotate.md index 2b89739012a..b185ff2c5e5 100644 --- a/docs/user-guide/kubectl/kubectl_annotate.md +++ b/docs/user-guide/kubectl/kubectl_annotate.md @@ -83,7 +83,6 @@ $ kubectl annotate pods foo description- ``` --all[=false]: select all resources in the namespace of the specified resource types -f, --filename=[]: Filename, directory, or URL to a file identifying the resource to update the annotation - -h, --help[=false]: help for annotate --overwrite[=false]: If true, allow annotations to be overwritten, otherwise reject annotation updates that overwrite existing annotations. --resource-version="": If non-empty, the annotation update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource. ``` @@ -120,7 +119,7 @@ $ kubectl annotate pods foo description- * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-27 02:40:25.687121316 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.474197531 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_annotate.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_api-versions.md b/docs/user-guide/kubectl/kubectl_api-versions.md index a2f22363446..eaeef7866f5 100644 --- a/docs/user-guide/kubectl/kubectl_api-versions.md +++ b/docs/user-guide/kubectl/kubectl_api-versions.md @@ -44,12 +44,6 @@ Print available API versions. 
kubectl api-versions ``` -### Options - -``` - -h, --help[=false]: help for api-versions -``` - ### Options inherited from parent commands ``` @@ -82,7 +76,7 @@ kubectl api-versions * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.168773226 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.476265479 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_api-versions.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_attach.md b/docs/user-guide/kubectl/kubectl_attach.md index e6d0126dfcf..6155446b7d0 100644 --- a/docs/user-guide/kubectl/kubectl_attach.md +++ b/docs/user-guide/kubectl/kubectl_attach.md @@ -62,7 +62,6 @@ $ kubectl attach 123456-7890 -c ruby-container -i -t ``` -c, --container="": Container name - -h, --help[=false]: help for attach -i, --stdin[=false]: Pass stdin to the container -t, --tty[=false]: Stdin is a TTY ``` @@ -99,7 +98,7 @@ $ kubectl attach 123456-7890 -c ruby-container -i -t * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-02 09:55:50.948089316 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.471309711 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_attach.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_cluster-info.md b/docs/user-guide/kubectl/kubectl_cluster-info.md index 9a4b821eb20..f7728387eab 100644 --- a/docs/user-guide/kubectl/kubectl_cluster-info.md +++ b/docs/user-guide/kubectl/kubectl_cluster-info.md @@ -44,12 +44,6 @@ Display addresses of the master and services with label kubernetes.io/cluster-se kubectl cluster-info ``` -### Options - -``` - -h, --help[=false]: help for cluster-info -``` - ### Options inherited from parent commands ``` @@ -82,7 +76,7 @@ kubectl cluster-info * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.168659453 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.476078738 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_cluster-info.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config.md b/docs/user-guide/kubectl/kubectl_config.md index c44a1656e01..a16a4603b21 100644 --- a/docs/user-guide/kubectl/kubectl_config.md +++ b/docs/user-guide/kubectl/kubectl_config.md @@ -53,7 +53,6 @@ kubectl config SUBCOMMAND ### Options ``` - -h, --help[=false]: help for config --kubeconfig="": use a particular kubeconfig file ``` @@ -95,7 +94,7 @@ kubectl config SUBCOMMAND * [kubectl config use-context](kubectl_config_use-context.md) - Sets the current-context in a kubeconfig file * [kubectl config view](kubectl_config_view.md) - displays Merged kubeconfig settings or a specified kubeconfig file. 
-###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.16853102 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475888484 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_set-cluster.md b/docs/user-guide/kubectl/kubectl_config_set-cluster.md index 03d96efde53..a24a48be32e 100644 --- a/docs/user-guide/kubectl/kubectl_config_set-cluster.md +++ b/docs/user-guide/kubectl/kubectl_config_set-cluster.md @@ -64,7 +64,6 @@ $ kubectl config set-cluster e2e --insecure-skip-tls-verify=true --api-version="": api-version for the cluster entry in kubeconfig --certificate-authority="": path to certificate-authority for the cluster entry in kubeconfig --embed-certs=false: embed-certs for the cluster entry in kubeconfig - -h, --help[=false]: help for set-cluster --insecure-skip-tls-verify=false: insecure-skip-tls-verify for the cluster entry in kubeconfig --server="": server for the cluster entry in kubeconfig ``` @@ -97,7 +96,7 @@ $ kubectl config set-cluster e2e --insecure-skip-tls-verify=true * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.167359915 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.474677631 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_set-cluster.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_set-context.md b/docs/user-guide/kubectl/kubectl_config_set-context.md index fb5f519a2aa..116d2802511 100644 --- a/docs/user-guide/kubectl/kubectl_config_set-context.md +++ b/docs/user-guide/kubectl/kubectl_config_set-context.md @@ -56,7 +56,6 @@ $ kubectl config set-context gce --user=cluster-admin ``` --cluster="": cluster for the context entry in kubeconfig - -h, --help[=false]: help for set-context --namespace="": namespace for the context entry in kubeconfig --user="": user for the context entry in kubeconfig ``` @@ -90,7 +89,7 @@ $ kubectl config set-context gce --user=cluster-admin * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.168034038 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475093212 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_set-context.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_set-credentials.md b/docs/user-guide/kubectl/kubectl_config_set-credentials.md index dc6d3da6bef..dc84b808ce6 100644 --- a/docs/user-guide/kubectl/kubectl_config_set-credentials.md +++ b/docs/user-guide/kubectl/kubectl_config_set-credentials.md @@ -77,7 +77,6 @@ $ kubectl config set-credentials cluster-admin --client-certificate=~/.kube/admi --client-certificate="": path to client-certificate for the user entry in kubeconfig --client-key="": path to client-key for the user entry in kubeconfig --embed-certs=false: embed client cert/key for the user entry in kubeconfig - -h, --help[=false]: help for set-credentials --password="": password for the user entry in kubeconfig --token="": token for the user entry in kubeconfig --username="": username for the user entry in kubeconfig @@ -110,7 +109,7 @@ $ kubectl config set-credentials cluster-admin --client-certificate=~/.kube/admi * [kubectl config](kubectl_config.md) - config modifies kubeconfig 
files -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.167500874 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.474882527 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_set-credentials.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_set.md b/docs/user-guide/kubectl/kubectl_config_set.md index 6c15b3a31f7..ab0229e3a81 100644 --- a/docs/user-guide/kubectl/kubectl_config_set.md +++ b/docs/user-guide/kubectl/kubectl_config_set.md @@ -46,12 +46,6 @@ PROPERTY_VALUE is the new value you wish to set. kubectl config set PROPERTY_NAME PROPERTY_VALUE ``` -### Options - -``` - -h, --help[=false]: help for set -``` - ### Options inherited from parent commands ``` @@ -84,7 +78,7 @@ kubectl config set PROPERTY_NAME PROPERTY_VALUE * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.16816699 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475281504 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_set.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_unset.md b/docs/user-guide/kubectl/kubectl_config_unset.md index a78b673632a..5f86a0f58d9 100644 --- a/docs/user-guide/kubectl/kubectl_config_unset.md +++ b/docs/user-guide/kubectl/kubectl_config_unset.md @@ -45,12 +45,6 @@ PROPERTY_NAME is a dot delimited name where each token represents either a attri kubectl config unset PROPERTY_NAME ``` -### Options - -``` - -h, --help[=false]: help for unset -``` - ### Options inherited from parent commands ``` @@ -83,7 +77,7 @@ kubectl config unset PROPERTY_NAME * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.168279315 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475473658 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_unset.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_use-context.md b/docs/user-guide/kubectl/kubectl_config_use-context.md index 5247161b5f8..6a62618aa4c 100644 --- a/docs/user-guide/kubectl/kubectl_config_use-context.md +++ b/docs/user-guide/kubectl/kubectl_config_use-context.md @@ -44,12 +44,6 @@ Sets the current-context in a kubeconfig file kubectl config use-context CONTEXT_NAME ``` -### Options - -``` - -h, --help[=false]: help for use-context -``` - ### Options inherited from parent commands ``` @@ -82,7 +76,7 @@ kubectl config use-context CONTEXT_NAME * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.168411074 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475674294 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_use-context.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_view.md b/docs/user-guide/kubectl/kubectl_config_view.md index a47132d6985..d92a26f1646 100644 --- a/docs/user-guide/kubectl/kubectl_config_view.md +++ b/docs/user-guide/kubectl/kubectl_config_view.md @@ -60,7 +60,6 @@ $ kubectl config view -o template --template='{{range .users}}{{ if eq .name "e2 ``` --flatten[=false]: flatten the resulting kubeconfig file into self contained output (useful for creating portable 
kubeconfig files) - -h, --help[=false]: help for view --merge=true: merge together the full hierarchy of kubeconfig files --minify[=false]: remove all information not used by current-context from the output --no-headers[=false]: When using the default output, don't print headers. @@ -104,7 +103,7 @@ $ kubectl config view -o template --template='{{range .users}}{{ if eq .name "e2 * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-08-26 09:03:39.977436672 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.474467216 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_view.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_create.md b/docs/user-guide/kubectl/kubectl_create.md index 2ac1b734ea7..f718dc10fee 100644 --- a/docs/user-guide/kubectl/kubectl_create.md +++ b/docs/user-guide/kubectl/kubectl_create.md @@ -60,7 +60,6 @@ $ cat pod.json | kubectl create -f - ``` -f, --filename=[]: Filename, directory, or URL to file to use to create the resource - -h, --help[=false]: help for create -o, --output="": Output mode. Use "-o name" for shorter output (resource/name). --validate[=true]: If true, use a schema to validate the input before sending it ``` @@ -97,7 +96,7 @@ $ cat pod.json | kubectl create -f - * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-27 08:49:26.55743532 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469492371 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_create.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_delete.md b/docs/user-guide/kubectl/kubectl_delete.md index 9a9b596ec26..18fc8061ca9 100644 --- a/docs/user-guide/kubectl/kubectl_delete.md +++ b/docs/user-guide/kubectl/kubectl_delete.md @@ -81,7 +81,6 @@ $ kubectl delete pods --all --cascade[=true]: If true, cascade the deletion of the resources managed by this resource (e.g. Pods created by a ReplicationController). Default true. -f, --filename=[]: Filename, directory, or URL to a file containing the resource to delete. --grace-period=-1: Period of time in seconds given to the resource to terminate gracefully. Ignored if negative. - -h, --help[=false]: help for delete --ignore-not-found[=false]: Treat "resource not found" as a successful delete. Defaults to "true" when --all is specified. -o, --output="": Output mode. Use "-o name" for shorter output (resource/name). -l, --selector="": Selector (label query) to filter on. 
@@ -120,7 +119,7 @@ $ kubectl delete pods --all * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-25 10:17:24.591839542 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.470182255 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_delete.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_describe.md b/docs/user-guide/kubectl/kubectl_describe.md index 015860bc4b0..5fa18144d32 100644 --- a/docs/user-guide/kubectl/kubectl_describe.md +++ b/docs/user-guide/kubectl/kubectl_describe.md @@ -84,7 +84,6 @@ $ kubectl describe pods frontend ``` -f, --filename=[]: Filename, directory, or URL to a file containing the resource to describe - -h, --help[=false]: help for describe -l, --selector="": Selector (label query) to filter on ``` @@ -120,7 +119,7 @@ $ kubectl describe pods frontend * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-21 07:07:55.972896481 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469291072 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_describe.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_exec.md b/docs/user-guide/kubectl/kubectl_exec.md index bec8202b442..a1471e6f347 100644 --- a/docs/user-guide/kubectl/kubectl_exec.md +++ b/docs/user-guide/kubectl/kubectl_exec.md @@ -62,7 +62,6 @@ $ kubectl exec 123456-7890 -c ruby-container -i -t -- bash -il ``` -c, --container="": Container name. If omitted, the first container in the pod will be chosen - -h, --help[=false]: help for exec -p, --pod="": Pod name -i, --stdin[=false]: Pass stdin to the container -t, --tty[=false]: Stdin is a TTY @@ -100,7 +99,7 @@ $ kubectl exec 123456-7890 -c ruby-container -i -t -- bash -il * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-02 09:55:50.948300118 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.471517301 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_exec.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_expose.md b/docs/user-guide/kubectl/kubectl_expose.md index a98ed70224f..2d0531dada0 100644 --- a/docs/user-guide/kubectl/kubectl_expose.md +++ b/docs/user-guide/kubectl/kubectl_expose.md @@ -72,7 +72,6 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream --external-ip="": External IP address to set for the service. The service can be accessed by this IP in addition to its generated service IP. -f, --filename=[]: Filename, directory, or URL to a file identifying the resource to expose a service --generator="service/v2": The name of the API generator to use. There are 2 generators: 'service/v1' and 'service/v2'. The only difference between them is that service port in v1 is named 'default', while it is left unnamed in v2. Default is 'service/v2'. - -h, --help[=false]: help for expose -l, --labels="": Labels to apply to the service created by this call. --name="": The name for the newly created object. --no-headers[=false]: When using the default output, don't print headers. 
@@ -122,7 +121,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 03:58:51.196935872 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.473647619 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_expose.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_get.md b/docs/user-guide/kubectl/kubectl_get.md index 79499a8717c..c9eed590858 100644 --- a/docs/user-guide/kubectl/kubectl_get.md +++ b/docs/user-guide/kubectl/kubectl_get.md @@ -88,7 +88,6 @@ $ kubectl get rc/web service/frontend pods/web-pod-13je7 ``` --all-namespaces[=false]: If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. -f, --filename=[]: Filename, directory, or URL to a file identifying the resource to get from a server. - -h, --help[=false]: help for get -L, --label-columns=[]: Accepts a comma separated list of labels that are going to be presented as columns. Names are case-sensitive. You can also use multiple flag statements like -L label1 -L label2... --no-headers[=false]: When using the default output, don't print headers. -o, --output="": Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. @@ -133,7 +132,7 @@ $ kubectl get rc/web service/frontend pods/web-pod-13je7 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-26 09:03:39.972870101 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469014739 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_get.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_label.md b/docs/user-guide/kubectl/kubectl_label.md index 3e90c683358..d521ae3e3b4 100644 --- a/docs/user-guide/kubectl/kubectl_label.md +++ b/docs/user-guide/kubectl/kubectl_label.md @@ -77,7 +77,6 @@ $ kubectl label pods foo bar- --all[=false]: select all resources in the namespace of the specified resource types --dry-run[=false]: If true, only print the object that would be sent, without sending it. -f, --filename=[]: Filename, directory, or URL to a file identifying the resource to update the labels - -h, --help[=false]: help for label --no-headers[=false]: When using the default output, don't print headers. -o, --output="": Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. --output-version="": Output the formatted object with the given version (default api-version). @@ -121,7 +120,7 @@ $ kubectl label pods foo bar- * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-31 12:51:55.222410248 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-04 23:19:55.649428669 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_label.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_logs.md b/docs/user-guide/kubectl/kubectl_logs.md index 45c36b7ed4b..79ac2d3b0b6 100644 --- a/docs/user-guide/kubectl/kubectl_logs.md +++ b/docs/user-guide/kubectl/kubectl_logs.md @@ -62,7 +62,6 @@ $ kubectl logs -f 123456-7890 ruby-container ``` -c, --container="": Container name -f, --follow[=false]: Specify if the logs should be streamed. 
- -h, --help[=false]: help for logs --interactive[=true]: If true, prompt the user for input when required. Default true. -p, --previous[=false]: If true, print the logs for the previous instance of the container in a pod if it exists. ``` @@ -99,7 +98,7 @@ $ kubectl logs -f 123456-7890 ruby-container * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-02 09:55:50.94749958 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.470591683 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_logs.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_namespace.md b/docs/user-guide/kubectl/kubectl_namespace.md index da011d2bd93..3e686d46f00 100644 --- a/docs/user-guide/kubectl/kubectl_namespace.md +++ b/docs/user-guide/kubectl/kubectl_namespace.md @@ -47,12 +47,6 @@ namespace has been superseded by the context.namespace field of .kubeconfig file kubectl namespace [namespace] ``` -### Options - -``` - -h, --help[=false]: help for namespace -``` - ### Options inherited from parent commands ``` @@ -85,7 +79,7 @@ kubectl namespace [namespace] * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.164903046 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.470380367 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_namespace.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_patch.md b/docs/user-guide/kubectl/kubectl_patch.md index 04b4a2c6ed3..aee0b3a18be 100644 --- a/docs/user-guide/kubectl/kubectl_patch.md +++ b/docs/user-guide/kubectl/kubectl_patch.md @@ -66,7 +66,6 @@ kubectl patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve ``` -f, --filename=[]: Filename, directory, or URL to a file identifying the resource to update - -h, --help[=false]: help for patch -o, --output="": Output mode. Use "-o name" for shorter output (resource/name). -p, --patch="": The patch to be applied to the resource JSON file. 
``` @@ -103,7 +102,7 @@ kubectl patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.164613432 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469927571 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_patch.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_port-forward.md b/docs/user-guide/kubectl/kubectl_port-forward.md index 84a6780bff6..ee8771f2a6a 100644 --- a/docs/user-guide/kubectl/kubectl_port-forward.md +++ b/docs/user-guide/kubectl/kubectl_port-forward.md @@ -64,7 +64,6 @@ $ kubectl port-forward mypod 0:5000 ### Options ``` - -h, --help[=false]: help for port-forward -p, --pod="": Pod name ``` @@ -100,7 +99,7 @@ $ kubectl port-forward mypod 0:5000 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-02 09:55:50.948456523 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.471732563 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_port-forward.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_proxy.md b/docs/user-guide/kubectl/kubectl_proxy.md index 4aa613ced45..131d1c3060a 100644 --- a/docs/user-guide/kubectl/kubectl_proxy.md +++ b/docs/user-guide/kubectl/kubectl_proxy.md @@ -81,7 +81,6 @@ $ kubectl proxy --api-prefix=/k8s-api --accept-paths="^/.*": Regular expression for paths that the proxy should accept. --api-prefix="/api/": Prefix to serve the proxied API under. --disable-filter[=false]: If true, disable request filtering in the proxy. This is dangerous, and can leave you vulnerable to XSRF attacks, when used with an accessible port. - -h, --help[=false]: help for proxy -p, --port=8001: The port on which to run the proxy. Set to 0 to pick a random port. --reject-methods="POST,PUT,PATCH": Regular expression for HTTP methods that the proxy should reject. --reject-paths="^/api/.*/exec,^/api/.*/run": Regular expression for paths that the proxy should reject. @@ -122,7 +121,7 @@ $ kubectl proxy --api-prefix=/k8s-api * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.166284754 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.472010935 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_proxy.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_replace.md b/docs/user-guide/kubectl/kubectl_replace.md index c4e58c165ea..ad65549704d 100644 --- a/docs/user-guide/kubectl/kubectl_replace.md +++ b/docs/user-guide/kubectl/kubectl_replace.md @@ -73,7 +73,6 @@ kubectl replace --force -f ./pod.json -f, --filename=[]: Filename, directory, or URL to file to use to replace the resource. --force[=false]: Delete and re-create the specified resource --grace-period=-1: Only relevant during a force replace. Period of time in seconds given to the old resource to terminate gracefully. Ignored if negative. - -h, --help[=false]: help for replace -o, --output="": Output mode. Use "-o name" for shorter output (resource/name). --timeout=0: Only relevant during a force replace. 
The length of time to wait before giving up on a delete of the old resource, zero means determine a timeout from the size of the object --validate[=true]: If true, use a schema to validate the input before sending it @@ -111,7 +110,7 @@ kubectl replace --force -f ./pod.json * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.164469074 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469727962 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_replace.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_rolling-update.md b/docs/user-guide/kubectl/kubectl_rolling-update.md index 63b142cb8c2..4d8e38bd48d 100644 --- a/docs/user-guide/kubectl/kubectl_rolling-update.md +++ b/docs/user-guide/kubectl/kubectl_rolling-update.md @@ -72,7 +72,6 @@ $ kubectl rolling-update frontend --image=image:v2 --deployment-label-key="deployment": The key to use to differentiate between two different controllers, default 'deployment'. Only relevant when --image is specified, ignored otherwise --dry-run[=false]: If true, print out the changes that would be made, but don't actually make them. -f, --filename=[]: Filename or URL to file to use to create the new replication controller. - -h, --help[=false]: help for rolling-update --image="": Image to use for upgrading the replication controller. Can not be used with --filename/-f --no-headers[=false]: When using the default output, don't print headers. -o, --output="": Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. @@ -119,7 +118,7 @@ $ kubectl rolling-update frontend --image=image:v2 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-26 09:03:39.974410445 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.470878033 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_rolling-update.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_run.md b/docs/user-guide/kubectl/kubectl_run.md index 1e96ae8a74b..ecfdcea2812 100644 --- a/docs/user-guide/kubectl/kubectl_run.md +++ b/docs/user-guide/kubectl/kubectl_run.md @@ -77,7 +77,6 @@ $ kubectl run nginx --image=nginx --command -- ... --command[=false]: If true and extra arguments are present, use them as the 'command' field in the container, rather than the 'args' field which is the default. --dry-run[=false]: If true, only print the object that would be sent, without sending it. --generator="": The name of the API generator to use. Default is 'run/v1' if --restart=Always, otherwise the default is 'run-pod/v1'. - -h, --help[=false]: help for run --hostport=-1: The host port mapping for the container port. To demonstrate a single-machine container. --image="": The image for the container to run. -l, --labels="": Labels to apply to the pod(s). @@ -127,7 +126,7 @@ $ kubectl run nginx --image=nginx --command -- ... 
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-02 09:55:50.948932668 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.472292491 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_run.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_scale.md b/docs/user-guide/kubectl/kubectl_scale.md index fcb061bdd82..068cbed9df0 100644 --- a/docs/user-guide/kubectl/kubectl_scale.md +++ b/docs/user-guide/kubectl/kubectl_scale.md @@ -70,7 +70,6 @@ $ kubectl scale --replicas=5 rc/foo rc/bar ``` --current-replicas=-1: Precondition for current size. Requires that the current size of the replication controller match this value in order to scale. -f, --filename=[]: Filename, directory, or URL to a file identifying the replication controller to set a new size - -h, --help[=false]: help for scale -o, --output="": Output mode. Use "-o name" for shorter output (resource/name). --replicas=-1: The new desired number of replicas. Required. --resource-version="": Precondition for resource version. Requires that the current resource version match this value in order to scale. @@ -109,7 +108,7 @@ $ kubectl scale --replicas=5 rc/foo rc/bar * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.165785015 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.471116954 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_scale.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_stop.md b/docs/user-guide/kubectl/kubectl_stop.md index 90144527f83..33c5fe100c9 100644 --- a/docs/user-guide/kubectl/kubectl_stop.md +++ b/docs/user-guide/kubectl/kubectl_stop.md @@ -72,7 +72,6 @@ $ kubectl stop -f path/to/resources --all[=false]: [-all] to select all the specified resources. -f, --filename=[]: Filename, directory, or URL to file of resource(s) to be stopped. --grace-period=-1: Period of time in seconds given to the resource to terminate gracefully. Ignored if negative. - -h, --help[=false]: help for stop --ignore-not-found[=false]: Treat "resource not found" as a successful stop. -o, --output="": Output mode. Use "-o name" for shorter output (resource/name). -l, --selector="": Selector (label query) to filter on. @@ -111,7 +110,7 @@ $ kubectl stop -f path/to/resources * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.166601667 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.47250815 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_stop.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_version.md b/docs/user-guide/kubectl/kubectl_version.md index 5bc9266e2d2..b5743a53c9a 100644 --- a/docs/user-guide/kubectl/kubectl_version.md +++ b/docs/user-guide/kubectl/kubectl_version.md @@ -48,7 +48,6 @@ kubectl version ``` -c, --client[=false]: Client version only (no server required). 
- -h, --help[=false]: help for version ``` ### Options inherited from parent commands @@ -83,7 +82,7 @@ kubectl version * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-21 17:18:05.1688832 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.476464324 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_version.md?pixel)]() From 9fc79e9d99c0db4c75ba8b5c3b1065b958353999 Mon Sep 17 00:00:00 2001 From: Chao Xu Date: Fri, 4 Sep 2015 00:06:01 -0700 Subject: [PATCH 064/101] refactor testapi and test scripts to prepare for multiple API groups. --- cmd/integration/integration.go | 25 +-- contrib/mesos/pkg/executor/executor_test.go | 16 +- contrib/mesos/pkg/scheduler/plugin_test.go | 14 +- hack/test-go.sh | 21 ++- hack/test-integration.sh | 14 +- pkg/api/conversion_test.go | 6 +- pkg/api/copy_test.go | 2 +- pkg/api/serialization_test.go | 4 +- pkg/api/testapi/testapi.go | 164 ++++++++++++++---- pkg/api/testapi/testapi_test.go | 22 +-- pkg/api/util/group_version.go | 39 +++++ pkg/api/util/group_version_test.go | 63 +++++++ pkg/api/validation/schema_test.go | 6 +- pkg/api/validation/validation_test.go | 10 +- pkg/apiserver/handlers_test.go | 22 +-- .../unversioned/cache/listwatch_test.go | 22 +-- pkg/client/unversioned/client_test.go | 48 ++++- pkg/client/unversioned/daemon_test.go | 24 +-- pkg/client/unversioned/deployment_test.go | 26 +-- pkg/client/unversioned/endpoints_test.go | 10 +- pkg/client/unversioned/events_test.go | 28 +-- pkg/client/unversioned/fake.go | 10 +- pkg/client/unversioned/helper_test.go | 22 +-- .../horizontalpodautoscaler_test.go | 26 +-- pkg/client/unversioned/limit_ranges_test.go | 28 +-- pkg/client/unversioned/namespaces_test.go | 28 +-- pkg/client/unversioned/nodes_test.go | 28 +-- .../unversioned/persistentvolume_test.go | 28 +-- .../unversioned/persistentvolumeclaim_test.go | 28 +-- pkg/client/unversioned/pod_templates_test.go | 24 +-- pkg/client/unversioned/pods_test.go | 32 ++-- .../replication_controllers_test.go | 22 +-- pkg/client/unversioned/request_test.go | 68 ++++---- .../unversioned/resource_quotas_test.go | 28 +-- pkg/client/unversioned/restclient_test.go | 52 +++--- pkg/client/unversioned/services_test.go | 32 ++-- ...horizontalpodautoscaler_controller_test.go | 6 +- .../autoscaler/metrics/metrics_client_test.go | 4 +- pkg/controller/controller_utils_test.go | 8 +- .../endpoint/endpoints_controller_test.go | 50 +++--- .../replication_controller_test.go | 56 +++--- .../serviceaccounts_controller_test.go | 4 +- pkg/kubectl/cmd/annotate_test.go | 8 +- pkg/kubectl/cmd/attach_test.go | 2 +- pkg/kubectl/cmd/cmd_test.go | 8 +- pkg/kubectl/cmd/config/config_test.go | 4 +- pkg/kubectl/cmd/delete_test.go | 4 +- pkg/kubectl/cmd/exec_test.go | 2 +- pkg/kubectl/cmd/get_test.go | 14 +- pkg/kubectl/cmd/label_test.go | 6 +- pkg/kubectl/cmd/portforward_test.go | 4 +- pkg/kubectl/cmd/util/helpers_test.go | 10 +- pkg/kubectl/resource/builder_test.go | 6 +- pkg/kubectl/resource/helper_test.go | 32 ++-- pkg/kubectl/resource_printer_test.go | 12 +- pkg/kubectl/rolling_updater_test.go | 22 +-- pkg/kubelet/config/common_test.go | 8 +- pkg/kubelet/config/file_test.go | 4 +- pkg/kubelet/config/http_test.go | 14 +- pkg/kubelet/container/ref_test.go | 14 +- pkg/kubelet/dockertools/manager_test.go | 6 +- pkg/kubelet/kubelet_test.go | 8 +- pkg/registry/controller/etcd/etcd_test.go | 2 +- pkg/registry/daemon/etcd/etcd_test.go | 2 +- 
pkg/registry/deployment/etcd/etcd_test.go | 2 +- pkg/registry/endpoint/etcd/etcd_test.go | 2 +- pkg/registry/event/etcd/etcd_test.go | 2 +- pkg/registry/event/strategy_test.go | 4 +- .../experimental/controller/etcd/etcd_test.go | 8 +- pkg/registry/generic/etcd/etcd_test.go | 24 +-- .../horizontalpodautoscaler/etcd/etcd_test.go | 2 +- pkg/registry/limitrange/etcd/etcd_test.go | 2 +- pkg/registry/minion/etcd/etcd_test.go | 2 +- pkg/registry/namespace/etcd/etcd_test.go | 6 +- .../persistentvolume/etcd/etcd_test.go | 4 +- .../persistentvolumeclaim/etcd/etcd_test.go | 4 +- pkg/registry/pod/etcd/etcd_test.go | 20 +-- pkg/registry/podtemplate/etcd/etcd_test.go | 2 +- pkg/registry/registrytest/etcd.go | 51 +++++- pkg/registry/resourcequota/etcd/etcd_test.go | 4 +- pkg/registry/secret/etcd/etcd_test.go | 2 +- .../service/allocator/etcd/etcd_test.go | 4 +- pkg/registry/service/etcd/etcd_test.go | 2 +- .../service/ipallocator/etcd/etcd_test.go | 4 +- pkg/registry/serviceaccount/etcd/etcd_test.go | 2 +- .../thirdpartyresource/etcd/etcd_test.go | 2 +- .../thirdpartyresourcedata/etcd/etcd_test.go | 2 +- pkg/runtime/helper_test.go | 2 +- pkg/runtime/unstructured_test.go | 2 +- pkg/storage/cacher_test.go | 40 ++--- pkg/storage/etcd/etcd_helper_test.go | 44 ++--- pkg/watch/json/decoder_test.go | 6 +- pkg/watch/json/encoder_test.go | 6 +- plugin/pkg/scheduler/factory/factory_test.go | 18 +- plugin/pkg/scheduler/scheduler_test.go | 2 +- shippable.yml | 15 +- test/e2e/persistent_volumes.go | 2 +- test/e2e/proxy.go | 2 +- test/integration/auth_test.go | 22 +-- test/integration/client_test.go | 6 +- test/integration/etcd_tools_test.go | 14 +- test/integration/framework/etcd_utils.go | 2 +- test/integration/framework/master_utils.go | 4 +- test/integration/metrics_test.go | 2 +- test/integration/persistent_volumes_test.go | 2 +- test/integration/scheduler_test.go | 2 +- test/integration/secret_test.go | 4 +- test/integration/service_account_test.go | 6 +- test/integration/utils.go | 2 +- 109 files changed, 1010 insertions(+), 714 deletions(-) create mode 100644 pkg/api/util/group_version.go create mode 100644 pkg/api/util/group_version_test.go diff --git a/cmd/integration/integration.go b/cmd/integration/integration.go index 1bf01c11157..1acd1a8cc8d 100644 --- a/cmd/integration/integration.go +++ b/cmd/integration/integration.go @@ -38,6 +38,7 @@ import ( "k8s.io/kubernetes/pkg/api" apierrors "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/latest" + "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/apiserver" client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/client/unversioned/record" @@ -69,8 +70,6 @@ import ( var ( fakeDocker1, fakeDocker2 dockertools.FakeDockerClient - // API version that should be used by the client to talk to the server. - apiVersion string // Limit the number of concurrent tests. 
maxConcurrency int ) @@ -93,7 +92,7 @@ func (h *delegateHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { w.WriteHeader(http.StatusNotFound) } -func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (string, string) { +func startComponents(firstManifestURL, secondManifestURL string) (string, string) { // Setup servers := []string{} glog.Infof("Creating etcd client pointing to %v", servers) @@ -126,13 +125,17 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st glog.Fatalf("Failed to connect to etcd") } - cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: apiVersion}) + cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Default.Version()}) - etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.InterfacesFor, latest.Version, etcdtest.PathPrefix()) + // TODO: caesarxuchao: hacky way to specify version of Experimental client. + // We will fix this by supporting multiple group versions in Config + cl.ExperimentalClient = client.NewExperimentalOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Experimental.Version()}) + + etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.InterfacesFor, testapi.Default.Version(), etcdtest.PathPrefix()) if err != nil { glog.Fatalf("Unable to get etcd storage: %v", err) } - expEtcdStorage, err := master.NewEtcdStorage(etcdClient, explatest.InterfacesFor, explatest.Version, etcdtest.PathPrefix()) + expEtcdStorage, err := master.NewEtcdStorage(etcdClient, explatest.InterfacesFor, testapi.Experimental.Version(), etcdtest.PathPrefix()) if err != nil { glog.Fatalf("Unable to get etcd storage for experimental: %v", err) } @@ -891,7 +894,6 @@ func runSchedulerNoPhantomPodsTest(client *client.Client) { type testFunc func(*client.Client) func addFlags(fs *pflag.FlagSet) { - fs.StringVar(&apiVersion, "api-version", latest.Version, "API version that should be used by the client for communicating with the server") fs.IntVar( &maxConcurrency, "max-concurrency", -1, "Maximum number of tests to be run simultaneously. Unlimited if set to negative.") } @@ -911,18 +913,21 @@ func main() { glog.Fatalf("This test has timed out.") }() - glog.Infof("Running tests for APIVersion: %s", apiVersion) + glog.Infof("Running tests for APIVersion: %s", os.Getenv("KUBE_TEST_API")) firstManifestURL := ServeCachedManifestFile(testPodSpecFile) secondManifestURL := ServeCachedManifestFile(testPodSpecFile) - apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL, apiVersion) + apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL) // Ok. we're good to go. glog.Infof("API Server started on %s", apiServerURL) // Wait for the synchronization threads to come up. time.Sleep(time.Second * 10) - kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: apiVersion}) + kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: testapi.Default.Version()}) + // TODO: caesarxuchao: hacky way to specify version of Experimental client. 
+ // We will fix this by supporting multiple group versions in Config + kubeClient.ExperimentalClient = client.NewExperimentalOrDie(&client.Config{Host: apiServerURL, Version: testapi.Experimental.Version()}) // Run tests in parallel testFuncs := []testFunc{ diff --git a/contrib/mesos/pkg/executor/executor_test.go b/contrib/mesos/pkg/executor/executor_test.go index 2295244ecac..2ea53cbad30 100644 --- a/contrib/mesos/pkg/executor/executor_test.go +++ b/contrib/mesos/pkg/executor/executor_test.go @@ -318,7 +318,7 @@ func TestExecutorLaunchAndKillTask(t *testing.T) { Updates: updates, APIClient: client.NewOrDie(&client.Config{ Host: testApiServer.server.URL, - Version: testapi.Version(), + Version: testapi.Default.Version(), }), Kubelet: &fakeKubelet{ Kubelet: &kubelet.Kubelet{}, @@ -355,7 +355,7 @@ func TestExecutorLaunchAndKillTask(t *testing.T) { assert.Equal(t, nil, err, "must be able to create a task from a pod") taskInfo := podTask.BuildTaskInfo() - data, err := testapi.Codec().Encode(pod) + data, err := testapi.Default.Codec().Encode(pod) assert.Equal(t, nil, err, "must be able to encode a pod's spec data") taskInfo.Data = data var statusUpdateCalls sync.WaitGroup @@ -484,7 +484,7 @@ func TestExecutorStaticPods(t *testing.T) { Updates: make(chan interface{}, 1), // allow kube-executor source to proceed past init APIClient: client.NewOrDie(&client.Config{ Host: testApiServer.server.URL, - Version: testapi.Version(), + Version: testapi.Default.Version(), }), Kubelet: &kubelet.Kubelet{}, PodStatusFunc: func(kl KubeletInterface, pod *api.Pod) (*api.PodStatus, error) { @@ -565,7 +565,7 @@ func TestExecutorFrameworkMessage(t *testing.T) { Updates: make(chan interface{}, 1024), APIClient: client.NewOrDie(&client.Config{ Host: testApiServer.server.URL, - Version: testapi.Version(), + Version: testapi.Default.Version(), }), Kubelet: &fakeKubelet{ Kubelet: &kubelet.Kubelet{}, @@ -602,7 +602,7 @@ func TestExecutorFrameworkMessage(t *testing.T) { *pod, &mesosproto.ExecutorInfo{}) taskInfo := podTask.BuildTaskInfo() - data, _ := testapi.Codec().Encode(pod) + data, _ := testapi.Default.Codec().Encode(pod) taskInfo.Data = data mockDriver.On( @@ -660,11 +660,11 @@ func TestExecutorFrameworkMessage(t *testing.T) { func NewTestPod(i int) *api.Pod { name := fmt.Sprintf("pod%d", i) return &api.Pod{ - TypeMeta: api.TypeMeta{APIVersion: testapi.Version()}, + TypeMeta: api.TypeMeta{APIVersion: testapi.Default.Version()}, ObjectMeta: api.ObjectMeta{ Name: name, Namespace: api.NamespaceDefault, - SelfLink: testapi.SelfLink("pods", string(i)), + SelfLink: testapi.Default.SelfLink("pods", string(i)), }, Spec: api.PodSpec{ Containers: []api.Container{ @@ -710,7 +710,7 @@ func NewTestServer(t *testing.T, namespace string, pods *api.PodList) *TestServe } mux := http.NewServeMux() - mux.HandleFunc(testapi.ResourcePath("bindings", namespace, ""), func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc(testapi.Default.ResourcePath("bindings", namespace, ""), func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }) diff --git a/contrib/mesos/pkg/scheduler/plugin_test.go b/contrib/mesos/pkg/scheduler/plugin_test.go index 06da09d5c55..fded56da701 100644 --- a/contrib/mesos/pkg/scheduler/plugin_test.go +++ b/contrib/mesos/pkg/scheduler/plugin_test.go @@ -61,13 +61,13 @@ func NewTestServer(t *testing.T, namespace string, mockPodListWatch *MockPodsLis } mux := http.NewServeMux() - mux.HandleFunc(testapi.ResourcePath("pods", namespace, ""), func(w http.ResponseWriter, r *http.Request) { + 
mux.HandleFunc(testapi.Default.ResourcePath("pods", namespace, ""), func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) pods := mockPodListWatch.Pods() - w.Write([]byte(runtime.EncodeOrDie(testapi.Codec(), &pods))) + w.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), &pods))) }) - podsPrefix := testapi.ResourcePath("pods", namespace, "") + "/" + podsPrefix := testapi.Default.ResourcePath("pods", namespace, "") + "/" mux.HandleFunc(podsPrefix, func(w http.ResponseWriter, r *http.Request) { name := r.URL.Path[len(podsPrefix):] @@ -79,13 +79,13 @@ func NewTestServer(t *testing.T, namespace string, mockPodListWatch *MockPodsLis p := mockPodListWatch.GetPod(name) if p != nil { w.WriteHeader(http.StatusOK) - w.Write([]byte(runtime.EncodeOrDie(testapi.Codec(), p))) + w.Write([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), p))) return } w.WriteHeader(http.StatusNotFound) }) - mux.HandleFunc(testapi.ResourcePath("events", namespace, ""), func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc(testapi.Default.ResourcePath("events", namespace, ""), func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }) @@ -196,7 +196,7 @@ func NewTestPod() (*api.Pod, int) { currentPodNum = currentPodNum + 1 name := fmt.Sprintf("pod%d", currentPodNum) return &api.Pod{ - TypeMeta: api.TypeMeta{APIVersion: testapi.Version()}, + TypeMeta: api.TypeMeta{APIVersion: testapi.Default.Version()}, ObjectMeta: api.ObjectMeta{ Name: name, Namespace: api.NamespaceDefault, @@ -398,7 +398,7 @@ func TestPlugin_LifeCycle(t *testing.T) { podtask.NewDefaultProcurement(mresource.DefaultDefaultContainerCPULimit, mresource.DefaultDefaultContainerMemLimit)) testScheduler := New(Config{ Executor: executor, - Client: client.NewOrDie(&client.Config{Host: testApiServer.server.URL, Version: testapi.Version()}), + Client: client.NewOrDie(&client.Config{Host: testApiServer.server.URL, Version: testapi.Default.Version()}), Scheduler: NewFCFSPodScheduler(as), Schedcfg: *schedcfg.CreateDefaultConfig(), }) diff --git a/hack/test-go.sh b/hack/test-go.sh index 91a5553d6d2..59e8881c0c1 100755 --- a/hack/test-go.sh +++ b/hack/test-go.sh @@ -52,8 +52,13 @@ KUBE_COVERPROCS=${KUBE_COVERPROCS:-4} KUBE_RACE=${KUBE_RACE:-} # use KUBE_RACE="-race" to enable race testing # Set to the goveralls binary path to report coverage results to Coveralls.io. KUBE_GOVERALLS_BIN=${KUBE_GOVERALLS_BIN:-} -# Comma separated list of API Versions that should be tested. -KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1"} +# Lists of API Versions of each groups that should be tested, groups are +# separated by comma, lists are separated by semicolon. e.g., +# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3" +# TODO: It's going to be: +# KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,experimental/v1alpha1"} +KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,experimental/v1"} +# once we have multiple group supports # Run tests with the standard (registry) and a custom etcd prefix # (kubernetes.io/registry). 
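The comma/semicolon convention described in the comment above can be illustrated with a short, standalone Go sketch. The version strings below are hypothetical examples rather than values taken from this patch, and the snippet only demonstrates the documented splitting rules (semicolons separate whole test runs, commas separate the group/versions within one run, a bare version belongs to the legacy group with an empty name):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical KUBE_TEST_API_VERSIONS value.
	kubeTestAPIVersions := "v1,experimental/v1;v1,experimental/v1alpha1"

	for _, run := range strings.Split(kubeTestAPIVersions, ";") {
		fmt.Printf("run with KUBE_TEST_API=%q\n", run)
		for _, groupVersion := range strings.Split(run, ",") {
			parts := strings.SplitN(groupVersion, "/", 2)
			if len(parts) == 1 {
				// A bare version such as "v1" belongs to the legacy
				// group, whose name is the empty string.
				fmt.Printf("  group=%q version=%q\n", "", parts[0])
			} else {
				fmt.Printf("  group=%q version=%q\n", parts[0], parts[1])
			}
		}
	}
}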
KUBE_TEST_ETCD_PREFIXES=${KUBE_TEST_ETCD_PREFIXES:-"registry,kubernetes.io/registry"} @@ -131,7 +136,8 @@ junitFilenamePrefix() { return fi mkdir -p "${KUBE_JUNIT_REPORT_DIR}" - echo "${KUBE_JUNIT_REPORT_DIR}/junit_${KUBE_API_VERSION}_$(kube::util::sortable_date)" + local KUBE_TEST_API_NO_SLASH=echo "${KUBE_TEST_API//\//-}" + echo "${KUBE_JUNIT_REPORT_DIR}/junit_${KUBE_TEST_API_NO_SLASH}_$(kube::util::sortable_date)" } produceJUnitXMLReport() { @@ -205,7 +211,7 @@ runTests() { fi # Create coverage report directories. - cover_report_dir="/tmp/k8s_coverage/${KUBE_API_VERSION}/$(kube::util::sortable_date)" + cover_report_dir="/tmp/k8s_coverage/${KUBE_TEST_API}/$(kube::util::sortable_date)" cover_profile="coverage.out" # Name for each individual coverage profile kube::log::status "Saving coverage output in '${cover_report_dir}'" mkdir -p "${@+${@/#/${cover_report_dir}/}}" @@ -266,7 +272,7 @@ reportCoverageToCoveralls() { } # Convert the CSVs to arrays. -IFS=',' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}" +IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}" IFS=',' read -a etcdPrefixes <<< "${KUBE_TEST_ETCD_PREFIXES}" apiVersionsCount=${#apiVersions[@]} etcdPrefixesCount=${#etcdPrefixes[@]} @@ -274,7 +280,10 @@ for (( i=0, j=0; ; )); do apiVersion=${apiVersions[i]} etcdPrefix=${etcdPrefixes[j]} echo "Running tests for APIVersion: $apiVersion with etcdPrefix: $etcdPrefix" - KUBE_API_VERSION="${apiVersion}" KUBE_API_VERSIONS="v1" ETCD_PREFIX=${etcdPrefix} runTests "$@" + # KUBE_TEST_API sets the version of each group to be tested. KUBE_API_VERSIONS + # register the groups/versions as supported by k8s. So KUBE_API_VERSIONS + # needs to be the superset of KUBE_TEST_API. + KUBE_TEST_API="${apiVersion}" KUBE_API_VERSIONS="v1" ETCD_PREFIX=${etcdPrefix} runTests "$@" i=${i}+1 j=${j}+1 if [[ i -eq ${apiVersionsCount} ]] && [[ j -eq ${etcdPrefixesCount} ]]; then diff --git a/hack/test-integration.sh b/hack/test-integration.sh index d299037571e..dcd9c9e87d4 100755 --- a/hack/test-integration.sh +++ b/hack/test-integration.sh @@ -24,8 +24,12 @@ set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. source "${KUBE_ROOT}/hack/lib/init.sh" -# Comma separated list of API Versions that should be tested. -KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1"} +# Lists of API Versions of each groups that should be tested, groups are +# separated by comma, lists are separated by semicolon. 
e.g., +# "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3" +# TODO: It's going to be: +# KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,experimental/v1alpha1"} +KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,experimental/v1"} KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=${KUBE_INTEGRATION_TEST_MAX_CONCURRENCY:-"-1"} LOG_LEVEL=${LOG_LEVEL:-2} @@ -48,8 +52,8 @@ runTests() { kube::log::status "Running integration test scenario" - KUBE_API_VERSIONS="v1" "${KUBE_OUTPUT_HOSTBIN}/integration" --v=${LOG_LEVEL} --api-version="$1" \ - --max-concurrency="${KUBE_INTEGRATION_TEST_MAX_CONCURRENCY}" + KUBE_API_VERSIONS="v1" KUBE_TEST_API_VERSIONS="$1" "${KUBE_OUTPUT_HOSTBIN}/integration" --v=${LOG_LEVEL} \ + --max-concurrency="${KUBE_INTEGRATION_TEST_MAX_CONCURRENCY}" cleanup } @@ -60,7 +64,7 @@ KUBE_API_VERSIONS="v1" "${KUBE_ROOT}/hack/build-go.sh" "$@" cmd/integration trap cleanup EXIT # Convert the CSV to an array of API versions to test -IFS=',' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}" +IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}" for apiVersion in "${apiVersions[@]}"; do runTests "${apiVersion}" done diff --git a/pkg/api/conversion_test.go b/pkg/api/conversion_test.go index 0ad68dfdd11..e3449def625 100644 --- a/pkg/api/conversion_test.go +++ b/pkg/api/conversion_test.go @@ -37,7 +37,7 @@ func BenchmarkPodConversion(b *testing.B) { scheme := api.Scheme.Raw() var result *api.Pod for i := 0; i < b.N; i++ { - versionedObj, err := scheme.ConvertToVersion(&pod, testapi.Version()) + versionedObj, err := scheme.ConvertToVersion(&pod, testapi.Default.Version()) if err != nil { b.Fatalf("Conversion error: %v", err) } @@ -65,7 +65,7 @@ func BenchmarkNodeConversion(b *testing.B) { scheme := api.Scheme.Raw() var result *api.Node for i := 0; i < b.N; i++ { - versionedObj, err := scheme.ConvertToVersion(&node, testapi.Version()) + versionedObj, err := scheme.ConvertToVersion(&node, testapi.Default.Version()) if err != nil { b.Fatalf("Conversion error: %v", err) } @@ -93,7 +93,7 @@ func BenchmarkReplicationControllerConversion(b *testing.B) { scheme := api.Scheme.Raw() var result *api.ReplicationController for i := 0; i < b.N; i++ { - versionedObj, err := scheme.ConvertToVersion(&replicationController, testapi.Version()) + versionedObj, err := scheme.ConvertToVersion(&replicationController, testapi.Default.Version()) if err != nil { b.Fatalf("Conversion error: %v", err) } diff --git a/pkg/api/copy_test.go b/pkg/api/copy_test.go index 79796ed2c54..c3c55283a7f 100644 --- a/pkg/api/copy_test.go +++ b/pkg/api/copy_test.go @@ -28,7 +28,7 @@ import ( func TestDeepCopyApiObjects(t *testing.T) { for i := 0; i < *fuzzIters; i++ { - for _, version := range []string{"", testapi.Version()} { + for _, version := range []string{"", testapi.Default.Version()} { f := apitesting.FuzzerFor(t, version, rand.NewSource(rand.Int63())) for kind := range api.Scheme.KnownTypes(version) { item, err := api.Scheme.New(version, kind) diff --git a/pkg/api/serialization_test.go b/pkg/api/serialization_test.go index 6eed0666be9..45a57aee5b1 100644 --- a/pkg/api/serialization_test.go +++ b/pkg/api/serialization_test.go @@ -90,10 +90,10 @@ func roundTripSame(t *testing.T, item runtime.Object, except ...string) { set := util.NewStringSet(except...) 
seed := rand.Int63() fuzzInternalObject(t, "", item, seed) - version := testapi.Version() + version := testapi.Default.Version() if !set.Has(version) { fuzzInternalObject(t, version, item, seed) - roundTrip(t, testapi.Codec(), item) + roundTrip(t, testapi.Default.Codec(), item) } } diff --git a/pkg/api/testapi/testapi.go b/pkg/api/testapi/testapi.go index bd294c00a18..3d58968ead5 100644 --- a/pkg/api/testapi/testapi.go +++ b/pkg/api/testapi/testapi.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package testapi provides a helper for retrieving the KUBE_API_VERSION environment variable. +// Package testapi provides a helper for retrieving the KUBE_TEST_API environment variable. package testapi import ( @@ -24,63 +24,161 @@ import ( "k8s.io/kubernetes/pkg/api/latest" "k8s.io/kubernetes/pkg/api/meta" + apiutil "k8s.io/kubernetes/pkg/api/util" + explatest "k8s.io/kubernetes/pkg/expapi/latest" "k8s.io/kubernetes/pkg/runtime" ) -// Version returns the API version to test against, as set by the KUBE_API_VERSION env var. -func Version() string { - version := os.Getenv("KUBE_API_VERSION") - if version == "" { - version = latest.Version +var ( + Groups = make(map[string]TestGroup) + Default TestGroup + Experimental TestGroup +) + +type TestGroup struct { + // Name of the group + Group string + // Version of the group Group under test + VersionUnderTest string + // Group and Version. In most cases equals to Group + "/" + VersionUnverTest + GroupVersionUnderTest string +} + +func init() { + kubeTestAPI := os.Getenv("KUBE_TEST_API") + if kubeTestAPI != "" { + testGroupVersions := strings.Split(kubeTestAPI, ",") + for _, groupVersion := range testGroupVersions { + // TODO: caesarxuchao: the apiutil package is hacky, it will be replaced + // by a following PR. + Groups[apiutil.GetGroup(groupVersion)] = + TestGroup{apiutil.GetGroup(groupVersion), apiutil.GetVersion(groupVersion), groupVersion} + } } - return version + + // TODO: caesarxuchao: we need a central place to store all available API + // groups and their metadata. + if _, ok := Groups[""]; !ok { + // TODO: The second latest.Version will be latest.GroupVersion after we + // have multiple group support + Groups[""] = TestGroup{"", latest.Version, latest.Version} + } + if _, ok := Groups["experimental"]; !ok { + Groups["experimental"] = TestGroup{"experimental", explatest.Version, explatest.Version} + } + + Default = Groups[""] + Experimental = Groups["experimental"] +} + +// Version returns the API version to test against, as set by the KUBE_TEST_API env var. +func (g TestGroup) Version() string { + return g.VersionUnderTest +} + +// GroupAndVersion returns the API version to test against for a group, as set +// by the KUBE_TEST_API env var. +// Return value is in the form of "group/version". +func (g TestGroup) GroupAndVersion() string { + return g.GroupVersionUnderTest } // Codec returns the codec for the API version to test against, as set by the -// KUBE_API_VERSION env var. -func Codec() runtime.Codec { - interfaces, err := latest.InterfacesFor(Version()) - if err != nil { - panic(err) +// KUBE_TEST_API env var. +func (g TestGroup) Codec() runtime.Codec { + // TODO: caesarxuchao: Restructure the body once we have a central `latest`. 
+ if g.Group == "" { + interfaces, err := latest.InterfacesFor(g.VersionUnderTest) + if err != nil { + panic(err) + } + return interfaces.Codec } - return interfaces.Codec + if g.Group == "experimental" { + interfaces, err := explatest.InterfacesFor(g.VersionUnderTest) + if err != nil { + panic(err) + } + return interfaces.Codec + } + panic(fmt.Errorf("cannot test group %s", g.Group)) } // Converter returns the api.Scheme for the API version to test against, as set by the -// KUBE_API_VERSION env var. -func Converter() runtime.ObjectConvertor { - interfaces, err := latest.InterfacesFor(Version()) - if err != nil { - panic(err) +// KUBE_TEST_API env var. +func (g TestGroup) Converter() runtime.ObjectConvertor { + // TODO: caesarxuchao: Restructure the body once we have a central `latest`. + if g.Group == "" { + interfaces, err := latest.InterfacesFor(g.VersionUnderTest) + if err != nil { + panic(err) + } + return interfaces.ObjectConvertor } - return interfaces.ObjectConvertor + if g.Group == "experimental" { + interfaces, err := explatest.InterfacesFor(g.VersionUnderTest) + if err != nil { + panic(err) + } + return interfaces.ObjectConvertor + } + panic(fmt.Errorf("cannot test group %s", g.Group)) + } // MetadataAccessor returns the MetadataAccessor for the API version to test against, -// as set by the KUBE_API_VERSION env var. -func MetadataAccessor() meta.MetadataAccessor { - interfaces, err := latest.InterfacesFor(Version()) - if err != nil { - panic(err) +// as set by the KUBE_TEST_API env var. +func (g TestGroup) MetadataAccessor() meta.MetadataAccessor { + // TODO: caesarxuchao: Restructure the body once we have a central `latest`. + if g.Group == "" { + interfaces, err := latest.InterfacesFor(g.VersionUnderTest) + if err != nil { + panic(err) + } + return interfaces.MetadataAccessor } - return interfaces.MetadataAccessor + if g.Group == "experimental" { + interfaces, err := explatest.InterfacesFor(g.VersionUnderTest) + if err != nil { + panic(err) + } + return interfaces.MetadataAccessor + } + panic(fmt.Errorf("cannot test group %s", g.Group)) } // SelfLink returns a self link that will appear to be for the version Version(). // 'resource' should be the resource path, e.g. "pods" for the Pod type. 'name' should be // empty for lists. -func SelfLink(resource, name string) string { - if name == "" { - return fmt.Sprintf("/api/%s/%s", Version(), resource) +func (g TestGroup) SelfLink(resource, name string) string { + if g.Group == "" { + if name == "" { + return fmt.Sprintf("/api/%s/%s", g.Version(), resource) + } + return fmt.Sprintf("/api/%s/%s/%s", g.Version(), resource, name) + } else { + // TODO: will need a /apis prefix once we have proper multi-group + // support + if name == "" { + return fmt.Sprintf("/%s/%s/%s", g.Group, g.Version(), resource) + } + return fmt.Sprintf("/%s/%s/%s/%s", g.Group, g.Version(), resource, name) } - return fmt.Sprintf("/api/%s/%s/%s", Version(), resource, name) } // Returns the appropriate path for the given prefix (watch, proxy, redirect, etc), resource, namespace and name. // For ex, this is of the form: // /api/v1/watch/namespaces/foo/pods/pod0 for v1. 
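To make the shape of the new per-group helpers concrete, a minimal usage sketch follows. It is illustrative only, assuming the testapi package is imported as shown and that KUBE_TEST_API is unset so each group falls back to its registered latest version; the exact strings returned depend on that environment:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/testapi"
)

func main() {
	// Version under test for each group.
	fmt.Println(testapi.Default.Version())
	fmt.Println(testapi.Experimental.Version())
	// Group-qualified form when KUBE_TEST_API specifies one.
	fmt.Println(testapi.Experimental.GroupAndVersion())

	// Path helpers now hang off a TestGroup value instead of being
	// package-level functions.
	fmt.Println(testapi.Default.SelfLink("pods", "foo"))                             // /api/<version>/pods/foo
	fmt.Println(testapi.Default.ResourcePath("pods", "mynamespace", "mypod"))        // /api/<version>/namespaces/mynamespace/pods/mypod
	fmt.Println(testapi.Default.ResourcePathWithPrefix("watch", "pods", "myns", "")) // /api/<version>/watch/namespaces/myns/pods
}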
-func ResourcePathWithPrefix(prefix, resource, namespace, name string) string { - path := "/api/" + Version() +func (g TestGroup) ResourcePathWithPrefix(prefix, resource, namespace, name string) string { + var path string + if len(g.Group) == 0 { + path = "/api/" + g.Version() + } else { + // TODO: switch back once we have proper multiple group support + // path = "/apis/" + g.Group + "/" + Version(group...) + path = "/" + g.Group + "/" + g.Version() + } + if prefix != "" { path = path + "/" + prefix } @@ -101,6 +199,6 @@ func ResourcePathWithPrefix(prefix, resource, namespace, name string) string { // Returns the appropriate path for the given resource, namespace and name. // For example, this is of the form: // /api/v1/namespaces/foo/pods/pod0 for v1. -func ResourcePath(resource, namespace, name string) string { - return ResourcePathWithPrefix("", resource, namespace, name) +func (g TestGroup) ResourcePath(resource, namespace, name string) string { + return g.ResourcePathWithPrefix("", resource, namespace, name) } diff --git a/pkg/api/testapi/testapi_test.go b/pkg/api/testapi/testapi_test.go index 220833a2045..056c769709e 100644 --- a/pkg/api/testapi/testapi_test.go +++ b/pkg/api/testapi/testapi_test.go @@ -28,14 +28,14 @@ func TestResourcePathWithPrefix(t *testing.T) { name string expected string }{ - {"prefix", "resource", "mynamespace", "myresource", "/api/" + Version() + "/prefix/namespaces/mynamespace/resource/myresource"}, - {"prefix", "resource", "", "myresource", "/api/" + Version() + "/prefix/resource/myresource"}, - {"prefix", "resource", "mynamespace", "", "/api/" + Version() + "/prefix/namespaces/mynamespace/resource"}, - {"prefix", "resource", "", "", "/api/" + Version() + "/prefix/resource"}, - {"", "resource", "mynamespace", "myresource", "/api/" + Version() + "/namespaces/mynamespace/resource/myresource"}, + {"prefix", "resource", "mynamespace", "myresource", "/api/" + Default.Version() + "/prefix/namespaces/mynamespace/resource/myresource"}, + {"prefix", "resource", "", "myresource", "/api/" + Default.Version() + "/prefix/resource/myresource"}, + {"prefix", "resource", "mynamespace", "", "/api/" + Default.Version() + "/prefix/namespaces/mynamespace/resource"}, + {"prefix", "resource", "", "", "/api/" + Default.Version() + "/prefix/resource"}, + {"", "resource", "mynamespace", "myresource", "/api/" + Default.Version() + "/namespaces/mynamespace/resource/myresource"}, } for _, item := range testCases { - if actual := ResourcePathWithPrefix(item.prefix, item.resource, item.namespace, item.name); actual != item.expected { + if actual := Default.ResourcePathWithPrefix(item.prefix, item.resource, item.namespace, item.name); actual != item.expected { t.Errorf("Expected: %s, got: %s for prefix: %s, resource: %s, namespace: %s and name: %s", item.expected, actual, item.prefix, item.resource, item.namespace, item.name) } } @@ -48,13 +48,13 @@ func TestResourcePath(t *testing.T) { name string expected string }{ - {"resource", "mynamespace", "myresource", "/api/" + Version() + "/namespaces/mynamespace/resource/myresource"}, - {"resource", "", "myresource", "/api/" + Version() + "/resource/myresource"}, - {"resource", "mynamespace", "", "/api/" + Version() + "/namespaces/mynamespace/resource"}, - {"resource", "", "", "/api/" + Version() + "/resource"}, + {"resource", "mynamespace", "myresource", "/api/" + Default.Version() + "/namespaces/mynamespace/resource/myresource"}, + {"resource", "", "myresource", "/api/" + Default.Version() + "/resource/myresource"}, + {"resource", 
"mynamespace", "", "/api/" + Default.Version() + "/namespaces/mynamespace/resource"}, + {"resource", "", "", "/api/" + Default.Version() + "/resource"}, } for _, item := range testCases { - if actual := ResourcePath(item.resource, item.namespace, item.name); actual != item.expected { + if actual := Default.ResourcePath(item.resource, item.namespace, item.name); actual != item.expected { t.Errorf("Expected: %s, got: %s for resource: %s, namespace: %s and name: %s", item.expected, actual, item.resource, item.namespace, item.name) } } diff --git a/pkg/api/util/group_version.go b/pkg/api/util/group_version.go new file mode 100644 index 00000000000..1a9cab0985e --- /dev/null +++ b/pkg/api/util/group_version.go @@ -0,0 +1,39 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// TODO: This GetVersion/GetGroup arrangement is temporary and will be replaced +// with a GroupAndVersion type. +package util + +import "strings" + +func GetVersion(groupVersion string) string { + s := strings.Split(groupVersion, "/") + if len(s) != 2 { + // e.g. return "v1" for groupVersion="v1" + return s[len(s)-1] + } + return s[1] +} + +func GetGroup(groupVersion string) string { + s := strings.Split(groupVersion, "/") + if len(s) == 1 { + // e.g. return "" for groupVersion="v1" + return "" + } + return s[0] +} diff --git a/pkg/api/util/group_version_test.go b/pkg/api/util/group_version_test.go new file mode 100644 index 00000000000..163cd5f3f67 --- /dev/null +++ b/pkg/api/util/group_version_test.go @@ -0,0 +1,63 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import "testing" + +func TestGetVersion(t *testing.T) { + testCases := []struct { + groupVersion string + output string + }{ + { + "v1", + "v1", + }, + { + "experimental/v1alpha1", + "v1alpha1", + }, + } + for _, test := range testCases { + actual := GetVersion(test.groupVersion) + if test.output != actual { + t.Errorf("expect version: %s, got: %s\n", test.output, actual) + } + } +} + +func TestGetGroup(t *testing.T) { + testCases := []struct { + groupVersion string + output string + }{ + { + "v1", + "", + }, + { + "experimental/v1alpha1", + "experimental", + }, + } + for _, test := range testCases { + actual := GetGroup(test.groupVersion) + if test.output != actual { + t.Errorf("expect version: %s, got: %s\n", test.output, actual) + } + } +} diff --git a/pkg/api/validation/schema_test.go b/pkg/api/validation/schema_test.go index 4c22a273db5..d1d386f7218 100644 --- a/pkg/api/validation/schema_test.go +++ b/pkg/api/validation/schema_test.go @@ -28,7 +28,7 @@ import ( ) func readPod(filename string) (string, error) { - data, err := ioutil.ReadFile("testdata/" + testapi.Version() + "/" + filename) + data, err := ioutil.ReadFile("testdata/" + testapi.Default.Version() + "/" + filename) if err != nil { return "", err } @@ -36,7 +36,7 @@ func readPod(filename string) (string, error) { } func loadSchemaForTest() (Schema, error) { - pathToSwaggerSpec := "../../../api/swagger-spec/" + testapi.Version() + ".json" + pathToSwaggerSpec := "../../../api/swagger-spec/" + testapi.Default.Version() + ".json" data, err := ioutil.ReadFile(pathToSwaggerSpec) if err != nil { return nil, err @@ -71,7 +71,7 @@ func TestValidateOk(t *testing.T) { for _, test := range tests { testObj := test.obj apiObjectFuzzer.Fuzz(testObj) - data, err := testapi.Codec().Encode(testObj) + data, err := testapi.Default.Codec().Encode(testObj) if err != nil { t.Errorf("unexpected error: %v", err) } diff --git a/pkg/api/validation/validation_test.go b/pkg/api/validation/validation_test.go index 21693a5dc5a..3f71b703950 100644 --- a/pkg/api/validation/validation_test.go +++ b/pkg/api/validation/validation_test.go @@ -638,7 +638,7 @@ func TestValidateEnv(t *testing.T) { Name: "abc", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), FieldPath: "metadata.name", }, }, @@ -670,7 +670,7 @@ func TestValidateEnv(t *testing.T) { Value: "foo", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), FieldPath: "metadata.name", }, }, @@ -683,7 +683,7 @@ func TestValidateEnv(t *testing.T) { Name: "abc", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), }, }, }}, @@ -708,7 +708,7 @@ func TestValidateEnv(t *testing.T) { ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ FieldPath: "metadata.whoops", - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), }, }, }}, @@ -747,7 +747,7 @@ func TestValidateEnv(t *testing.T) { ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ FieldPath: "status.phase", - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), }, }, }}, diff --git a/pkg/apiserver/handlers_test.go b/pkg/apiserver/handlers_test.go index 30ec81da66a..b9d948d73ba 100644 --- a/pkg/apiserver/handlers_test.go +++ b/pkg/apiserver/handlers_test.go @@ -52,11 +52,11 @@ func expectHTTP(url string, code int, 
t *testing.T) { } func getPath(resource, namespace, name string) string { - return testapi.ResourcePath(resource, namespace, name) + return testapi.Default.ResourcePath(resource, namespace, name) } func pathWithPrefix(prefix, resource, namespace, name string) string { - return testapi.ResourcePathWithPrefix(prefix, resource, namespace, name) + return testapi.Default.ResourcePathWithPrefix(prefix, resource, namespace, name) } func TestMaxInFlight(t *testing.T) { @@ -231,15 +231,15 @@ func TestGetAPIRequestInfo(t *testing.T) { {"GET", "/watch/namespaces/other/pods", "watch", "", "other", "pods", "", "Pod", "", []string{"pods"}}, // fully-qualified paths - {"GET", getPath("pods", "other", ""), "list", testapi.Version(), "other", "pods", "", "Pod", "", []string{"pods"}}, - {"GET", getPath("pods", "other", "foo"), "get", testapi.Version(), "other", "pods", "", "Pod", "foo", []string{"pods", "foo"}}, - {"GET", getPath("pods", "", ""), "list", testapi.Version(), api.NamespaceAll, "pods", "", "Pod", "", []string{"pods"}}, - {"POST", getPath("pods", "", ""), "create", testapi.Version(), api.NamespaceAll, "pods", "", "Pod", "", []string{"pods"}}, - {"GET", getPath("pods", "", "foo"), "get", testapi.Version(), api.NamespaceAll, "pods", "", "Pod", "foo", []string{"pods", "foo"}}, - {"GET", pathWithPrefix("proxy", "pods", "", "foo"), "proxy", testapi.Version(), api.NamespaceAll, "pods", "", "Pod", "foo", []string{"pods", "foo"}}, - {"GET", pathWithPrefix("watch", "pods", "", ""), "watch", testapi.Version(), api.NamespaceAll, "pods", "", "Pod", "", []string{"pods"}}, - {"GET", pathWithPrefix("redirect", "pods", "", ""), "redirect", testapi.Version(), api.NamespaceAll, "pods", "", "Pod", "", []string{"pods"}}, - {"GET", pathWithPrefix("watch", "pods", "other", ""), "watch", testapi.Version(), "other", "pods", "", "Pod", "", []string{"pods"}}, + {"GET", getPath("pods", "other", ""), "list", testapi.Default.Version(), "other", "pods", "", "Pod", "", []string{"pods"}}, + {"GET", getPath("pods", "other", "foo"), "get", testapi.Default.Version(), "other", "pods", "", "Pod", "foo", []string{"pods", "foo"}}, + {"GET", getPath("pods", "", ""), "list", testapi.Default.Version(), api.NamespaceAll, "pods", "", "Pod", "", []string{"pods"}}, + {"POST", getPath("pods", "", ""), "create", testapi.Default.Version(), api.NamespaceAll, "pods", "", "Pod", "", []string{"pods"}}, + {"GET", getPath("pods", "", "foo"), "get", testapi.Default.Version(), api.NamespaceAll, "pods", "", "Pod", "foo", []string{"pods", "foo"}}, + {"GET", pathWithPrefix("proxy", "pods", "", "foo"), "proxy", testapi.Default.Version(), api.NamespaceAll, "pods", "", "Pod", "foo", []string{"pods", "foo"}}, + {"GET", pathWithPrefix("watch", "pods", "", ""), "watch", testapi.Default.Version(), api.NamespaceAll, "pods", "", "Pod", "", []string{"pods"}}, + {"GET", pathWithPrefix("redirect", "pods", "", ""), "redirect", testapi.Default.Version(), api.NamespaceAll, "pods", "", "Pod", "", []string{"pods"}}, + {"GET", pathWithPrefix("watch", "pods", "other", ""), "watch", testapi.Default.Version(), "other", "pods", "", "Pod", "", []string{"pods"}}, // subresource identification {"GET", "/namespaces/other/pods/foo/status", "get", "", "other", "pods", "status", "Pod", "foo", []string{"pods", "foo", "status"}}, diff --git a/pkg/client/unversioned/cache/listwatch_test.go b/pkg/client/unversioned/cache/listwatch_test.go index 3d2e4f1ef8d..33f45db26e7 100644 --- a/pkg/client/unversioned/cache/listwatch_test.go +++ b/pkg/client/unversioned/cache/listwatch_test.go @@ 
-54,7 +54,7 @@ func buildLocation(resourcePath string, query url.Values) string { } func TestListWatchesCanList(t *testing.T) { - fieldSelectorQueryParamName := api.FieldSelectorQueryParam(testapi.Version()) + fieldSelectorQueryParamName := api.FieldSelectorQueryParam(testapi.Default.Version()) table := []struct { location string resource string @@ -63,7 +63,7 @@ func TestListWatchesCanList(t *testing.T) { }{ // Minion { - location: testapi.ResourcePath("minions", api.NamespaceAll, ""), + location: testapi.Default.ResourcePath("minions", api.NamespaceAll, ""), resource: "minions", namespace: api.NamespaceAll, fieldSelector: parseSelectorOrDie(""), @@ -71,7 +71,7 @@ func TestListWatchesCanList(t *testing.T) { // pod with "assigned" field selector. { location: buildLocation( - testapi.ResourcePath("pods", api.NamespaceAll, ""), + testapi.Default.ResourcePath("pods", api.NamespaceAll, ""), buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}})), resource: "pods", namespace: api.NamespaceAll, @@ -80,7 +80,7 @@ func TestListWatchesCanList(t *testing.T) { // pod in namespace "foo" { location: buildLocation( - testapi.ResourcePath("pods", "foo", ""), + testapi.Default.ResourcePath("pods", "foo", ""), buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}})), resource: "pods", namespace: "foo", @@ -95,7 +95,7 @@ func TestListWatchesCanList(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Default.Version()}) lw := NewListWatchFromClient(client, item.resource, item.namespace, item.fieldSelector) // This test merely tests that the correct request is made. lw.List() @@ -104,7 +104,7 @@ func TestListWatchesCanList(t *testing.T) { } func TestListWatchesCanWatch(t *testing.T) { - fieldSelectorQueryParamName := api.FieldSelectorQueryParam(testapi.Version()) + fieldSelectorQueryParamName := api.FieldSelectorQueryParam(testapi.Default.Version()) table := []struct { rv string location string @@ -115,7 +115,7 @@ func TestListWatchesCanWatch(t *testing.T) { // Minion { location: buildLocation( - testapi.ResourcePathWithPrefix("watch", "minions", api.NamespaceAll, ""), + testapi.Default.ResourcePathWithPrefix("watch", "minions", api.NamespaceAll, ""), buildQueryValues(url.Values{"resourceVersion": []string{""}})), rv: "", resource: "minions", @@ -124,7 +124,7 @@ func TestListWatchesCanWatch(t *testing.T) { }, { location: buildLocation( - testapi.ResourcePathWithPrefix("watch", "minions", api.NamespaceAll, ""), + testapi.Default.ResourcePathWithPrefix("watch", "minions", api.NamespaceAll, ""), buildQueryValues(url.Values{"resourceVersion": []string{"42"}})), rv: "42", resource: "minions", @@ -134,7 +134,7 @@ func TestListWatchesCanWatch(t *testing.T) { // pod with "assigned" field selector. 
{ location: buildLocation( - testapi.ResourcePathWithPrefix("watch", "pods", api.NamespaceAll, ""), + testapi.Default.ResourcePathWithPrefix("watch", "pods", api.NamespaceAll, ""), buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}, "resourceVersion": []string{"0"}})), rv: "0", resource: "pods", @@ -144,7 +144,7 @@ func TestListWatchesCanWatch(t *testing.T) { // pod with namespace foo and assigned field selector { location: buildLocation( - testapi.ResourcePathWithPrefix("watch", "pods", "foo", ""), + testapi.Default.ResourcePathWithPrefix("watch", "pods", "foo", ""), buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}, "resourceVersion": []string{"0"}})), rv: "0", resource: "pods", @@ -161,7 +161,7 @@ func TestListWatchesCanWatch(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Default.Version()}) lw := NewListWatchFromClient(client, item.resource, item.namespace, item.fieldSelector) // This test merely tests that the correct request is made. lw.Watch(item.rv) diff --git a/pkg/client/unversioned/client_test.go b/pkg/client/unversioned/client_test.go index 5671518704f..72f412b4778 100644 --- a/pkg/client/unversioned/client_test.go +++ b/pkg/client/unversioned/client_test.go @@ -68,23 +68,34 @@ type testClient struct { QueryValidator map[string]func(string, string) bool } -func (c *testClient) Setup() *testClient { +func (c *testClient) Setup(t *testing.T) *testClient { c.handler = &util.FakeHandler{ StatusCode: c.Response.StatusCode, } - if responseBody := body(c.Response.Body, c.Response.RawBody); responseBody != nil { + if responseBody := body(t, c.Response.Body, c.Response.RawBody); responseBody != nil { c.handler.ResponseBody = *responseBody } c.server = httptest.NewServer(c.handler) if c.Client == nil { version := c.Version if len(version) == 0 { - version = testapi.Version() + version = testapi.Default.Version() } c.Client = NewOrDie(&Config{ Host: c.server.URL, Version: version, }) + + // TODO: caesarxuchao: hacky way to specify version of Experimental client. 
+ // We will fix this by supporting multiple group versions in Config + version = c.Version + if len(version) == 0 { + version = testapi.Experimental.Version() + } + c.ExperimentalClient = NewExperimentalOrDie(&Config{ + Host: c.server.URL, + Version: version, + }) } c.QueryValidator = map[string]func(string, string) bool{} return c @@ -124,7 +135,7 @@ func (c *testClient) ValidateCommon(t *testing.T, err error) { return } - requestBody := body(c.Request.Body, c.Request.RawBody) + requestBody := body(t, c.Request.Body, c.Request.RawBody) actualQuery := c.handler.RequestReceived.URL.Query() t.Logf("got query: %v", actualQuery) t.Logf("path: %v", c.Request.Path) @@ -136,9 +147,9 @@ func (c *testClient) ValidateCommon(t *testing.T, err error) { validator, ok := c.QueryValidator[key] if !ok { switch key { - case api.LabelSelectorQueryParam(testapi.Version()): + case api.LabelSelectorQueryParam(testapi.Default.Version()): validator = validateLabels - case api.FieldSelectorQueryParam(testapi.Version()): + case api.FieldSelectorQueryParam(testapi.Default.Version()): validator = validateFields default: validator = func(a, b string) bool { return a == b } @@ -200,9 +211,30 @@ func validateFields(a, b string) bool { return sA.String() == sB.String() } -func body(obj runtime.Object, raw *string) *string { +func body(t *testing.T, obj runtime.Object, raw *string) *string { if obj != nil { - bs, _ := testapi.Codec().Encode(obj) + _, kind, err := api.Scheme.ObjectVersionAndKind(obj) + if err != nil { + t.Errorf("unexpected encoding error: %v", err) + } + // TODO: caesarxuchao: we should detect which group an object belongs to + // by using the version returned by Schem.ObjectVersionAndKind() once we + // split the schemes for internal objects. + // TODO: caesarxuchao: we should add a map from kind to group in Scheme. 
+ var bs []byte + if api.Scheme.Recognizes(testapi.Default.GroupAndVersion(), kind) { + bs, err = testapi.Default.Codec().Encode(obj) + if err != nil { + t.Errorf("unexpected encoding error: %v", err) + } + } else if api.Scheme.Recognizes(testapi.Experimental.GroupAndVersion(), kind) { + bs, err = testapi.Experimental.Codec().Encode(obj) + if err != nil { + t.Errorf("unexpected encoding error: %v", err) + } + } else { + t.Errorf("unexpected kind: %v", kind) + } body := string(bs) return &body } diff --git a/pkg/client/unversioned/daemon_test.go b/pkg/client/unversioned/daemon_test.go index 42ee7a75ce1..0f1f96b1cda 100644 --- a/pkg/client/unversioned/daemon_test.go +++ b/pkg/client/unversioned/daemon_test.go @@ -20,8 +20,8 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/expapi" - "k8s.io/kubernetes/pkg/expapi/testapi" "k8s.io/kubernetes/pkg/labels" ) @@ -34,7 +34,7 @@ func TestListDaemons(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getDCResourceName(), ns, ""), + Path: testapi.Experimental.ResourcePath(getDCResourceName(), ns, ""), }, Response: Response{StatusCode: 200, Body: &expapi.DaemonList{ @@ -55,7 +55,7 @@ func TestListDaemons(t *testing.T) { }, }, } - receivedControllerList, err := c.Setup().Experimental().Daemons(ns).List(labels.Everything()) + receivedControllerList, err := c.Setup(t).Experimental().Daemons(ns).List(labels.Everything()) c.Validate(t, receivedControllerList, err) } @@ -63,7 +63,7 @@ func TestListDaemons(t *testing.T) { func TestGetDaemon(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "GET", Path: testapi.ResourcePath(getDCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "GET", Path: testapi.Experimental.ResourcePath(getDCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{ StatusCode: 200, Body: &expapi.Daemon{ @@ -80,14 +80,14 @@ func TestGetDaemon(t *testing.T) { }, }, } - receivedController, err := c.Setup().Experimental().Daemons(ns).Get("foo") + receivedController, err := c.Setup(t).Experimental().Daemons(ns).Get("foo") c.Validate(t, receivedController, err) } func TestGetDaemonWithNoName(t *testing.T) { ns := api.NamespaceDefault c := &testClient{Error: true} - receivedPod, err := c.Setup().Experimental().Daemons(ns).Get("") + receivedPod, err := c.Setup(t).Experimental().Daemons(ns).Get("") if (err != nil) && (err.Error() != nameRequiredError) { t.Errorf("Expected error: %v, but got %v", nameRequiredError, err) } @@ -101,7 +101,7 @@ func TestUpdateDaemon(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, } c := &testClient{ - Request: testRequest{Method: "PUT", Path: testapi.ResourcePath(getDCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "PUT", Path: testapi.Experimental.ResourcePath(getDCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{ StatusCode: 200, Body: &expapi.Daemon{ @@ -118,17 +118,17 @@ func TestUpdateDaemon(t *testing.T) { }, }, } - receivedController, err := c.Setup().Experimental().Daemons(ns).Update(requestController) + receivedController, err := c.Setup(t).Experimental().Daemons(ns).Update(requestController) c.Validate(t, receivedController, err) } func TestDeleteDaemon(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "DELETE", Path: testapi.ResourcePath(getDCResourceName(), 
ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "DELETE", Path: testapi.Experimental.ResourcePath(getDCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200}, } - err := c.Setup().Experimental().Daemons(ns).Delete("foo") + err := c.Setup(t).Experimental().Daemons(ns).Delete("foo") c.Validate(t, nil, err) } @@ -138,7 +138,7 @@ func TestCreateDaemon(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: "foo"}, } c := &testClient{ - Request: testRequest{Method: "POST", Path: testapi.ResourcePath(getDCResourceName(), ns, ""), Body: requestController, Query: buildQueryValues(nil)}, + Request: testRequest{Method: "POST", Path: testapi.Experimental.ResourcePath(getDCResourceName(), ns, ""), Body: requestController, Query: buildQueryValues(nil)}, Response: Response{ StatusCode: 200, Body: &expapi.Daemon{ @@ -155,6 +155,6 @@ func TestCreateDaemon(t *testing.T) { }, }, } - receivedController, err := c.Setup().Experimental().Daemons(ns).Create(requestController) + receivedController, err := c.Setup(t).Experimental().Daemons(ns).Create(requestController) c.Validate(t, receivedController, err) } diff --git a/pkg/client/unversioned/deployment_test.go b/pkg/client/unversioned/deployment_test.go index 3c5330e5547..352479de238 100644 --- a/pkg/client/unversioned/deployment_test.go +++ b/pkg/client/unversioned/deployment_test.go @@ -21,8 +21,8 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/expapi" - "k8s.io/kubernetes/pkg/expapi/testapi" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" ) @@ -42,14 +42,14 @@ func TestDeploymentCreate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "POST", - Path: testapi.ResourcePath(getDeploymentsResoureName(), ns, ""), + Path: testapi.Experimental.ResourcePath(getDeploymentsResoureName(), ns, ""), Query: buildQueryValues(nil), Body: &deployment, }, Response: Response{StatusCode: 200, Body: &deployment}, } - response, err := c.Setup().Deployments(ns).Create(&deployment) + response, err := c.Setup(t).Deployments(ns).Create(&deployment) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -67,14 +67,14 @@ func TestDeploymentGet(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getDeploymentsResoureName(), ns, "abc"), + Path: testapi.Experimental.ResourcePath(getDeploymentsResoureName(), ns, "abc"), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: deployment}, } - response, err := c.Setup().Deployments(ns).Get("abc") + response, err := c.Setup(t).Deployments(ns).Get("abc") c.Validate(t, response, err) } @@ -93,13 +93,13 @@ func TestDeploymentList(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getDeploymentsResoureName(), ns, ""), + Path: testapi.Experimental.ResourcePath(getDeploymentsResoureName(), ns, ""), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: deploymentList}, } - response, err := c.Setup().Deployments(ns).List(labels.Everything(), fields.Everything()) + response, err := c.Setup(t).Deployments(ns).List(labels.Everything(), fields.Everything()) c.Validate(t, response, err) } @@ -115,12 +115,12 @@ func TestDeploymentUpdate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "PUT", - Path: testapi.ResourcePath(getDeploymentsResoureName(), ns, "abc"), + Path: testapi.Experimental.ResourcePath(getDeploymentsResoureName(), ns, 
"abc"), Query: buildQueryValues(nil), }, Response: Response{StatusCode: 200, Body: deployment}, } - response, err := c.Setup().Deployments(ns).Update(deployment) + response, err := c.Setup(t).Deployments(ns).Update(deployment) c.Validate(t, response, err) } @@ -129,12 +129,12 @@ func TestDeploymentDelete(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "DELETE", - Path: testapi.ResourcePath(getDeploymentsResoureName(), ns, "foo"), + Path: testapi.Experimental.ResourcePath(getDeploymentsResoureName(), ns, "foo"), Query: buildQueryValues(nil), }, Response: Response{StatusCode: 200}, } - err := c.Setup().Deployments(ns).Delete("foo", nil) + err := c.Setup(t).Deployments(ns).Delete("foo", nil) c.Validate(t, nil, err) } @@ -142,11 +142,11 @@ func TestDeploymentWatch(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePathWithPrefix("watch", getDeploymentsResoureName(), "", ""), + Path: testapi.Experimental.ResourcePathWithPrefix("watch", getDeploymentsResoureName(), "", ""), Query: url.Values{"resourceVersion": []string{}}, }, Response: Response{StatusCode: 200}, } - _, err := c.Setup().Deployments(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") + _, err := c.Setup(t).Deployments(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") c.Validate(t, nil, err) } diff --git a/pkg/client/unversioned/endpoints_test.go b/pkg/client/unversioned/endpoints_test.go index d416171488f..0c39ec5bca0 100644 --- a/pkg/client/unversioned/endpoints_test.go +++ b/pkg/client/unversioned/endpoints_test.go @@ -27,7 +27,7 @@ import ( func TestListEndpoints(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "GET", Path: testapi.ResourcePath("endpoints", ns, ""), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "GET", Path: testapi.Default.ResourcePath("endpoints", ns, ""), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: &api.EndpointsList{ Items: []api.Endpoints{ @@ -42,24 +42,24 @@ func TestListEndpoints(t *testing.T) { }, }, } - receivedEndpointsList, err := c.Setup().Endpoints(ns).List(labels.Everything()) + receivedEndpointsList, err := c.Setup(t).Endpoints(ns).List(labels.Everything()) c.Validate(t, receivedEndpointsList, err) } func TestGetEndpoints(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "GET", Path: testapi.ResourcePath("endpoints", ns, "endpoint-1"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "GET", Path: testapi.Default.ResourcePath("endpoints", ns, "endpoint-1"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: &api.Endpoints{ObjectMeta: api.ObjectMeta{Name: "endpoint-1"}}}, } - response, err := c.Setup().Endpoints(ns).Get("endpoint-1") + response, err := c.Setup(t).Endpoints(ns).Get("endpoint-1") c.Validate(t, response, err) } func TestGetEndpointWithNoName(t *testing.T) { ns := api.NamespaceDefault c := &testClient{Error: true} - receivedPod, err := c.Setup().Endpoints(ns).Get("") + receivedPod, err := c.Setup(t).Endpoints(ns).Get("") if (err != nil) && (err.Error() != nameRequiredError) { t.Errorf("Expected error: %v, but got %v", nameRequiredError, err) } diff --git a/pkg/client/unversioned/events_test.go b/pkg/client/unversioned/events_test.go index cc48a43a7e6..e8d7de4957e 100644 --- a/pkg/client/unversioned/events_test.go +++ b/pkg/client/unversioned/events_test.go @@ -32,24 +32,24 @@ func TestEventSearch(t *testing.T) { c := 
&testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath("events", "baz", ""), + Path: testapi.Default.ResourcePath("events", "baz", ""), Query: url.Values{ - api.FieldSelectorQueryParam(testapi.Version()): []string{ - getInvolvedObjectNameFieldLabel(testapi.Version()) + "=foo,", + api.FieldSelectorQueryParam(testapi.Default.Version()): []string{ + getInvolvedObjectNameFieldLabel(testapi.Default.Version()) + "=foo,", "involvedObject.namespace=baz,", "involvedObject.kind=Pod", }, - api.LabelSelectorQueryParam(testapi.Version()): []string{}, + api.LabelSelectorQueryParam(testapi.Default.Version()): []string{}, }, }, Response: Response{StatusCode: 200, Body: &api.EventList{}}, } - eventList, err := c.Setup().Events("baz").Search( + eventList, err := c.Setup(t).Events("baz").Search( &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "foo", Namespace: "baz", - SelfLink: testapi.SelfLink("pods", ""), + SelfLink: testapi.Default.SelfLink("pods", ""), }, }, ) @@ -78,13 +78,13 @@ func TestEventCreate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "POST", - Path: testapi.ResourcePath("events", api.NamespaceDefault, ""), + Path: testapi.Default.ResourcePath("events", api.NamespaceDefault, ""), Body: event, }, Response: Response{StatusCode: 200, Body: event}, } - response, err := c.Setup().Events(api.NamespaceDefault).Create(event) + response, err := c.Setup(t).Events(api.NamespaceDefault).Create(event) if err != nil { t.Fatalf("%v should be nil.", err) @@ -117,13 +117,13 @@ func TestEventGet(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath("events", "other", "1"), + Path: testapi.Default.ResourcePath("events", "other", "1"), Body: nil, }, Response: Response{StatusCode: 200, Body: event}, } - response, err := c.Setup().Events("other").Get("1") + response, err := c.Setup(t).Events("other").Get("1") if err != nil { t.Fatalf("%v should be nil.", err) @@ -158,12 +158,12 @@ func TestEventList(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath("events", ns, ""), + Path: testapi.Default.ResourcePath("events", ns, ""), Body: nil, }, Response: Response{StatusCode: 200, Body: eventList}, } - response, err := c.Setup().Events(ns).List(labels.Everything(), + response, err := c.Setup(t).Events(ns).List(labels.Everything(), fields.Everything()) if err != nil { @@ -186,10 +186,10 @@ func TestEventDelete(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "DELETE", - Path: testapi.ResourcePath("events", ns, "foo"), + Path: testapi.Default.ResourcePath("events", ns, "foo"), }, Response: Response{StatusCode: 200}, } - err := c.Setup().Events(ns).Delete("foo") + err := c.Setup(t).Events(ns).Delete("foo") c.Validate(t, nil, err) } diff --git a/pkg/client/unversioned/fake.go b/pkg/client/unversioned/fake.go index f7f7fc35574..a52cdb09727 100644 --- a/pkg/client/unversioned/fake.go +++ b/pkg/client/unversioned/fake.go @@ -41,23 +41,23 @@ type FakeRESTClient struct { } func (c *FakeRESTClient) Get() *Request { - return NewRequest(c, "GET", &url.URL{Host: "localhost"}, testapi.Version(), c.Codec) + return NewRequest(c, "GET", &url.URL{Host: "localhost"}, testapi.Default.Version(), c.Codec) } func (c *FakeRESTClient) Put() *Request { - return NewRequest(c, "PUT", &url.URL{Host: "localhost"}, testapi.Version(), c.Codec) + return NewRequest(c, "PUT", &url.URL{Host: "localhost"}, testapi.Default.Version(), c.Codec) } func (c *FakeRESTClient) Patch(_ api.PatchType) *Request { - return 
NewRequest(c, "PATCH", &url.URL{Host: "localhost"}, testapi.Version(), c.Codec) + return NewRequest(c, "PATCH", &url.URL{Host: "localhost"}, testapi.Default.Version(), c.Codec) } func (c *FakeRESTClient) Post() *Request { - return NewRequest(c, "POST", &url.URL{Host: "localhost"}, testapi.Version(), c.Codec) + return NewRequest(c, "POST", &url.URL{Host: "localhost"}, testapi.Default.Version(), c.Codec) } func (c *FakeRESTClient) Delete() *Request { - return NewRequest(c, "DELETE", &url.URL{Host: "localhost"}, testapi.Version(), c.Codec) + return NewRequest(c, "DELETE", &url.URL{Host: "localhost"}, testapi.Default.Version(), c.Codec) } func (c *FakeRESTClient) Do(req *http.Request) (*http.Response, error) { diff --git a/pkg/client/unversioned/helper_test.go b/pkg/client/unversioned/helper_test.go index a540a2d6159..a81c5d9a612 100644 --- a/pkg/client/unversioned/helper_test.go +++ b/pkg/client/unversioned/helper_test.go @@ -401,40 +401,40 @@ func TestNegotiateVersion(t *testing.T) { name: "server supports client default", version: "version1", config: &Config{}, - serverVersions: []string{"version1", testapi.Version()}, - clientVersions: []string{"version1", testapi.Version()}, + serverVersions: []string{"version1", testapi.Default.Version()}, + clientVersions: []string{"version1", testapi.Default.Version()}, expectedVersion: "version1", expectErr: false, }, { name: "server falls back to client supported", - version: testapi.Version(), + version: testapi.Default.Version(), config: &Config{}, serverVersions: []string{"version1"}, - clientVersions: []string{"version1", testapi.Version()}, + clientVersions: []string{"version1", testapi.Default.Version()}, expectedVersion: "version1", expectErr: false, }, { name: "explicit version supported", version: "", - config: &Config{Version: testapi.Version()}, - serverVersions: []string{"version1", testapi.Version()}, - clientVersions: []string{"version1", testapi.Version()}, - expectedVersion: testapi.Version(), + config: &Config{Version: testapi.Default.Version()}, + serverVersions: []string{"version1", testapi.Default.Version()}, + clientVersions: []string{"version1", testapi.Default.Version()}, + expectedVersion: testapi.Default.Version(), expectErr: false, }, { name: "explicit version not supported", version: "", - config: &Config{Version: testapi.Version()}, + config: &Config{Version: testapi.Default.Version()}, serverVersions: []string{"version1"}, - clientVersions: []string{"version1", testapi.Version()}, + clientVersions: []string{"version1", testapi.Default.Version()}, expectedVersion: "", expectErr: true, }, } - codec := testapi.Codec() + codec := testapi.Default.Codec() for _, test := range tests { fakeClient := &FakeRESTClient{ diff --git a/pkg/client/unversioned/horizontalpodautoscaler_test.go b/pkg/client/unversioned/horizontalpodautoscaler_test.go index d1fdd5aa16b..0ca1b5f0a1a 100644 --- a/pkg/client/unversioned/horizontalpodautoscaler_test.go +++ b/pkg/client/unversioned/horizontalpodautoscaler_test.go @@ -21,8 +21,8 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/expapi" - "k8s.io/kubernetes/pkg/expapi/testapi" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" ) @@ -42,14 +42,14 @@ func TestHorizontalPodAutoscalerCreate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "POST", - Path: testapi.ResourcePath(getHorizontalPodAutoscalersResoureName(), ns, ""), + Path: testapi.Experimental.ResourcePath(getHorizontalPodAutoscalersResoureName(), ns, ""), 
Query: buildQueryValues(nil), Body: &horizontalPodAutoscaler, }, Response: Response{StatusCode: 200, Body: &horizontalPodAutoscaler}, } - response, err := c.Setup().Experimental().HorizontalPodAutoscalers(ns).Create(&horizontalPodAutoscaler) + response, err := c.Setup(t).Experimental().HorizontalPodAutoscalers(ns).Create(&horizontalPodAutoscaler) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -67,14 +67,14 @@ func TestHorizontalPodAutoscalerGet(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getHorizontalPodAutoscalersResoureName(), ns, "abc"), + Path: testapi.Experimental.ResourcePath(getHorizontalPodAutoscalersResoureName(), ns, "abc"), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: horizontalPodAutoscaler}, } - response, err := c.Setup().Experimental().HorizontalPodAutoscalers(ns).Get("abc") + response, err := c.Setup(t).Experimental().HorizontalPodAutoscalers(ns).Get("abc") c.Validate(t, response, err) } @@ -93,13 +93,13 @@ func TestHorizontalPodAutoscalerList(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getHorizontalPodAutoscalersResoureName(), ns, ""), + Path: testapi.Experimental.ResourcePath(getHorizontalPodAutoscalersResoureName(), ns, ""), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: horizontalPodAutoscalerList}, } - response, err := c.Setup().Experimental().HorizontalPodAutoscalers(ns).List(labels.Everything(), fields.Everything()) + response, err := c.Setup(t).Experimental().HorizontalPodAutoscalers(ns).List(labels.Everything(), fields.Everything()) c.Validate(t, response, err) } @@ -113,20 +113,20 @@ func TestHorizontalPodAutoscalerUpdate(t *testing.T) { }, } c := &testClient{ - Request: testRequest{Method: "PUT", Path: testapi.ResourcePath(getHorizontalPodAutoscalersResoureName(), ns, "abc"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "PUT", Path: testapi.Experimental.ResourcePath(getHorizontalPodAutoscalersResoureName(), ns, "abc"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: horizontalPodAutoscaler}, } - response, err := c.Setup().Experimental().HorizontalPodAutoscalers(ns).Update(horizontalPodAutoscaler) + response, err := c.Setup(t).Experimental().HorizontalPodAutoscalers(ns).Update(horizontalPodAutoscaler) c.Validate(t, response, err) } func TestHorizontalPodAutoscalerDelete(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "DELETE", Path: testapi.ResourcePath(getHorizontalPodAutoscalersResoureName(), ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "DELETE", Path: testapi.Experimental.ResourcePath(getHorizontalPodAutoscalersResoureName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200}, } - err := c.Setup().Experimental().HorizontalPodAutoscalers(ns).Delete("foo", nil) + err := c.Setup(t).Experimental().HorizontalPodAutoscalers(ns).Delete("foo", nil) c.Validate(t, nil, err) } @@ -134,10 +134,10 @@ func TestHorizontalPodAutoscalerWatch(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePathWithPrefix("watch", getHorizontalPodAutoscalersResoureName(), "", ""), + Path: testapi.Experimental.ResourcePathWithPrefix("watch", getHorizontalPodAutoscalersResoureName(), "", ""), Query: url.Values{"resourceVersion": []string{}}}, Response: Response{StatusCode: 200}, } - _, err := 
c.Setup().Experimental().HorizontalPodAutoscalers(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") + _, err := c.Setup(t).Experimental().HorizontalPodAutoscalers(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") c.Validate(t, nil, err) } diff --git a/pkg/client/unversioned/limit_ranges_test.go b/pkg/client/unversioned/limit_ranges_test.go index e6d8134d823..cba367172fd 100644 --- a/pkg/client/unversioned/limit_ranges_test.go +++ b/pkg/client/unversioned/limit_ranges_test.go @@ -56,14 +56,14 @@ func TestLimitRangeCreate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "POST", - Path: testapi.ResourcePath(getLimitRangesResourceName(), ns, ""), + Path: testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, ""), Query: buildQueryValues(nil), Body: limitRange, }, Response: Response{StatusCode: 200, Body: limitRange}, } - response, err := c.Setup().LimitRanges(ns).Create(limitRange) + response, err := c.Setup(t).LimitRanges(ns).Create(limitRange) c.Validate(t, response, err) } @@ -92,14 +92,14 @@ func TestLimitRangeGet(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getLimitRangesResourceName(), ns, "abc"), + Path: testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, "abc"), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: limitRange}, } - response, err := c.Setup().LimitRanges(ns).Get("abc") + response, err := c.Setup(t).LimitRanges(ns).Get("abc") c.Validate(t, response, err) } @@ -116,13 +116,13 @@ func TestLimitRangeList(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getLimitRangesResourceName(), ns, ""), + Path: testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, ""), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: limitRangeList}, } - response, err := c.Setup().LimitRanges(ns).List(labels.Everything()) + response, err := c.Setup(t).LimitRanges(ns).List(labels.Everything()) c.Validate(t, response, err) } @@ -150,10 +150,10 @@ func TestLimitRangeUpdate(t *testing.T) { }, } c := &testClient{ - Request: testRequest{Method: "PUT", Path: testapi.ResourcePath(getLimitRangesResourceName(), ns, "abc"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "PUT", Path: testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, "abc"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: limitRange}, } - response, err := c.Setup().LimitRanges(ns).Update(limitRange) + response, err := c.Setup(t).LimitRanges(ns).Update(limitRange) c.Validate(t, response, err) } @@ -180,10 +180,10 @@ func TestInvalidLimitRangeUpdate(t *testing.T) { }, } c := &testClient{ - Request: testRequest{Method: "PUT", Path: testapi.ResourcePath(getLimitRangesResourceName(), ns, "abc"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "PUT", Path: testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, "abc"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: limitRange}, } - _, err := c.Setup().LimitRanges(ns).Update(limitRange) + _, err := c.Setup(t).LimitRanges(ns).Update(limitRange) if err == nil { t.Errorf("Expected an error due to missing ResourceVersion") } @@ -192,10 +192,10 @@ func TestInvalidLimitRangeUpdate(t *testing.T) { func TestLimitRangeDelete(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "DELETE", Path: 
testapi.ResourcePath(getLimitRangesResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "DELETE", Path: testapi.Default.ResourcePath(getLimitRangesResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200}, } - err := c.Setup().LimitRanges(ns).Delete("foo") + err := c.Setup(t).LimitRanges(ns).Delete("foo") c.Validate(t, nil, err) } @@ -203,10 +203,10 @@ func TestLimitRangeWatch(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePathWithPrefix("watch", getLimitRangesResourceName(), "", ""), + Path: testapi.Default.ResourcePathWithPrefix("watch", getLimitRangesResourceName(), "", ""), Query: url.Values{"resourceVersion": []string{}}}, Response: Response{StatusCode: 200}, } - _, err := c.Setup().LimitRanges(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") + _, err := c.Setup(t).LimitRanges(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") c.Validate(t, nil, err) } diff --git a/pkg/client/unversioned/namespaces_test.go b/pkg/client/unversioned/namespaces_test.go index 6ca87a98d19..7fc36157345 100644 --- a/pkg/client/unversioned/namespaces_test.go +++ b/pkg/client/unversioned/namespaces_test.go @@ -34,14 +34,14 @@ func TestNamespaceCreate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "POST", - Path: testapi.ResourcePath("namespaces", "", ""), + Path: testapi.Default.ResourcePath("namespaces", "", ""), Body: namespace, }, Response: Response{StatusCode: 200, Body: namespace}, } // from the source ns, provision a new global namespace "foo" - response, err := c.Setup().Namespaces().Create(namespace) + response, err := c.Setup(t).Namespaces().Create(namespace) if err != nil { t.Errorf("%#v should be nil.", err) @@ -59,13 +59,13 @@ func TestNamespaceGet(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath("namespaces", "", "foo"), + Path: testapi.Default.ResourcePath("namespaces", "", "foo"), Body: nil, }, Response: Response{StatusCode: 200, Body: namespace}, } - response, err := c.Setup().Namespaces().Get("foo") + response, err := c.Setup(t).Namespaces().Get("foo") if err != nil { t.Errorf("%#v should be nil.", err) @@ -87,12 +87,12 @@ func TestNamespaceList(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath("namespaces", "", ""), + Path: testapi.Default.ResourcePath("namespaces", "", ""), Body: nil, }, Response: Response{StatusCode: 200, Body: namespaceList}, } - response, err := c.Setup().Namespaces().List(labels.Everything(), fields.Everything()) + response, err := c.Setup(t).Namespaces().List(labels.Everything(), fields.Everything()) if err != nil { t.Errorf("%#v should be nil.", err) @@ -125,10 +125,10 @@ func TestNamespaceUpdate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "PUT", - Path: testapi.ResourcePath("namespaces", "", "foo")}, + Path: testapi.Default.ResourcePath("namespaces", "", "foo")}, Response: Response{StatusCode: 200, Body: requestNamespace}, } - receivedNamespace, err := c.Setup().Namespaces().Update(requestNamespace) + receivedNamespace, err := c.Setup(t).Namespaces().Update(requestNamespace) c.Validate(t, receivedNamespace, err) } @@ -149,20 +149,20 @@ func TestNamespaceFinalize(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "PUT", - Path: testapi.ResourcePath("namespaces", "", "foo") + "/finalize", + Path: testapi.Default.ResourcePath("namespaces", "", "foo") + 
"/finalize", }, Response: Response{StatusCode: 200, Body: requestNamespace}, } - receivedNamespace, err := c.Setup().Namespaces().Finalize(requestNamespace) + receivedNamespace, err := c.Setup(t).Namespaces().Finalize(requestNamespace) c.Validate(t, receivedNamespace, err) } func TestNamespaceDelete(t *testing.T) { c := &testClient{ - Request: testRequest{Method: "DELETE", Path: testapi.ResourcePath("namespaces", "", "foo")}, + Request: testRequest{Method: "DELETE", Path: testapi.Default.ResourcePath("namespaces", "", "foo")}, Response: Response{StatusCode: 200}, } - err := c.Setup().Namespaces().Delete("foo") + err := c.Setup(t).Namespaces().Delete("foo") c.Validate(t, nil, err) } @@ -170,10 +170,10 @@ func TestNamespaceWatch(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePathWithPrefix("watch", "namespaces", "", ""), + Path: testapi.Default.ResourcePathWithPrefix("watch", "namespaces", "", ""), Query: url.Values{"resourceVersion": []string{}}}, Response: Response{StatusCode: 200}, } - _, err := c.Setup().Namespaces().Watch(labels.Everything(), fields.Everything(), "") + _, err := c.Setup(t).Namespaces().Watch(labels.Everything(), fields.Everything(), "") c.Validate(t, nil, err) } diff --git a/pkg/client/unversioned/nodes_test.go b/pkg/client/unversioned/nodes_test.go index d621d5a9039..44ca27bc6cf 100644 --- a/pkg/client/unversioned/nodes_test.go +++ b/pkg/client/unversioned/nodes_test.go @@ -35,20 +35,20 @@ func TestListMinions(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getNodesResourceName(), "", ""), + Path: testapi.Default.ResourcePath(getNodesResourceName(), "", ""), }, Response: Response{StatusCode: 200, Body: &api.NodeList{ListMeta: api.ListMeta{ResourceVersion: "1"}}}, } - response, err := c.Setup().Nodes().List(labels.Everything(), fields.Everything()) + response, err := c.Setup(t).Nodes().List(labels.Everything(), fields.Everything()) c.Validate(t, response, err) } func TestListMinionsLabels(t *testing.T) { - labelSelectorQueryParamName := api.LabelSelectorQueryParam(testapi.Version()) + labelSelectorQueryParamName := api.LabelSelectorQueryParam(testapi.Default.Version()) c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getNodesResourceName(), "", ""), + Path: testapi.Default.ResourcePath(getNodesResourceName(), "", ""), Query: buildQueryValues(url.Values{labelSelectorQueryParamName: []string{"foo=bar,name=baz"}})}, Response: Response{ StatusCode: 200, @@ -66,7 +66,7 @@ func TestListMinionsLabels(t *testing.T) { }, }, } - c.Setup() + c.Setup(t) c.QueryValidator[labelSelectorQueryParamName] = validateLabels selector := labels.Set{"foo": "bar", "name": "baz"}.AsSelector() receivedNodeList, err := c.Nodes().List(selector, fields.Everything()) @@ -77,17 +77,17 @@ func TestGetMinion(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getNodesResourceName(), "", "1"), + Path: testapi.Default.ResourcePath(getNodesResourceName(), "", "1"), }, Response: Response{StatusCode: 200, Body: &api.Node{ObjectMeta: api.ObjectMeta{Name: "minion-1"}}}, } - response, err := c.Setup().Nodes().Get("1") + response, err := c.Setup(t).Nodes().Get("1") c.Validate(t, response, err) } func TestGetMinionWithNoName(t *testing.T) { c := &testClient{Error: true} - receivedNode, err := c.Setup().Nodes().Get("") + receivedNode, err := c.Setup(t).Nodes().Get("") if (err != nil) && (err.Error() != nameRequiredError) { 
t.Errorf("Expected error: %v, but got %v", nameRequiredError, err) } @@ -113,14 +113,14 @@ func TestCreateMinion(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "POST", - Path: testapi.ResourcePath(getNodesResourceName(), "", ""), + Path: testapi.Default.ResourcePath(getNodesResourceName(), "", ""), Body: requestMinion}, Response: Response{ StatusCode: 200, Body: requestMinion, }, } - receivedMinion, err := c.Setup().Nodes().Create(requestMinion) + receivedMinion, err := c.Setup(t).Nodes().Create(requestMinion) c.Validate(t, receivedMinion, err) } @@ -128,11 +128,11 @@ func TestDeleteMinion(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "DELETE", - Path: testapi.ResourcePath(getNodesResourceName(), "", "foo"), + Path: testapi.Default.ResourcePath(getNodesResourceName(), "", "foo"), }, Response: Response{StatusCode: 200}, } - err := c.Setup().Nodes().Delete("foo") + err := c.Setup(t).Nodes().Delete("foo") c.Validate(t, nil, err) } @@ -155,10 +155,10 @@ func TestUpdateMinion(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "PUT", - Path: testapi.ResourcePath(getNodesResourceName(), "", "foo"), + Path: testapi.Default.ResourcePath(getNodesResourceName(), "", "foo"), }, Response: Response{StatusCode: 200, Body: requestMinion}, } - response, err := c.Setup().Nodes().Update(requestMinion) + response, err := c.Setup(t).Nodes().Update(requestMinion) c.Validate(t, response, err) } diff --git a/pkg/client/unversioned/persistentvolume_test.go b/pkg/client/unversioned/persistentvolume_test.go index 87cfb73cad0..6b09f5f518b 100644 --- a/pkg/client/unversioned/persistentvolume_test.go +++ b/pkg/client/unversioned/persistentvolume_test.go @@ -49,14 +49,14 @@ func TestPersistentVolumeCreate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "POST", - Path: testapi.ResourcePath(getPersistentVolumesResoureName(), "", ""), + Path: testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", ""), Query: buildQueryValues(nil), Body: pv, }, Response: Response{StatusCode: 200, Body: pv}, } - response, err := c.Setup().PersistentVolumes().Create(pv) + response, err := c.Setup(t).PersistentVolumes().Create(pv) c.Validate(t, response, err) } @@ -78,14 +78,14 @@ func TestPersistentVolumeGet(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getPersistentVolumesResoureName(), "", "abc"), + Path: testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", "abc"), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: persistentVolume}, } - response, err := c.Setup().PersistentVolumes().Get("abc") + response, err := c.Setup(t).PersistentVolumes().Get("abc") c.Validate(t, response, err) } @@ -100,13 +100,13 @@ func TestPersistentVolumeList(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getPersistentVolumesResoureName(), "", ""), + Path: testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", ""), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: persistentVolumeList}, } - response, err := c.Setup().PersistentVolumes().List(labels.Everything(), fields.Everything()) + response, err := c.Setup(t).PersistentVolumes().List(labels.Everything(), fields.Everything()) c.Validate(t, response, err) } @@ -126,10 +126,10 @@ func TestPersistentVolumeUpdate(t *testing.T) { }, } c := &testClient{ - Request: testRequest{Method: "PUT", Path: 
testapi.ResourcePath(getPersistentVolumesResoureName(), "", "abc"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "PUT", Path: testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", "abc"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: persistentVolume}, } - response, err := c.Setup().PersistentVolumes().Update(persistentVolume) + response, err := c.Setup(t).PersistentVolumes().Update(persistentVolume) c.Validate(t, response, err) } @@ -155,20 +155,20 @@ func TestPersistentVolumeStatusUpdate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "PUT", - Path: testapi.ResourcePath(getPersistentVolumesResoureName(), "", "abc") + "/status", + Path: testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", "abc") + "/status", Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: persistentVolume}, } - response, err := c.Setup().PersistentVolumes().UpdateStatus(persistentVolume) + response, err := c.Setup(t).PersistentVolumes().UpdateStatus(persistentVolume) c.Validate(t, response, err) } func TestPersistentVolumeDelete(t *testing.T) { c := &testClient{ - Request: testRequest{Method: "DELETE", Path: testapi.ResourcePath(getPersistentVolumesResoureName(), "", "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "DELETE", Path: testapi.Default.ResourcePath(getPersistentVolumesResoureName(), "", "foo"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200}, } - err := c.Setup().PersistentVolumes().Delete("foo") + err := c.Setup(t).PersistentVolumes().Delete("foo") c.Validate(t, nil, err) } @@ -176,10 +176,10 @@ func TestPersistentVolumeWatch(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePathWithPrefix("watch", getPersistentVolumesResoureName(), "", ""), + Path: testapi.Default.ResourcePathWithPrefix("watch", getPersistentVolumesResoureName(), "", ""), Query: url.Values{"resourceVersion": []string{}}}, Response: Response{StatusCode: 200}, } - _, err := c.Setup().PersistentVolumes().Watch(labels.Everything(), fields.Everything(), "") + _, err := c.Setup(t).PersistentVolumes().Watch(labels.Everything(), fields.Everything(), "") c.Validate(t, nil, err) } diff --git a/pkg/client/unversioned/persistentvolumeclaim_test.go b/pkg/client/unversioned/persistentvolumeclaim_test.go index 2bbffa2e773..ae50560aba9 100644 --- a/pkg/client/unversioned/persistentvolumeclaim_test.go +++ b/pkg/client/unversioned/persistentvolumeclaim_test.go @@ -53,14 +53,14 @@ func TestPersistentVolumeClaimCreate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "POST", - Path: testapi.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, ""), + Path: testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, ""), Query: buildQueryValues(nil), Body: pv, }, Response: Response{StatusCode: 200, Body: pv}, } - response, err := c.Setup().PersistentVolumeClaims(ns).Create(pv) + response, err := c.Setup(t).PersistentVolumeClaims(ns).Create(pv) c.Validate(t, response, err) } @@ -86,14 +86,14 @@ func TestPersistentVolumeClaimGet(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "abc"), + Path: testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "abc"), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: persistentVolumeClaim}, } - response, err := c.Setup().PersistentVolumeClaims(ns).Get("abc") 
+ response, err := c.Setup(t).PersistentVolumeClaims(ns).Get("abc") c.Validate(t, response, err) } @@ -109,13 +109,13 @@ func TestPersistentVolumeClaimList(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, ""), + Path: testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, ""), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: persistentVolumeList}, } - response, err := c.Setup().PersistentVolumeClaims(ns).List(labels.Everything(), fields.Everything()) + response, err := c.Setup(t).PersistentVolumeClaims(ns).List(labels.Everything(), fields.Everything()) c.Validate(t, response, err) } @@ -139,10 +139,10 @@ func TestPersistentVolumeClaimUpdate(t *testing.T) { }, } c := &testClient{ - Request: testRequest{Method: "PUT", Path: testapi.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "abc"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "PUT", Path: testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "abc"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: persistentVolumeClaim}, } - response, err := c.Setup().PersistentVolumeClaims(ns).Update(persistentVolumeClaim) + response, err := c.Setup(t).PersistentVolumeClaims(ns).Update(persistentVolumeClaim) c.Validate(t, response, err) } @@ -171,21 +171,21 @@ func TestPersistentVolumeClaimStatusUpdate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "PUT", - Path: testapi.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "abc") + "/status", + Path: testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "abc") + "/status", Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: persistentVolumeClaim}, } - response, err := c.Setup().PersistentVolumeClaims(ns).UpdateStatus(persistentVolumeClaim) + response, err := c.Setup(t).PersistentVolumeClaims(ns).UpdateStatus(persistentVolumeClaim) c.Validate(t, response, err) } func TestPersistentVolumeClaimDelete(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "DELETE", Path: testapi.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "DELETE", Path: testapi.Default.ResourcePath(getPersistentVolumeClaimsResoureName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200}, } - err := c.Setup().PersistentVolumeClaims(ns).Delete("foo") + err := c.Setup(t).PersistentVolumeClaims(ns).Delete("foo") c.Validate(t, nil, err) } @@ -193,10 +193,10 @@ func TestPersistentVolumeClaimWatch(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePathWithPrefix("watch", getPersistentVolumeClaimsResoureName(), "", ""), + Path: testapi.Default.ResourcePathWithPrefix("watch", getPersistentVolumeClaimsResoureName(), "", ""), Query: url.Values{"resourceVersion": []string{}}}, Response: Response{StatusCode: 200}, } - _, err := c.Setup().PersistentVolumeClaims(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") + _, err := c.Setup(t).PersistentVolumeClaims(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") c.Validate(t, nil, err) } diff --git a/pkg/client/unversioned/pod_templates_test.go b/pkg/client/unversioned/pod_templates_test.go index 80fb9496171..5f35efa9d4f 100644 --- a/pkg/client/unversioned/pod_templates_test.go +++ 
b/pkg/client/unversioned/pod_templates_test.go @@ -42,14 +42,14 @@ func TestPodTemplateCreate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "POST", - Path: testapi.ResourcePath(getPodTemplatesResoureName(), ns, ""), + Path: testapi.Default.ResourcePath(getPodTemplatesResoureName(), ns, ""), Query: buildQueryValues(nil), Body: &podTemplate, }, Response: Response{StatusCode: 200, Body: &podTemplate}, } - response, err := c.Setup().PodTemplates(ns).Create(&podTemplate) + response, err := c.Setup(t).PodTemplates(ns).Create(&podTemplate) c.Validate(t, response, err) } @@ -65,14 +65,14 @@ func TestPodTemplateGet(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getPodTemplatesResoureName(), ns, "abc"), + Path: testapi.Default.ResourcePath(getPodTemplatesResoureName(), ns, "abc"), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: podTemplate}, } - response, err := c.Setup().PodTemplates(ns).Get("abc") + response, err := c.Setup(t).PodTemplates(ns).Get("abc") c.Validate(t, response, err) } @@ -91,13 +91,13 @@ func TestPodTemplateList(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getPodTemplatesResoureName(), ns, ""), + Path: testapi.Default.ResourcePath(getPodTemplatesResoureName(), ns, ""), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: podTemplateList}, } - response, err := c.Setup().PodTemplates(ns).List(labels.Everything(), fields.Everything()) + response, err := c.Setup(t).PodTemplates(ns).List(labels.Everything(), fields.Everything()) c.Validate(t, response, err) } @@ -112,20 +112,20 @@ func TestPodTemplateUpdate(t *testing.T) { Template: api.PodTemplateSpec{}, } c := &testClient{ - Request: testRequest{Method: "PUT", Path: testapi.ResourcePath(getPodTemplatesResoureName(), ns, "abc"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "PUT", Path: testapi.Default.ResourcePath(getPodTemplatesResoureName(), ns, "abc"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: podTemplate}, } - response, err := c.Setup().PodTemplates(ns).Update(podTemplate) + response, err := c.Setup(t).PodTemplates(ns).Update(podTemplate) c.Validate(t, response, err) } func TestPodTemplateDelete(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "DELETE", Path: testapi.ResourcePath(getPodTemplatesResoureName(), ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "DELETE", Path: testapi.Default.ResourcePath(getPodTemplatesResoureName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200}, } - err := c.Setup().PodTemplates(ns).Delete("foo", nil) + err := c.Setup(t).PodTemplates(ns).Delete("foo", nil) c.Validate(t, nil, err) } @@ -133,10 +133,10 @@ func TestPodTemplateWatch(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePathWithPrefix("watch", getPodTemplatesResoureName(), "", ""), + Path: testapi.Default.ResourcePathWithPrefix("watch", getPodTemplatesResoureName(), "", ""), Query: url.Values{"resourceVersion": []string{}}}, Response: Response{StatusCode: 200}, } - _, err := c.Setup().PodTemplates(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") + _, err := c.Setup(t).PodTemplates(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") c.Validate(t, nil, err) } diff --git a/pkg/client/unversioned/pods_test.go 
b/pkg/client/unversioned/pods_test.go index 280995e5739..ad28e089ddb 100644 --- a/pkg/client/unversioned/pods_test.go +++ b/pkg/client/unversioned/pods_test.go @@ -29,17 +29,17 @@ import ( func TestListEmptyPods(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "GET", Path: testapi.ResourcePath("pods", ns, ""), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "GET", Path: testapi.Default.ResourcePath("pods", ns, ""), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: &api.PodList{}}, } - podList, err := c.Setup().Pods(ns).List(labels.Everything(), fields.Everything()) + podList, err := c.Setup(t).Pods(ns).List(labels.Everything(), fields.Everything()) c.Validate(t, podList, err) } func TestListPods(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "GET", Path: testapi.ResourcePath("pods", ns, ""), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "GET", Path: testapi.Default.ResourcePath("pods", ns, ""), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: &api.PodList{ Items: []api.Pod{ @@ -58,17 +58,17 @@ func TestListPods(t *testing.T) { }, }, } - receivedPodList, err := c.Setup().Pods(ns).List(labels.Everything(), fields.Everything()) + receivedPodList, err := c.Setup(t).Pods(ns).List(labels.Everything(), fields.Everything()) c.Validate(t, receivedPodList, err) } func TestListPodsLabels(t *testing.T) { ns := api.NamespaceDefault - labelSelectorQueryParamName := api.LabelSelectorQueryParam(testapi.Version()) + labelSelectorQueryParamName := api.LabelSelectorQueryParam(testapi.Default.Version()) c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath("pods", ns, ""), + Path: testapi.Default.ResourcePath("pods", ns, ""), Query: buildQueryValues(url.Values{labelSelectorQueryParamName: []string{"foo=bar,name=baz"}})}, Response: Response{ StatusCode: 200, @@ -89,7 +89,7 @@ func TestListPodsLabels(t *testing.T) { }, }, } - c.Setup() + c.Setup(t) c.QueryValidator[labelSelectorQueryParamName] = validateLabels selector := labels.Set{"foo": "bar", "name": "baz"}.AsSelector() receivedPodList, err := c.Pods(ns).List(selector, fields.Everything()) @@ -99,7 +99,7 @@ func TestListPodsLabels(t *testing.T) { func TestGetPod(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "GET", Path: testapi.ResourcePath("pods", ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "GET", Path: testapi.Default.ResourcePath("pods", ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{ StatusCode: 200, Body: &api.Pod{ @@ -115,14 +115,14 @@ func TestGetPod(t *testing.T) { }, }, } - receivedPod, err := c.Setup().Pods(ns).Get("foo") + receivedPod, err := c.Setup(t).Pods(ns).Get("foo") c.Validate(t, receivedPod, err) } func TestGetPodWithNoName(t *testing.T) { ns := api.NamespaceDefault c := &testClient{Error: true} - receivedPod, err := c.Setup().Pods(ns).Get("") + receivedPod, err := c.Setup(t).Pods(ns).Get("") if (err != nil) && (err.Error() != nameRequiredError) { t.Errorf("Expected error: %v, but got %v", nameRequiredError, err) } @@ -133,10 +133,10 @@ func TestGetPodWithNoName(t *testing.T) { func TestDeletePod(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "DELETE", Path: testapi.ResourcePath("pods", ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "DELETE", Path: testapi.Default.ResourcePath("pods", 
ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200}, } - err := c.Setup().Pods(ns).Delete("foo", nil) + err := c.Setup(t).Pods(ns).Delete("foo", nil) c.Validate(t, nil, err) } @@ -154,13 +154,13 @@ func TestCreatePod(t *testing.T) { }, } c := &testClient{ - Request: testRequest{Method: "POST", Path: testapi.ResourcePath("pods", ns, ""), Query: buildQueryValues(nil), Body: requestPod}, + Request: testRequest{Method: "POST", Path: testapi.Default.ResourcePath("pods", ns, ""), Query: buildQueryValues(nil), Body: requestPod}, Response: Response{ StatusCode: 200, Body: requestPod, }, } - receivedPod, err := c.Setup().Pods(ns).Create(requestPod) + receivedPod, err := c.Setup(t).Pods(ns).Create(requestPod) c.Validate(t, receivedPod, err) } @@ -180,9 +180,9 @@ func TestUpdatePod(t *testing.T) { }, } c := &testClient{ - Request: testRequest{Method: "PUT", Path: testapi.ResourcePath("pods", ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "PUT", Path: testapi.Default.ResourcePath("pods", ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: requestPod}, } - receivedPod, err := c.Setup().Pods(ns).Update(requestPod) + receivedPod, err := c.Setup(t).Pods(ns).Update(requestPod) c.Validate(t, receivedPod, err) } diff --git a/pkg/client/unversioned/replication_controllers_test.go b/pkg/client/unversioned/replication_controllers_test.go index c5a0cc15a94..6908f42a654 100644 --- a/pkg/client/unversioned/replication_controllers_test.go +++ b/pkg/client/unversioned/replication_controllers_test.go @@ -33,7 +33,7 @@ func TestListControllers(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getRCResourceName(), ns, ""), + Path: testapi.Default.ResourcePath(getRCResourceName(), ns, ""), }, Response: Response{StatusCode: 200, Body: &api.ReplicationControllerList{ @@ -55,7 +55,7 @@ func TestListControllers(t *testing.T) { }, }, } - receivedControllerList, err := c.Setup().ReplicationControllers(ns).List(labels.Everything()) + receivedControllerList, err := c.Setup(t).ReplicationControllers(ns).List(labels.Everything()) c.Validate(t, receivedControllerList, err) } @@ -63,7 +63,7 @@ func TestListControllers(t *testing.T) { func TestGetController(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "GET", Path: testapi.ResourcePath(getRCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "GET", Path: testapi.Default.ResourcePath(getRCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{ StatusCode: 200, Body: &api.ReplicationController{ @@ -81,14 +81,14 @@ func TestGetController(t *testing.T) { }, }, } - receivedController, err := c.Setup().ReplicationControllers(ns).Get("foo") + receivedController, err := c.Setup(t).ReplicationControllers(ns).Get("foo") c.Validate(t, receivedController, err) } func TestGetControllerWithNoName(t *testing.T) { ns := api.NamespaceDefault c := &testClient{Error: true} - receivedPod, err := c.Setup().ReplicationControllers(ns).Get("") + receivedPod, err := c.Setup(t).ReplicationControllers(ns).Get("") if (err != nil) && (err.Error() != nameRequiredError) { t.Errorf("Expected error: %v, but got %v", nameRequiredError, err) } @@ -102,7 +102,7 @@ func TestUpdateController(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, } c := &testClient{ - Request: testRequest{Method: "PUT", Path: testapi.ResourcePath(getRCResourceName(), ns, 
"foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "PUT", Path: testapi.Default.ResourcePath(getRCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{ StatusCode: 200, Body: &api.ReplicationController{ @@ -120,17 +120,17 @@ func TestUpdateController(t *testing.T) { }, }, } - receivedController, err := c.Setup().ReplicationControllers(ns).Update(requestController) + receivedController, err := c.Setup(t).ReplicationControllers(ns).Update(requestController) c.Validate(t, receivedController, err) } func TestDeleteController(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "DELETE", Path: testapi.ResourcePath(getRCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "DELETE", Path: testapi.Default.ResourcePath(getRCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200}, } - err := c.Setup().ReplicationControllers(ns).Delete("foo") + err := c.Setup(t).ReplicationControllers(ns).Delete("foo") c.Validate(t, nil, err) } @@ -140,7 +140,7 @@ func TestCreateController(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: "foo"}, } c := &testClient{ - Request: testRequest{Method: "POST", Path: testapi.ResourcePath(getRCResourceName(), ns, ""), Body: requestController, Query: buildQueryValues(nil)}, + Request: testRequest{Method: "POST", Path: testapi.Default.ResourcePath(getRCResourceName(), ns, ""), Body: requestController, Query: buildQueryValues(nil)}, Response: Response{ StatusCode: 200, Body: &api.ReplicationController{ @@ -158,6 +158,6 @@ func TestCreateController(t *testing.T) { }, }, } - receivedController, err := c.Setup().ReplicationControllers(ns).Create(requestController) + receivedController, err := c.Setup(t).ReplicationControllers(ns).Create(requestController) c.Validate(t, receivedController, err) } diff --git a/pkg/client/unversioned/request_test.go b/pkg/client/unversioned/request_test.go index fd4b9e24008..7fbe26c5556 100644 --- a/pkg/client/unversioned/request_test.go +++ b/pkg/client/unversioned/request_test.go @@ -47,7 +47,7 @@ import ( func TestRequestWithErrorWontChange(t *testing.T) { original := Request{ err: errors.New("test"), - apiVersion: testapi.Version(), + apiVersion: testapi.Default.Version(), } r := original changed := r.Param("foo", "bar"). 
@@ -269,7 +269,7 @@ func TestTransformResponse(t *testing.T) { {Response: &http.Response{StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader(invalid))}, Data: invalid}, } for i, test := range testCases { - r := NewRequest(nil, "", uri, testapi.Version(), testapi.Codec()) + r := NewRequest(nil, "", uri, testapi.Default.Version(), testapi.Default.Codec()) if test.Response.Body == nil { test.Response.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) } @@ -407,7 +407,7 @@ func TestRequestWatch(t *testing.T) { }, { Request: &Request{ - codec: testapi.Codec(), + codec: testapi.Default.Codec(), client: clientFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{StatusCode: http.StatusForbidden}, nil }), @@ -420,7 +420,7 @@ func TestRequestWatch(t *testing.T) { }, { Request: &Request{ - codec: testapi.Codec(), + codec: testapi.Default.Codec(), client: clientFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{StatusCode: http.StatusUnauthorized}, nil }), @@ -433,11 +433,11 @@ func TestRequestWatch(t *testing.T) { }, { Request: &Request{ - codec: testapi.Codec(), + codec: testapi.Default.Codec(), client: clientFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusUnauthorized, - Body: ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(testapi.Codec(), &api.Status{ + Body: ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), &api.Status{ Status: api.StatusFailure, Reason: api.StatusReasonUnauthorized, })))), @@ -537,7 +537,7 @@ func TestRequestStream(t *testing.T) { client: clientFunc(func(req *http.Request) (*http.Response, error) { return &http.Response{ StatusCode: http.StatusUnauthorized, - Body: ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(testapi.Codec(), &api.Status{ + Body: ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), &api.Status{ Status: api.StatusFailure, Reason: api.StatusReasonUnauthorized, })))), @@ -629,7 +629,7 @@ func TestRequestUpgrade(t *testing.T) { Err: true, }, { - Request: NewRequest(nil, "", uri, testapi.Version(), testapi.Codec()), + Request: NewRequest(nil, "", uri, testapi.Default.Version(), testapi.Default.Codec()), Config: &Config{ Username: "u", Password: "p", @@ -638,7 +638,7 @@ func TestRequestUpgrade(t *testing.T) { Err: false, }, { - Request: NewRequest(nil, "", uri, testapi.Version(), testapi.Codec()), + Request: NewRequest(nil, "", uri, testapi.Default.Version(), testapi.Default.Codec()), Config: &Config{ BearerToken: "b", }, @@ -719,7 +719,7 @@ func TestDoRequestNewWay(t *testing.T) { Port: 12345, TargetPort: util.NewIntOrStringFromInt(12345), }}}} - expectedBody, _ := testapi.Codec().Encode(expectedObj) + expectedBody, _ := testapi.Default.Codec().Encode(expectedObj) fakeHandler := util.FakeHandler{ StatusCode: 200, ResponseBody: string(expectedBody), @@ -727,7 +727,7 @@ func TestDoRequestNewWay(t *testing.T) { } testServer := httptest.NewServer(&fakeHandler) defer testServer.Close() - c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Version(), Username: "user", Password: "pass"}) + c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Default.Version(), Username: "user", Password: "pass"}) obj, err := c.Verb("POST"). Prefix("foo", "bar"). Suffix("baz"). 
@@ -743,7 +743,7 @@ func TestDoRequestNewWay(t *testing.T) { } else if !api.Semantic.DeepDerivative(expectedObj, obj) { t.Errorf("Expected: %#v, got %#v", expectedObj, obj) } - requestURL := testapi.ResourcePathWithPrefix("foo/bar", "", "", "baz") + requestURL := testapi.Default.ResourcePathWithPrefix("foo/bar", "", "", "baz") requestURL += "?timeout=1s" fakeHandler.ValidateRequest(t, requestURL, "POST", &reqBody) if fakeHandler.RequestReceived.Header["Authorization"] == nil { @@ -767,7 +767,7 @@ func TestCheckRetryClosesBody(t *testing.T) { })) defer testServer.Close() - c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Version(), Username: "user", Password: "pass"}) + c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Default.Version(), Username: "user", Password: "pass"}) _, err := c.Verb("POST"). Prefix("foo", "bar"). Suffix("baz"). @@ -796,7 +796,7 @@ func BenchmarkCheckRetryClosesBody(t *testing.B) { })) defer testServer.Close() - c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Version(), Username: "user", Password: "pass"}) + c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Default.Version(), Username: "user", Password: "pass"}) r := c.Verb("POST"). Prefix("foo", "bar"). Suffix("baz"). @@ -811,20 +811,20 @@ func BenchmarkCheckRetryClosesBody(t *testing.B) { } func TestDoRequestNewWayReader(t *testing.T) { reqObj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} - reqBodyExpected, _ := testapi.Codec().Encode(reqObj) + reqBodyExpected, _ := testapi.Default.Codec().Encode(reqObj) expectedObj := &api.Service{Spec: api.ServiceSpec{Ports: []api.ServicePort{{ Protocol: "TCP", Port: 12345, TargetPort: util.NewIntOrStringFromInt(12345), }}}} - expectedBody, _ := testapi.Codec().Encode(expectedObj) + expectedBody, _ := testapi.Default.Codec().Encode(expectedObj) fakeHandler := util.FakeHandler{ StatusCode: 200, ResponseBody: string(expectedBody), T: t, } testServer := httptest.NewServer(&fakeHandler) - c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Version(), Username: "user", Password: "pass"}) + c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Default.Version(), Username: "user", Password: "pass"}) obj, err := c.Verb("POST"). Resource("bar"). Name("baz"). @@ -843,8 +843,8 @@ func TestDoRequestNewWayReader(t *testing.T) { t.Errorf("Expected: %#v, got %#v", expectedObj, obj) } tmpStr := string(reqBodyExpected) - requestURL := testapi.ResourcePathWithPrefix("foo", "bar", "", "baz") - requestURL += "?" + api.LabelSelectorQueryParam(testapi.Version()) + "=name%3Dfoo&timeout=1s" + requestURL := testapi.Default.ResourcePathWithPrefix("foo", "bar", "", "baz") + requestURL += "?" 
+ api.LabelSelectorQueryParam(testapi.Default.Version()) + "=name%3Dfoo&timeout=1s" fakeHandler.ValidateRequest(t, requestURL, "POST", &tmpStr) if fakeHandler.RequestReceived.Header["Authorization"] == nil { t.Errorf("Request is missing authorization header: %#v", *fakeHandler.RequestReceived) @@ -853,20 +853,20 @@ func TestDoRequestNewWayReader(t *testing.T) { func TestDoRequestNewWayObj(t *testing.T) { reqObj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} - reqBodyExpected, _ := testapi.Codec().Encode(reqObj) + reqBodyExpected, _ := testapi.Default.Codec().Encode(reqObj) expectedObj := &api.Service{Spec: api.ServiceSpec{Ports: []api.ServicePort{{ Protocol: "TCP", Port: 12345, TargetPort: util.NewIntOrStringFromInt(12345), }}}} - expectedBody, _ := testapi.Codec().Encode(expectedObj) + expectedBody, _ := testapi.Default.Codec().Encode(expectedObj) fakeHandler := util.FakeHandler{ StatusCode: 200, ResponseBody: string(expectedBody), T: t, } testServer := httptest.NewServer(&fakeHandler) - c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Version(), Username: "user", Password: "pass"}) + c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Default.Version(), Username: "user", Password: "pass"}) obj, err := c.Verb("POST"). Suffix("baz"). Name("bar"). @@ -885,8 +885,8 @@ func TestDoRequestNewWayObj(t *testing.T) { t.Errorf("Expected: %#v, got %#v", expectedObj, obj) } tmpStr := string(reqBodyExpected) - requestURL := testapi.ResourcePath("foo", "", "bar/baz") - requestURL += "?" + api.LabelSelectorQueryParam(testapi.Version()) + "=name%3Dfoo&timeout=1s" + requestURL := testapi.Default.ResourcePath("foo", "", "bar/baz") + requestURL += "?" + api.LabelSelectorQueryParam(testapi.Default.Version()) + "=name%3Dfoo&timeout=1s" fakeHandler.ValidateRequest(t, requestURL, "POST", &tmpStr) if fakeHandler.RequestReceived.Header["Authorization"] == nil { t.Errorf("Request is missing authorization header: %#v", *fakeHandler.RequestReceived) @@ -895,7 +895,7 @@ func TestDoRequestNewWayObj(t *testing.T) { func TestDoRequestNewWayFile(t *testing.T) { reqObj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} - reqBodyExpected, err := testapi.Codec().Encode(reqObj) + reqBodyExpected, err := testapi.Default.Codec().Encode(reqObj) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -915,14 +915,14 @@ func TestDoRequestNewWayFile(t *testing.T) { Port: 12345, TargetPort: util.NewIntOrStringFromInt(12345), }}}} - expectedBody, _ := testapi.Codec().Encode(expectedObj) + expectedBody, _ := testapi.Default.Codec().Encode(expectedObj) fakeHandler := util.FakeHandler{ StatusCode: 200, ResponseBody: string(expectedBody), T: t, } testServer := httptest.NewServer(&fakeHandler) - c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Version(), Username: "user", Password: "pass"}) + c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Default.Version(), Username: "user", Password: "pass"}) wasCreated := true obj, err := c.Verb("POST"). Prefix("foo/bar", "baz"). 
@@ -942,7 +942,7 @@ func TestDoRequestNewWayFile(t *testing.T) { t.Errorf("expected object was not created") } tmpStr := string(reqBodyExpected) - requestURL := testapi.ResourcePathWithPrefix("foo/bar/baz", "", "", "") + requestURL := testapi.Default.ResourcePathWithPrefix("foo/bar/baz", "", "", "") requestURL += "?timeout=1s" fakeHandler.ValidateRequest(t, requestURL, "POST", &tmpStr) if fakeHandler.RequestReceived.Header["Authorization"] == nil { @@ -952,7 +952,7 @@ func TestDoRequestNewWayFile(t *testing.T) { func TestWasCreated(t *testing.T) { reqObj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} - reqBodyExpected, err := testapi.Codec().Encode(reqObj) + reqBodyExpected, err := testapi.Default.Codec().Encode(reqObj) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -962,14 +962,14 @@ func TestWasCreated(t *testing.T) { Port: 12345, TargetPort: util.NewIntOrStringFromInt(12345), }}}} - expectedBody, _ := testapi.Codec().Encode(expectedObj) + expectedBody, _ := testapi.Default.Codec().Encode(expectedObj) fakeHandler := util.FakeHandler{ StatusCode: 201, ResponseBody: string(expectedBody), T: t, } testServer := httptest.NewServer(&fakeHandler) - c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Version(), Username: "user", Password: "pass"}) + c := NewOrDie(&Config{Host: testServer.URL, Version: testapi.Default.Version(), Username: "user", Password: "pass"}) wasCreated := false obj, err := c.Verb("PUT"). Prefix("foo/bar", "baz"). @@ -990,7 +990,7 @@ func TestWasCreated(t *testing.T) { } tmpStr := string(reqBodyExpected) - requestURL := testapi.ResourcePathWithPrefix("foo/bar/baz", "", "", "") + requestURL := testapi.Default.ResourcePathWithPrefix("foo/bar/baz", "", "", "") requestURL += "?timeout=1s" fakeHandler.ValidateRequest(t, requestURL, "PUT", &tmpStr) if fakeHandler.RequestReceived.Header["Authorization"] == nil { @@ -1196,7 +1196,7 @@ func TestWatch(t *testing.T) { s, err := New(&Config{ Host: testServer.URL, - Version: testapi.Version(), + Version: testapi.Default.Version(), Username: "user", Password: "pass", }) @@ -1246,7 +1246,7 @@ func TestStream(t *testing.T) { s, err := New(&Config{ Host: testServer.URL, - Version: testapi.Version(), + Version: testapi.Default.Version(), Username: "user", Password: "pass", }) diff --git a/pkg/client/unversioned/resource_quotas_test.go b/pkg/client/unversioned/resource_quotas_test.go index 72df0528e53..4c01070ae85 100644 --- a/pkg/client/unversioned/resource_quotas_test.go +++ b/pkg/client/unversioned/resource_quotas_test.go @@ -52,14 +52,14 @@ func TestResourceQuotaCreate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "POST", - Path: testapi.ResourcePath(getResourceQuotasResoureName(), ns, ""), + Path: testapi.Default.ResourcePath(getResourceQuotasResoureName(), ns, ""), Query: buildQueryValues(nil), Body: resourceQuota, }, Response: Response{StatusCode: 200, Body: resourceQuota}, } - response, err := c.Setup().ResourceQuotas(ns).Create(resourceQuota) + response, err := c.Setup(t).ResourceQuotas(ns).Create(resourceQuota) c.Validate(t, response, err) } @@ -84,14 +84,14 @@ func TestResourceQuotaGet(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getResourceQuotasResoureName(), ns, "abc"), + Path: testapi.Default.ResourcePath(getResourceQuotasResoureName(), ns, "abc"), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: resourceQuota}, } - response, err := c.Setup().ResourceQuotas(ns).Get("abc") + response, err := 
c.Setup(t).ResourceQuotas(ns).Get("abc") c.Validate(t, response, err) } @@ -108,13 +108,13 @@ func TestResourceQuotaList(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath(getResourceQuotasResoureName(), ns, ""), + Path: testapi.Default.ResourcePath(getResourceQuotasResoureName(), ns, ""), Query: buildQueryValues(nil), Body: nil, }, Response: Response{StatusCode: 200, Body: resourceQuotaList}, } - response, err := c.Setup().ResourceQuotas(ns).List(labels.Everything()) + response, err := c.Setup(t).ResourceQuotas(ns).List(labels.Everything()) c.Validate(t, response, err) } @@ -138,10 +138,10 @@ func TestResourceQuotaUpdate(t *testing.T) { }, } c := &testClient{ - Request: testRequest{Method: "PUT", Path: testapi.ResourcePath(getResourceQuotasResoureName(), ns, "abc"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "PUT", Path: testapi.Default.ResourcePath(getResourceQuotasResoureName(), ns, "abc"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: resourceQuota}, } - response, err := c.Setup().ResourceQuotas(ns).Update(resourceQuota) + response, err := c.Setup(t).ResourceQuotas(ns).Update(resourceQuota) c.Validate(t, response, err) } @@ -167,21 +167,21 @@ func TestResourceQuotaStatusUpdate(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "PUT", - Path: testapi.ResourcePath(getResourceQuotasResoureName(), ns, "abc") + "/status", + Path: testapi.Default.ResourcePath(getResourceQuotasResoureName(), ns, "abc") + "/status", Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: resourceQuota}, } - response, err := c.Setup().ResourceQuotas(ns).UpdateStatus(resourceQuota) + response, err := c.Setup(t).ResourceQuotas(ns).UpdateStatus(resourceQuota) c.Validate(t, response, err) } func TestResourceQuotaDelete(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "DELETE", Path: testapi.ResourcePath(getResourceQuotasResoureName(), ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "DELETE", Path: testapi.Default.ResourcePath(getResourceQuotasResoureName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200}, } - err := c.Setup().ResourceQuotas(ns).Delete("foo") + err := c.Setup(t).ResourceQuotas(ns).Delete("foo") c.Validate(t, nil, err) } @@ -189,10 +189,10 @@ func TestResourceQuotaWatch(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePathWithPrefix("watch", getResourceQuotasResoureName(), "", ""), + Path: testapi.Default.ResourcePathWithPrefix("watch", getResourceQuotasResoureName(), "", ""), Query: url.Values{"resourceVersion": []string{}}}, Response: Response{StatusCode: 200}, } - _, err := c.Setup().ResourceQuotas(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") + _, err := c.Setup(t).ResourceQuotas(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), "") c.Validate(t, nil, err) } diff --git a/pkg/client/unversioned/restclient_test.go b/pkg/client/unversioned/restclient_test.go index 38b037f4086..942ccf5570e 100644 --- a/pkg/client/unversioned/restclient_test.go +++ b/pkg/client/unversioned/restclient_test.go @@ -35,8 +35,8 @@ func TestSetsCodec(t *testing.T) { Prefix string Codec runtime.Codec }{ - testapi.Version(): {false, "/api/" + testapi.Version() + "/", testapi.Codec()}, - "invalidVersion": {true, "", nil}, + testapi.Default.Version(): {false, "/api/" + testapi.Default.Version() + "/", 
testapi.Default.Codec()}, + "invalidVersion": {true, "", nil}, } for version, expected := range testCases { client, err := New(&Config{Host: "127.0.0.1", Version: version}) @@ -60,13 +60,13 @@ func TestSetsCodec(t *testing.T) { } func TestRESTClientRequires(t *testing.T) { - if _, err := RESTClientFor(&Config{Host: "127.0.0.1", Version: "", Codec: testapi.Codec()}); err == nil { + if _, err := RESTClientFor(&Config{Host: "127.0.0.1", Version: "", Codec: testapi.Default.Codec()}); err == nil { t.Errorf("unexpected non-error") } - if _, err := RESTClientFor(&Config{Host: "127.0.0.1", Version: testapi.Version()}); err == nil { + if _, err := RESTClientFor(&Config{Host: "127.0.0.1", Version: testapi.Default.Version()}); err == nil { t.Errorf("unexpected non-error") } - if _, err := RESTClientFor(&Config{Host: "127.0.0.1", Version: testapi.Version(), Codec: testapi.Codec()}); err != nil { + if _, err := RESTClientFor(&Config{Host: "127.0.0.1", Version: testapi.Default.Version(), Codec: testapi.Default.Codec()}); err != nil { t.Errorf("unexpected error: %v", err) } } @@ -79,17 +79,17 @@ func TestValidatesHostParameter(t *testing.T) { URL string Err bool }{ - {"127.0.0.1", "", "http://127.0.0.1/" + testapi.Version() + "/", false}, - {"127.0.0.1:8080", "", "http://127.0.0.1:8080/" + testapi.Version() + "/", false}, - {"foo.bar.com", "", "http://foo.bar.com/" + testapi.Version() + "/", false}, - {"http://host/prefix", "", "http://host/prefix/" + testapi.Version() + "/", false}, - {"http://host", "", "http://host/" + testapi.Version() + "/", false}, - {"http://host", "/", "http://host/" + testapi.Version() + "/", false}, - {"http://host", "/other", "http://host/other/" + testapi.Version() + "/", false}, + {"127.0.0.1", "", "http://127.0.0.1/" + testapi.Default.Version() + "/", false}, + {"127.0.0.1:8080", "", "http://127.0.0.1:8080/" + testapi.Default.Version() + "/", false}, + {"foo.bar.com", "", "http://foo.bar.com/" + testapi.Default.Version() + "/", false}, + {"http://host/prefix", "", "http://host/prefix/" + testapi.Default.Version() + "/", false}, + {"http://host", "", "http://host/" + testapi.Default.Version() + "/", false}, + {"http://host", "/", "http://host/" + testapi.Default.Version() + "/", false}, + {"http://host", "/other", "http://host/other/" + testapi.Default.Version() + "/", false}, {"host/server", "", "", true}, } for i, testCase := range testCases { - c, err := RESTClientFor(&Config{Host: testCase.Host, Prefix: testCase.Prefix, Version: testapi.Version(), Codec: testapi.Codec()}) + c, err := RESTClientFor(&Config{Host: testCase.Host, Prefix: testCase.Prefix, Version: testapi.Default.Version(), Codec: testapi.Default.Codec()}) switch { case err == nil && testCase.Err: t.Errorf("expected error but was nil") @@ -120,8 +120,8 @@ func TestDoRequestBearer(t *testing.T) { request, _ := http.NewRequest("GET", testServer.URL, nil) c, err := RESTClientFor(&Config{ Host: testServer.URL, - Version: testapi.Version(), - Codec: testapi.Codec(), + Version: testapi.Default.Version(), + Codec: testapi.Default.Codec(), BearerToken: "test", }) if err != nil { @@ -148,8 +148,8 @@ func TestDoRequestWithoutPassword(t *testing.T) { defer testServer.Close() c, err := RESTClientFor(&Config{ Host: testServer.URL, - Version: testapi.Version(), - Codec: testapi.Codec(), + Version: testapi.Default.Version(), + Codec: testapi.Default.Codec(), Username: "test", }) if err != nil { @@ -172,7 +172,7 @@ func TestDoRequestWithoutPassword(t *testing.T) { if body != nil { t.Errorf("Expected nil body, but saw: '%s'", 
string(body)) } - fakeHandler.ValidateRequest(t, "/"+testapi.Version()+"/test", "GET", nil) + fakeHandler.ValidateRequest(t, "/"+testapi.Default.Version()+"/test", "GET", nil) } func TestDoRequestSuccess(t *testing.T) { @@ -187,8 +187,8 @@ func TestDoRequestSuccess(t *testing.T) { defer testServer.Close() c, err := RESTClientFor(&Config{ Host: testServer.URL, - Version: testapi.Version(), - Codec: testapi.Codec(), + Version: testapi.Default.Version(), + Codec: testapi.Default.Codec(), Username: "user", Password: "pass", }) @@ -209,7 +209,7 @@ func TestDoRequestSuccess(t *testing.T) { if !reflect.DeepEqual(status, statusOut) { t.Errorf("Unexpected mis-match. Expected %#v. Saw %#v", status, statusOut) } - fakeHandler.ValidateRequest(t, "/"+testapi.Version()+"/test", "GET", nil) + fakeHandler.ValidateRequest(t, "/"+testapi.Default.Version()+"/test", "GET", nil) } func TestDoRequestFailed(t *testing.T) { @@ -230,8 +230,8 @@ func TestDoRequestFailed(t *testing.T) { defer testServer.Close() c, err := RESTClientFor(&Config{ Host: testServer.URL, - Version: testapi.Version(), - Codec: testapi.Codec(), + Version: testapi.Default.Version(), + Codec: testapi.Default.Codec(), }) if err != nil { t.Fatalf("unexpected error: %v", err) @@ -262,8 +262,8 @@ func TestDoRequestCreated(t *testing.T) { defer testServer.Close() c, err := RESTClientFor(&Config{ Host: testServer.URL, - Version: testapi.Version(), - Codec: testapi.Codec(), + Version: testapi.Default.Version(), + Codec: testapi.Default.Codec(), Username: "user", Password: "pass", }) @@ -285,5 +285,5 @@ func TestDoRequestCreated(t *testing.T) { if !reflect.DeepEqual(status, statusOut) { t.Errorf("Unexpected mis-match. Expected %#v. Saw %#v", status, statusOut) } - fakeHandler.ValidateRequest(t, "/"+testapi.Version()+"/test", "GET", nil) + fakeHandler.ValidateRequest(t, "/"+testapi.Default.Version()+"/test", "GET", nil) } diff --git a/pkg/client/unversioned/services_test.go b/pkg/client/unversioned/services_test.go index e97f62d37a6..fb1e638f056 100644 --- a/pkg/client/unversioned/services_test.go +++ b/pkg/client/unversioned/services_test.go @@ -30,7 +30,7 @@ func TestListServices(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath("services", ns, ""), + Path: testapi.Default.ResourcePath("services", ns, ""), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: &api.ServiceList{ @@ -53,18 +53,18 @@ func TestListServices(t *testing.T) { }, }, } - receivedServiceList, err := c.Setup().Services(ns).List(labels.Everything()) + receivedServiceList, err := c.Setup(t).Services(ns).List(labels.Everything()) t.Logf("received services: %v %#v", err, receivedServiceList) c.Validate(t, receivedServiceList, err) } func TestListServicesLabels(t *testing.T) { ns := api.NamespaceDefault - labelSelectorQueryParamName := api.LabelSelectorQueryParam(testapi.Version()) + labelSelectorQueryParamName := api.LabelSelectorQueryParam(testapi.Default.Version()) c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath("services", ns, ""), + Path: testapi.Default.ResourcePath("services", ns, ""), Query: buildQueryValues(url.Values{labelSelectorQueryParamName: []string{"foo=bar,name=baz"}})}, Response: Response{StatusCode: 200, Body: &api.ServiceList{ @@ -87,7 +87,7 @@ func TestListServicesLabels(t *testing.T) { }, }, } - c.Setup() + c.Setup(t) c.QueryValidator[labelSelectorQueryParamName] = validateLabels selector := labels.Set{"foo": "bar", "name": "baz"}.AsSelector() 
receivedServiceList, err := c.Services(ns).List(selector) @@ -99,18 +99,18 @@ func TestGetService(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePath("services", ns, "1"), + Path: testapi.Default.ResourcePath("services", ns, "1"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: &api.Service{ObjectMeta: api.ObjectMeta{Name: "service-1"}}}, } - response, err := c.Setup().Services(ns).Get("1") + response, err := c.Setup(t).Services(ns).Get("1") c.Validate(t, response, err) } func TestGetServiceWithNoName(t *testing.T) { ns := api.NamespaceDefault c := &testClient{Error: true} - receivedPod, err := c.Setup().Services(ns).Get("") + receivedPod, err := c.Setup(t).Services(ns).Get("") if (err != nil) && (err.Error() != nameRequiredError) { t.Errorf("Expected error: %v, but got %v", nameRequiredError, err) } @@ -123,12 +123,12 @@ func TestCreateService(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "POST", - Path: testapi.ResourcePath("services", ns, ""), + Path: testapi.Default.ResourcePath("services", ns, ""), Body: &api.Service{ObjectMeta: api.ObjectMeta{Name: "service-1"}}, Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: &api.Service{ObjectMeta: api.ObjectMeta{Name: "service-1"}}}, } - response, err := c.Setup().Services(ns).Create(&api.Service{ObjectMeta: api.ObjectMeta{Name: "service-1"}}) + response, err := c.Setup(t).Services(ns).Create(&api.Service{ObjectMeta: api.ObjectMeta{Name: "service-1"}}) c.Validate(t, response, err) } @@ -136,20 +136,20 @@ func TestUpdateService(t *testing.T) { ns := api.NamespaceDefault svc := &api.Service{ObjectMeta: api.ObjectMeta{Name: "service-1", ResourceVersion: "1"}} c := &testClient{ - Request: testRequest{Method: "PUT", Path: testapi.ResourcePath("services", ns, "service-1"), Body: svc, Query: buildQueryValues(nil)}, + Request: testRequest{Method: "PUT", Path: testapi.Default.ResourcePath("services", ns, "service-1"), Body: svc, Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200, Body: svc}, } - response, err := c.Setup().Services(ns).Update(svc) + response, err := c.Setup(t).Services(ns).Update(svc) c.Validate(t, response, err) } func TestDeleteService(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "DELETE", Path: testapi.ResourcePath("services", ns, "1"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "DELETE", Path: testapi.Default.ResourcePath("services", ns, "1"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200}, } - err := c.Setup().Services(ns).Delete("1") + err := c.Setup(t).Services(ns).Delete("1") c.Validate(t, nil, err) } @@ -159,11 +159,11 @@ func TestServiceProxyGet(t *testing.T) { c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.ResourcePathWithPrefix("proxy", "services", ns, "service-1") + "/foo", + Path: testapi.Default.ResourcePathWithPrefix("proxy", "services", ns, "service-1") + "/foo", Query: buildQueryValues(url.Values{"param-name": []string{"param-value"}}), }, Response: Response{StatusCode: 200, RawBody: &body}, } - response, err := c.Setup().Services(ns).ProxyGet("service-1", "foo", map[string]string{"param-name": "param-value"}).DoRaw() + response, err := c.Setup(t).Services(ns).ProxyGet("service-1", "foo", map[string]string{"param-name": "param-value"}).DoRaw() c.ValidateRaw(t, response, err) } diff --git a/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go 
b/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go index 276dcff2c84..1147cc3087c 100644 --- a/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go +++ b/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go @@ -80,7 +80,7 @@ func makeTestServer(t *testing.T, responses map[string]*serverResponse) (*httpte mkHandler := func(url string, response serverResponse) *util.FakeHandler { handler := util.FakeHandler{ StatusCode: response.statusCode, - ResponseBody: runtime.EncodeOrDie(testapi.Codec(), response.obj.(runtime.Object)), + ResponseBody: runtime.EncodeOrDie(testapi.Experimental.Codec(), response.obj.(runtime.Object)), } mux.Handle(url, &handler) glog.Infof("Will handle %s", url) @@ -176,8 +176,8 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { }) defer testServer.Close() - kubeClient := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) - expClient := client.NewExperimentalOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + kubeClient := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Experimental.Version()}) + expClient := client.NewExperimentalOrDie(&client.Config{Host: testServer.URL, Version: testapi.Experimental.Version()}) fakeRC := fakeResourceConsumptionClient{metrics: map[api.ResourceName]expapi.ResourceConsumption{ api.ResourceCPU: {Resource: api.ResourceCPU, Quantity: resource.MustParse("650m")}, diff --git a/pkg/controller/autoscaler/metrics/metrics_client_test.go b/pkg/controller/autoscaler/metrics/metrics_client_test.go index 22277e8806a..155842965cf 100644 --- a/pkg/controller/autoscaler/metrics/metrics_client_test.go +++ b/pkg/controller/autoscaler/metrics/metrics_client_test.go @@ -57,7 +57,7 @@ func makeTestServer(t *testing.T, responses map[string]*serverResponse) (*httpte mkHandler := func(url string, response serverResponse) *util.FakeHandler { handler := util.FakeHandler{ StatusCode: response.statusCode, - ResponseBody: runtime.EncodeOrDie(testapi.Codec(), response.obj.(runtime.Object)), + ResponseBody: runtime.EncodeOrDie(testapi.Experimental.Codec(), response.obj.(runtime.Object)), } mux.Handle(url, &handler) glog.Infof("Will handle %s", url) @@ -119,7 +119,7 @@ func TestHeapsterResourceConsumptionGet(t *testing.T) { }) defer testServer.Close() - kubeClient := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + kubeClient := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Experimental.Version()}) metricsClient := NewHeapsterMetricsClient(kubeClient) diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go index f7fa45449f8..ef6d69036df 100644 --- a/pkg/controller/controller_utils_test.go +++ b/pkg/controller/controller_utils_test.go @@ -48,7 +48,7 @@ func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectat func newReplicationController(replicas int) *api.ReplicationController { rc := &api.ReplicationController{ - TypeMeta: api.TypeMeta{APIVersion: testapi.Version()}, + TypeMeta: api.TypeMeta{APIVersion: testapi.Default.Version()}, ObjectMeta: api.ObjectMeta{ UID: util.NewUUID(), Name: "foobar", @@ -181,14 +181,14 @@ func TestControllerExpectations(t *testing.T) { func TestCreateReplica(t *testing.T) { ns := api.NamespaceDefault - body := runtime.EncodeOrDie(testapi.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Name: "empty_pod"}}) + body := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Pod{ObjectMeta: 
api.ObjectMeta{Name: "empty_pod"}}) fakeHandler := util.FakeHandler{ StatusCode: 200, ResponseBody: string(body), } testServer := httptest.NewServer(&fakeHandler) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) podControl := RealPodControl{ KubeClient: client, @@ -207,7 +207,7 @@ func TestCreateReplica(t *testing.T) { }, Spec: controllerSpec.Spec.Template.Spec, } - fakeHandler.ValidateRequest(t, testapi.ResourcePath("pods", api.NamespaceDefault, ""), "POST", nil) + fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath("pods", api.NamespaceDefault, ""), "POST", nil) actualPod, err := client.Codec.Decode([]byte(fakeHandler.RequestBody)) if err != nil { t.Errorf("Unexpected error: %#v", err) diff --git a/pkg/controller/endpoint/endpoints_controller_test.go b/pkg/controller/endpoint/endpoints_controller_test.go index a80cec37268..49314368ffd 100644 --- a/pkg/controller/endpoint/endpoints_controller_test.go +++ b/pkg/controller/endpoint/endpoints_controller_test.go @@ -35,7 +35,7 @@ import ( func addPods(store cache.Store, namespace string, nPods int, nPorts int) { for i := 0; i < nPods; i++ { p := &api.Pod{ - TypeMeta: api.TypeMeta{APIVersion: testapi.Version()}, + TypeMeta: api.TypeMeta{APIVersion: testapi.Default.Version()}, ObjectMeta: api.ObjectMeta{ Namespace: namespace, Name: fmt.Sprintf("pod%d", i), @@ -156,11 +156,11 @@ type serverResponse struct { func makeTestServer(t *testing.T, namespace string, endpointsResponse serverResponse) (*httptest.Server, *util.FakeHandler) { fakeEndpointsHandler := util.FakeHandler{ StatusCode: endpointsResponse.statusCode, - ResponseBody: runtime.EncodeOrDie(testapi.Codec(), endpointsResponse.obj.(runtime.Object)), + ResponseBody: runtime.EncodeOrDie(testapi.Default.Codec(), endpointsResponse.obj.(runtime.Object)), } mux := http.NewServeMux() - mux.Handle(testapi.ResourcePath("endpoints", namespace, ""), &fakeEndpointsHandler) - mux.Handle(testapi.ResourcePath("endpoints/", namespace, ""), &fakeEndpointsHandler) + mux.Handle(testapi.Default.ResourcePath("endpoints", namespace, ""), &fakeEndpointsHandler) + mux.Handle(testapi.Default.ResourcePath("endpoints/", namespace, ""), &fakeEndpointsHandler) mux.HandleFunc("/", func(res http.ResponseWriter, req *http.Request) { t.Errorf("unexpected request: %v", req.RequestURI) res.WriteHeader(http.StatusNotFound) @@ -183,7 +183,7 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { }}, }}) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) endpoints := NewEndpointController(client) endpoints.serviceStore.Store.Add(&api.Service{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, @@ -215,7 +215,7 @@ func TestCheckLeftoverEndpoints(t *testing.T) { }}, }}) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) endpoints := NewEndpointController(client) endpoints.checkLeftoverEndpoints() @@ -243,7 +243,7 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) { }}, }}) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := 
client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) endpoints := NewEndpointController(client) endpoints.serviceStore.Store.Add(&api.Service{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, @@ -271,7 +271,7 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) { }}, }}) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) endpoints := NewEndpointController(client) endpoints.serviceStore.Store.Add(&api.Service{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, @@ -296,7 +296,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) { Subsets: []api.EndpointSubset{}, }}) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) endpoints := NewEndpointController(client) addPods(endpoints.podStore.Store, ns, 1, 1) endpoints.serviceStore.Store.Add(&api.Service{ @@ -307,7 +307,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) { }, }) endpoints.syncService(ns + "/foo") - data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{ + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ ObjectMeta: api.ObjectMeta{ Name: "foo", Namespace: ns, @@ -318,7 +318,7 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) { Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, }}, }) - endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", ns, "foo"), "PUT", &data) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) } func TestSyncEndpointsItemsPreexisting(t *testing.T) { @@ -336,7 +336,7 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) { }}, }}) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) endpoints := NewEndpointController(client) addPods(endpoints.podStore.Store, ns, 1, 1) endpoints.serviceStore.Store.Add(&api.Service{ @@ -347,7 +347,7 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) { }, }) endpoints.syncService(ns + "/foo") - data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{ + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ ObjectMeta: api.ObjectMeta{ Name: "foo", Namespace: ns, @@ -358,7 +358,7 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) { Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, }}, }) - endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", ns, "foo"), "PUT", &data) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) } func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) { @@ -376,7 +376,7 @@ func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) { }}, }}) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) endpoints := NewEndpointController(client) addPods(endpoints.podStore.Store, api.NamespaceDefault, 1, 1) endpoints.serviceStore.Store.Add(&api.Service{ @@ -387,7 +387,7 @@ func 
TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) { }, }) endpoints.syncService(ns + "/foo") - endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", api.NamespaceDefault, "foo"), "GET", nil) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", api.NamespaceDefault, "foo"), "GET", nil) } func TestSyncEndpointsItems(t *testing.T) { @@ -395,7 +395,7 @@ func TestSyncEndpointsItems(t *testing.T) { testServer, endpointsHandler := makeTestServer(t, ns, serverResponse{http.StatusOK, &api.Endpoints{}}) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) endpoints := NewEndpointController(client) addPods(endpoints.podStore.Store, ns, 3, 2) addPods(endpoints.podStore.Store, "blah", 5, 2) // make sure these aren't found! @@ -421,7 +421,7 @@ func TestSyncEndpointsItems(t *testing.T) { {Name: "port1", Port: 8088, Protocol: "TCP"}, }, }} - data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{ + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ ObjectMeta: api.ObjectMeta{ ResourceVersion: "", }, @@ -429,7 +429,7 @@ func TestSyncEndpointsItems(t *testing.T) { }) // endpointsHandler should get 2 requests - one for "GET" and the next for "POST". endpointsHandler.ValidateRequestCount(t, 2) - endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", ns, ""), "POST", &data) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, ""), "POST", &data) } func TestSyncEndpointsItemsWithLabels(t *testing.T) { @@ -437,7 +437,7 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) { testServer, endpointsHandler := makeTestServer(t, ns, serverResponse{http.StatusOK, &api.Endpoints{}}) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) endpoints := NewEndpointController(client) addPods(endpoints.podStore.Store, ns, 3, 2) serviceLabels := map[string]string{"foo": "bar"} @@ -467,7 +467,7 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) { {Name: "port1", Port: 8088, Protocol: "TCP"}, }, }} - data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{ + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ ObjectMeta: api.ObjectMeta{ ResourceVersion: "", Labels: serviceLabels, @@ -476,7 +476,7 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) { }) // endpointsHandler should get 2 requests - one for "GET" and the next for "POST". 
endpointsHandler.ValidateRequestCount(t, 2) - endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", ns, ""), "POST", &data) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, ""), "POST", &data) } func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) { @@ -497,7 +497,7 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) { }}, }}) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) endpoints := NewEndpointController(client) addPods(endpoints.podStore.Store, ns, 1, 1) serviceLabels := map[string]string{"baz": "blah"} @@ -513,7 +513,7 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) { }, }) endpoints.syncService(ns + "/foo") - data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{ + data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ ObjectMeta: api.ObjectMeta{ Name: "foo", Namespace: ns, @@ -525,5 +525,5 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) { Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, }}, }) - endpointsHandler.ValidateRequest(t, testapi.ResourcePath("endpoints", ns, "foo"), "PUT", &data) + endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) } diff --git a/pkg/controller/replication/replication_controller_test.go b/pkg/controller/replication/replication_controller_test.go index 014c82f5004..5376a69bf09 100644 --- a/pkg/controller/replication/replication_controller_test.go +++ b/pkg/controller/replication/replication_controller_test.go @@ -95,7 +95,7 @@ func getKey(rc *api.ReplicationController, t *testing.T) string { func newReplicationController(replicas int) *api.ReplicationController { rc := &api.ReplicationController{ - TypeMeta: api.TypeMeta{APIVersion: testapi.Version()}, + TypeMeta: api.TypeMeta{APIVersion: testapi.Default.Version()}, ObjectMeta: api.ObjectMeta{ UID: util.NewUUID(), Name: "foobar", @@ -176,24 +176,24 @@ type serverResponse struct { func makeTestServer(t *testing.T, namespace, name string, podResponse, controllerResponse, updateResponse serverResponse) (*httptest.Server, *util.FakeHandler) { fakePodHandler := util.FakeHandler{ StatusCode: podResponse.statusCode, - ResponseBody: runtime.EncodeOrDie(testapi.Codec(), podResponse.obj.(runtime.Object)), + ResponseBody: runtime.EncodeOrDie(testapi.Default.Codec(), podResponse.obj.(runtime.Object)), } fakeControllerHandler := util.FakeHandler{ StatusCode: controllerResponse.statusCode, - ResponseBody: runtime.EncodeOrDie(testapi.Codec(), controllerResponse.obj.(runtime.Object)), + ResponseBody: runtime.EncodeOrDie(testapi.Default.Codec(), controllerResponse.obj.(runtime.Object)), } fakeUpdateHandler := util.FakeHandler{ StatusCode: updateResponse.statusCode, - ResponseBody: runtime.EncodeOrDie(testapi.Codec(), updateResponse.obj.(runtime.Object)), + ResponseBody: runtime.EncodeOrDie(testapi.Default.Codec(), updateResponse.obj.(runtime.Object)), } mux := http.NewServeMux() - mux.Handle(testapi.ResourcePath("pods", namespace, ""), &fakePodHandler) - mux.Handle(testapi.ResourcePath(replicationControllerResourceName(), "", ""), &fakeControllerHandler) + mux.Handle(testapi.Default.ResourcePath("pods", namespace, ""), &fakePodHandler) + mux.Handle(testapi.Default.ResourcePath(replicationControllerResourceName(), "", ""), &fakeControllerHandler) if 
namespace != "" { - mux.Handle(testapi.ResourcePath(replicationControllerResourceName(), namespace, ""), &fakeControllerHandler) + mux.Handle(testapi.Default.ResourcePath(replicationControllerResourceName(), namespace, ""), &fakeControllerHandler) } if name != "" { - mux.Handle(testapi.ResourcePath(replicationControllerResourceName(), namespace, name), &fakeUpdateHandler) + mux.Handle(testapi.Default.ResourcePath(replicationControllerResourceName(), namespace, name), &fakeUpdateHandler) } mux.HandleFunc("/", func(res http.ResponseWriter, req *http.Request) { t.Errorf("unexpected request: %v", req.RequestURI) @@ -219,7 +219,7 @@ func startManagerAndWait(manager *ReplicationManager, pods int, t *testing.T) ch } func TestSyncReplicationControllerDoesNothing(t *testing.T) { - client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.Version()}) fakePodControl := FakePodControl{} manager := NewReplicationManager(client, BurstReplicas) manager.podStoreSynced = alwaysReady @@ -235,7 +235,7 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) { } func TestSyncReplicationControllerDeletes(t *testing.T) { - client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.Version()}) fakePodControl := FakePodControl{} manager := NewReplicationManager(client, BurstReplicas) manager.podStoreSynced = alwaysReady @@ -251,7 +251,7 @@ func TestSyncReplicationControllerDeletes(t *testing.T) { } func TestDeleteFinalStateUnknown(t *testing.T) { - client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.Version()}) fakePodControl := FakePodControl{} manager := NewReplicationManager(client, BurstReplicas) manager.podStoreSynced = alwaysReady @@ -284,7 +284,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) { } func TestSyncReplicationControllerCreates(t *testing.T) { - client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.Version()}) manager := NewReplicationManager(client, BurstReplicas) manager.podStoreSynced = alwaysReady @@ -306,7 +306,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) { } testServer := httptest.NewServer(&fakeHandler) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) manager := NewReplicationManager(client, BurstReplicas) manager.podStoreSynced = alwaysReady @@ -328,15 +328,15 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) { // This response body is just so we don't err out decoding the http response, all // we care about is the request body sent below. 
- response := runtime.EncodeOrDie(testapi.Codec(), &api.ReplicationController{}) + response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{}) fakeHandler.ResponseBody = response rc.Generation = rc.Generation + 1 manager.syncReplicationController(getKey(rc, t)) rc.Status.ObservedGeneration = rc.Generation - updatedRc := runtime.EncodeOrDie(testapi.Codec(), rc) - fakeHandler.ValidateRequest(t, testapi.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name), "PUT", &updatedRc) + updatedRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc) + fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name), "PUT", &updatedRc) } func TestControllerUpdateReplicas(t *testing.T) { @@ -348,7 +348,7 @@ func TestControllerUpdateReplicas(t *testing.T) { testServer := httptest.NewServer(&fakeHandler) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) manager := NewReplicationManager(client, BurstReplicas) manager.podStoreSynced = alwaysReady @@ -361,7 +361,7 @@ func TestControllerUpdateReplicas(t *testing.T) { newPodList(manager.podStore.Store, 4, api.PodRunning, rc) // This response body is just so we don't err out decoding the http response - response := runtime.EncodeOrDie(testapi.Codec(), &api.ReplicationController{}) + response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{}) fakeHandler.ResponseBody = response fakePodControl := FakePodControl{} @@ -373,8 +373,8 @@ func TestControllerUpdateReplicas(t *testing.T) { // 2. Every update to the status should include the Generation of the spec. 
rc.Status = api.ReplicationControllerStatus{Replicas: 4, ObservedGeneration: 1} - decRc := runtime.EncodeOrDie(testapi.Codec(), rc) - fakeHandler.ValidateRequest(t, testapi.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name), "PUT", &decRc) + decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc) + fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name), "PUT", &decRc) validateSyncReplication(t, &fakePodControl, 1, 0) } @@ -386,7 +386,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) { } testServer := httptest.NewServer(&fakeHandler) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) fakePodControl := FakePodControl{} manager := NewReplicationManager(client, BurstReplicas) @@ -435,7 +435,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) { } func TestPodControllerLookup(t *testing.T) { - manager := NewReplicationManager(client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}), BurstReplicas) + manager := NewReplicationManager(client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.Version()}), BurstReplicas) manager.podStoreSynced = alwaysReady testCases := []struct { inRCs []*api.ReplicationController @@ -649,7 +649,7 @@ func TestControllerUpdateRequeue(t *testing.T) { testServer := httptest.NewServer(&fakeHandler) defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()}) manager := NewReplicationManager(client, BurstReplicas) manager.podStoreSynced = alwaysReady @@ -729,7 +729,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) { } func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) { - client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.Version()}) fakePodControl := FakePodControl{} manager := NewReplicationManager(client, burstReplicas) manager.podStoreSynced = alwaysReady @@ -849,7 +849,7 @@ func (fe FakeRCExpectations) SatisfiedExpectations(controllerKey string) bool { // TestRCSyncExpectations tests that a pod cannot sneak in between counting active pods // and checking expectations. 
func TestRCSyncExpectations(t *testing.T) { - client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.Version()}) fakePodControl := FakePodControl{} manager := NewReplicationManager(client, 2) manager.podStoreSynced = alwaysReady @@ -874,7 +874,7 @@ func TestRCSyncExpectations(t *testing.T) { } func TestDeleteControllerAndExpectations(t *testing.T) { - client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.Version()}) manager := NewReplicationManager(client, 10) manager.podStoreSynced = alwaysReady @@ -916,7 +916,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) { } func TestRCManagerNotReady(t *testing.T) { - client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.Version()}) fakePodControl := FakePodControl{} manager := NewReplicationManager(client, 2) manager.podControl = &fakePodControl @@ -953,7 +953,7 @@ func shuffle(controllers []*api.ReplicationController) []*api.ReplicationControl } func TestOverlappingRCs(t *testing.T) { - client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.Version()}) for i := 0; i < 5; i++ { manager := NewReplicationManager(client, 10) diff --git a/pkg/controller/serviceaccount/serviceaccounts_controller_test.go b/pkg/controller/serviceaccount/serviceaccounts_controller_test.go index fe59258c98c..e1525bd8aa7 100644 --- a/pkg/controller/serviceaccount/serviceaccounts_controller_test.go +++ b/pkg/controller/serviceaccount/serviceaccounts_controller_test.go @@ -36,11 +36,11 @@ type serverResponse struct { func makeTestServer(t *testing.T, namespace string, serviceAccountResponse serverResponse) (*httptest.Server, *util.FakeHandler) { fakeServiceAccountsHandler := util.FakeHandler{ StatusCode: serviceAccountResponse.statusCode, - ResponseBody: runtime.EncodeOrDie(testapi.Codec(), serviceAccountResponse.obj.(runtime.Object)), + ResponseBody: runtime.EncodeOrDie(testapi.Default.Codec(), serviceAccountResponse.obj.(runtime.Object)), } mux := http.NewServeMux() - mux.Handle(testapi.ResourcePath("serviceAccounts", namespace, ""), &fakeServiceAccountsHandler) + mux.Handle(testapi.Default.ResourcePath("serviceAccounts", namespace, ""), &fakeServiceAccountsHandler) mux.HandleFunc("/", func(res http.ResponseWriter, req *http.Request) { t.Errorf("unexpected request: %v", req.RequestURI) res.WriteHeader(http.StatusNotFound) diff --git a/pkg/kubectl/cmd/annotate_test.go b/pkg/kubectl/cmd/annotate_test.go index 53c407c1d9e..ea45b030d06 100644 --- a/pkg/kubectl/cmd/annotate_test.go +++ b/pkg/kubectl/cmd/annotate_test.go @@ -391,7 +391,7 @@ func TestAnnotateErrors(t *testing.T) { f, tf, _ := NewAPIFactory() tf.Printer = &testPrinter{} tf.Namespace = "test" - tf.ClientConfig = &client.Config{Version: testapi.Version()} + tf.ClientConfig = &client.Config{Version: testapi.Default.Version()} buf := bytes.NewBuffer([]byte{}) cmd := NewCmdAnnotate(f, buf) @@ -447,7 +447,7 @@ func TestAnnotateObject(t *testing.T) { }), } tf.Namespace = "test" - tf.ClientConfig = &client.Config{Version: testapi.Version()} + tf.ClientConfig = &client.Config{Version: testapi.Default.Version()} options := &AnnotateOptions{} args := []string{"pods/foo", "a=b", "c-"} @@ -494,7 +494,7 @@ func 
TestAnnotateObjectFromFile(t *testing.T) { }), } tf.Namespace = "test" - tf.ClientConfig = &client.Config{Version: testapi.Version()} + tf.ClientConfig = &client.Config{Version: testapi.Default.Version()} options := &AnnotateOptions{} options.filenames = []string{"../../../examples/cassandra/cassandra.yaml"} @@ -544,7 +544,7 @@ func TestAnnotateMultipleObjects(t *testing.T) { }), } tf.Namespace = "test" - tf.ClientConfig = &client.Config{Version: testapi.Version()} + tf.ClientConfig = &client.Config{Version: testapi.Default.Version()} options := &AnnotateOptions{} options.all = true diff --git a/pkg/kubectl/cmd/attach_test.go b/pkg/kubectl/cmd/attach_test.go index 7e8328ba4ed..b7d336cd06f 100644 --- a/pkg/kubectl/cmd/attach_test.go +++ b/pkg/kubectl/cmd/attach_test.go @@ -105,7 +105,7 @@ func TestPodAndContainerAttach(t *testing.T) { } func TestAttach(t *testing.T) { - version := testapi.Version() + version := testapi.Default.Version() tests := []struct { name, version, podPath, attachPath, container string pod *api.Pod diff --git a/pkg/kubectl/cmd/cmd_test.go b/pkg/kubectl/cmd/cmd_test.go index acf4fae94a1..681af5ee3cf 100644 --- a/pkg/kubectl/cmd/cmd_test.go +++ b/pkg/kubectl/cmd/cmd_test.go @@ -79,10 +79,10 @@ func newExternalScheme() (*runtime.Scheme, meta.RESTMapper, runtime.Codec) { scheme.AddKnownTypeWithName("", "Type", &internalType{}) scheme.AddKnownTypeWithName("unlikelyversion", "Type", &externalType{}) //This tests that kubectl will not confuse the external scheme with the internal scheme, even when they accidentally have versions of the same name. - scheme.AddKnownTypeWithName(testapi.Version(), "Type", &ExternalType2{}) + scheme.AddKnownTypeWithName(testapi.Default.Version(), "Type", &ExternalType2{}) codec := runtime.CodecFor(scheme, "unlikelyversion") - validVersion := testapi.Version() + validVersion := testapi.Default.Version() mapper := meta.NewDefaultRESTMapper("apitest", []string{"unlikelyversion", validVersion}, func(version string) (*meta.VersionInterfaces, error) { return &meta.VersionInterfaces{ Codec: runtime.CodecFor(scheme, version), @@ -228,7 +228,7 @@ func NewAPIFactory() (*cmdutil.Factory, *testFactory, runtime.Codec) { generator, ok := generators[name] return generator, ok }, - }, t, testapi.Codec() + }, t, testapi.Default.Codec() } func objBody(codec runtime.Codec, obj runtime.Object) io.ReadCloser { @@ -245,7 +245,7 @@ func stringBody(body string) io.ReadCloser { //func TestClientVersions(t *testing.T) { // f := cmdutil.NewFactory(nil) // -// version := testapi.Version() +// version := testapi.Default.Version() // mapping := &meta.RESTMapping{ // APIVersion: version, // } diff --git a/pkg/kubectl/cmd/config/config_test.go b/pkg/kubectl/cmd/config/config_test.go index d95762848ac..3fe325e5e04 100644 --- a/pkg/kubectl/cmd/config/config_test.go +++ b/pkg/kubectl/cmd/config/config_test.go @@ -577,13 +577,13 @@ func TestNewEmptyCluster(t *testing.T) { func TestAdditionalCluster(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() cluster := clientcmdapi.NewCluster() - cluster.APIVersion = testapi.Version() + cluster.APIVersion = testapi.Default.Version() cluster.CertificateAuthority = "/ca-location" cluster.InsecureSkipTLSVerify = false cluster.Server = "serverlocation" expectedConfig.Clusters["different-cluster"] = cluster test := configCommandTest{ - args: []string{"set-cluster", "different-cluster", "--" + clientcmd.FlagAPIServer + "=serverlocation", "--" + clientcmd.FlagInsecure + "=false", "--" + clientcmd.FlagCAFile + "=/ca-location", "--" + 
clientcmd.FlagAPIVersion + "=" + testapi.Version()}, + args: []string{"set-cluster", "different-cluster", "--" + clientcmd.FlagAPIServer + "=serverlocation", "--" + clientcmd.FlagInsecure + "=false", "--" + clientcmd.FlagCAFile + "=/ca-location", "--" + clientcmd.FlagAPIVersion + "=" + testapi.Default.Version()}, startingConfig: newRedFederalCowHammerConfig(), expectedConfig: expectedConfig, } diff --git a/pkg/kubectl/cmd/delete_test.go b/pkg/kubectl/cmd/delete_test.go index f12124440f8..8be48805d0a 100644 --- a/pkg/kubectl/cmd/delete_test.go +++ b/pkg/kubectl/cmd/delete_test.go @@ -416,12 +416,12 @@ func TestDeleteMultipleSelector(t *testing.T) { Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { switch p, m := req.URL.Path, req.Method; { case p == "/namespaces/test/pods" && m == "GET": - if req.URL.Query().Get(api.LabelSelectorQueryParam(testapi.Version())) != "a=b" { + if req.URL.Query().Get(api.LabelSelectorQueryParam(testapi.Default.Version())) != "a=b" { t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) } return &http.Response{StatusCode: 200, Body: objBody(codec, pods)}, nil case p == "/namespaces/test/services" && m == "GET": - if req.URL.Query().Get(api.LabelSelectorQueryParam(testapi.Version())) != "a=b" { + if req.URL.Query().Get(api.LabelSelectorQueryParam(testapi.Default.Version())) != "a=b" { t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) } return &http.Response{StatusCode: 200, Body: objBody(codec, svc)}, nil diff --git a/pkg/kubectl/cmd/exec_test.go b/pkg/kubectl/cmd/exec_test.go index 3cf18afee69..34b147718a8 100644 --- a/pkg/kubectl/cmd/exec_test.go +++ b/pkg/kubectl/cmd/exec_test.go @@ -129,7 +129,7 @@ func TestPodAndContainer(t *testing.T) { } func TestExec(t *testing.T) { - version := testapi.Version() + version := testapi.Default.Version() tests := []struct { name, version, podPath, execPath, container string pod *api.Pod diff --git a/pkg/kubectl/cmd/get_test.go b/pkg/kubectl/cmd/get_test.go index fab0093dcfc..b15d6ec67e3 100644 --- a/pkg/kubectl/cmd/get_test.go +++ b/pkg/kubectl/cmd/get_test.go @@ -176,14 +176,14 @@ func TestGetUnknownSchemaObjectListGeneric(t *testing.T) { rcVersion: latest.Version, // see expected behavior 3b }, "handles common version": { - outputVersion: testapi.Version(), - listVersion: testapi.Version(), + outputVersion: testapi.Default.Version(), + listVersion: testapi.Default.Version(), testtypeVersion: "unlikelyversion", - rcVersion: testapi.Version(), + rcVersion: testapi.Default.Version(), }, } for k, test := range testCases { - apiCodec := runtime.CodecFor(api.Scheme, testapi.Version()) + apiCodec := runtime.CodecFor(api.Scheme, testapi.Default.Version()) regularClient := &client.FakeRESTClient{ Codec: apiCodec, Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { @@ -440,7 +440,7 @@ func TestGetMultipleTypeObjectsAsList(t *testing.T) { }), } tf.Namespace = "test" - tf.ClientConfig = &client.Config{Version: testapi.Version()} + tf.ClientConfig = &client.Config{Version: testapi.Default.Version()} buf := bytes.NewBuffer([]byte{}) cmd := NewCmdGet(f, buf) @@ -488,7 +488,7 @@ func TestGetMultipleTypeObjectsWithSelector(t *testing.T) { tf.Client = &client.FakeRESTClient{ Codec: codec, Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { - if req.URL.Query().Get(api.LabelSelectorQueryParam(testapi.Version())) != "a=b" { + if req.URL.Query().Get(api.LabelSelectorQueryParam(testapi.Default.Version())) != "a=b" { t.Fatalf("unexpected request: %#v\n%#v", 
req.URL, req) } switch req.URL.Path { @@ -624,7 +624,7 @@ func TestWatchSelector(t *testing.T) { tf.Client = &client.FakeRESTClient{ Codec: codec, Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { - if req.URL.Query().Get(api.LabelSelectorQueryParam(testapi.Version())) != "a=b" { + if req.URL.Query().Get(api.LabelSelectorQueryParam(testapi.Default.Version())) != "a=b" { t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) } switch req.URL.Path { diff --git a/pkg/kubectl/cmd/label_test.go b/pkg/kubectl/cmd/label_test.go index 934e847963a..814aa605239 100644 --- a/pkg/kubectl/cmd/label_test.go +++ b/pkg/kubectl/cmd/label_test.go @@ -300,7 +300,7 @@ func TestLabelErrors(t *testing.T) { f, tf, _ := NewAPIFactory() tf.Printer = &testPrinter{} tf.Namespace = "test" - tf.ClientConfig = &client.Config{Version: testapi.Version()} + tf.ClientConfig = &client.Config{Version: testapi.Default.Version()} buf := bytes.NewBuffer([]byte{}) cmd := NewCmdLabel(f, buf) @@ -353,7 +353,7 @@ func TestLabelForResourceFromFile(t *testing.T) { }), } tf.Namespace = "test" - tf.ClientConfig = &client.Config{Version: testapi.Version()} + tf.ClientConfig = &client.Config{Version: testapi.Default.Version()} buf := bytes.NewBuffer([]byte{}) cmd := NewCmdLabel(f, buf) @@ -402,7 +402,7 @@ func TestLabelMultipleObjects(t *testing.T) { }), } tf.Namespace = "test" - tf.ClientConfig = &client.Config{Version: testapi.Version()} + tf.ClientConfig = &client.Config{Version: testapi.Default.Version()} buf := bytes.NewBuffer([]byte{}) cmd := NewCmdLabel(f, buf) diff --git a/pkg/kubectl/cmd/portforward_test.go b/pkg/kubectl/cmd/portforward_test.go index d2f3a16d8f1..05923191081 100644 --- a/pkg/kubectl/cmd/portforward_test.go +++ b/pkg/kubectl/cmd/portforward_test.go @@ -39,7 +39,7 @@ func (f *fakePortForwarder) ForwardPorts(req *client.Request, config *client.Con } func TestPortForward(t *testing.T) { - version := testapi.Version() + version := testapi.Default.Version() tests := []struct { name, version, podPath, pfPath, container string @@ -101,7 +101,7 @@ func TestPortForward(t *testing.T) { } func TestPortForwardWithPFlag(t *testing.T) { - version := testapi.Version() + version := testapi.Default.Version() tests := []struct { name, version, podPath, pfPath, container string diff --git a/pkg/kubectl/cmd/util/helpers_test.go b/pkg/kubectl/cmd/util/helpers_test.go index 0a9cd7459a0..2547c10fb0d 100644 --- a/pkg/kubectl/cmd/util/helpers_test.go +++ b/pkg/kubectl/cmd/util/helpers_test.go @@ -49,7 +49,7 @@ func TestMerge(t *testing.T) { Name: "foo", }, }, - fragment: fmt.Sprintf(`{ "apiVersion": "%s" }`, testapi.Version()), + fragment: fmt.Sprintf(`{ "apiVersion": "%s" }`, testapi.Default.Version()), expected: &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "foo", @@ -82,7 +82,7 @@ func TestMerge(t *testing.T) { }, }, }, - fragment: fmt.Sprintf(`{ "apiVersion": "%s", "spec": { "containers": [ { "name": "c1", "image": "green-image" } ] } }`, testapi.Version()), + fragment: fmt.Sprintf(`{ "apiVersion": "%s", "spec": { "containers": [ { "name": "c1", "image": "green-image" } ] } }`, testapi.Default.Version()), expected: &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "foo", @@ -108,7 +108,7 @@ func TestMerge(t *testing.T) { Name: "foo", }, }, - fragment: fmt.Sprintf(`{ "apiVersion": "%s", "spec": { "volumes": [ {"name": "v1"}, {"name": "v2"} ] } }`, testapi.Version()), + fragment: fmt.Sprintf(`{ "apiVersion": "%s", "spec": { "volumes": [ {"name": "v1"}, {"name": "v2"} ] } }`, testapi.Default.Version()), expected: 
&api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "foo", @@ -148,7 +148,7 @@ func TestMerge(t *testing.T) { obj: &api.Service{ Spec: api.ServiceSpec{}, }, - fragment: fmt.Sprintf(`{ "apiVersion": "%s", "spec": { "ports": [ { "port": 0 } ] } }`, testapi.Version()), + fragment: fmt.Sprintf(`{ "apiVersion": "%s", "spec": { "ports": [ { "port": 0 } ] } }`, testapi.Default.Version()), expected: &api.Service{ Spec: api.ServiceSpec{ SessionAffinity: "None", @@ -171,7 +171,7 @@ func TestMerge(t *testing.T) { }, }, }, - fragment: fmt.Sprintf(`{ "apiVersion": "%s", "spec": { "selector": { "version": "v2" } } }`, testapi.Version()), + fragment: fmt.Sprintf(`{ "apiVersion": "%s", "spec": { "selector": { "version": "v2" } } }`, testapi.Default.Version()), expected: &api.Service{ Spec: api.ServiceSpec{ SessionAffinity: "None", diff --git a/pkg/kubectl/resource/builder_test.go b/pkg/kubectl/resource/builder_test.go index d2921d63547..97322f3d5d1 100644 --- a/pkg/kubectl/resource/builder_test.go +++ b/pkg/kubectl/resource/builder_test.go @@ -512,7 +512,7 @@ func TestResourceByNameAndEmptySelector(t *testing.T) { func TestSelector(t *testing.T) { pods, svc := testData() - labelKey := api.LabelSelectorQueryParam(testapi.Version()) + labelKey := api.LabelSelectorQueryParam(testapi.Default.Version()) b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith("", t, map[string]string{ "/namespaces/test/pods?" + labelKey + "=a%3Db": runtime.EncodeOrDie(latest.Codec, pods), "/namespaces/test/services?" + labelKey + "=a%3Db": runtime.EncodeOrDie(latest.Codec, svc), @@ -808,7 +808,7 @@ func TestSingularRootScopedObject(t *testing.T) { func TestListObject(t *testing.T) { pods, _ := testData() - labelKey := api.LabelSelectorQueryParam(testapi.Version()) + labelKey := api.LabelSelectorQueryParam(testapi.Default.Version()) b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith("", t, map[string]string{ "/namespaces/test/pods?" + labelKey + "=a%3Db": runtime.EncodeOrDie(latest.Codec, pods), })). @@ -841,7 +841,7 @@ func TestListObject(t *testing.T) { func TestListObjectWithDifferentVersions(t *testing.T) { pods, svc := testData() - labelKey := api.LabelSelectorQueryParam(testapi.Version()) + labelKey := api.LabelSelectorQueryParam(testapi.Default.Version()) obj, err := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith("", t, map[string]string{ "/namespaces/test/pods?" + labelKey + "=a%3Db": runtime.EncodeOrDie(latest.Codec, pods), "/namespaces/test/services?" + labelKey + "=a%3Db": runtime.EncodeOrDie(latest.Codec, svc), diff --git a/pkg/kubectl/resource/helper_test.go b/pkg/kubectl/resource/helper_test.go index a1a443891ee..dcc1ede0160 100644 --- a/pkg/kubectl/resource/helper_test.go +++ b/pkg/kubectl/resource/helper_test.go @@ -34,7 +34,7 @@ import ( ) func objBody(obj runtime.Object) io.ReadCloser { - return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(testapi.Codec(), obj)))) + return ioutil.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(testapi.Default.Codec(), obj)))) } // splitPath returns the segments for a URL path. 
@@ -93,7 +93,7 @@ func TestHelperDelete(t *testing.T) { } for _, test := range tests { client := &client.FakeRESTClient{ - Codec: testapi.Codec(), + Codec: testapi.Default.Codec(), Resp: test.Resp, Err: test.HttpErr, } @@ -184,7 +184,7 @@ func TestHelperCreate(t *testing.T) { } for i, test := range tests { client := &client.FakeRESTClient{ - Codec: testapi.Codec(), + Codec: testapi.Default.Codec(), Resp: test.Resp, Err: test.HttpErr, } @@ -193,13 +193,13 @@ func TestHelperCreate(t *testing.T) { } modifier := &Helper{ RESTClient: client, - Codec: testapi.Codec(), - Versioner: testapi.MetadataAccessor(), + Codec: testapi.Default.Codec(), + Versioner: testapi.Default.MetadataAccessor(), NamespaceScoped: true, } data := []byte{} if test.Object != nil { - data = []byte(runtime.EncodeOrDie(testapi.Codec(), test.Object)) + data = []byte(runtime.EncodeOrDie(testapi.Default.Codec(), test.Object)) } _, err := modifier.Create("bar", test.Modify, data) if (err != nil) != test.Err { @@ -218,7 +218,7 @@ func TestHelperCreate(t *testing.T) { t.Logf("got body: %s", string(body)) expect := []byte{} if test.ExpectObject != nil { - expect = []byte(runtime.EncodeOrDie(testapi.Codec(), test.ExpectObject)) + expect = []byte(runtime.EncodeOrDie(testapi.Default.Codec(), test.ExpectObject)) } if !reflect.DeepEqual(expect, body) { t.Errorf("%d: unexpected body: %s", i, string(body)) @@ -270,7 +270,7 @@ func TestHelperGet(t *testing.T) { } for _, test := range tests { client := &client.FakeRESTClient{ - Codec: testapi.Codec(), + Codec: testapi.Default.Codec(), Resp: test.Resp, Err: test.HttpErr, } @@ -331,7 +331,7 @@ func TestHelperList(t *testing.T) { t.Errorf("url doesn't contain name: %#v", req.URL) return false } - if req.URL.Query().Get(api.LabelSelectorQueryParam(testapi.Version())) != labels.SelectorFromSet(labels.Set{"foo": "baz"}).String() { + if req.URL.Query().Get(api.LabelSelectorQueryParam(testapi.Default.Version())) != labels.SelectorFromSet(labels.Set{"foo": "baz"}).String() { t.Errorf("url doesn't contain query parameters: %#v", req.URL) return false } @@ -341,7 +341,7 @@ func TestHelperList(t *testing.T) { } for _, test := range tests { client := &client.FakeRESTClient{ - Codec: testapi.Codec(), + Codec: testapi.Default.Codec(), Resp: test.Resp, Err: test.HttpErr, } @@ -349,7 +349,7 @@ func TestHelperList(t *testing.T) { RESTClient: client, NamespaceScoped: true, } - obj, err := modifier.List("bar", testapi.Version(), labels.SelectorFromSet(labels.Set{"foo": "baz"})) + obj, err := modifier.List("bar", testapi.Default.Version(), labels.SelectorFromSet(labels.Set{"foo": "baz"})) if (err != nil) != test.Err { t.Errorf("unexpected error: %t %v", test.Err, err) } @@ -444,7 +444,7 @@ func TestHelperReplace(t *testing.T) { } for i, test := range tests { client := &client.FakeRESTClient{ - Codec: testapi.Codec(), + Codec: testapi.Default.Codec(), Resp: test.Resp, Err: test.HttpErr, } @@ -453,13 +453,13 @@ func TestHelperReplace(t *testing.T) { } modifier := &Helper{ RESTClient: client, - Codec: testapi.Codec(), - Versioner: testapi.MetadataAccessor(), + Codec: testapi.Default.Codec(), + Versioner: testapi.Default.MetadataAccessor(), NamespaceScoped: true, } data := []byte{} if test.Object != nil { - data = []byte(runtime.EncodeOrDie(testapi.Codec(), test.Object)) + data = []byte(runtime.EncodeOrDie(testapi.Default.Codec(), test.Object)) } _, err := modifier.Replace("bar", "foo", test.Overwrite, data) if (err != nil) != test.Err { @@ -478,7 +478,7 @@ func TestHelperReplace(t *testing.T) { t.Logf("got body: 
%s", string(body)) expect := []byte{} if test.ExpectObject != nil { - expect = []byte(runtime.EncodeOrDie(testapi.Codec(), test.ExpectObject)) + expect = []byte(runtime.EncodeOrDie(testapi.Default.Codec(), test.ExpectObject)) } if !reflect.DeepEqual(expect, body) { t.Errorf("%d: unexpected body: %s", i, string(body)) diff --git a/pkg/kubectl/resource_printer_test.go b/pkg/kubectl/resource_printer_test.go index 29d7721f088..4197ea98e84 100644 --- a/pkg/kubectl/resource_printer_test.go +++ b/pkg/kubectl/resource_printer_test.go @@ -49,7 +49,7 @@ func (ts *testStruct) IsAnAPIObject() {} func init() { api.Scheme.AddKnownTypes("", &testStruct{}) - api.Scheme.AddKnownTypes(testapi.Version(), &testStruct{}) + api.Scheme.AddKnownTypes(testapi.Default.Version(), &testStruct{}) } var testData = testStruct{ @@ -72,7 +72,7 @@ func TestVersionedPrinter(t *testing.T) { return nil }), api.Scheme, - testapi.Version(), + testapi.Default.Version(), ) if err := p.PrintObj(original, nil); err != nil { t.Errorf("unexpected error: %v", err) @@ -110,7 +110,7 @@ func TestPrinter(t *testing.T) { }, } emptyListTest := &api.PodList{} - testapi, err := api.Scheme.ConvertToVersion(podTest, testapi.Version()) + testapi, err := api.Scheme.ConvertToVersion(podTest, testapi.Default.Version()) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -183,7 +183,7 @@ func testPrinter(t *testing.T, printer ResourcePrinter, unmarshalFunc func(data } // Use real decode function to undo the versioning process. poutput = testStruct{} - err = runtime.YAMLDecoder(testapi.Codec()).DecodeInto(buf.Bytes(), &poutput) + err = runtime.YAMLDecoder(testapi.Default.Codec()).DecodeInto(buf.Bytes(), &poutput) if err != nil { t.Fatal(err) } @@ -204,7 +204,7 @@ func testPrinter(t *testing.T, printer ResourcePrinter, unmarshalFunc func(data } // Use real decode function to undo the versioning process. objOut = api.Pod{} - err = runtime.YAMLDecoder(testapi.Codec()).DecodeInto(buf.Bytes(), &objOut) + err = runtime.YAMLDecoder(testapi.Default.Codec()).DecodeInto(buf.Bytes(), &objOut) if err != nil { t.Fatal(err) } @@ -430,7 +430,7 @@ func TestTemplateStrings(t *testing.T) { t.Fatalf("tmpl fail: %v", err) } - printer := NewVersionedPrinter(p, api.Scheme, testapi.Version()) + printer := NewVersionedPrinter(p, api.Scheme, testapi.Default.Version()) for name, item := range table { buffer := &bytes.Buffer{} diff --git a/pkg/kubectl/rolling_updater_test.go b/pkg/kubectl/rolling_updater_test.go index ae38ceee888..4b6b6406e93 100644 --- a/pkg/kubectl/rolling_updater_test.go +++ b/pkg/kubectl/rolling_updater_test.go @@ -1008,7 +1008,7 @@ func TestUpdateExistingReplicationController(t *testing.T) { } func TestUpdateWithRetries(t *testing.T) { - codec := testapi.Codec() + codec := testapi.Default.Codec() grace := int64(30) rc := &api.ReplicationController{ ObjectMeta: api.ObjectMeta{Name: "rc", @@ -1055,7 +1055,7 @@ func TestUpdateWithRetries(t *testing.T) { Codec: codec, Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { switch p, m := req.URL.Path, req.Method; { - case p == testapi.ResourcePath("replicationcontrollers", "default", "rc") && m == "PUT": + case p == testapi.Default.ResourcePath("replicationcontrollers", "default", "rc") && m == "PUT": update := updates[0] updates = updates[1:] // We should always get an update with a valid rc even when the get fails. 
The rc should always @@ -1068,7 +1068,7 @@ func TestUpdateWithRetries(t *testing.T) { delete(c.Spec.Selector, "baz") } return update, nil - case p == testapi.ResourcePath("replicationcontrollers", "default", "rc") && m == "GET": + case p == testapi.Default.ResourcePath("replicationcontrollers", "default", "rc") && m == "GET": get := gets[0] gets = gets[1:] return get, nil @@ -1078,7 +1078,7 @@ func TestUpdateWithRetries(t *testing.T) { } }), } - clientConfig := &client.Config{Version: testapi.Version()} + clientConfig := &client.Config{Version: testapi.Default.Version()} client := client.NewOrDie(clientConfig) client.Client = fakeClient.Client @@ -1115,7 +1115,7 @@ func objBody(codec runtime.Codec, obj runtime.Object) io.ReadCloser { func TestAddDeploymentHash(t *testing.T) { buf := &bytes.Buffer{} - codec := testapi.Codec() + codec := testapi.Default.Codec() rc := &api.ReplicationController{ ObjectMeta: api.ObjectMeta{Name: "rc"}, Spec: api.ReplicationControllerSpec{ @@ -1146,27 +1146,27 @@ func TestAddDeploymentHash(t *testing.T) { Codec: codec, Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { switch p, m := req.URL.Path, req.Method; { - case p == testapi.ResourcePath("pods", "default", "") && m == "GET": + case p == testapi.Default.ResourcePath("pods", "default", "") && m == "GET": if req.URL.RawQuery != "labelSelector=foo%3Dbar" { t.Errorf("Unexpected query string: %s", req.URL.RawQuery) } return &http.Response{StatusCode: 200, Body: objBody(codec, podList)}, nil - case p == testapi.ResourcePath("pods", "default", "foo") && m == "PUT": + case p == testapi.Default.ResourcePath("pods", "default", "foo") && m == "PUT": seen.Insert("foo") obj := readOrDie(t, req, codec) podList.Items[0] = *(obj.(*api.Pod)) return &http.Response{StatusCode: 200, Body: objBody(codec, &podList.Items[0])}, nil - case p == testapi.ResourcePath("pods", "default", "bar") && m == "PUT": + case p == testapi.Default.ResourcePath("pods", "default", "bar") && m == "PUT": seen.Insert("bar") obj := readOrDie(t, req, codec) podList.Items[1] = *(obj.(*api.Pod)) return &http.Response{StatusCode: 200, Body: objBody(codec, &podList.Items[1])}, nil - case p == testapi.ResourcePath("pods", "default", "baz") && m == "PUT": + case p == testapi.Default.ResourcePath("pods", "default", "baz") && m == "PUT": seen.Insert("baz") obj := readOrDie(t, req, codec) podList.Items[2] = *(obj.(*api.Pod)) return &http.Response{StatusCode: 200, Body: objBody(codec, &podList.Items[2])}, nil - case p == testapi.ResourcePath("replicationcontrollers", "default", "rc") && m == "PUT": + case p == testapi.Default.ResourcePath("replicationcontrollers", "default", "rc") && m == "PUT": updatedRc = true return &http.Response{StatusCode: 200, Body: objBody(codec, rc)}, nil default: @@ -1175,7 +1175,7 @@ func TestAddDeploymentHash(t *testing.T) { } }), } - clientConfig := &client.Config{Version: testapi.Version()} + clientConfig := &client.Config{Version: testapi.Default.Version()} client := client.NewOrDie(clientConfig) client.Client = fakeClient.Client diff --git a/pkg/kubelet/config/common_test.go b/pkg/kubelet/config/common_test.go index fcf16781bab..ccfb88effaa 100644 --- a/pkg/kubelet/config/common_test.go +++ b/pkg/kubelet/config/common_test.go @@ -54,7 +54,7 @@ func TestDecodeSinglePod(t *testing.T) { }}, }, } - json, err := testapi.Codec().Encode(pod) + json, err := testapi.Default.Codec().Encode(pod) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -70,7 +70,7 @@ func TestDecodeSinglePod(t *testing.T) { } 
for _, version := range registered.RegisteredVersions { - externalPod, err := testapi.Converter().ConvertToVersion(pod, version) + externalPod, err := testapi.Default.Converter().ConvertToVersion(pod, version) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -119,7 +119,7 @@ func TestDecodePodList(t *testing.T) { podList := &api.PodList{ Items: []api.Pod{*pod}, } - json, err := testapi.Codec().Encode(podList) + json, err := testapi.Default.Codec().Encode(podList) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -135,7 +135,7 @@ func TestDecodePodList(t *testing.T) { } for _, version := range registered.RegisteredVersions { - externalPodList, err := testapi.Converter().ConvertToVersion(podList, version) + externalPodList, err := testapi.Default.Converter().ConvertToVersion(podList, version) if err != nil { t.Errorf("unexpected error: %v", err) } diff --git a/pkg/kubelet/config/file_test.go b/pkg/kubelet/config/file_test.go index 66ca28ba97e..94ad000d3f4 100644 --- a/pkg/kubelet/config/file_test.go +++ b/pkg/kubelet/config/file_test.go @@ -117,11 +117,11 @@ func TestReadPodsFromFile(t *testing.T) { for _, testCase := range testCases { func() { var versionedPod runtime.Object - err := testapi.Converter().Convert(&testCase.pod, &versionedPod) + err := testapi.Default.Converter().Convert(&testCase.pod, &versionedPod) if err != nil { t.Fatalf("%s: error in versioning the pod: %v", testCase.desc, err) } - fileContents, err := testapi.Codec().Encode(versionedPod) + fileContents, err := testapi.Default.Codec().Encode(versionedPod) if err != nil { t.Fatalf("%s: error in encoding the pod: %v", testCase.desc, err) } diff --git a/pkg/kubelet/config/http_test.go b/pkg/kubelet/config/http_test.go index 4279609414a..af700241621 100644 --- a/pkg/kubelet/config/http_test.go +++ b/pkg/kubelet/config/http_test.go @@ -67,7 +67,7 @@ func TestExtractInvalidPods(t *testing.T) { { desc: "Invalid volume name", pod: &api.Pod{ - TypeMeta: api.TypeMeta{APIVersion: testapi.Version()}, + TypeMeta: api.TypeMeta{APIVersion: testapi.Default.Version()}, Spec: api.PodSpec{ Volumes: []api.Volume{{Name: "_INVALID_"}}, }, @@ -76,7 +76,7 @@ func TestExtractInvalidPods(t *testing.T) { { desc: "Duplicate volume names", pod: &api.Pod{ - TypeMeta: api.TypeMeta{APIVersion: testapi.Version()}, + TypeMeta: api.TypeMeta{APIVersion: testapi.Default.Version()}, Spec: api.PodSpec{ Volumes: []api.Volume{{Name: "repeated"}, {Name: "repeated"}}, }, @@ -85,7 +85,7 @@ func TestExtractInvalidPods(t *testing.T) { { desc: "Unspecified container name", pod: &api.Pod{ - TypeMeta: api.TypeMeta{APIVersion: testapi.Version()}, + TypeMeta: api.TypeMeta{APIVersion: testapi.Default.Version()}, Spec: api.PodSpec{ Containers: []api.Container{{Name: ""}}, }, @@ -94,7 +94,7 @@ func TestExtractInvalidPods(t *testing.T) { { desc: "Invalid container name", pod: &api.Pod{ - TypeMeta: api.TypeMeta{APIVersion: testapi.Version()}, + TypeMeta: api.TypeMeta{APIVersion: testapi.Default.Version()}, Spec: api.PodSpec{ Containers: []api.Container{{Name: "_INVALID_"}}, }, @@ -252,11 +252,11 @@ func TestExtractPodsFromHTTP(t *testing.T) { for _, testCase := range testCases { var versionedPods runtime.Object - err := testapi.Converter().Convert(&testCase.pods, &versionedPods) + err := testapi.Default.Converter().Convert(&testCase.pods, &versionedPods) if err != nil { t.Fatalf("%s: error in versioning the pods: %s", testCase.desc, err) } - data, err := testapi.Codec().Encode(versionedPods) + data, err := testapi.Default.Codec().Encode(versionedPods) if err 
!= nil { t.Fatalf("%s: error in encoding the pod: %v", testCase.desc, err) } @@ -288,7 +288,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { func TestURLWithHeader(t *testing.T) { pod := &api.Pod{ TypeMeta: api.TypeMeta{ - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), Kind: "Pod", }, ObjectMeta: api.ObjectMeta{ diff --git a/pkg/kubelet/container/ref_test.go b/pkg/kubelet/container/ref_test.go index 8f0f6f335d8..17c2a6f11dd 100644 --- a/pkg/kubelet/container/ref_test.go +++ b/pkg/kubelet/container/ref_test.go @@ -66,14 +66,14 @@ func TestGenerateContainerRef(t *testing.T) { okPod = api.Pod{ TypeMeta: api.TypeMeta{ Kind: "Pod", - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), }, ObjectMeta: api.ObjectMeta{ Name: "ok", Namespace: "test-ns", UID: "bar", ResourceVersion: "42", - SelfLink: "/api/" + testapi.Version() + "/pods/foo", + SelfLink: "/api/" + testapi.Default.Version() + "/pods/foo", }, Spec: api.PodSpec{ Containers: []api.Container{ @@ -90,7 +90,7 @@ func TestGenerateContainerRef(t *testing.T) { noSelfLinkPod.Kind = "" noSelfLinkPod.APIVersion = "" noSelfLinkPod.ObjectMeta.SelfLink = "" - defaultedSelfLinkPod.ObjectMeta.SelfLink = "/api/" + testapi.Version() + "/pods/ok" + defaultedSelfLinkPod.ObjectMeta.SelfLink = "/api/" + testapi.Default.Version() + "/pods/ok" cases := []struct { name string @@ -107,7 +107,7 @@ func TestGenerateContainerRef(t *testing.T) { }, expected: &api.ObjectReference{ Kind: "Pod", - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), Name: "ok", Namespace: "test-ns", UID: "bar", @@ -122,7 +122,7 @@ func TestGenerateContainerRef(t *testing.T) { container: &api.Container{}, expected: &api.ObjectReference{ Kind: "Pod", - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), Name: "ok", Namespace: "test-ns", UID: "bar", @@ -146,7 +146,7 @@ func TestGenerateContainerRef(t *testing.T) { }, expected: &api.ObjectReference{ Kind: "Pod", - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), Name: "ok", Namespace: "test-ns", UID: "bar", @@ -163,7 +163,7 @@ func TestGenerateContainerRef(t *testing.T) { }, expected: &api.ObjectReference{ Kind: "Pod", - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), Name: "ok", Namespace: "test-ns", UID: "bar", diff --git a/pkg/kubelet/dockertools/manager_test.go b/pkg/kubelet/dockertools/manager_test.go index a3d65bf15b5..a6d54e71eba 100644 --- a/pkg/kubelet/dockertools/manager_test.go +++ b/pkg/kubelet/dockertools/manager_test.go @@ -449,7 +449,7 @@ func TestKillContainerInPodWithPreStop(t *testing.T) { }, {Name: "bar"}}}, } - podString, err := testapi.Codec().Encode(pod) + podString, err := testapi.Default.Codec().Encode(pod) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -2348,7 +2348,7 @@ func TestPodDependsOnPodIP(t *testing.T) { Name: "POD_IP", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), FieldPath: "status.podIP", }, }, @@ -2369,7 +2369,7 @@ func TestPodDependsOnPodIP(t *testing.T) { Name: "POD_NAME", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), FieldPath: "metadata.name", }, }, diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 816fed19923..b37f5145ea9 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -1227,7 +1227,7 @@ func 
TestMakeEnvironmentVariables(t *testing.T) { Name: "POD_NAME", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), FieldPath: "metadata.name", }, }, @@ -1236,7 +1236,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { Name: "POD_NAMESPACE", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), FieldPath: "metadata.namespace", }, }, @@ -1245,7 +1245,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { Name: "POD_IP", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), FieldPath: "status.podIP", }, }, @@ -1273,7 +1273,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { Name: "POD_NAME", ValueFrom: &api.EnvVarSource{ FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), FieldPath: "metadata.name", }, }, diff --git a/pkg/registry/controller/etcd/etcd_test.go b/pkg/registry/controller/etcd/etcd_test.go index 909e529963c..00e583c4db3 100644 --- a/pkg/registry/controller/etcd/etcd_test.go +++ b/pkg/registry/controller/etcd/etcd_test.go @@ -28,7 +28,7 @@ import ( ) func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "") return NewREST(etcdStorage), fakeClient } diff --git a/pkg/registry/daemon/etcd/etcd_test.go b/pkg/registry/daemon/etcd/etcd_test.go index ff289c2ddbc..5d2510a7bf6 100755 --- a/pkg/registry/daemon/etcd/etcd_test.go +++ b/pkg/registry/daemon/etcd/etcd_test.go @@ -29,7 +29,7 @@ import ( ) func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "experimental") return NewREST(etcdStorage), fakeClient } diff --git a/pkg/registry/deployment/etcd/etcd_test.go b/pkg/registry/deployment/etcd/etcd_test.go index 9a3a2ac6206..0878d10883d 100755 --- a/pkg/registry/deployment/etcd/etcd_test.go +++ b/pkg/registry/deployment/etcd/etcd_test.go @@ -29,7 +29,7 @@ import ( ) func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "experimental") return NewREST(etcdStorage), fakeClient } diff --git a/pkg/registry/endpoint/etcd/etcd_test.go b/pkg/registry/endpoint/etcd/etcd_test.go index cff15c74962..7ec7e3e106a 100644 --- a/pkg/registry/endpoint/etcd/etcd_test.go +++ b/pkg/registry/endpoint/etcd/etcd_test.go @@ -28,7 +28,7 @@ import ( ) func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "") return NewREST(etcdStorage, false), fakeClient } diff --git a/pkg/registry/event/etcd/etcd_test.go b/pkg/registry/event/etcd/etcd_test.go index fcc46e8e18d..6bc6fd6af07 100644 --- a/pkg/registry/event/etcd/etcd_test.go +++ b/pkg/registry/event/etcd/etcd_test.go @@ -28,7 +28,7 @@ import ( var testTTL uint64 = 60 func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "") fakeClient.HideExpires = true return NewREST(etcdStorage, testTTL), fakeClient } diff --git 
a/pkg/registry/event/strategy_test.go b/pkg/registry/event/strategy_test.go index 797185d799d..03ebd771db2 100644 --- a/pkg/registry/event/strategy_test.go +++ b/pkg/registry/event/strategy_test.go @@ -48,7 +48,7 @@ func TestGetAttrs(t *testing.T) { Name: "foo", Namespace: "baz", UID: "long uid string", - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), ResourceVersion: "0", FieldPath: "", }, @@ -68,7 +68,7 @@ func TestGetAttrs(t *testing.T) { "involvedObject.name": "foo", "involvedObject.namespace": "baz", "involvedObject.uid": "long uid string", - "involvedObject.apiVersion": testapi.Version(), + "involvedObject.apiVersion": testapi.Default.Version(), "involvedObject.resourceVersion": "0", "involvedObject.fieldPath": "", "reason": "ForTesting", diff --git a/pkg/registry/experimental/controller/etcd/etcd_test.go b/pkg/registry/experimental/controller/etcd/etcd_test.go index fa09096ae6f..192da9dfba1 100644 --- a/pkg/registry/experimental/controller/etcd/etcd_test.go +++ b/pkg/registry/experimental/controller/etcd/etcd_test.go @@ -30,7 +30,7 @@ import ( ) func newStorage(t *testing.T) (*ScaleREST, *tools.FakeEtcdClient) { - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "experimental") return NewStorage(etcdStorage).Scale, fakeClient } @@ -82,7 +82,7 @@ func TestGet(t *testing.T) { ctx := api.WithNamespace(api.NewContext(), "test") key := etcdtest.AddPrefix("/controllers/test/foo") - if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), &validController), 0); err != nil { + if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Experimental.Codec(), &validController), 0); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -102,7 +102,7 @@ func TestUpdate(t *testing.T) { ctx := api.WithNamespace(api.NewContext(), "test") key := etcdtest.AddPrefix("/controllers/test/foo") - if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), &validController), 0); err != nil { + if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Experimental.Codec(), &validController), 0); err != nil { t.Fatalf("unexpected error: %v", err) } replicas := 12 @@ -122,7 +122,7 @@ func TestUpdate(t *testing.T) { } var controller api.ReplicationController - testapi.Codec().DecodeInto([]byte(response.Node.Value), &controller) + testapi.Experimental.Codec().DecodeInto([]byte(response.Node.Value), &controller) if controller.Spec.Replicas != replicas { t.Errorf("wrong replicas count expected: %d got: %d", replicas, controller.Spec.Replicas) } diff --git a/pkg/registry/generic/etcd/etcd_test.go b/pkg/registry/generic/etcd/etcd_test.go index 566e329432a..cd389b7ef8c 100644 --- a/pkg/registry/generic/etcd/etcd_test.go +++ b/pkg/registry/generic/etcd/etcd_test.go @@ -70,7 +70,7 @@ func hasCreated(t *testing.T, pod *api.Pod) func(runtime.Object) bool { func NewTestGenericEtcdRegistry(t *testing.T) (*tools.FakeEtcdClient, *Etcd) { f := tools.NewFakeEtcdClient(t) f.TestIndex = true - s := etcdstorage.NewEtcdStorage(f, testapi.Codec(), etcdtest.PathPrefix()) + s := etcdstorage.NewEtcdStorage(f, testapi.Default.Codec(), etcdtest.PathPrefix()) strategy := &testRESTStrategy{api.Scheme, api.SimpleNameGenerator, true, false, true} podPrefix := "/pods" return f, &Etcd{ @@ -135,14 +135,14 @@ func TestEtcdList(t *testing.T) { singleElemListResp := &etcd.Response{ Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), podA), + Value: runtime.EncodeOrDie(testapi.Default.Codec(), podA), }, } 
normalListResp := &etcd.Response{ Node: &etcd.Node{ Nodes: []*etcd.Node{ - {Value: runtime.EncodeOrDie(testapi.Codec(), podA)}, - {Value: runtime.EncodeOrDie(testapi.Codec(), podB)}, + {Value: runtime.EncodeOrDie(testapi.Default.Codec(), podA)}, + {Value: runtime.EncodeOrDie(testapi.Default.Codec(), podB)}, }, }, } @@ -243,7 +243,7 @@ func TestEtcdCreate(t *testing.T) { nodeWithPodA := tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), podA), + Value: runtime.EncodeOrDie(testapi.Default.Codec(), podA), ModifiedIndex: 1, CreatedIndex: 1, }, @@ -323,7 +323,7 @@ func TestEtcdUpdate(t *testing.T) { nodeWithPodA := tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), podA), + Value: runtime.EncodeOrDie(testapi.Default.Codec(), podA), ModifiedIndex: 1, CreatedIndex: 1, }, @@ -334,7 +334,7 @@ func TestEtcdUpdate(t *testing.T) { newerNodeWithPodA := tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), podA), + Value: runtime.EncodeOrDie(testapi.Default.Codec(), podA), ModifiedIndex: 2, CreatedIndex: 1, }, @@ -345,7 +345,7 @@ func TestEtcdUpdate(t *testing.T) { nodeWithPodB := tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), podB), + Value: runtime.EncodeOrDie(testapi.Default.Codec(), podB), ModifiedIndex: 1, CreatedIndex: 1, }, @@ -356,7 +356,7 @@ func TestEtcdUpdate(t *testing.T) { nodeWithPodAWithResourceVersion := tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), podAWithResourceVersion), + Value: runtime.EncodeOrDie(testapi.Default.Codec(), podAWithResourceVersion), ModifiedIndex: 3, CreatedIndex: 1, }, @@ -452,7 +452,7 @@ func TestEtcdGet(t *testing.T) { nodeWithPodA := tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), podA), + Value: runtime.EncodeOrDie(testapi.Default.Codec(), podA), ModifiedIndex: 1, CreatedIndex: 1, }, @@ -508,7 +508,7 @@ func TestEtcdDelete(t *testing.T) { nodeWithPodA := tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), podA), + Value: runtime.EncodeOrDie(testapi.Default.Codec(), podA), ModifiedIndex: 1, CreatedIndex: 1, }, @@ -576,7 +576,7 @@ func TestEtcdWatch(t *testing.T) { respWithPodA := &etcd.Response{ Node: &etcd.Node{ Key: "/registry/pods/default/foo", - Value: runtime.EncodeOrDie(testapi.Codec(), podA), + Value: runtime.EncodeOrDie(testapi.Default.Codec(), podA), ModifiedIndex: 1, CreatedIndex: 1, }, diff --git a/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go b/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go index 826d52dd3ea..a5ab4768565 100644 --- a/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go +++ b/pkg/registry/horizontalpodautoscaler/etcd/etcd_test.go @@ -32,7 +32,7 @@ import ( ) func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "experimental") return NewREST(etcdStorage), fakeClient } diff --git a/pkg/registry/limitrange/etcd/etcd_test.go b/pkg/registry/limitrange/etcd/etcd_test.go index c08e627cc31..be3c49e8c47 100644 --- a/pkg/registry/limitrange/etcd/etcd_test.go +++ b/pkg/registry/limitrange/etcd/etcd_test.go @@ -29,7 +29,7 @@ import ( ) func newStorage(t *testing.T) (*REST, 
*tools.FakeEtcdClient) { - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "") return NewREST(etcdStorage), fakeClient } diff --git a/pkg/registry/minion/etcd/etcd_test.go b/pkg/registry/minion/etcd/etcd_test.go index e89051eb5c7..af23cb0c1da 100644 --- a/pkg/registry/minion/etcd/etcd_test.go +++ b/pkg/registry/minion/etcd/etcd_test.go @@ -37,7 +37,7 @@ func (fakeConnectionInfoGetter) GetConnectionInfo(host string) (string, uint, ht } func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "") storage, _ := NewREST(etcdStorage, false, fakeConnectionInfoGetter{}) return storage, fakeClient } diff --git a/pkg/registry/namespace/etcd/etcd_test.go b/pkg/registry/namespace/etcd/etcd_test.go index dfe9ed8714d..653d84b6058 100644 --- a/pkg/registry/namespace/etcd/etcd_test.go +++ b/pkg/registry/namespace/etcd/etcd_test.go @@ -31,7 +31,7 @@ import ( ) func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "") storage, _, _ := NewREST(etcdStorage) return storage, fakeClient } @@ -140,7 +140,7 @@ func TestDeleteNamespaceWithIncompleteFinalizers(t *testing.T) { }, Status: api.NamespaceStatus{Phase: api.NamespaceActive}, } - if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), namespace), 0); err != nil { + if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Default.Codec(), namespace), 0); err != nil { t.Fatalf("unexpected error: %v", err) } if _, err := storage.Delete(ctx, "foo", nil); err == nil { @@ -163,7 +163,7 @@ func TestDeleteNamespaceWithCompleteFinalizers(t *testing.T) { }, Status: api.NamespaceStatus{Phase: api.NamespaceActive}, } - if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), namespace), 0); err != nil { + if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Default.Codec(), namespace), 0); err != nil { t.Fatalf("unexpected error: %v", err) } if _, err := storage.Delete(ctx, "foo", nil); err != nil { diff --git a/pkg/registry/persistentvolume/etcd/etcd_test.go b/pkg/registry/persistentvolume/etcd/etcd_test.go index 8bc00c5a2c9..e376a3b998b 100644 --- a/pkg/registry/persistentvolume/etcd/etcd_test.go +++ b/pkg/registry/persistentvolume/etcd/etcd_test.go @@ -32,7 +32,7 @@ import ( ) func newStorage(t *testing.T) (*REST, *StatusREST, *tools.FakeEtcdClient) { - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "") storage, statusStorage := NewREST(etcdStorage) return storage, statusStorage, fakeClient } @@ -148,7 +148,7 @@ func TestUpdateStatus(t *testing.T) { key, _ := storage.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) pvStart := validNewPersistentVolume("foo") - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), pvStart), 0) + fakeClient.Set(key, runtime.EncodeOrDie(testapi.Default.Codec(), pvStart), 0) pvIn := &api.PersistentVolume{ ObjectMeta: api.ObjectMeta{ diff --git a/pkg/registry/persistentvolumeclaim/etcd/etcd_test.go b/pkg/registry/persistentvolumeclaim/etcd/etcd_test.go index 049b2d128c5..acba24753a4 100644 --- a/pkg/registry/persistentvolumeclaim/etcd/etcd_test.go +++ b/pkg/registry/persistentvolumeclaim/etcd/etcd_test.go @@ -32,7 +32,7 @@ import ( ) func newStorage(t *testing.T) (*REST, *StatusREST, *tools.FakeEtcdClient) 
{ - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "") storage, statusStorage := NewREST(etcdStorage) return storage, statusStorage, fakeClient } @@ -141,7 +141,7 @@ func TestUpdateStatus(t *testing.T) { key, _ := storage.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) pvcStart := validNewPersistentVolumeClaim("foo", api.NamespaceDefault) - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), pvcStart), 0) + fakeClient.Set(key, runtime.EncodeOrDie(testapi.Default.Codec(), pvcStart), 0) pvc := &api.PersistentVolumeClaim{ ObjectMeta: api.ObjectMeta{ diff --git a/pkg/registry/pod/etcd/etcd_test.go b/pkg/registry/pod/etcd/etcd_test.go index ee737d6ce1b..7622132eaf6 100644 --- a/pkg/registry/pod/etcd/etcd_test.go +++ b/pkg/registry/pod/etcd/etcd_test.go @@ -37,7 +37,7 @@ import ( ) func newStorage(t *testing.T) (*REST, *BindingREST, *StatusREST, *tools.FakeEtcdClient) { - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "") storage := NewStorage(etcdStorage, false, nil) return storage.Pod, storage.Binding, storage.Status, fakeClient } @@ -256,7 +256,7 @@ func TestResourceLocation(t *testing.T) { storage, _, _, fakeClient := newStorage(t) key, _ := storage.KeyFunc(ctx, tc.pod.Name) key = etcdtest.AddPrefix(key) - if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), &tc.pod), 0); err != nil { + if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Default.Codec(), &tc.pod), 0); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -338,7 +338,7 @@ func TestEtcdCreate(t *testing.T) { t.Fatalf("Unexpected error %v", err) } var pod api.Pod - err = testapi.Codec().DecodeInto([]byte(resp.Node.Value), &pod) + err = testapi.Default.Codec().DecodeInto([]byte(resp.Node.Value), &pod) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -424,7 +424,7 @@ func TestEtcdCreateWithContainersNotFound(t *testing.T) { t.Fatalf("Unexpected error %v", err) } var pod api.Pod - err = testapi.Codec().DecodeInto([]byte(resp.Node.Value), &pod) + err = testapi.Default.Codec().DecodeInto([]byte(resp.Node.Value), &pod) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -495,7 +495,7 @@ func TestEtcdCreateWithExistingContainers(t *testing.T) { t.Fatalf("Unexpected error %v", err) } var pod api.Pod - err = testapi.Codec().DecodeInto([]byte(resp.Node.Value), &pod) + err = testapi.Default.Codec().DecodeInto([]byte(resp.Node.Value), &pod) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -578,7 +578,7 @@ func TestEtcdUpdateNotScheduled(t *testing.T) { key, _ := storage.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), validNewPod()), 1) + fakeClient.Set(key, runtime.EncodeOrDie(testapi.Default.Codec(), validNewPod()), 1) podIn := validChangedPod() _, _, err := storage.Update(ctx, podIn) @@ -590,7 +590,7 @@ func TestEtcdUpdateNotScheduled(t *testing.T) { t.Fatalf("Unexpected error: %v", err) } podOut := &api.Pod{} - testapi.Codec().DecodeInto([]byte(response.Node.Value), podOut) + testapi.Default.Codec().DecodeInto([]byte(response.Node.Value), podOut) if !api.Semantic.DeepEqual(podOut, podIn) { t.Errorf("objects differ: %v", util.ObjectDiff(podOut, podIn)) } @@ -603,7 +603,7 @@ func TestEtcdUpdateScheduled(t *testing.T) { key, _ := storage.KeyFunc(ctx, "foo") key = etcdtest.AddPrefix(key) - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), &api.Pod{ + fakeClient.Set(key, 
runtime.EncodeOrDie(testapi.Default.Codec(), &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Namespace: api.NamespaceDefault,
@@ -655,7 +655,7 @@ func TestEtcdUpdateScheduled(t *testing.T) {
t.Fatalf("Unexpected error: %v", err)
}
var podOut api.Pod
- testapi.Codec().DecodeInto([]byte(response.Node.Value), &podOut)
+ testapi.Default.Codec().DecodeInto([]byte(response.Node.Value), &podOut)
if !api.Semantic.DeepEqual(podOut, podIn) {
t.Errorf("expected: %#v, got: %#v", podOut, podIn)
}
@@ -684,7 +684,7 @@ func TestEtcdUpdateStatus(t *testing.T) {
},
},
}
- fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), &podStart), 0)
+ fakeClient.Set(key, runtime.EncodeOrDie(testapi.Default.Codec(), &podStart), 0)
podIn := api.Pod{
ObjectMeta: api.ObjectMeta{
diff --git a/pkg/registry/podtemplate/etcd/etcd_test.go b/pkg/registry/podtemplate/etcd/etcd_test.go
index 8fc83cc8e3b..0b712671979 100644
--- a/pkg/registry/podtemplate/etcd/etcd_test.go
+++ b/pkg/registry/podtemplate/etcd/etcd_test.go
@@ -28,7 +28,7 @@ import (
)
func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) {
- etcdStorage, fakeClient := registrytest.NewEtcdStorage(t)
+ etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "")
return NewREST(etcdStorage), fakeClient
}
diff --git a/pkg/registry/registrytest/etcd.go b/pkg/registry/registrytest/etcd.go
index 95f8aecc19b..c22a393dd1b 100644
--- a/pkg/registry/registrytest/etcd.go
+++ b/pkg/registry/registrytest/etcd.go
@@ -17,6 +17,7 @@ limitations under the License.
package registrytest
import (
+ "fmt"
"testing"
"github.com/coreos/go-etcd/etcd"
@@ -34,10 +35,10 @@ import (
"k8s.io/kubernetes/pkg/tools/etcdtest"
)
-func NewEtcdStorage(t *testing.T) (storage.Interface, *tools.FakeEtcdClient) {
+func NewEtcdStorage(t *testing.T, group string) (storage.Interface, *tools.FakeEtcdClient) {
fakeClient := tools.NewFakeEtcdClient(t)
fakeClient.TestIndex = true
- etcdStorage := etcdstorage.NewEtcdStorage(fakeClient, testapi.Codec(), etcdtest.PathPrefix())
+ etcdStorage := etcdstorage.NewEtcdStorage(fakeClient, testapi.Groups[group].Codec(), etcdtest.PathPrefix())
return etcdStorage, fakeClient
}
@@ -149,6 +150,27 @@ func (t *Tester) TestWatch(valid runtime.Object, labelsPass, labelsFail []labels
}
// =============================================================================
+// get codec based on runtime.Object
+func getCodec(obj runtime.Object) (runtime.Codec, error) {
+ _, kind, err := api.Scheme.ObjectVersionAndKind(obj)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected encoding error: %v", err)
+ }
+ // TODO: caesarxuchao: we should detect which group an object belongs to
+ // by using the version returned by Scheme.ObjectVersionAndKind() once we
+ // split the schemes for internal objects.
+ // TODO: caesarxuchao: we should add a map from kind to group in Scheme.
+ var codec runtime.Codec
+ if api.Scheme.Recognizes(testapi.Default.GroupAndVersion(), kind) {
+ codec = testapi.Default.Codec()
+ } else if api.Scheme.Recognizes(testapi.Experimental.GroupAndVersion(), kind) {
+ codec = testapi.Experimental.Codec()
+ } else {
+ return nil, fmt.Errorf("unexpected kind: %v", kind)
+ }
+ return codec, nil
+}
+
// Helper functions
func (t *Tester) getObject(ctx api.Context, obj runtime.Object) (runtime.Object, error) {
@@ -166,7 +188,12 @@ func (t *Tester) getObject(ctx api.Context, obj runtime.Object) (runtime.Object,
return nil, err
}
result := t.storage.NewFunc()
- if err := testapi.Codec().DecodeInto([]byte(resp.Node.Value), result); err != nil {
+
+ codec, err := getCodec(obj)
+ if err != nil {
+ return nil, err
+ }
+ if err := codec.DecodeInto([]byte(resp.Node.Value), result); err != nil {
return nil, err
}
return result, nil
@@ -182,7 +209,12 @@ func (t *Tester) setObject(ctx api.Context, obj runtime.Object) error {
return err
}
key = etcdtest.AddPrefix(key)
- _, err = t.fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), obj), 0)
+
+ codec, err := getCodec(obj)
+ if err != nil {
+ return err
+ }
+ _, err = t.fakeClient.Set(key, runtime.EncodeOrDie(codec, obj), 0)
return err
}
@@ -193,8 +225,9 @@ func (t *Tester) setObjectsForList(objects []runtime.Object) []runtime.Object {
if len(objects) > 0 {
nodes := make([]*etcd.Node, len(objects))
for i, obj := range objects {
- encoded := runtime.EncodeOrDie(testapi.Codec(), obj)
- decoded, _ := testapi.Codec().Decode([]byte(encoded))
+ codec, _ := getCodec(obj)
+ encoded := runtime.EncodeOrDie(codec, obj)
+ decoded, _ := codec.Decode([]byte(encoded))
nodes[i] = &etcd.Node{Value: encoded}
result[i] = decoded
}
@@ -224,7 +257,11 @@ func (t *Tester) injectWatchError(err error) {
}
func (t *Tester) emitObject(obj runtime.Object, action string) error {
- encoded, err := testapi.Codec().Encode(obj)
+ codec, err := getCodec(obj)
+ if err != nil {
+ return err
+ }
+ encoded, err := codec.Encode(obj)
if err != nil {
return err
}
diff --git a/pkg/registry/resourcequota/etcd/etcd_test.go b/pkg/registry/resourcequota/etcd/etcd_test.go
index 6999bd13623..978bcfe870c 100644
--- a/pkg/registry/resourcequota/etcd/etcd_test.go
+++ b/pkg/registry/resourcequota/etcd/etcd_test.go
@@ -33,7 +33,7 @@ import (
)
func newStorage(t *testing.T) (*REST, *StatusREST, *tools.FakeEtcdClient) {
- etcdStorage, fakeClient := registrytest.NewEtcdStorage(t)
+ etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "")
storage, statusStorage := NewREST(etcdStorage)
return storage, statusStorage, fakeClient
}
@@ -152,7 +152,7 @@ func TestUpdateStatus(t *testing.T) {
key, _ := storage.KeyFunc(ctx, "foo")
key = etcdtest.AddPrefix(key)
resourcequotaStart := validNewResourceQuota()
- fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), resourcequotaStart), 0)
+ fakeClient.Set(key, runtime.EncodeOrDie(testapi.Default.Codec(), resourcequotaStart), 0)
resourcequotaIn := &api.ResourceQuota{
ObjectMeta: api.ObjectMeta{
diff --git a/pkg/registry/secret/etcd/etcd_test.go b/pkg/registry/secret/etcd/etcd_test.go
index e7506c12123..f027c18f4ad 100644
--- a/pkg/registry/secret/etcd/etcd_test.go
+++ b/pkg/registry/secret/etcd/etcd_test.go
@@ -28,7 +28,7 @@ import (
)
func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) {
- etcdStorage, fakeClient := registrytest.NewEtcdStorage(t)
+ etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "")
return NewREST(etcdStorage), fakeClient
}
diff --git a/pkg/registry/service/allocator/etcd/etcd_test.go b/pkg/registry/service/allocator/etcd/etcd_test.go
index e1efedea0b3..1c7d89dc926 100644
--- a/pkg/registry/service/allocator/etcd/etcd_test.go
+++ b/pkg/registry/service/allocator/etcd/etcd_test.go
@@ -30,7 +30,7 @@ import (
)
func newStorage(t *testing.T) (*Etcd, *tools.FakeEtcdClient, allocator.Interface) {
- etcdStorage, fakeClient := registrytest.NewEtcdStorage(t)
+ etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "")
mem := allocator.NewAllocationMap(100, "rangeSpecValue")
etcd := NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", etcdStorage)
return etcd, fakeClient, mem
@@ -57,7 +57,7 @@ func TestEmpty(t *testing.T) {
func TestStore(t *testing.T) {
storage, fakeClient, backing := newStorage(t)
- if _, err := fakeClient.Set(key(), runtime.EncodeOrDie(testapi.Codec(), validNewRangeAllocation()), 0); err != nil {
+ if _, err := fakeClient.Set(key(), runtime.EncodeOrDie(testapi.Default.Codec(), validNewRangeAllocation()), 0); err != nil {
t.Fatalf("unexpected error: %v", err)
}
diff --git a/pkg/registry/service/etcd/etcd_test.go b/pkg/registry/service/etcd/etcd_test.go
index 580d0a093a6..b05021235fb 100644
--- a/pkg/registry/service/etcd/etcd_test.go
+++ b/pkg/registry/service/etcd/etcd_test.go
@@ -29,7 +29,7 @@ import (
)
func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) {
- etcdStorage, fakeClient := registrytest.NewEtcdStorage(t)
+ etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "")
return NewREST(etcdStorage), fakeClient
}
diff --git a/pkg/registry/service/ipallocator/etcd/etcd_test.go b/pkg/registry/service/ipallocator/etcd/etcd_test.go
index 59e73b999dc..4db8f2742ce 100644
--- a/pkg/registry/service/ipallocator/etcd/etcd_test.go
+++ b/pkg/registry/service/ipallocator/etcd/etcd_test.go
@@ -33,7 +33,7 @@ import (
)
func newStorage(t *testing.T) (*tools.FakeEtcdClient, ipallocator.Interface, allocator.Interface) {
- etcdStorage, fakeClient := registrytest.NewEtcdStorage(t)
+ etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "")
_, cidr, err := net.ParseCIDR("192.168.1.0/24")
if err != nil {
t.Fatal(err)
@@ -79,7 +79,7 @@ func TestErrors(t *testing.T) {
func TestStore(t *testing.T) {
fakeClient, storage, backing := newStorage(t)
- if _, err := fakeClient.Set(key(), runtime.EncodeOrDie(testapi.Codec(), validNewRangeAllocation()), 0); err != nil {
+ if _, err := fakeClient.Set(key(), runtime.EncodeOrDie(testapi.Default.Codec(), validNewRangeAllocation()), 0); err != nil {
t.Fatalf("unexpected error: %v", err)
}
diff --git a/pkg/registry/serviceaccount/etcd/etcd_test.go b/pkg/registry/serviceaccount/etcd/etcd_test.go
index b102f965a30..af6e1631d03 100644
--- a/pkg/registry/serviceaccount/etcd/etcd_test.go
+++ b/pkg/registry/serviceaccount/etcd/etcd_test.go
@@ -28,7 +28,7 @@ import (
)
func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) {
- etcdStorage, fakeClient := registrytest.NewEtcdStorage(t)
+ etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "")
return NewREST(etcdStorage), fakeClient
}
diff --git a/pkg/registry/thirdpartyresource/etcd/etcd_test.go b/pkg/registry/thirdpartyresource/etcd/etcd_test.go
index 0f2cea31d53..14e31c5f402 100644
--- a/pkg/registry/thirdpartyresource/etcd/etcd_test.go
+++ b/pkg/registry/thirdpartyresource/etcd/etcd_test.go
@@ -31,7 +31,7 @@ import (
)
func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) {
- etcdStorage, fakeClient := registrytest.NewEtcdStorage(t)
+ etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "experimental")
return NewREST(etcdStorage), fakeClient } diff --git a/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go b/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go index 6dc2bd81643..ea0f7505ca4 100644 --- a/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go +++ b/pkg/registry/thirdpartyresourcedata/etcd/etcd_test.go @@ -31,7 +31,7 @@ import ( ) func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { - etcdStorage, fakeClient := registrytest.NewEtcdStorage(t) + etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "experimental") return NewREST(etcdStorage, "foo", "bar"), fakeClient } diff --git a/pkg/runtime/helper_test.go b/pkg/runtime/helper_test.go index c60ec310dd5..7ec7cbdce56 100644 --- a/pkg/runtime/helper_test.go +++ b/pkg/runtime/helper_test.go @@ -137,7 +137,7 @@ func TestDecodeList(t *testing.T) { pl := &api.List{ Items: []runtime.Object{ &api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}}, - &runtime.Unknown{TypeMeta: runtime.TypeMeta{Kind: "Pod", APIVersion: testapi.Version()}, RawJSON: []byte(`{"kind":"Pod","apiVersion":"` + testapi.Version() + `","metadata":{"name":"test"}}`)}, + &runtime.Unknown{TypeMeta: runtime.TypeMeta{Kind: "Pod", APIVersion: testapi.Default.Version()}, RawJSON: []byte(`{"kind":"Pod","apiVersion":"` + testapi.Default.Version() + `","metadata":{"name":"test"}}`)}, &runtime.Unstructured{TypeMeta: runtime.TypeMeta{Kind: "Foo", APIVersion: "Bar"}, Object: map[string]interface{}{"test": "value"}}, }, } diff --git a/pkg/runtime/unstructured_test.go b/pkg/runtime/unstructured_test.go index 2d9b884cfbe..8780ea29a6c 100644 --- a/pkg/runtime/unstructured_test.go +++ b/pkg/runtime/unstructured_test.go @@ -26,7 +26,7 @@ import ( ) func TestDecodeUnstructured(t *testing.T) { - version := testapi.Version() + version := testapi.Default.Version() rawJson := fmt.Sprintf(`{"kind":"Pod","apiVersion":"%s","metadata":{"name":"test"}}`, version) pl := &api.List{ Items: []runtime.Object{ diff --git a/pkg/storage/cacher_test.go b/pkg/storage/cacher_test.go index 7a160e50a09..b9b7895efac 100644 --- a/pkg/storage/cacher_test.go +++ b/pkg/storage/cacher_test.go @@ -43,7 +43,7 @@ func newTestCacher(client tools.EtcdClient) *storage.Cacher { config := storage.CacherConfig{ CacheCapacity: 10, Versioner: etcdstorage.APIObjectVersioner{}, - Storage: etcdstorage.NewEtcdStorage(client, testapi.Codec(), etcdtest.PathPrefix()), + Storage: etcdstorage.NewEtcdStorage(client, testapi.Default.Codec(), etcdtest.PathPrefix()), Type: &api.Pod{}, ResourcePrefix: prefix, KeyFunc: func(obj runtime.Object) (string, error) { return storage.NamespaceKeyFunc(prefix, obj) }, @@ -94,7 +94,7 @@ func TestListFromMemory(t *testing.T) { { Action: "create", Node: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFoo)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)), CreatedIndex: 1, ModifiedIndex: 1, }, @@ -102,7 +102,7 @@ func TestListFromMemory(t *testing.T) { { Action: "create", Node: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podBar)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podBar)), CreatedIndex: 2, ModifiedIndex: 2, }, @@ -110,7 +110,7 @@ func TestListFromMemory(t *testing.T) { { Action: "create", Node: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podBaz)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podBaz)), CreatedIndex: 3, ModifiedIndex: 3, }, @@ -118,12 +118,12 @@ func TestListFromMemory(t *testing.T) { { Action: "set", Node: &etcd.Node{ - Value: 
string(runtime.EncodeOrDie(testapi.Codec(), podFooPrime)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFooPrime)), CreatedIndex: 1, ModifiedIndex: 4, }, PrevNode: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFoo)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)), CreatedIndex: 1, ModifiedIndex: 1, }, @@ -135,7 +135,7 @@ func TestListFromMemory(t *testing.T) { ModifiedIndex: 5, }, PrevNode: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podBar)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podBar)), CreatedIndex: 1, ModifiedIndex: 1, }, @@ -210,7 +210,7 @@ func TestWatch(t *testing.T) { etcdResponse: &etcd.Response{ Action: "create", Node: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFoo)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)), CreatedIndex: 1, ModifiedIndex: 1, }, @@ -223,7 +223,7 @@ func TestWatch(t *testing.T) { etcdResponse: &etcd.Response{ Action: "create", Node: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podBar)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podBar)), CreatedIndex: 2, ModifiedIndex: 2, }, @@ -236,12 +236,12 @@ func TestWatch(t *testing.T) { etcdResponse: &etcd.Response{ Action: "set", Node: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFoo)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)), CreatedIndex: 1, ModifiedIndex: 3, }, PrevNode: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFoo)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)), CreatedIndex: 1, ModifiedIndex: 1, }, @@ -326,7 +326,7 @@ func TestFiltering(t *testing.T) { etcdResponse: &etcd.Response{ Action: "create", Node: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFoo)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)), CreatedIndex: 1, ModifiedIndex: 1, }, @@ -339,12 +339,12 @@ func TestFiltering(t *testing.T) { etcdResponse: &etcd.Response{ Action: "set", Node: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFooFiltered)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFooFiltered)), CreatedIndex: 1, ModifiedIndex: 2, }, PrevNode: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFoo)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)), CreatedIndex: 1, ModifiedIndex: 1, }, @@ -358,12 +358,12 @@ func TestFiltering(t *testing.T) { etcdResponse: &etcd.Response{ Action: "set", Node: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFoo)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)), CreatedIndex: 1, ModifiedIndex: 3, }, PrevNode: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFooFiltered)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFooFiltered)), CreatedIndex: 1, ModifiedIndex: 2, }, @@ -377,12 +377,12 @@ func TestFiltering(t *testing.T) { etcdResponse: &etcd.Response{ Action: "set", Node: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFoo)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)), CreatedIndex: 1, ModifiedIndex: 4, }, PrevNode: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFoo)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)), CreatedIndex: 1, ModifiedIndex: 3, }, @@ -399,7 +399,7 @@ func TestFiltering(t *testing.T) { ModifiedIndex: 
5, }, PrevNode: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFoo)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)), CreatedIndex: 1, ModifiedIndex: 4, }, @@ -462,7 +462,7 @@ func TestStorageError(t *testing.T) { fakeClient.WatchResponse <- &etcd.Response{ Action: "create", Node: &etcd.Node{ - Value: string(runtime.EncodeOrDie(testapi.Codec(), podFoo)), + Value: string(runtime.EncodeOrDie(testapi.Default.Codec(), podFoo)), CreatedIndex: 1, ModifiedIndex: 1, }, diff --git a/pkg/storage/etcd/etcd_helper_test.go b/pkg/storage/etcd/etcd_helper_test.go index c84b66f808d..99820304b98 100644 --- a/pkg/storage/etcd/etcd_helper_test.go +++ b/pkg/storage/etcd/etcd_helper_test.go @@ -57,8 +57,8 @@ var codec runtime.Codec func init() { scheme = runtime.NewScheme() scheme.AddKnownTypes("", &TestResource{}) - scheme.AddKnownTypes(testapi.Version(), &TestResource{}) - codec = runtime.CodecFor(scheme, testapi.Version()) + scheme.AddKnownTypes(testapi.Default.Version(), &TestResource{}) + codec = runtime.CodecFor(scheme, testapi.Default.Version()) scheme.AddConversionFuncs( func(in *TestResource, out *TestResource, s conversion.Scope) error { *out = *in @@ -85,7 +85,7 @@ func TestIsEtcdNotFound(t *testing.T) { // Returns an encoded version of api.Pod with the given name. func getEncodedPod(name string) string { - pod, _ := testapi.Codec().Encode(&api.Pod{ + pod, _ := testapi.Default.Codec().Encode(&api.Pod{ ObjectMeta: api.ObjectMeta{Name: name}, }) return string(pod) @@ -93,7 +93,7 @@ func getEncodedPod(name string) string { func TestList(t *testing.T) { fakeClient := tools.NewFakeEtcdClient(t) - helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) + helper := newEtcdHelper(fakeClient, testapi.Default.Codec(), etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") fakeClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ @@ -167,7 +167,7 @@ func TestList(t *testing.T) { // TestListAcrossDirectories ensures that the client excludes directories and flattens tree-response - simulates cross-namespace query func TestListAcrossDirectories(t *testing.T) { fakeClient := tools.NewFakeEtcdClient(t) - helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) + helper := newEtcdHelper(fakeClient, testapi.Default.Codec(), etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") fakeClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ @@ -254,7 +254,7 @@ func TestListAcrossDirectories(t *testing.T) { func TestListExcludesDirectories(t *testing.T) { fakeClient := tools.NewFakeEtcdClient(t) - helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) + helper := newEtcdHelper(fakeClient, testapi.Default.Codec(), etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") fakeClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ @@ -329,7 +329,7 @@ func TestListExcludesDirectories(t *testing.T) { func TestGet(t *testing.T) { fakeClient := tools.NewFakeEtcdClient(t) - helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) + helper := newEtcdHelper(fakeClient, testapi.Default.Codec(), etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") grace := int64(30) expect := api.Pod{ @@ -340,7 +340,7 @@ func TestGet(t *testing.T) { TerminationGracePeriodSeconds: &grace, }, } - fakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), &expect), 0) + fakeClient.Set(key, runtime.EncodeOrDie(testapi.Default.Codec(), &expect), 0) var got api.Pod err := 
helper.Get("/some/key", &got, false) if err != nil { @@ -353,7 +353,7 @@ func TestGet(t *testing.T) { func TestGetNotFoundErr(t *testing.T) { fakeClient := tools.NewFakeEtcdClient(t) - helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) + helper := newEtcdHelper(fakeClient, testapi.Default.Codec(), etcdtest.PathPrefix()) key1 := etcdtest.AddPrefix("/some/key") fakeClient.Data[key1] = tools.EtcdResponseWithError{ R: &etcd.Response{ @@ -397,13 +397,13 @@ func TestGetNotFoundErr(t *testing.T) { func TestCreate(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} fakeClient := tools.NewFakeEtcdClient(t) - helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) + helper := newEtcdHelper(fakeClient, testapi.Default.Codec(), etcdtest.PathPrefix()) returnedObj := &api.Pod{} err := helper.Create("/some/key", obj, returnedObj, 5) if err != nil { t.Errorf("Unexpected error %#v", err) } - data, err := testapi.Codec().Encode(obj) + data, err := testapi.Default.Codec().Encode(obj) if err != nil { t.Errorf("Unexpected error %#v", err) } @@ -423,7 +423,7 @@ func TestCreate(t *testing.T) { func TestCreateNilOutParam(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} fakeClient := tools.NewFakeEtcdClient(t) - helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) + helper := newEtcdHelper(fakeClient, testapi.Default.Codec(), etcdtest.PathPrefix()) err := helper.Create("/some/key", obj, nil, 5) if err != nil { t.Errorf("Unexpected error %#v", err) @@ -433,13 +433,13 @@ func TestCreateNilOutParam(t *testing.T) { func TestSet(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} fakeClient := tools.NewFakeEtcdClient(t) - helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) + helper := newEtcdHelper(fakeClient, testapi.Default.Codec(), etcdtest.PathPrefix()) returnedObj := &api.Pod{} err := helper.Set("/some/key", obj, returnedObj, 5) if err != nil { t.Errorf("Unexpected error %#v", err) } - data, err := testapi.Codec().Encode(obj) + data, err := testapi.Default.Codec().Encode(obj) if err != nil { t.Errorf("Unexpected error %#v", err) } @@ -461,7 +461,7 @@ func TestSetFailCAS(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}} fakeClient := tools.NewFakeEtcdClient(t) fakeClient.CasErr = fakeClient.NewError(123) - helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) + helper := newEtcdHelper(fakeClient, testapi.Default.Codec(), etcdtest.PathPrefix()) err := helper.Set("/some/key", obj, nil, 5) if err == nil { t.Errorf("Expecting error.") @@ -472,12 +472,12 @@ func TestSetWithVersion(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}} fakeClient := tools.NewFakeEtcdClient(t) fakeClient.TestIndex = true - helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) + helper := newEtcdHelper(fakeClient, testapi.Default.Codec(), etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") fakeClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ - Value: runtime.EncodeOrDie(testapi.Codec(), obj), + Value: runtime.EncodeOrDie(testapi.Default.Codec(), obj), ModifiedIndex: 1, }, }, @@ -488,7 +488,7 @@ func TestSetWithVersion(t *testing.T) { if err != nil { t.Fatalf("Unexpected error %#v", err) } - data, err := testapi.Codec().Encode(obj) + data, err := testapi.Default.Codec().Encode(obj) if err != nil { 
t.Fatalf("Unexpected error %#v", err) } @@ -508,7 +508,7 @@ func TestSetWithVersion(t *testing.T) { func TestSetWithoutResourceVersioner(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} fakeClient := tools.NewFakeEtcdClient(t) - helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) + helper := newEtcdHelper(fakeClient, testapi.Default.Codec(), etcdtest.PathPrefix()) helper.versioner = nil returnedObj := &api.Pod{} err := helper.Set("/some/key", obj, returnedObj, 3) @@ -516,7 +516,7 @@ func TestSetWithoutResourceVersioner(t *testing.T) { if err != nil { t.Errorf("Unexpected error %#v", err) } - data, err := testapi.Codec().Encode(obj) + data, err := testapi.Default.Codec().Encode(obj) if err != nil { t.Errorf("Unexpected error %#v", err) } @@ -536,7 +536,7 @@ func TestSetWithoutResourceVersioner(t *testing.T) { func TestSetNilOutParam(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} fakeClient := tools.NewFakeEtcdClient(t) - helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) + helper := newEtcdHelper(fakeClient, testapi.Default.Codec(), etcdtest.PathPrefix()) helper.versioner = nil err := helper.Set("/some/key", obj, nil, 3) if err != nil { @@ -860,7 +860,7 @@ func TestGetEtcdVersion_NotListening(t *testing.T) { func TestPrefixEtcdKey(t *testing.T) { fakeClient := tools.NewFakeEtcdClient(t) prefix := path.Join("/", etcdtest.PathPrefix()) - helper := newEtcdHelper(fakeClient, testapi.Codec(), prefix) + helper := newEtcdHelper(fakeClient, testapi.Default.Codec(), prefix) baseKey := "/some/key" diff --git a/pkg/watch/json/decoder_test.go b/pkg/watch/json/decoder_test.go index c2e8d2be810..ba7bf7640fc 100644 --- a/pkg/watch/json/decoder_test.go +++ b/pkg/watch/json/decoder_test.go @@ -33,12 +33,12 @@ func TestDecoder(t *testing.T) { for _, eventType := range table { out, in := io.Pipe() - decoder := NewDecoder(out, testapi.Codec()) + decoder := NewDecoder(out, testapi.Default.Codec()) expect := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} encoder := json.NewEncoder(in) go func() { - data, err := testapi.Codec().Encode(expect) + data, err := testapi.Default.Codec().Encode(expect) if err != nil { t.Fatalf("Unexpected error %v", err) } @@ -81,7 +81,7 @@ func TestDecoder(t *testing.T) { func TestDecoder_SourceClose(t *testing.T) { out, in := io.Pipe() - decoder := NewDecoder(out, testapi.Codec()) + decoder := NewDecoder(out, testapi.Default.Codec()) done := make(chan struct{}) diff --git a/pkg/watch/json/encoder_test.go b/pkg/watch/json/encoder_test.go index c5a0cfd1c56..92550692fdd 100644 --- a/pkg/watch/json/encoder_test.go +++ b/pkg/watch/json/encoder_test.go @@ -36,17 +36,17 @@ func TestEncodeDecodeRoundTrip(t *testing.T) { { watch.Added, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}, - testapi.Codec(), + testapi.Default.Codec(), }, { watch.Modified, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}, - testapi.Codec(), + testapi.Default.Codec(), }, { watch.Deleted, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}, - testapi.Codec(), + testapi.Default.Codec(), }, } for i, testCase := range testCases { diff --git a/plugin/pkg/scheduler/factory/factory_test.go b/plugin/pkg/scheduler/factory/factory_test.go index 9301e78ff00..e18250224e6 100644 --- a/plugin/pkg/scheduler/factory/factory_test.go +++ b/plugin/pkg/scheduler/factory/factory_test.go @@ -43,7 +43,7 @@ func TestCreate(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := 
client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Default.Version()}) factory := NewConfigFactory(client, nil) factory.Create() } @@ -61,7 +61,7 @@ func TestCreateFromConfig(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Default.Version()}) factory := NewConfigFactory(client, nil) // Pre-register some predicate and priority functions @@ -103,7 +103,7 @@ func TestCreateFromEmptyConfig(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Default.Version()}) factory := NewConfigFactory(client, nil) configData = []byte(`{}`) @@ -149,10 +149,10 @@ func TestDefaultErrorFunc(t *testing.T) { mux := http.NewServeMux() // FakeHandler musn't be sent requests other than the one you want to test. - mux.Handle(testapi.ResourcePath("pods", "bar", "foo"), &handler) + mux.Handle(testapi.Default.ResourcePath("pods", "bar", "foo"), &handler) server := httptest.NewServer(mux) defer server.Close() - factory := NewConfigFactory(client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()}), nil) + factory := NewConfigFactory(client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Default.Version()}), nil) queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc) podBackoff := podBackoff{ perPodBackoff: map[string]*backoffEntry{}, @@ -172,7 +172,7 @@ func TestDefaultErrorFunc(t *testing.T) { if !exists { continue } - handler.ValidateRequest(t, testapi.ResourcePath("pods", "bar", "foo"), "GET", nil) + handler.ValidateRequest(t, testapi.Default.ResourcePath("pods", "bar", "foo"), "GET", nil) if e, a := testPod, got; !reflect.DeepEqual(e, a) { t.Errorf("Expected %v, got %v", e, a) } @@ -235,15 +235,15 @@ func TestBind(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Default.Version()}) b := binder{client} if err := b.Bind(item.binding); err != nil { t.Errorf("Unexpected error: %v", err) continue } - expectedBody := runtime.EncodeOrDie(testapi.Codec(), item.binding) - handler.ValidateRequest(t, testapi.ResourcePath("bindings", api.NamespaceDefault, ""), "POST", &expectedBody) + expectedBody := runtime.EncodeOrDie(testapi.Default.Codec(), item.binding) + handler.ValidateRequest(t, testapi.Default.ResourcePath("bindings", api.NamespaceDefault, ""), "POST", &expectedBody) } } diff --git a/plugin/pkg/scheduler/scheduler_test.go b/plugin/pkg/scheduler/scheduler_test.go index 21145ce168a..3e2f7f13016 100644 --- a/plugin/pkg/scheduler/scheduler_test.go +++ b/plugin/pkg/scheduler/scheduler_test.go @@ -40,7 +40,7 @@ func (fb fakeBinder) Bind(binding *api.Binding) error { return fb.b(binding) } func podWithID(id, desiredHost string) *api.Pod { return &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: id, SelfLink: testapi.SelfLink("pods", id)}, + ObjectMeta: api.ObjectMeta{Name: id, SelfLink: testapi.Default.SelfLink("pods", id)}, Spec: api.PodSpec{ NodeName: desiredHost, }, diff --git a/shippable.yml b/shippable.yml index 
5686b3e3cad..be7bb3d2c08 100644 --- a/shippable.yml +++ b/shippable.yml @@ -6,7 +6,7 @@ matrix: include: - go: 1.4 env: - - KUBE_TEST_API_VERSIONS=v1 KUBE_TEST_ETCD_PREFIXES=registry + - KUBE_TEST_API_VERSIONS=v1,experimental/v1 KUBE_TEST_ETCD_PREFIXES=registry - KUBE_JUNIT_REPORT_DIR="${SHIPPABLE_REPO_DIR}/shippable/testresults" - CI_NAME="shippable" - CI_BUILD_NUMBER="$BUILD_NUMBER" @@ -17,7 +17,7 @@ matrix: - secure: hfh1Kwl2XYUlJCn4dtKSG0C9yXl5TtksVOY74OeqolvDAdVj4sc+GJD3Bywsp91CJe8YMEnkt9rN0WGI+gPVMcjTmZ9tMUxKiNNBP8m5oLRFbdgKOkNuXjpjpFHHWGAnNhMmh9vjI+ehADo+QIpU1fGxd3yO4tmIJ1qoK3QqvUrOZ1RwUubRXoeVn3xy3LK5yg4vP5ruitbNeWMw/RZZ7D6czvqvEfCgV6b4mdNDRMiqlUJNkaTRc3em1APXr30yagDV3a7hXLq3HdlyFwvF+9pmB4AKhQctyjPN4zvvPd0/gJXq3ZHXSlZXOZBMPXHlSS5pizfSInNszyZyrP3+/w== - go: 1.3 env: - - KUBE_TEST_API_VERSIONS=v1 KUBE_TEST_ETCD_PREFIXES=kubernetes.io/registry + - KUBE_TEST_API_VERSIONS=v1,experimental/v1 KUBE_TEST_ETCD_PREFIXES=kubernetes.io/registry - KUBE_JUNIT_REPORT_DIR="${SHIPPABLE_REPO_DIR}/shippable/testresults" - CI_NAME="shippable" - CI_BUILD_NUMBER="$BUILD_NUMBER" @@ -45,8 +45,17 @@ install: - ./hack/build-go.sh - godep go install ./... - ./hack/travis/install-etcd.sh + - ./hack/verify-gofmt.sh + - ./hack/verify-boilerplate.sh + - ./hack/verify-description.sh + - ./hack/verify-flags-underscore.py + - ./hack/verify-godeps.sh ${BASE_BRANCH} - ./hack/travis/install-std-race.sh - - make verify BRANCH=${BASE_BRANCH} + - ./hack/verify-generated-conversions.sh + - ./hack/verify-generated-deep-copies.sh + - ./hack/verify-generated-docs.sh + - ./hack/verify-swagger-spec.sh + - ./hack/verify-linkcheck.sh script: # Disable coverage collection on pull requests diff --git a/test/e2e/persistent_volumes.go b/test/e2e/persistent_volumes.go index 968bfac6db1..8341a27a066 100644 --- a/test/e2e/persistent_volumes.go +++ b/test/e2e/persistent_volumes.go @@ -163,7 +163,7 @@ func makeCheckPod(ns string, nfsserver string) *api.Pod { return &api.Pod{ TypeMeta: api.TypeMeta{ Kind: "Pod", - APIVersion: testapi.Version(), + APIVersion: testapi.Default.Version(), }, ObjectMeta: api.ObjectMeta{ GenerateName: "checker-", diff --git a/test/e2e/proxy.go b/test/e2e/proxy.go index 3e0ac808876..b1149d8159e 100644 --- a/test/e2e/proxy.go +++ b/test/e2e/proxy.go @@ -36,7 +36,7 @@ import ( ) var _ = Describe("Proxy", func() { - version := testapi.Version() + version := testapi.Default.Version() Context("version "+version, func() { proxyContext(version) }) }) diff --git a/test/integration/auth_test.go b/test/integration/auth_test.go index 5e8a29b2b3d..e1f3577f7f9 100644 --- a/test/integration/auth_test.go +++ b/test/integration/auth_test.go @@ -65,22 +65,22 @@ func getTestTokenAuth() authenticator.Request { } func path(resource, namespace, name string) string { - return testapi.ResourcePath(resource, namespace, name) + return testapi.Default.ResourcePath(resource, namespace, name) } func pathWithPrefix(prefix, resource, namespace, name string) string { - return testapi.ResourcePathWithPrefix(prefix, resource, namespace, name) + return testapi.Default.ResourcePathWithPrefix(prefix, resource, namespace, name) } func timeoutPath(resource, namespace, name string) string { - return addTimeoutFlag(testapi.ResourcePath(resource, namespace, name)) + return addTimeoutFlag(testapi.Default.ResourcePath(resource, namespace, name)) } // Bodies for requests used in subsequent tests. 
var aPod string = ` { "kind": "Pod", - "apiVersion": "` + testapi.Version() + `", + "apiVersion": "` + testapi.Default.Version() + `", "metadata": { "name": "a", "creationTimestamp": null%s @@ -98,7 +98,7 @@ var aPod string = ` var aRC string = ` { "kind": "ReplicationController", - "apiVersion": "` + testapi.Version() + `", + "apiVersion": "` + testapi.Default.Version() + `", "metadata": { "name": "a", "labels": { @@ -131,7 +131,7 @@ var aRC string = ` var aService string = ` { "kind": "Service", - "apiVersion": "` + testapi.Version() + `", + "apiVersion": "` + testapi.Default.Version() + `", "metadata": { "name": "a", "labels": { @@ -155,7 +155,7 @@ var aService string = ` var aNode string = ` { "kind": "Node", - "apiVersion": "` + testapi.Version() + `", + "apiVersion": "` + testapi.Default.Version() + `", "metadata": { "name": "a"%s }, @@ -167,7 +167,7 @@ var aNode string = ` var aEvent string = ` { "kind": "Event", - "apiVersion": "` + testapi.Version() + `", + "apiVersion": "` + testapi.Default.Version() + `", "metadata": { "name": "a"%s }, @@ -183,7 +183,7 @@ var aEvent string = ` var aBinding string = ` { "kind": "Binding", - "apiVersion": "` + testapi.Version() + `", + "apiVersion": "` + testapi.Default.Version() + `", "metadata": { "name": "a"%s }, @@ -206,7 +206,7 @@ var emptyEndpoints string = ` var aEndpoints string = ` { "kind": "Endpoints", - "apiVersion": "` + testapi.Version() + `", + "apiVersion": "` + testapi.Default.Version() + `", "metadata": { "name": "a"%s }, @@ -231,7 +231,7 @@ var aEndpoints string = ` var deleteNow string = ` { "kind": "DeleteOptions", - "apiVersion": "` + testapi.Version() + `", + "apiVersion": "` + testapi.Default.Version() + `", "gracePeriodSeconds": 0%s } ` diff --git a/test/integration/client_test.go b/test/integration/client_test.go index 732fc52aa45..c27390fff02 100644 --- a/test/integration/client_test.go +++ b/test/integration/client_test.go @@ -43,7 +43,7 @@ func TestClient(t *testing.T) { ns := api.NamespaceDefault framework.DeleteAllEtcdKeys() - client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()}) info, err := client.ServerVersion() if err != nil { @@ -113,7 +113,7 @@ func TestSingleWatch(t *testing.T) { ns := "blargh" deleteAllEtcdKeys() - client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()}) mkEvent := func(i int) *api.Event { name := fmt.Sprintf("event-%v", i) @@ -197,7 +197,7 @@ func TestMultiWatch(t *testing.T) { defer s.Close() ns := api.NamespaceDefault - client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()}) dummyEvent := func(i int) *api.Event { name := fmt.Sprintf("unrelated-%v", i) diff --git a/test/integration/etcd_tools_test.go b/test/integration/etcd_tools_test.go index 60b9408edb0..9eb6b9c58b6 100644 --- a/test/integration/etcd_tools_test.go +++ b/test/integration/etcd_tools_test.go @@ -34,7 +34,7 @@ import ( func TestSet(t *testing.T) { client := framework.NewEtcdClient() - etcdStorage := etcd.NewEtcdStorage(client, testapi.Codec(), "") + etcdStorage := etcd.NewEtcdStorage(client, testapi.Default.Codec(), "") framework.WithEtcdKey(func(key string) { testObject := api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: "foo"}} if err := etcdStorage.Set(key, 
&testObject, nil, 0); err != nil { @@ -44,7 +44,7 @@ func TestSet(t *testing.T) { if err != nil || resp.Node == nil { t.Fatalf("unexpected error: %v %v", err, resp) } - decoded, err := testapi.Codec().Decode([]byte(resp.Node.Value)) + decoded, err := testapi.Default.Codec().Decode([]byte(resp.Node.Value)) if err != nil { t.Fatalf("unexpected response: %#v", resp.Node) } @@ -57,10 +57,10 @@ func TestSet(t *testing.T) { func TestGet(t *testing.T) { client := framework.NewEtcdClient() - etcdStorage := etcd.NewEtcdStorage(client, testapi.Codec(), "") + etcdStorage := etcd.NewEtcdStorage(client, testapi.Default.Codec(), "") framework.WithEtcdKey(func(key string) { testObject := api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: "foo"}} - coded, err := testapi.Codec().Encode(&testObject) + coded, err := testapi.Default.Codec().Encode(&testObject) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -82,7 +82,7 @@ func TestGet(t *testing.T) { func TestWriteTTL(t *testing.T) { client := framework.NewEtcdClient() - etcdStorage := etcd.NewEtcdStorage(client, testapi.Codec(), "") + etcdStorage := etcd.NewEtcdStorage(client, testapi.Default.Codec(), "") framework.WithEtcdKey(func(key string) { testObject := api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: "foo"}} if err := etcdStorage.Set(key, &testObject, nil, 0); err != nil { @@ -135,10 +135,10 @@ func TestWriteTTL(t *testing.T) { func TestWatch(t *testing.T) { client := framework.NewEtcdClient() - etcdStorage := etcd.NewEtcdStorage(client, testapi.Codec(), etcdtest.PathPrefix()) + etcdStorage := etcd.NewEtcdStorage(client, testapi.Default.Codec(), etcdtest.PathPrefix()) framework.WithEtcdKey(func(key string) { key = etcdtest.AddPrefix(key) - resp, err := client.Set(key, runtime.EncodeOrDie(testapi.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}), 0) + resp, err := client.Set(key, runtime.EncodeOrDie(testapi.Default.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}), 0) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/test/integration/framework/etcd_utils.go b/test/integration/framework/etcd_utils.go index d4cc7a0164e..e4f09ecad1f 100644 --- a/test/integration/framework/etcd_utils.go +++ b/test/integration/framework/etcd_utils.go @@ -41,7 +41,7 @@ func NewEtcdClient() *etcd.Client { } func NewEtcdStorage() (storage.Interface, error) { - return master.NewEtcdStorage(NewEtcdClient(), latest.InterfacesFor, testapi.Version(), etcdtest.PathPrefix()) + return master.NewEtcdStorage(NewEtcdClient(), latest.InterfacesFor, testapi.Default.Version(), etcdtest.PathPrefix()) } func RequireEtcd() { diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go index 31b7dc8275b..7c90f1596a1 100644 --- a/test/integration/framework/master_utils.go +++ b/test/integration/framework/master_utils.go @@ -99,7 +99,7 @@ func NewMasterComponents(c *Config) *MasterComponents { if c.DeleteEtcdKeys { DeleteAllEtcdKeys() } - restClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version(), QPS: c.QPS, Burst: c.Burst}) + restClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version(), QPS: c.QPS, Burst: c.Burst}) rcStopCh := make(chan struct{}) controllerManager := replicationcontroller.NewReplicationManager(restClient, c.Burst) @@ -269,7 +269,7 @@ func StartPods(numPods int, host string, restClient *client.Client) error { // TODO: Merge this into startMasterOrDie. 
func RunAMaster(t *testing.T) (*master.Master, *httptest.Server) { etcdClient := NewEtcdClient() - etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.InterfacesFor, testapi.Version(), etcdtest.PathPrefix()) + etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.InterfacesFor, testapi.Default.Version(), etcdtest.PathPrefix()) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/test/integration/metrics_test.go b/test/integration/metrics_test.go index 4455bdba0c4..d3dc6945c78 100644 --- a/test/integration/metrics_test.go +++ b/test/integration/metrics_test.go @@ -109,7 +109,7 @@ func TestApiserverMetrics(t *testing.T) { // Make a request to the apiserver to ensure there's at least one data point // for the metrics we're expecting -- otherwise, they won't be exported. - client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()}) if _, err := client.Pods(api.NamespaceDefault).List(labels.Everything(), fields.Everything()); err != nil { t.Fatalf("unexpected error getting pods: %v", err) } diff --git a/test/integration/persistent_volumes_test.go b/test/integration/persistent_volumes_test.go index 93b979b2ca3..fdd1f406dd8 100644 --- a/test/integration/persistent_volumes_test.go +++ b/test/integration/persistent_volumes_test.go @@ -40,7 +40,7 @@ func TestPersistentVolumeClaimBinder(t *testing.T) { defer s.Close() deleteAllEtcdKeys() - client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()}) + client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()}) binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(client, 1*time.Second) binder.Run() diff --git a/test/integration/scheduler_test.go b/test/integration/scheduler_test.go index 3dfe04b9bf0..a6042900afa 100644 --- a/test/integration/scheduler_test.go +++ b/test/integration/scheduler_test.go @@ -77,7 +77,7 @@ func TestUnschedulableNodes(t *testing.T) { AdmissionControl: admit.NewAlwaysAdmit(), }) - restClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()}) + restClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()}) schedulerConfigFactory := factory.NewConfigFactory(restClient, nil) schedulerConfig, err := schedulerConfigFactory.Create() diff --git a/test/integration/secret_test.go b/test/integration/secret_test.go index e1627c709e1..77191c61634 100644 --- a/test/integration/secret_test.go +++ b/test/integration/secret_test.go @@ -71,8 +71,8 @@ func TestSecrets(t *testing.T) { }) framework.DeleteAllEtcdKeys() - client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Version()}) - DoTestSecrets(t, client, testapi.Version()) + client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()}) + DoTestSecrets(t, client, testapi.Default.Version()) } // DoTestSecrets test secrets for one api version. 
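A minimal sketch, not part of the patch, of the group-indexed testapi usage these hunks converge on: the package-level helpers (Codec, Version, ResourcePath, SelfLink) are reached through a per-group handle, with testapi.Default covering the core API group. Import paths assume this tree's layout, and only calls already exercised in the hunks above are used:

```go
// Sketch only: the per-group testapi helpers that the hunks above switch to.
// Import paths assume this tree's layout.
package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/runtime"
)

// defaultGroupRoundTrip encodes and decodes a Pod with the default group's
// codec and returns that group's version string, mirroring the test changes.
func defaultGroupRoundTrip() (string, error) {
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}

	// testapi.Default.Codec() replaces the old package-level testapi.Codec().
	data := string(runtime.EncodeOrDie(testapi.Default.Codec(), pod))

	if _, err := testapi.Default.Codec().Decode([]byte(data)); err != nil {
		return "", err
	}

	// Version(), ResourcePath() and SelfLink() hang off the same handle.
	return testapi.Default.Version(), nil
}
```
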
diff --git a/test/integration/service_account_test.go b/test/integration/service_account_test.go index 8b915cff163..7218b547f8a 100644 --- a/test/integration/service_account_test.go +++ b/test/integration/service_account_test.go @@ -341,7 +341,7 @@ func startServiceAccountTestServer(t *testing.T) (*client.Client, client.Config, deleteAllEtcdKeys() // Etcd - etcdStorage, err := master.NewEtcdStorage(newEtcdClient(), latest.InterfacesFor, testapi.Version(), etcdtest.PathPrefix()) + etcdStorage, err := master.NewEtcdStorage(newEtcdClient(), latest.InterfacesFor, testapi.Default.Version(), etcdtest.PathPrefix()) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -353,9 +353,9 @@ func startServiceAccountTestServer(t *testing.T) (*client.Client, client.Config, })) // Anonymous client config - clientConfig := client.Config{Host: apiServer.URL, Version: testapi.Version()} + clientConfig := client.Config{Host: apiServer.URL, Version: testapi.Default.Version()} // Root client - rootClient := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Version(), BearerToken: rootToken}) + rootClient := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Default.Version(), BearerToken: rootToken}) // Set up two authenticators: // 1. A token authenticator that maps the rootToken to the "root" user diff --git a/test/integration/utils.go b/test/integration/utils.go index 1e3e9ddf24e..fb8c4cac838 100644 --- a/test/integration/utils.go +++ b/test/integration/utils.go @@ -66,7 +66,7 @@ func deleteAllEtcdKeys() { } func runAMaster(t *testing.T) (*master.Master, *httptest.Server) { - etcdStorage, err := master.NewEtcdStorage(newEtcdClient(), latest.InterfacesFor, testapi.Version(), etcdtest.PathPrefix()) + etcdStorage, err := master.NewEtcdStorage(newEtcdClient(), latest.InterfacesFor, testapi.Default.Version(), etcdtest.PathPrefix()) if err != nil { t.Fatalf("unexpected error: %v", err) } From a4fc22c849847c304ebbc50873a776c710041971 Mon Sep 17 00:00:00 2001 From: He Simei Date: Thu, 27 Aug 2015 21:53:09 +0800 Subject: [PATCH 065/101] deprecate etcd on node --- cluster/ubuntu/build.sh | 1 - cluster/ubuntu/minion/init_conf/etcd.conf | 31 ------ cluster/ubuntu/minion/init_conf/flanneld.conf | 7 +- .../ubuntu/minion/init_conf/kube-proxy.conf | 6 +- cluster/ubuntu/minion/init_conf/kubelet.conf | 6 +- cluster/ubuntu/minion/init_scripts/etcd | 100 ------------------ cluster/ubuntu/minion/init_scripts/kubelet | 2 +- cluster/ubuntu/reconfDocker.sh | 63 ++++++----- cluster/ubuntu/util.sh | 82 +++++++------- 9 files changed, 89 insertions(+), 209 deletions(-) delete mode 100644 cluster/ubuntu/minion/init_conf/etcd.conf delete mode 100755 cluster/ubuntu/minion/init_scripts/etcd diff --git a/cluster/ubuntu/build.sh b/cluster/ubuntu/build.sh index b6081796212..cb9e7efdcba 100755 --- a/cluster/ubuntu/build.sh +++ b/cluster/ubuntu/build.sh @@ -51,7 +51,6 @@ if [ ! -f etcd.tar.gz ] ; then tar xzf etcd.tar.gz fi cp $ETCD/etcd $ETCD/etcdctl binaries/master -cp $ETCD/etcd $ETCD/etcdctl binaries/minion # k8s echo "Download kubernetes release ..." 
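With etcd removed from the minion binaries, a minion keeps only flanneld, kubelet and kube-proxy, and flanneld is pointed at the single etcd on the master (client port 4001). A sketch of the /etc/default files that the rewritten create-etcd-opts and create-flanneld-opts helpers in cluster/ubuntu/util.sh end up producing — values are illustrative and MASTER_IP stands in for the real master address:

```sh
# Illustrative only; the real files are generated by create-etcd-opts /
# create-flanneld-opts in cluster/ubuntu/util.sh and copied to /etc/default.

# Master: a single etcd member serving only the client API on port 4001.
cat <<'EOF' > /etc/default/etcd
ETCD_OPTS="-name infra -listen-client-urls http://0.0.0.0:4001 -advertise-client-urls http://127.0.0.1:4001"
EOF

# Minion: no local etcd; flanneld reaches the master's etcd instead
# (the master itself points at 127.0.0.1). MASTER_IP is a placeholder.
cat <<EOF > /etc/default/flanneld
FLANNEL_OPTS="--etcd-endpoints=http://MASTER_IP:4001"
EOF
```
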
diff --git a/cluster/ubuntu/minion/init_conf/etcd.conf b/cluster/ubuntu/minion/init_conf/etcd.conf deleted file mode 100644 index a1caaf36165..00000000000 --- a/cluster/ubuntu/minion/init_conf/etcd.conf +++ /dev/null @@ -1,31 +0,0 @@ -description "Etcd service" -author "@jainvipin" - -start on (net-device-up - and local-filesystems - and runlevel [2345]) - -respawn - -pre-start script - # see also https://github.com/jainvipin/kubernetes-ubuntu-start - ETCD=/opt/bin/$UPSTART_JOB - if [ -f /etc/default/$UPSTART_JOB ]; then - . /etc/default/$UPSTART_JOB - fi - if [ -f $ETCD ]; then - exit 0 - fi - echo "$ETCD binary not found, exiting" - exit 22 -end script - -script - # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) - ETCD=/opt/bin/$UPSTART_JOB - ETCD_OPTS="" - if [ -f /etc/default/$UPSTART_JOB ]; then - . /etc/default/$UPSTART_JOB - fi - exec "$ETCD" $ETCD_OPTS -end script diff --git a/cluster/ubuntu/minion/init_conf/flanneld.conf b/cluster/ubuntu/minion/init_conf/flanneld.conf index 9be8c49d918..3cc59848a79 100644 --- a/cluster/ubuntu/minion/init_conf/flanneld.conf +++ b/cluster/ubuntu/minion/init_conf/flanneld.conf @@ -3,10 +3,9 @@ author "@chenxingyu" respawn -# start in conjunction with etcd -start on started etcd -stop on stopping etcd - +start on (net-device-up + and local-filesystems + and runlevel [2345]) pre-start script FLANNEL=/opt/bin/$UPSTART_JOB if [ -f /etc/default/$UPSTART_JOB ]; then diff --git a/cluster/ubuntu/minion/init_conf/kube-proxy.conf b/cluster/ubuntu/minion/init_conf/kube-proxy.conf index 16a79b0642a..4fed4e5643f 100644 --- a/cluster/ubuntu/minion/init_conf/kube-proxy.conf +++ b/cluster/ubuntu/minion/init_conf/kube-proxy.conf @@ -3,9 +3,9 @@ author "@jainvipin" respawn -# start in conjunction with etcd -start on started etcd -stop on stopping etcd +# start in conjunction with flanneld +start on started flanneld +stop on stopping flanneld limit nofile 65536 65536 diff --git a/cluster/ubuntu/minion/init_conf/kubelet.conf b/cluster/ubuntu/minion/init_conf/kubelet.conf index d4ab3b0cdf2..7031073c1b1 100644 --- a/cluster/ubuntu/minion/init_conf/kubelet.conf +++ b/cluster/ubuntu/minion/init_conf/kubelet.conf @@ -3,9 +3,9 @@ author "@jainvipin" respawn -# start in conjunction with etcd -start on started etcd -stop on stopping etcd +# start in conjunction with flanneld +start on started flanneld +stop on stopping flanneld pre-start script # see also https://github.com/jainvipin/kubernetes-ubuntu-start diff --git a/cluster/ubuntu/minion/init_scripts/etcd b/cluster/ubuntu/minion/init_scripts/etcd deleted file mode 100755 index 16fda422fb4..00000000000 --- a/cluster/ubuntu/minion/init_scripts/etcd +++ /dev/null @@ -1,100 +0,0 @@ -#!/bin/sh -set -e - -### BEGIN INIT INFO -# Provides: etcd -# Required-Start: $docker -# Required-Stop: -# Should-Start: -# Should-Stop: -# Default-Start: -# Default-Stop: -# Short-Description: Start distrubted key/value pair service -# Description: -# http://www.github.com/coreos/etcd -### END INIT INFO - -export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/opt/bin: - -BASE=$(basename $0) - -# modify these in /etc/default/$BASE (/etc/default/etcd) -ETCD=/opt/bin/$BASE -# This is the pid file managed by etcd itself -ETCD_PIDFILE=/var/run/$BASE.pid -ETCD_LOGFILE=/var/log/$BASE.log -ETCD_OPTS="" -ETCD_DESC="Etcd" - -# Get lsb functions -. /lib/lsb/init-functions - -if [ -f /etc/default/$BASE ]; then - . 
/etc/default/$BASE -fi - -# see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it) -if false && [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then - log_failure_msg "$ETCD_DESC is managed via upstart, try using service $BASE $1" - exit 1 -fi - -# Check etcd is present -if [ ! -x $ETCD ]; then - log_failure_msg "$ETCD not present or not executable" - exit 1 -fi - -fail_unless_root() { - if [ "$(id -u)" != '0' ]; then - log_failure_msg "$ETCD_DESC must be run as root" - exit 1 - fi -} - -ETCD_START="start-stop-daemon \ ---start \ ---background \ ---quiet \ ---exec $ETCD \ ---make-pidfile \ ---pidfile $ETCD_PIDFILE \ --- $ETCD_OPTS \ ->> $ETCD_LOGFILE 2>&1" - -ETCD_STOP="start-stop-daemon \ ---stop \ ---pidfile $ETCD_PIDFILE" - -case "$1" in - start) - fail_unless_root - log_begin_msg "Starting $ETCD_DESC: $BASE" - $ETCD_START - log_end_msg $? - ;; - - stop) - fail_unless_root - log_begin_msg "Stopping $ETCD_DESC: $BASE" - $ETCD_STOP - log_end_msg $? - ;; - - restart | force-reload) - fail_unless_root - log_begin_msg "Restarting $ETCD_DESC: $BASE" - $ETCD_STOP - $ETCD_START - log_end_msg $? - ;; - - status) - status_of_proc -p "$ETCD_PIDFILE" "$ETCD" "$ETCD_DESC" - ;; - - *) - echo "Usage: $0 {start|stop|restart|status}" - exit 1 - ;; -esac diff --git a/cluster/ubuntu/minion/init_scripts/kubelet b/cluster/ubuntu/minion/init_scripts/kubelet index 6fbfc362603..5c3540c2c1c 100755 --- a/cluster/ubuntu/minion/init_scripts/kubelet +++ b/cluster/ubuntu/minion/init_scripts/kubelet @@ -24,7 +24,7 @@ KUBELET=/opt/bin/$BASE KUBELET_PIDFILE=/var/run/$BASE.pid KUBELET_LOGFILE=/var/log/$BASE.log KUBELET_OPTS="" -KUBELET_DESC="Kube-Apiserver" +KUBELET_DESC="Kubelet" # Get lsb functions . /lib/lsb/init-functions diff --git a/cluster/ubuntu/reconfDocker.sh b/cluster/ubuntu/reconfDocker.sh index e0c1dc8db8f..067f46cea1f 100755 --- a/cluster/ubuntu/reconfDocker.sh +++ b/cluster/ubuntu/reconfDocker.sh @@ -21,33 +21,48 @@ if [ "$(id -u)" != "0" ]; then exit 1 fi -source ~/kube/config-default.sh -attempt=0 -while true; do - /opt/bin/etcdctl get /coreos.com/network/config - if [[ "$?" == 0 ]]; then - break - else - # enough timeout?? - if (( attempt > 600 )); then - echo "timeout for waiting network config" > ~/kube/err.log - exit 2 +function config_etcd { + + source ~/kube/config-default.sh + + attempt=0 + while true; do + /opt/bin/etcdctl get /coreos.com/network/config + if [[ "$?" == 0 ]]; then + break + else + # enough timeout?? 
+ if (( attempt > 600 )); then + echo "timeout for waiting network config" > ~/kube/err.log + exit 2 + fi + + /opt/bin/etcdctl mk /coreos.com/network/config "{\"Network\":\"${FLANNEL_NET}\"}" + attempt=$((attempt+1)) + sleep 3 fi + done +} - /opt/bin/etcdctl mk /coreos.com/network/config "{\"Network\":\"${FLANNEL_NET}\"}" - attempt=$((attempt+1)) - sleep 3 - fi -done +function restart_docker { + #wait some secs for /run/flannel/subnet.env ready + sleep 15 + sudo ip link set dev docker0 down + sudo brctl delbr docker0 -#wait some secs for /run/flannel/subnet.env ready -sleep 15 -sudo ip link set dev docker0 down -sudo brctl delbr docker0 + source /run/flannel/subnet.env -source /run/flannel/subnet.env + echo DOCKER_OPTS=\"${DOCKER_OPTS} -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock \ + --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}\" > /etc/default/docker + sudo service docker restart +} -echo DOCKER_OPTS=\"${DOCKER_OPTS} -H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock \ - --bip=${FLANNEL_SUBNET} --mtu=${FLANNEL_MTU}\" > /etc/default/docker -sudo service docker restart +if [[ $1 == "i" ]]; then + restart_docker +elif [[ $1 == "ai" ]]; then + config_etcd + restart_docker +elif [[ $1 == "a" ]]; then + config_etcd +fi diff --git a/cluster/ubuntu/util.sh b/cluster/ubuntu/util.sh index 61e5e8dc85b..b5c98fb5ff2 100755 --- a/cluster/ubuntu/util.sh +++ b/cluster/ubuntu/util.sh @@ -21,7 +21,6 @@ SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=E # use an array to record name and ip declare -A mm -CLUSTER="" MASTER="" MASTER_IP="" MINION_IPS="" @@ -44,28 +43,18 @@ function setClusterInfo() { MINION_IPS="" ii=0 - for i in $nodes - do - name="infra"$ii + for i in $nodes; do nodeIP=${i#*@} - item="$name=http://$nodeIP:2380" - if [ "$ii" == 0 ]; then - CLUSTER=$item - else - CLUSTER="$CLUSTER,$item" - fi - mm[$nodeIP]=$name - - if [ "${roles[${ii}]}" == "ai" ]; then + if [[ "${roles[${ii}]}" == "ai" ]]; then MASTER_IP=$nodeIP MASTER=$i MINION_IPS="$nodeIP" - elif [ "${roles[${ii}]}" == "a" ]; then + elif [[ "${roles[${ii}]}" == "a" ]]; then MASTER_IP=$nodeIP MASTER=$i - elif [ "${roles[${ii}]}" == "i" ]; then - if [ -z "${MINION_IPS}" ];then + elif [[ "${roles[${ii}]}" == "i" ]]; then + if [[ -z "${MINION_IPS}" ]];then MINION_IPS="$nodeIP" else MINION_IPS="$MINION_IPS,$nodeIP" @@ -191,12 +180,9 @@ function verify-minion(){ function create-etcd-opts(){ cat < ~/kube/default/etcd -ETCD_OPTS="-name $1 \ - -initial-advertise-peer-urls http://$2:2380 \ - -listen-peer-urls http://$2:2380 \ - -initial-cluster-token etcd-cluster-1 \ - -initial-cluster $3 \ - -initial-cluster-state new" +ETCD_OPTS="-name infra +-listen-client-urls http://0.0.0.0:4001 \ +-advertise-client-urls http://127.0.0.1:4001" EOF } @@ -256,7 +242,7 @@ EOF function create-flanneld-opts(){ cat < ~/kube/default/flanneld -FLANNEL_OPTS="" +FLANNEL_OPTS="--etcd-endpoints=http://${1}:4001" EOF } @@ -324,10 +310,10 @@ function kube-up() { { if [ "${roles[${ii}]}" == "a" ]; then provision-master - elif [ "${roles[${ii}]}" == "i" ]; then - provision-minion $i elif [ "${roles[${ii}]}" == "ai" ]; then provision-masterandminion + elif [ "${roles[${ii}]}" == "i" ]; then + provision-minion $i else echo "unsupported role for ${i}. 
please check" exit 1 @@ -356,21 +342,22 @@ function provision-master() { echo "Deploying master on machine ${MASTER_IP}" echo ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default" - scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/binaries/master/ "${MASTER}:~/kube" + scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/reconfDocker.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/binaries/master/ "${MASTER}:~/kube" # remote login to MASTER and use sudo to configue k8s master ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \ setClusterInfo; \ - create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \ + create-etcd-opts; \ create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}" "${SERVICE_NODE_PORT_RANGE}"; \ create-kube-controller-manager-opts "${MINION_IPS}"; \ create-kube-scheduler-opts; \ - create-flanneld-opts; \ + create-flanneld-opts "127.0.0.1"; \ sudo -p '[sudo] password to copy files and start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ ;\ sudo groupadd -f -r kube-cert; \ sudo ~/kube/make-ca-cert.sh ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \ sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/; \ - sudo service etcd start;" + sudo service etcd start; \ + sudo FLANNEL_NET=${FLANNEL_NET} -b ~/kube/reconfDocker.sh "a";" } function provision-minion() { @@ -383,14 +370,13 @@ function provision-minion() { # remote login to MASTER and use sudo to configue k8s master ssh $SSH_OPTS -t $1 "source ~/kube/util.sh; \ setClusterInfo; \ - create-etcd-opts "${mm[${1#*@}]}" "${1#*@}" "${CLUSTER}"; \ - create-kubelet-opts "${1#*@}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}"; + create-kubelet-opts "${1#*@}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}"; \ create-kube-proxy-opts "${MASTER_IP}"; \ - create-flanneld-opts; \ + create-flanneld-opts "${MASTER_IP}"; \ sudo -p '[sudo] password to copy files and start minion: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \ && sudo mkdir -p /opt/bin/ && sudo cp ~/kube/minion/* /opt/bin; \ - sudo service etcd start; \ - sudo FLANNEL_NET=${FLANNEL_NET} -b ~/kube/reconfDocker.sh" + sudo service flanneld start; \ + sudo -b ~/kube/reconfDocker.sh "i";" } function provision-masterandminion() { @@ -398,24 +384,25 @@ function provision-masterandminion() { echo "Deploying master and minion on machine ${MASTER_IP}" echo ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default" - scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/reconfDocker.sh ubuntu/minion/* ubuntu/binaries/master/ ubuntu/binaries/minion "${MASTER}:~/kube" + # scp order matters + scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/minion/* ubuntu/master/* ubuntu/reconfDocker.sh ubuntu/binaries/master/ ubuntu/binaries/minion "${MASTER}:~/kube" # remote login to the node and use sudo to configue k8s ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \ setClusterInfo; \ - create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \ + create-etcd-opts; \ create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}" 
"${SERVICE_NODE_PORT_RANGE}"; \ create-kube-controller-manager-opts "${MINION_IPS}"; \ create-kube-scheduler-opts; \ create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}"; create-kube-proxy-opts "${MASTER_IP}";\ - create-flanneld-opts; \ + create-flanneld-opts "127.0.0.1"; \ sudo -p '[sudo] password to copy files and start node: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ ; \ sudo groupadd -f -r kube-cert; \ sudo ~/kube/make-ca-cert.sh ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \ sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/ && sudo cp ~/kube/minion/* /opt/bin/; \ sudo service etcd start; \ - sudo FLANNEL_NET=${FLANNEL_NET} -b ~/kube/reconfDocker.sh" + sudo FLANNEL_NET=${FLANNEL_NET} -b ~/kube/reconfDocker.sh "ai";" } # Delete a kubernetes cluster @@ -423,15 +410,26 @@ function kube-down { KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}" + ii=0 for i in ${nodes}; do { echo "Cleaning on node ${i#*@}" - ssh -t $i 'pgrep etcd && sudo -p "[sudo] password for cleaning etcd data: " service etcd stop && sudo rm -rf /infra*' - # Delete the files in order to generate a clean environment, so you can change each node's role at next deployment. - ssh -t $i 'sudo rm -f /opt/bin/kube* /etc/init/kube* /etc/init.d/kube* /etc/default/kube*; sudo rm -rf ~/kube /var/lib/kubelet' + if [[ "${roles[${ii}]}" == "ai" || "${roles[${ii}]}" == "a" ]]; then + ssh -t $i 'pgrep etcd && sudo -p "[sudo] password for cleaning etcd data: " service etcd stop && sudo rm -rf /infra*; + sudo rm -rf /opt/bin/etcd* /etc/init/etcd.conf /etc/init.d/etcd /etc/default/etcd' + elif [[ "${roles[${ii}]}" == "i" ]]; then + ssh -t $i 'pgrep flanneld && sudo -p "[sudo] password for stopping flanneld: " service flanneld stop' + else + echo "unsupported role for ${i}" + fi + # Delete the files in order to generate a clean environment, so you can change each node's role at next deployment. + ssh -t $i 'sudo rm -f /opt/bin/kube* /opt/bin/flanneld; + sudo rm -rf /etc/init/kube* /etc/init/flanneld.conf /etc/init.d/kube* /etc/init.d/flanneld; + sudo rm -rf /etc/default/kube* /etc/default/flanneld; + sudo rm -rf ~/kube /var/lib/kubelet' } + ((ii=ii+1)) done - wait } # Update a kubernetes cluster with latest source From 9ccceac5e8e5811aff95e3147e3c057d1505037a Mon Sep 17 00:00:00 2001 From: Dai Zuozhuo Date: Fri, 21 Aug 2015 15:25:56 +0800 Subject: [PATCH 066/101] sort jsonpath_test results --- pkg/util/jsonpath/jsonpath_test.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/pkg/util/jsonpath/jsonpath_test.go b/pkg/util/jsonpath/jsonpath_test.go index 86384ac5867..fa181b34656 100644 --- a/pkg/util/jsonpath/jsonpath_test.go +++ b/pkg/util/jsonpath/jsonpath_test.go @@ -20,6 +20,9 @@ import ( "bytes" "encoding/json" "fmt" + "reflect" + "sort" + "strings" "testing" ) @@ -43,7 +46,12 @@ func testJSONPath(tests []jsonpathTest, t *testing.T) { t.Errorf("in %s, execute error %v", test.name, err) } out := buf.String() - if out != test.expect { + //since map is itereated in random order, we need to sort the results. 
+ sortedOut := strings.Fields(out) + sort.Strings(sortedOut) + sortedExpect := strings.Fields(test.expect) + sort.Strings(sortedExpect) + if !reflect.DeepEqual(sortedOut, sortedExpect) { t.Errorf(`in %s, expect to get "%s", got "%s"`, test.name, test.expect, out) } } From 2e2ef3e8309f7c46ab75147b27daa72294eafe4c Mon Sep 17 00:00:00 2001 From: Dai Zuozhuo Date: Fri, 21 Aug 2015 21:09:41 +0800 Subject: [PATCH 067/101] change -o template to -o go-template=... --- cluster/libvirt-coreos/util.sh | 2 +- .../salt/kube-addons/kube-addon-update.sh | 2 +- .../saltbase/salt/kube-addons/kube-addons.sh | 2 +- cluster/update-storage-objects.sh | 4 +-- cluster/vagrant/util.sh | 2 +- docs/man/man1/kubectl-config-view.1 | 9 ++++--- docs/man/man1/kubectl-expose.1 | 9 ++++--- docs/man/man1/kubectl-get.1 | 9 ++++--- docs/man/man1/kubectl-label.1 | 9 ++++--- docs/man/man1/kubectl-rolling-update.1 | 9 ++++--- docs/man/man1/kubectl-run.1 | 9 ++++--- docs/user-guide/compute-resources.md | 4 +-- docs/user-guide/kubectl/kubectl_annotate.md | 2 +- .../user-guide/kubectl/kubectl_config_view.md | 6 ++--- docs/user-guide/kubectl/kubectl_expose.md | 6 ++--- docs/user-guide/kubectl/kubectl_get.md | 8 +++--- docs/user-guide/kubectl/kubectl_label.md | 6 ++--- .../kubectl/kubectl_rolling-update.md | 6 ++--- docs/user-guide/kubectl/kubectl_run.md | 6 ++--- docs/user-guide/production-pods.md | 4 +-- docs/user-guide/walkthrough/README.md | 2 +- docs/user-guide/walkthrough/k8s201.md | 4 +-- examples/k8petstore/k8petstore-nodeport.sh | 8 +++--- hack/lib/test.sh | 4 +-- hack/test-cmd.sh | 4 +-- pkg/kubectl/cmd/get.go | 2 +- pkg/kubectl/cmd/util/printing.go | 13 +++++++-- pkg/kubectl/resource_printer.go | 18 ++++++++++--- pkg/util/jsonpath/jsonpath_test.go | 27 +++++++++++++++++-- test/e2e/util.go | 2 +- 30 files changed, 124 insertions(+), 74 deletions(-) diff --git a/cluster/libvirt-coreos/util.sh b/cluster/libvirt-coreos/util.sh index 3a520b346f7..fec9bdab667 100644 --- a/cluster/libvirt-coreos/util.sh +++ b/cluster/libvirt-coreos/util.sh @@ -195,7 +195,7 @@ function wait-cluster-readiness { local timeout=120 while [[ $timeout -ne 0 ]]; do - nb_ready_minions=$("${kubectl}" get nodes -o template -t "{{range.items}}{{range.status.conditions}}{{.type}}{{end}}:{{end}}" --api-version=v1 2>/dev/null | tr ':' '\n' | grep -c Ready || true) + nb_ready_minions=$("${kubectl}" get nodes -o go-template="{{range.items}}{{range.status.conditions}}{{.type}}{{end}}:{{end}}" --api-version=v1 2>/dev/null | tr ':' '\n' | grep -c Ready || true) echo "Nb ready minions: $nb_ready_minions / $NUM_MINIONS" if [[ "$nb_ready_minions" -eq "$NUM_MINIONS" ]]; then return 0 diff --git a/cluster/saltbase/salt/kube-addons/kube-addon-update.sh b/cluster/saltbase/salt/kube-addons/kube-addon-update.sh index 0caa4c0a8b8..6b61b9ad11f 100755 --- a/cluster/saltbase/salt/kube-addons/kube-addon-update.sh +++ b/cluster/saltbase/salt/kube-addons/kube-addon-update.sh @@ -198,7 +198,7 @@ function run-until-success() { # returns a list of / pairs (nsnames) function get-addon-nsnames-from-server() { local -r obj_type=$1 - "${KUBECTL}" get "${obj_type}" --all-namespaces -o template -t "{{range.items}}{{.metadata.namespace}}/{{.metadata.name}} {{end}}" --api-version=v1 -l kubernetes.io/cluster-service=true + "${KUBECTL}" get "${obj_type}" --all-namespaces -o go-template="{{range.items}}{{.metadata.namespace}}/{{.metadata.name}} {{end}}" --api-version=v1 -l kubernetes.io/cluster-service=true } # returns the characters after the last separator (including) diff --git 
a/cluster/saltbase/salt/kube-addons/kube-addons.sh b/cluster/saltbase/salt/kube-addons/kube-addons.sh index 45b1ee42b2a..c9fd9a0ea90 100644 --- a/cluster/saltbase/salt/kube-addons/kube-addons.sh +++ b/cluster/saltbase/salt/kube-addons/kube-addons.sh @@ -174,7 +174,7 @@ start_addon /etc/kubernetes/addons/namespace.yaml 100 10 "" & token_found="" while [ -z "${token_found}" ]; do sleep .5 - token_found=$(${KUBECTL} get --namespace="${SYSTEM_NAMESPACE}" serviceaccount default -o template -t "{{with index .secrets 0}}{{.name}}{{end}}" || true) + token_found=$(${KUBECTL} get --namespace="${SYSTEM_NAMESPACE}" serviceaccount default -o go-template="{{with index .secrets 0}}{{.name}}{{end}}" || true) done echo "== default service account in the ${SYSTEM_NAMESPACE} namespace has token ${token_found} ==" diff --git a/cluster/update-storage-objects.sh b/cluster/update-storage-objects.sh index e4c0c830452..d6789929bea 100755 --- a/cluster/update-storage-objects.sh +++ b/cluster/update-storage-objects.sh @@ -49,7 +49,7 @@ declare -a resources=( ) # Find all the namespaces. -namespaces=( $("${KUBECTL}" get namespaces -o template -t "{{range.items}}{{.metadata.name}} {{end}}")) +namespaces=( $("${KUBECTL}" get namespaces -o go-template="{{range.items}}{{.metadata.name}} {{end}}")) if [ -z "${namespaces:-}" ] then echo "Unexpected: No namespace found. Nothing to do." @@ -59,7 +59,7 @@ for resource in "${resources[@]}" do for namespace in "${namespaces[@]}" do - instances=( $("${KUBECTL}" get "${resource}" --namespace="${namespace}" -o template -t "{{range.items}}{{.metadata.name}} {{end}}")) + instances=( $("${KUBECTL}" get "${resource}" --namespace="${namespace}" -o go-template="{{range.items}}{{.metadata.name}} {{end}}")) # Nothing to do if there is no instance of that resource. if [[ -z "${instances:-}" ]] then diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh index f0803411d37..f02097912e4 100644 --- a/cluster/vagrant/util.sh +++ b/cluster/vagrant/util.sh @@ -238,7 +238,7 @@ function verify-cluster { local count="0" until [[ "$count" == "1" ]]; do local minions - minions=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o template --template '{{range.items}}{{.metadata.name}}:{{end}}' --api-version=v1) + minions=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o go-template='{{range.items}}{{.metadata.name}}:{{end}}' --api-version=v1) count=$(echo $minions | grep -c "${MINION_IPS[i]}") || { printf "." sleep 2 diff --git a/docs/man/man1/kubectl-config-view.1 b/docs/man/man1/kubectl-config-view.1 index 25ab0462cac..5ada9c8bb33 100644 --- a/docs/man/man1/kubectl-config-view.1 +++ b/docs/man/man1/kubectl-config-view.1 @@ -38,7 +38,9 @@ You can use \-\-output=template \-\-template=TEMPLATE to extract specific values .PP \fB\-o\fP, \fB\-\-output\fP="" - Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. + Output format. One of: json|yaml|wide|name|go\-template=...|go\-template\-file=...|jsonpath=...|jsonpath\-file=... See golang template [ +\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]] and jsonpath template [ +\[la]http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md\[ra]]. .PP \fB\-\-output\-version\fP="" @@ -58,9 +60,8 @@ You can use \-\-output=template \-\-template=TEMPLATE to extract specific values .PP \fB\-t\fP, \fB\-\-template\fP="" - Template string or path to template file to use when \-o=template, \-o=templatefile or \-o=jsonpath. The template format is golang templates [ -\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]]. 
The jsonpath template is composed of jsonpath expressions enclosed by {} [ -\[la]http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md\[ra]] + Template string or path to template file to use when \-o=go\-template, \-o=go\-template\-file. The template format is golang templates [ +\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]]. .SH OPTIONS INHERITED FROM PARENT COMMANDS diff --git a/docs/man/man1/kubectl-expose.1 b/docs/man/man1/kubectl-expose.1 index d148ffcaf0d..513310db4f2 100644 --- a/docs/man/man1/kubectl-expose.1 +++ b/docs/man/man1/kubectl-expose.1 @@ -60,7 +60,9 @@ re\-use the labels from the resource it exposes. .PP \fB\-o\fP, \fB\-\-output\fP="" - Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. + Output format. One of: json|yaml|wide|name|go\-template=...|go\-template\-file=...|jsonpath=...|jsonpath\-file=... See golang template [ +\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]] and jsonpath template [ +\[la]http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md\[ra]]. .PP \fB\-\-output\-version\fP="" @@ -100,9 +102,8 @@ re\-use the labels from the resource it exposes. .PP \fB\-t\fP, \fB\-\-template\fP="" - Template string or path to template file to use when \-o=template, \-o=templatefile or \-o=jsonpath. The template format is golang templates [ -\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]]. The jsonpath template is composed of jsonpath expressions enclosed by {} [ -\[la]http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md\[ra]] + Template string or path to template file to use when \-o=go\-template, \-o=go\-template\-file. The template format is golang templates [ +\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]]. .PP \fB\-\-type\fP="" diff --git a/docs/man/man1/kubectl-get.1 b/docs/man/man1/kubectl-get.1 index 3c94adeb246..63ecbb104d6 100644 --- a/docs/man/man1/kubectl-get.1 +++ b/docs/man/man1/kubectl-get.1 @@ -45,7 +45,9 @@ of the \-\-template flag, you can filter the attributes of the fetched resource( .PP \fB\-o\fP, \fB\-\-output\fP="" - Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. + Output format. One of: json|yaml|wide|name|go\-template=...|go\-template\-file=...|jsonpath=...|jsonpath\-file=... See golang template [ +\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]] and jsonpath template [ +\[la]http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md\[ra]]. .PP \fB\-\-output\-version\fP="" @@ -65,9 +67,8 @@ of the \-\-template flag, you can filter the attributes of the fetched resource( .PP \fB\-t\fP, \fB\-\-template\fP="" - Template string or path to template file to use when \-o=template, \-o=templatefile or \-o=jsonpath. The template format is golang templates [ -\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]]. The jsonpath template is composed of jsonpath expressions enclosed by {} [ -\[la]http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md\[ra]] + Template string or path to template file to use when \-o=go\-template, \-o=go\-template\-file. The template format is golang templates [ +\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]]. .PP \fB\-w\fP, \fB\-\-watch\fP=false diff --git a/docs/man/man1/kubectl-label.1 b/docs/man/man1/kubectl-label.1 index 98cf53ae948..56fadc3670a 100644 --- a/docs/man/man1/kubectl-label.1 +++ b/docs/man/man1/kubectl-label.1 @@ -40,7 +40,9 @@ If \-\-resource\-version is specified, then updates will use this resource versi .PP \fB\-o\fP, \fB\-\-output\fP="" - Output format. 
One of: json|yaml|template|templatefile|wide|jsonpath|name. + Output format. One of: json|yaml|wide|name|go\-template=...|go\-template\-file=...|jsonpath=...|jsonpath\-file=... See golang template [ +\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]] and jsonpath template [ +\[la]http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md\[ra]]. .PP \fB\-\-output\-version\fP="" @@ -68,9 +70,8 @@ If \-\-resource\-version is specified, then updates will use this resource versi .PP \fB\-t\fP, \fB\-\-template\fP="" - Template string or path to template file to use when \-o=template, \-o=templatefile or \-o=jsonpath. The template format is golang templates [ -\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]]. The jsonpath template is composed of jsonpath expressions enclosed by {} [ -\[la]http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md\[ra]] + Template string or path to template file to use when \-o=go\-template, \-o=go\-template\-file. The template format is golang templates [ +\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]]. .SH OPTIONS INHERITED FROM PARENT COMMANDS diff --git a/docs/man/man1/kubectl-rolling-update.1 b/docs/man/man1/kubectl-rolling-update.1 index 32ae729db93..03cf264dc5e 100644 --- a/docs/man/man1/kubectl-rolling-update.1 +++ b/docs/man/man1/kubectl-rolling-update.1 @@ -44,7 +44,9 @@ existing replication controller and overwrite at least one (common) label in its .PP \fB\-o\fP, \fB\-\-output\fP="" - Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. + Output format. One of: json|yaml|wide|name|go\-template=...|go\-template\-file=...|jsonpath=...|jsonpath\-file=... See golang template [ +\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]] and jsonpath template [ +\[la]http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md\[ra]]. .PP \fB\-\-output\-version\fP="" @@ -68,9 +70,8 @@ existing replication controller and overwrite at least one (common) label in its .PP \fB\-t\fP, \fB\-\-template\fP="" - Template string or path to template file to use when \-o=template, \-o=templatefile or \-o=jsonpath. The template format is golang templates [ -\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]]. The jsonpath template is composed of jsonpath expressions enclosed by {} [ -\[la]http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md\[ra]] + Template string or path to template file to use when \-o=go\-template, \-o=go\-template\-file. The template format is golang templates [ +\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]]. .PP \fB\-\-timeout\fP=5m0s diff --git a/docs/man/man1/kubectl-run.1 b/docs/man/man1/kubectl-run.1 index 5df0bb6c189..c332c0adfd0 100644 --- a/docs/man/man1/kubectl-run.1 +++ b/docs/man/man1/kubectl-run.1 @@ -52,7 +52,9 @@ Creates a replication controller to manage the created container(s). .PP \fB\-o\fP, \fB\-\-output\fP="" - Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. + Output format. One of: json|yaml|wide|name|go\-template=...|go\-template\-file=...|jsonpath=...|jsonpath\-file=... See golang template [ +\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]] and jsonpath template [ +\[la]http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md\[ra]]. .PP \fB\-\-output\-version\fP="" @@ -88,9 +90,8 @@ Creates a replication controller to manage the created container(s). .PP \fB\-t\fP, \fB\-\-template\fP="" - Template string or path to template file to use when \-o=template, \-o=templatefile or \-o=jsonpath. 
The template format is golang templates [ -\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]]. The jsonpath template is composed of jsonpath expressions enclosed by {} [ -\[la]http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md\[ra]] + Template string or path to template file to use when \-o=go\-template, \-o=go\-template\-file. The template format is golang templates [ +\[la]http://golang.org/pkg/text/template/#pkg-overview\[ra]]. .PP \fB\-\-tty\fP=false diff --git a/docs/user-guide/compute-resources.md b/docs/user-guide/compute-resources.md index 0ff1442daa4..7ee8a730730 100644 --- a/docs/user-guide/compute-resources.md +++ b/docs/user-guide/compute-resources.md @@ -250,10 +250,10 @@ Events: The `Restart Count: 5` indicates that the `simmemleak` container in this pod was terminated and restarted 5 times. -You can call `get pod` with the `-o template -t ...` option to fetch the status of previously terminated containers: +You can call `get pod` with the `-o go-template=...` option to fetch the status of previously terminated containers: ```console -[13:59:01] $ ./cluster/kubectl.sh get pod -o template -t '{{range.status.containerStatuses}}{{"Container Name: "}}{{.name}}{{"\r\nLastState: "}}{{.lastState}}{{end}}' simmemleak-60xbc +[13:59:01] $ ./cluster/kubectl.sh get pod -o go-template='{{range.status.containerStatuses}}{{"Container Name: "}}{{.name}}{{"\r\nLastState: "}}{{.lastState}}{{end}}' simmemleak-60xbc Container Name: simmemleak LastState: map[terminated:map[exitCode:137 reason:OOM Killed startedAt:2015-07-07T20:58:43Z finishedAt:2015-07-07T20:58:43Z containerID:docker://0e4095bba1feccdfe7ef9fb6ebffe972b4b14285d5acdec6f0d3ae8a22fad8b2]][13:59:03] clusterScaleDoc ~/go/src/github.com/kubernetes/kubernetes $ ``` diff --git a/docs/user-guide/kubectl/kubectl_annotate.md b/docs/user-guide/kubectl/kubectl_annotate.md index b185ff2c5e5..21909136b72 100644 --- a/docs/user-guide/kubectl/kubectl_annotate.md +++ b/docs/user-guide/kubectl/kubectl_annotate.md @@ -119,7 +119,7 @@ $ kubectl annotate pods foo description- * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.474197531 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-02 06:24:17.720533039 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_annotate.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_view.md b/docs/user-guide/kubectl/kubectl_config_view.md index d92a26f1646..bbfadeb2d91 100644 --- a/docs/user-guide/kubectl/kubectl_config_view.md +++ b/docs/user-guide/kubectl/kubectl_config_view.md @@ -63,12 +63,12 @@ $ kubectl config view -o template --template='{{range .users}}{{ if eq .name "e2 --merge=true: merge together the full hierarchy of kubeconfig files --minify[=false]: remove all information not used by current-context from the output --no-headers[=false]: When using the default output, don't print headers. - -o, --output="": Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. + -o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md]. --output-version="": Output the formatted object with the given version (default api-version). 
--raw[=false]: display raw byte data -a, --show-all[=false]: When printing, show all resources (default hide terminated pods.) --sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string. - --template="": Template string or path to template file to use when -o=template, -o=templatefile or -o=jsonpath. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. The jsonpath template is composed of jsonpath expressions enclosed by {} [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md] + --template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. ``` ### Options inherited from parent commands @@ -103,7 +103,7 @@ $ kubectl config view -o template --template='{{range .users}}{{ if eq .name "e2 * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.474467216 +0000 UTC +###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.775349034 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_view.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_expose.md b/docs/user-guide/kubectl/kubectl_expose.md index 2d0531dada0..397de0db7a2 100644 --- a/docs/user-guide/kubectl/kubectl_expose.md +++ b/docs/user-guide/kubectl/kubectl_expose.md @@ -75,7 +75,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream -l, --labels="": Labels to apply to the service created by this call. --name="": The name for the newly created object. --no-headers[=false]: When using the default output, don't print headers. - -o, --output="": Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. + -o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md]. --output-version="": Output the formatted object with the given version (default api-version). --overrides="": An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field. --port=-1: The port that the service should serve on. Copied from the resource being exposed, if unspecified @@ -85,7 +85,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream -a, --show-all[=false]: When printing, show all resources (default hide terminated pods.) --sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string. --target-port="": Name or number for the port on the container that the service should direct traffic to. Optional. - --template="": Template string or path to template file to use when -o=template, -o=templatefile or -o=jsonpath. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. 
The jsonpath template is composed of jsonpath expressions enclosed by {} [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md] + --template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. --type="": Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is 'ClusterIP'. ``` @@ -121,7 +121,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.473647619 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-03 09:05:42.928698484 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_expose.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_get.md b/docs/user-guide/kubectl/kubectl_get.md index c9eed590858..659f6ce7b78 100644 --- a/docs/user-guide/kubectl/kubectl_get.md +++ b/docs/user-guide/kubectl/kubectl_get.md @@ -49,7 +49,7 @@ By specifying the output as 'template' and providing a Go template as the value of the --template flag, you can filter the attributes of the fetched resource(s). ``` -kubectl get [(-o|--output=)json|yaml|template|templatefile|wide|jsonpath|...] (TYPE [NAME | -l label] | TYPE/NAME ...) [flags] +kubectl get [(-o|--output=)json|yaml|wide|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=...] (TYPE [NAME | -l label] | TYPE/NAME ...) [flags] ``` ### Examples @@ -90,12 +90,12 @@ $ kubectl get rc/web service/frontend pods/web-pod-13je7 -f, --filename=[]: Filename, directory, or URL to a file identifying the resource to get from a server. -L, --label-columns=[]: Accepts a comma separated list of labels that are going to be presented as columns. Names are case-sensitive. You can also use multiple flag statements like -L label1 -L label2... --no-headers[=false]: When using the default output, don't print headers. - -o, --output="": Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. + -o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md]. --output-version="": Output the formatted object with the given version (default api-version). -l, --selector="": Selector (label query) to filter on -a, --show-all[=false]: When printing, show all resources (default hide terminated pods.) --sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string. - --template="": Template string or path to template file to use when -o=template, -o=templatefile or -o=jsonpath. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. The jsonpath template is composed of jsonpath expressions enclosed by {} [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md] + --template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. 
-w, --watch[=false]: After listing/getting the requested object, watch for changes. --watch-only[=false]: Watch for changes to the requested object(s), without listing/getting first. ``` @@ -132,7 +132,7 @@ $ kubectl get rc/web service/frontend pods/web-pod-13je7 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469014739 +0000 UTC +###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.761418557 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_get.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_label.md b/docs/user-guide/kubectl/kubectl_label.md index d521ae3e3b4..1b0ee3119bc 100644 --- a/docs/user-guide/kubectl/kubectl_label.md +++ b/docs/user-guide/kubectl/kubectl_label.md @@ -78,14 +78,14 @@ $ kubectl label pods foo bar- --dry-run[=false]: If true, only print the object that would be sent, without sending it. -f, --filename=[]: Filename, directory, or URL to a file identifying the resource to update the labels --no-headers[=false]: When using the default output, don't print headers. - -o, --output="": Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. + -o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md]. --output-version="": Output the formatted object with the given version (default api-version). --overwrite[=false]: If true, allow labels to be overwritten, otherwise reject label updates that overwrite existing labels. --resource-version="": If non-empty, the labels update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource. -l, --selector="": Selector (label query) to filter on -a, --show-all[=false]: When printing, show all resources (default hide terminated pods.) --sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string. - --template="": Template string or path to template file to use when -o=template, -o=templatefile or -o=jsonpath. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. The jsonpath template is composed of jsonpath expressions enclosed by {} [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md] + --template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. 
``` ### Options inherited from parent commands @@ -120,7 +120,7 @@ $ kubectl label pods foo bar- * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-04 23:19:55.649428669 +0000 UTC +###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.773776248 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_label.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_rolling-update.md b/docs/user-guide/kubectl/kubectl_rolling-update.md index 4d8e38bd48d..26aea54d971 100644 --- a/docs/user-guide/kubectl/kubectl_rolling-update.md +++ b/docs/user-guide/kubectl/kubectl_rolling-update.md @@ -74,13 +74,13 @@ $ kubectl rolling-update frontend --image=image:v2 -f, --filename=[]: Filename or URL to file to use to create the new replication controller. --image="": Image to use for upgrading the replication controller. Can not be used with --filename/-f --no-headers[=false]: When using the default output, don't print headers. - -o, --output="": Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. + -o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md]. --output-version="": Output the formatted object with the given version (default api-version). --poll-interval=3s: Time delay between polling for replication controller status after the update. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". --rollback[=false]: If true, this is a request to abort an existing rollout that is partially rolled out. It effectively reverses current and next and runs a rollout -a, --show-all[=false]: When printing, show all resources (default hide terminated pods.) --sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string. - --template="": Template string or path to template file to use when -o=template, -o=templatefile or -o=jsonpath. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. The jsonpath template is composed of jsonpath expressions enclosed by {} [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md] + --template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. --timeout=5m0s: Max time to wait for a replication controller to update before giving up. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". --update-period=1m0s: Time to wait between updating pods. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". 
--validate[=true]: If true, use a schema to validate the input before sending it @@ -118,7 +118,7 @@ $ kubectl rolling-update frontend --image=image:v2 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.470878033 +0000 UTC +###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.768458355 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_rolling-update.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_run.md b/docs/user-guide/kubectl/kubectl_run.md index ecfdcea2812..b9a664dd750 100644 --- a/docs/user-guide/kubectl/kubectl_run.md +++ b/docs/user-guide/kubectl/kubectl_run.md @@ -81,7 +81,7 @@ $ kubectl run nginx --image=nginx --command -- ... --image="": The image for the container to run. -l, --labels="": Labels to apply to the pod(s). --no-headers[=false]: When using the default output, don't print headers. - -o, --output="": Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name. + -o, --output="": Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md]. --output-version="": Output the formatted object with the given version (default api-version). --overrides="": An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field. --port=-1: The port that this container exposes. @@ -90,7 +90,7 @@ $ kubectl run nginx --image=nginx --command -- ... -a, --show-all[=false]: When printing, show all resources (default hide terminated pods.) --sort-by="": If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). The field in the API resource specified by this JSONPath expression must be an integer or a string. -i, --stdin[=false]: Keep stdin open on the container(s) in the pod, even if nothing is attached. - --template="": Template string or path to template file to use when -o=template, -o=templatefile or -o=jsonpath. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. The jsonpath template is composed of jsonpath expressions enclosed by {} [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md] + --template="": Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. --tty[=false]: Allocated a TTY for each container in the pod. Because -t is currently shorthand for --template, -t is not supported for --tty. This shorthand is deprecated and we expect to adopt -t for --tty soon. ``` @@ -126,7 +126,7 @@ $ kubectl run nginx --image=nginx --command -- ... 
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.472292491 +0000 UTC +###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.772003236 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_run.md?pixel)]() diff --git a/docs/user-guide/production-pods.md b/docs/user-guide/production-pods.md index 1e7c0219bf6..75bf31edd35 100644 --- a/docs/user-guide/production-pods.md +++ b/docs/user-guide/production-pods.md @@ -371,9 +371,9 @@ The message is recorded along with the other state of the last (i.e., most recen $ kubectl create -f ./pod.yaml pods/pod-w-message $ sleep 70 -$ kubectl get pods/pod-w-message -o template -t "{{range .status.containerStatuses}}{{.lastState.terminated.message}}{{end}}" +$ kubectl get pods/pod-w-message -o go-template="{{range .status.containerStatuses}}{{.lastState.terminated.message}}{{end}}" Sleep expired -$ kubectl get pods/pod-w-message -o template -t "{{range .status.containerStatuses}}{{.lastState.terminated.exitCode}}{{end}}" +$ kubectl get pods/pod-w-message -o go-template="{{range .status.containerStatuses}}{{.lastState.terminated.exitCode}}{{end}}" 0 ``` diff --git a/docs/user-guide/walkthrough/README.md b/docs/user-guide/walkthrough/README.md index fd5e5f92f01..0740e1674f2 100644 --- a/docs/user-guide/walkthrough/README.md +++ b/docs/user-guide/walkthrough/README.md @@ -108,7 +108,7 @@ On most providers, the pod IPs are not externally accessible. The easiest way to Provided the pod IP is accessible, you should be able to access its http endpoint with curl on port 80: ```sh -$ curl http://$(kubectl get pod nginx -o=template -t={{.status.podIP}}) +$ curl http://$(kubectl get pod nginx -o go-template={{.status.podIP}}) ``` Delete the pod by name: diff --git a/docs/user-guide/walkthrough/k8s201.md b/docs/user-guide/walkthrough/k8s201.md index 46b99f6d036..24f9c1d8001 100644 --- a/docs/user-guide/walkthrough/k8s201.md +++ b/docs/user-guide/walkthrough/k8s201.md @@ -217,8 +217,8 @@ On most providers, the service IPs are not externally accessible. The easiest wa Provided the service IP is accessible, you should be able to access its http endpoint with curl on port 80: ```console -$ export SERVICE_IP=$(kubectl get service nginx-service -o=template -t={{.spec.clusterIP}}) -$ export SERVICE_PORT=$(kubectl get service nginx-service -o=template '-t={{(index .spec.ports 0).port}}') +$ export SERVICE_IP=$(kubectl get service nginx-service -o go-template={{.spec.clusterIP}}) +$ export SERVICE_PORT=$(kubectl get service nginx-service -o go-template'={{(index .spec.ports 0).port}}') $ curl http://${SERVICE_IP}:${SERVICE_PORT} ``` diff --git a/examples/k8petstore/k8petstore-nodeport.sh b/examples/k8petstore/k8petstore-nodeport.sh index 07b2cd1d2db..0931b2d8be4 100755 --- a/examples/k8petstore/k8petstore-nodeport.sh +++ b/examples/k8petstore/k8petstore-nodeport.sh @@ -232,10 +232,10 @@ $kubectl create -f bps-load-gen-rc.json --namespace=$NS #Get the IP addresses of all Kubernetes nodes. function getIP { #currently this script is only tested on GCE. 
The following line may need to be updated if k8s is not running on a cloud platform - NODES_IP=$($kubectl get nodes -t='{{range .items}}{{range .status.addresses}}{{if or (eq .type "ExternalIP") (eq .type "LegacyHostIP")}}{{.address}}{{print "\n"}}{{end}}{{end}}{{end}}') - TEST_IP=$($kubectl get nodes -t='{{range (index .items 0).status.addresses}}{{if eq .type "ExternalIP"}}{{.address}}{{end}}{{end}}') + NODES_IP=$($kubectl get nodes -o go-template='{{range .items}}{{range .status.addresses}}{{if or (eq .type "ExternalIP") (eq .type "LegacyHostIP")}}{{.address}}{{print "\n"}}{{end}}{{end}}{{end}}') + TEST_IP=$($kubectl get nodes -o go-template='{{range (index .items 0).status.addresses}}{{if eq .type "ExternalIP"}}{{.address}}{{end}}{{end}}') if [ -z "$TEST_IP" ]; then - TEST_IP=$($kubectl get nodes -t='{{range (index .items 0).status.addresses}}{{if eq .type "LegacyHostIP"}}{{.address}}{{end}}{{end}}') + TEST_IP=$($kubectl get nodes -o go-template='{{range (index .items 0).status.addresses}}{{if eq .type "LegacyHostIP"}}{{.address}}{{end}}{{end}}') fi if [ -z "$NODES_IP" ]; then echo "Error: Can't get node's IP!!!" @@ -249,7 +249,7 @@ function getIP { } function getNodePort { -NODE_PORT=$($kubectl get services/frontend -t='{{(index .spec.ports 0).nodePort}}') +NODE_PORT=$($kubectl get services/frontend -o go-template='{{(index .spec.ports 0).nodePort}}') if [ -z "$NODE_PORT" ]; then echo "Error: Can't get NodePort of services/frontend!!!" exit 1 diff --git a/hack/lib/test.sh b/hack/lib/test.sh index 907c2ca0050..9cda34ea56b 100644 --- a/hack/lib/test.sh +++ b/hack/lib/test.sh @@ -31,7 +31,7 @@ kube::test::get_object_assert() { local request=$2 local expected=$3 - res=$(eval kubectl get "${kube_flags[@]}" $object -o template -t \"$request\") + res=$(eval kubectl get "${kube_flags[@]}" $object -o go-template=\"$request\") if [[ "$res" =~ ^$expected$ ]]; then echo -n ${green} @@ -56,7 +56,7 @@ kube::test::get_object_jsonpath_assert() { local request=$2 local expected=$3 - res=$(eval kubectl get "${kube_flags[@]}" $object -o jsonpath -t \"$request\") + res=$(eval kubectl get "${kube_flags[@]}" $object -o jsonpath=\"$request\") if [[ "$res" =~ ^$expected$ ]]; then echo -n ${green} diff --git a/hack/test-cmd.sh b/hack/test-cmd.sh index 7ae9c01a863..6209b39ebc2 100755 --- a/hack/test-cmd.sh +++ b/hack/test-cmd.sh @@ -163,14 +163,14 @@ runTests() { -s "http://127.0.0.1:${API_PORT}" --match-server-version ) - [ "$(kubectl get nodes -t '{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ] + [ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ] else kube_flags=( -s "http://127.0.0.1:${API_PORT}" --match-server-version --api-version="${version}" ) - [ "$(kubectl get nodes -t '{{ .apiVersion }}' "${kube_flags[@]}")" == "${version}" ] + [ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "${version}" ] fi id_field=".metadata.name" labels_field=".metadata.labels" diff --git a/pkg/kubectl/cmd/get.go b/pkg/kubectl/cmd/get.go index 097183c02fb..e0780a5b2bd 100644 --- a/pkg/kubectl/cmd/get.go +++ b/pkg/kubectl/cmd/get.go @@ -79,7 +79,7 @@ func NewCmdGet(f *cmdutil.Factory, out io.Writer) *cobra.Command { options := &GetOptions{} cmd := &cobra.Command{ - Use: "get [(-o|--output=)json|yaml|template|templatefile|wide|jsonpath|...] (TYPE [NAME | -l label] | TYPE/NAME ...) [flags]", + Use: "get [(-o|--output=)json|yaml|wide|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=...] (TYPE [NAME | -l label] | TYPE/NAME ...) 
[flags]", Short: "Display one or many resources", Long: get_long, Example: get_example, diff --git a/pkg/kubectl/cmd/util/printing.go b/pkg/kubectl/cmd/util/printing.go index 1d709e3bd69..5205998e39a 100644 --- a/pkg/kubectl/cmd/util/printing.go +++ b/pkg/kubectl/cmd/util/printing.go @@ -19,6 +19,7 @@ package util import ( "fmt" "io" + "strings" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/kubectl" @@ -28,12 +29,12 @@ import ( // AddPrinterFlags adds printing related flags to a command (e.g. output format, no headers, template path) func AddPrinterFlags(cmd *cobra.Command) { - cmd.Flags().StringP("output", "o", "", "Output format. One of: json|yaml|template|templatefile|wide|jsonpath|name.") + cmd.Flags().StringP("output", "o", "", "Output format. One of: json|yaml|wide|name|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md].") cmd.Flags().String("output-version", "", "Output the formatted object with the given version (default api-version).") cmd.Flags().Bool("no-headers", false, "When using the default output, don't print headers.") // template shorthand -t is deprecated to support -t for --tty // TODO: remove template flag shorthand -t - cmd.Flags().StringP("template", "t", "", "Template string or path to template file to use when -o=template, -o=templatefile or -o=jsonpath. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. The jsonpath template is composed of jsonpath expressions enclosed by {} [http://releases.k8s.io/HEAD/docs/user-guide/jsonpath.md]") + cmd.Flags().StringP("template", "t", "", "Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview].") cmd.Flags().MarkShorthandDeprecated("template", "please use --template instead") cmd.Flags().String("sort-by", "", "If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. 'ObjectMeta.Name'). 
The field in the API resource specified by this JSONPath expression must be an integer or a string.") cmd.Flags().BoolP("show-all", "a", false, "When printing, show all resources (default hide terminated pods.)") @@ -94,6 +95,14 @@ func PrinterForCommand(cmd *cobra.Command) (kubectl.ResourcePrinter, bool, error outputFormat = "template" } + templateFormat := []string{"go-template=", "go-template-file=", "jsonpath=", "jsonpath-file="} + for _, format := range templateFormat { + if strings.HasPrefix(outputFormat, format) { + templateFile = outputFormat[len(format):] + outputFormat = format[:len(format)-1] + } + } + printer, generic, err := kubectl.GetPrinter(outputFormat, templateFile) if err != nil { return nil, generic, err diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index e95c859fbf0..fc9ea6b58dd 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -57,7 +57,7 @@ func GetPrinter(format, formatArgument string) (ResourcePrinter, bool, error) { printer = &YAMLPrinter{} case "name": printer = &NamePrinter{} - case "template": + case "template", "go-template": if len(formatArgument) == 0 { return nil, false, fmt.Errorf("template format specified but no template given") } @@ -66,7 +66,7 @@ func GetPrinter(format, formatArgument string) (ResourcePrinter, bool, error) { if err != nil { return nil, false, fmt.Errorf("error parsing template %s, %v\n", formatArgument, err) } - case "templatefile": + case "templatefile", "go-template-file": if len(formatArgument) == 0 { return nil, false, fmt.Errorf("templatefile format specified but no template file given") } @@ -80,13 +80,25 @@ func GetPrinter(format, formatArgument string) (ResourcePrinter, bool, error) { } case "jsonpath": if len(formatArgument) == 0 { - return nil, false, fmt.Errorf("jsonpath format specified but no jsonpath template given") + return nil, false, fmt.Errorf("jsonpath template format specified but no template given") } var err error printer, err = NewJSONPathPrinter(formatArgument) if err != nil { return nil, false, fmt.Errorf("error parsing jsonpath %s, %v\n", formatArgument, err) } + case "jsonpath-file": + if len(formatArgument) == 0 { + return nil, false, fmt.Errorf("jsonpath file format specified but no template file file given") + } + data, err := ioutil.ReadFile(formatArgument) + if err != nil { + return nil, false, fmt.Errorf("error reading template %s, %v\n", formatArgument, err) + } + printer, err = NewJSONPathPrinter(string(data)) + if err != nil { + return nil, false, fmt.Errorf("error parsing template %s, %v\n", string(data), err) + } case "wide": fallthrough case "": diff --git a/pkg/util/jsonpath/jsonpath_test.go b/pkg/util/jsonpath/jsonpath_test.go index fa181b34656..9922a8fee21 100644 --- a/pkg/util/jsonpath/jsonpath_test.go +++ b/pkg/util/jsonpath/jsonpath_test.go @@ -34,6 +34,26 @@ type jsonpathTest struct { } func testJSONPath(tests []jsonpathTest, t *testing.T) { + for _, test := range tests { + j := New(test.name) + err := j.Parse(test.template) + if err != nil { + t.Errorf("in %s, parse %s error %v", test.name, test.template, err) + } + buf := new(bytes.Buffer) + err = j.Execute(buf, test.input) + if err != nil { + t.Errorf("in %s, execute error %v", test.name, err) + } + out := buf.String() + if out != test.expect { + t.Errorf(`in %s, expect to get "%s", got "%s"`, test.name, test.expect, out) + } + } +} + +// testJSONPathSortOutput test testcases related to map, the results may print in random order +func testJSONPathSortOutput(tests 
[]jsonpathTest, t *testing.T) { for _, test := range tests { j := New(test.name) err := j.Parse(test.template) @@ -218,8 +238,6 @@ func TestKubenates(t *testing.T) { `127.0.0.1 127.0.0.2 127.0.0.3`}, {"double range", "{range .items[*]}{range .status.addresses[*]}{.address}, {end}{end}", nodesData, `127.0.0.1, 127.0.0.2, 127.0.0.3, `}, - // TODO: fix & uncomment the case bellow (#13024) - // {"recursive name", "{..name}", nodesData, `127.0.0.1 127.0.0.2 myself e2e`}, {"item name", "{.items[*].metadata.name}", nodesData, `127.0.0.1 127.0.0.2`}, {"union nodes capacity", "{.items[*]['metadata.name', 'status.capacity']}", nodesData, `127.0.0.1 127.0.0.2 map[cpu:4] map[cpu:8]`}, @@ -228,4 +246,9 @@ func TestKubenates(t *testing.T) { {"user password", `{.users[?(@.name=="e2e")].user.password}`, &nodesData, "secret"}, } testJSONPath(nodesTests, t) + + randomPrintOrderTests := []jsonpathTest{ + {"recursive name", "{..name}", nodesData, `127.0.0.1 127.0.0.2 myself e2e`}, + } + testJSONPathSortOutput(randomPrintOrderTests, t) } diff --git a/test/e2e/util.go b/test/e2e/util.go index 02ffdb4c513..59f4157abf6 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -853,7 +853,7 @@ func cleanup(filePath string, ns string, selectors ...string) { if resources != "" { Failf("Resources left running after stop:\n%s", resources) } - pods := runKubectl("get", "pods", "-l", selector, nsArg, "-t", "{{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}") + pods := runKubectl("get", "pods", "-l", selector, nsArg, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}") if pods != "" { Failf("Pods left unterminated after stop:\n%s", pods) } From cf908df89c302bae90e8b4a686fff31539a4c9dd Mon Sep 17 00:00:00 2001 From: James DeFelice Date: Sun, 6 Sep 2015 00:31:39 +0000 Subject: [PATCH 068/101] add flags for initial executor cpu and memory resources --- contrib/mesos/pkg/scheduler/service/service.go | 15 ++++++++++----- hack/verify-flags/known-flags.txt | 2 ++ 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/contrib/mesos/pkg/scheduler/service/service.go b/contrib/mesos/pkg/scheduler/service/service.go index e898fcf72cb..3504238c2c8 100644 --- a/contrib/mesos/pkg/scheduler/service/service.go +++ b/contrib/mesos/pkg/scheduler/service/service.go @@ -75,9 +75,8 @@ const ( defaultReconcileInterval = 300 // 5m default task reconciliation interval defaultReconcileCooldown = 15 * time.Second defaultFrameworkName = "Kubernetes" - - executorCPUs = mresource.CPUShares(0.25) // initial CPU allocated for executor - executorMem = mresource.MegaBytes(64.0) // initial memory allocated for executor + defaultExecutorCPUs = mresource.CPUShares(0.25) // initial CPU allocated for executor + defaultExecutorMem = mresource.MegaBytes(128.0) // initial memory allocated for executor ) type SchedulerServer struct { @@ -97,6 +96,8 @@ type SchedulerServer struct { MesosAuthPrincipal string MesosAuthSecretFile string MesosCgroupPrefix string + MesosExecutorCPUs mresource.CPUShares + MesosExecutorMem mresource.MegaBytes Checkpoint bool FailoverTimeout float64 @@ -177,6 +178,8 @@ func NewSchedulerServer() *SchedulerServer { MesosCgroupPrefix: minioncfg.DefaultCgroupPrefix, MesosMaster: defaultMesosMaster, MesosUser: defaultMesosUser, + MesosExecutorCPUs: defaultExecutorCPUs, + MesosExecutorMem: defaultExecutorMem, ReconcileInterval: defaultReconcileInterval, ReconcileCooldown: defaultReconcileCooldown, 
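With the defaults above wired into `SchedulerServer`, the two flags registered in the next hunk let operators override the per-executor reservation at start-up. A minimal sketch of such an invocation, assuming the contrib/mesos `km` scheduler binary — the values shown are illustrative, not the patch defaults (0.25 CPU shares, 128 MB):

```sh
# Illustrative only: reserve more room for each Mesos executor than the defaults.
km scheduler \
  --mesos-master=${MESOS_MASTER} \
  --mesos-executor-cpus=0.5 \
  --mesos-executor-mem=256
```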
Checkpoint: true, @@ -221,6 +224,8 @@ func (s *SchedulerServer) addCoreFlags(fs *pflag.FlagSet) { fs.StringVar(&s.MesosAuthProvider, "mesos-authentication-provider", s.MesosAuthProvider, fmt.Sprintf("Authentication provider to use, default is SASL that supports mechanisms: %+v", mech.ListSupported())) fs.StringVar(&s.DockerCfgPath, "dockercfg-path", s.DockerCfgPath, "Path to a dockercfg file that will be used by the docker instance of the minions.") fs.StringVar(&s.MesosCgroupPrefix, "mesos-cgroup-prefix", s.MesosCgroupPrefix, "The cgroup prefix concatenated with MESOS_DIRECTORY must give the executor cgroup set by Mesos") + fs.Var(&s.MesosExecutorCPUs, "mesos-executor-cpus", "Initial CPU shares to allocate for each Mesos executor container.") + fs.Var(&s.MesosExecutorMem, "mesos-executor-mem", "Initial memory (MB) to allocate for each Mesos executor container.") fs.BoolVar(&s.Checkpoint, "checkpoint", s.Checkpoint, "Enable/disable checkpointing for the kubernetes-mesos framework.") fs.Float64Var(&s.FailoverTimeout, "failover-timeout", s.FailoverTimeout, fmt.Sprintf("Framework failover timeout, in sec.")) fs.UintVar(&s.DriverPort, "driver-port", s.DriverPort, "Port that the Mesos scheduler driver process should listen on.") @@ -452,8 +457,8 @@ func (s *SchedulerServer) prepareExecutorInfo(hks hyperkube.Interface) (*mesos.E } execInfo.Resources = []*mesos.Resource{ - mutil.NewScalarResource("cpus", float64(executorCPUs)+staticPodCPUs), - mutil.NewScalarResource("mem", float64(executorMem)+staticPodMem), + mutil.NewScalarResource("cpus", float64(s.MesosExecutorCPUs)+staticPodCPUs), + mutil.NewScalarResource("mem", float64(s.MesosExecutorMem)+staticPodMem), } // calculate ExecutorInfo hash to be used for validating compatibility diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt index c53a47838f5..26a3763ebae 100644 --- a/hack/verify-flags/known-flags.txt +++ b/hack/verify-flags/known-flags.txt @@ -158,6 +158,8 @@ mesos-authentication-principal mesos-authentication-provider mesos-authentication-secret-file mesos-cgroup-prefix +mesos-executor-cpus +mesos-executor-mem mesos-master mesos-role mesos-user From 33b4cb91c149c9cc56384e62b6e3d546ccb36897 Mon Sep 17 00:00:00 2001 From: qiaolei Date: Sun, 6 Sep 2015 18:54:23 +0800 Subject: [PATCH 069/101] Delete an useless line in liveness Delete an useless line in liveness: `mwielgus@mwielgusd:~/test/k2/kubernetes/examples/liveness$ kubectl get pods` --- docs/user-guide/liveness/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/user-guide/liveness/README.md b/docs/user-guide/liveness/README.md index f894a97aa17..fd467fdd00c 100644 --- a/docs/user-guide/liveness/README.md +++ b/docs/user-guide/liveness/README.md @@ -96,7 +96,6 @@ Check the status half a minute later, you will see the container restart count b ```console $ kubectl get pods -mwielgus@mwielgusd:~/test/k2/kubernetes/examples/liveness$ kubectl get pods NAME READY STATUS RESTARTS AGE [...] liveness-exec 1/1 Running 1 36s From 014fa231de63eaeefcbe081385261ee4bb5d52c7 Mon Sep 17 00:00:00 2001 From: Harry Zhang Date: Sun, 6 Sep 2015 18:28:18 +0800 Subject: [PATCH 070/101] Specify how to do NFS demo for other providers Many people asked me how to allow privileged mode for their own cluster, we need to specify that. 
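The README change below spells this out for non-salt providers. As a rough sketch — exact service names, flag files, and restart mechanics vary by provider, so treat these lines as illustrative rather than a definitive recipe — enabling privileged mode comes down to:

```sh
# Illustrative: the same flag is added to both components, which are then restarted.
kube-apiserver --allow-privileged=true
kubelet --allow-privileged=true
```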
--- examples/nfs/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/nfs/README.md b/examples/nfs/README.md index 6572215b06d..cd3ab7bc361 100644 --- a/examples/nfs/README.md +++ b/examples/nfs/README.md @@ -53,6 +53,8 @@ $ vi cluster/saltbase/pillar/privilege.sls allow_privileged: true ``` +For other non-salt based provider, you can set `--allow-privileged=true` for both api-server and kubelet, and then restart these components. + Rebuild the Kubernetes and spin up a cluster using your preferred KUBERNETES_PROVIDER. ### NFS server part From 442a22d114f4d45a821a9238a75a8a1552fefbc2 Mon Sep 17 00:00:00 2001 From: He Simei Date: Wed, 2 Sep 2015 10:24:46 +0800 Subject: [PATCH 071/101] tear down resources before tear down the cluster --- cluster/common.sh | 10 ++++++++++ cluster/ubuntu/util.sh | 3 +++ 2 files changed, 13 insertions(+) diff --git a/cluster/common.sh b/cluster/common.sh index 7a08180a8e2..3b86bfb1b8a 100755 --- a/cluster/common.sh +++ b/cluster/common.sh @@ -117,6 +117,15 @@ function clear-kubeconfig() { echo "Cleared config for ${CONTEXT} from ${KUBECONFIG}" } + +function tear_down_alive_resources() { + local kubectl="${KUBE_ROOT}/cluster/kubectl.sh" + "${kubectl}" delete rc --all + "${kubectl}" delete pods --all + "${kubectl}" delete svc --all + "${kubectl}" delete pvc --all +} + # Gets username, password for the current-context in kubeconfig, if they exist. # Assumed vars: # KUBECONFIG # if unset, defaults to global @@ -278,3 +287,4 @@ function tars_from_version() { exit 1 fi } + diff --git a/cluster/ubuntu/util.sh b/cluster/ubuntu/util.sh index b5c98fb5ff2..535ad3998a9 100755 --- a/cluster/ubuntu/util.sh +++ b/cluster/ubuntu/util.sh @@ -409,6 +409,9 @@ function provision-masterandminion() { function kube-down { KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source "${KUBE_ROOT}/cluster/ubuntu/${KUBE_CONFIG_FILE-"config-default.sh"}" + + source "${KUBE_ROOT}/cluster/common.sh" + tear_down_alive_resources ii=0 for i in ${nodes}; do From 365562c0b9e920a1f89472ffae80b9aab56821cb Mon Sep 17 00:00:00 2001 From: harry Date: Mon, 7 Sep 2015 14:16:45 +0800 Subject: [PATCH 072/101] Add default version to scripts --- docs/getting-started-guides/docker-multinode.md | 10 ++++++++-- docs/getting-started-guides/docker-multinode/master.sh | 4 ++-- docs/getting-started-guides/docker-multinode/worker.sh | 5 +++-- 3 files changed, 13 insertions(+), 6 deletions(-) diff --git a/docs/getting-started-guides/docker-multinode.md b/docs/getting-started-guides/docker-multinode.md index 135ae4a86e0..89e23702b5e 100644 --- a/docs/getting-started-guides/docker-multinode.md +++ b/docs/getting-started-guides/docker-multinode.md @@ -74,6 +74,14 @@ This pattern is necessary because the `flannel` daemon is responsible for settin all of the Docker containers created by Kubernetes. To achieve this, it must run outside of the _main_ Docker daemon. However, it is still useful to use containers for deployment and management, so we create a simpler _bootstrap_ daemon to achieve this. +You can specify k8s version on very node before install: + +``` +export K8S_VERSION= +``` + +Otherwise, we'll use latest `hyperkube` image as default k8s version. + ## Master Node The first step in the process is to initialize the master node. @@ -81,7 +89,6 @@ The first step in the process is to initialize the master node. 
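If you want a specific release rather than the fallback the scripts now default to (1.0.3 when `K8S_VERSION` is unset), export it before running either script; the value here is only an example:

```sh
export K8S_VERSION=1.0.3
```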
Clone the Kubernetes repo, and run [master.sh](docker-multinode/master.sh) on the master machine with root: ```sh -export K8S_VERSION= cd kubernetes/docs/getting-started-guides/docker-multinode/ ./master.sh ``` @@ -97,7 +104,6 @@ Once your master is up and running you can add one or more workers on different Clone the Kubernetes repo, and run [worker.sh](docker-multinode/worker.sh) on the worker machine with root: ```sh -export K8S_VERSION= export MASTER_IP= cd kubernetes/docs/getting-started-guides/docker-multinode/ ./worker.sh diff --git a/docs/getting-started-guides/docker-multinode/master.sh b/docs/getting-started-guides/docker-multinode/master.sh index 74e41324d0a..24e413faafe 100755 --- a/docs/getting-started-guides/docker-multinode/master.sh +++ b/docs/getting-started-guides/docker-multinode/master.sh @@ -27,8 +27,8 @@ fi # Make sure k8s version env is properly set if [ -z ${K8S_VERSION} ]; then - echo "Please export K8S_VERSION in your env" - exit 1 + K8S_VERSION="1.0.3" + echo "K8S_VERSION is not set, using default: ${K8S_VERSION}" else echo "k8s version is set to: ${K8S_VERSION}" fi diff --git a/docs/getting-started-guides/docker-multinode/worker.sh b/docs/getting-started-guides/docker-multinode/worker.sh index 14b1310d00c..7ad6cc53636 100755 --- a/docs/getting-started-guides/docker-multinode/worker.sh +++ b/docs/getting-started-guides/docker-multinode/worker.sh @@ -27,13 +27,14 @@ fi # Make sure k8s version env is properly set if [ -z ${K8S_VERSION} ]; then - echo "Please export K8S_VERSION in your env" - exit 1 + K8S_VERSION="1.0.3" + echo "K8S_VERSION is not set, using default: ${K8S_VERSION}" else echo "k8s version is set to: ${K8S_VERSION}" fi + # Run as root if [ "$(id -u)" != "0" ]; then echo >&2 "Please run as root" From 8efc62bccec3c999499739e5a2b18db9a78043af Mon Sep 17 00:00:00 2001 From: hurf Date: Wed, 26 Aug 2015 16:37:16 +0800 Subject: [PATCH 073/101] Simplify default output of "expose" command Use simple output string such as "rc nginx exposed" to show the result. Users can still use options like "-o yaml" to view detailed result. IP will be printed with detailed result now. --- pkg/kubectl/cmd/expose.go | 23 +++++++++++++++-------- pkg/kubectl/cmd/label.go | 5 ++--- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index 033dd517e5c..790683bd893 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -191,21 +191,28 @@ func RunExpose(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str } } + resourceMapper := &resource.Mapper{ObjectTyper: typer, RESTMapper: mapper, ClientMapper: f.ClientMapperForCommand()} + info, err = resourceMapper.InfoForObject(object) + if err != nil { + return err + } // TODO: extract this flag to a central location, when such a location exists. 
- if !cmdutil.GetFlagBool(cmd, "dry-run") { - resourceMapper := &resource.Mapper{ObjectTyper: typer, RESTMapper: mapper, ClientMapper: f.ClientMapperForCommand()} - info, err := resourceMapper.InfoForObject(object) - if err != nil { - return err - } + if cmdutil.GetFlagBool(cmd, "dry-run") { + fmt.Fprintln(out, "running in dry-run mode...") + } else { data, err := info.Mapping.Codec.Encode(object) if err != nil { return err } - _, err = resource.NewHelper(info.Client, info.Mapping).Create(namespace, false, data) + object, err = resource.NewHelper(info.Client, info.Mapping).Create(namespace, false, data) if err != nil { return err } } - return f.PrintObject(cmd, object, out) + outputFormat := cmdutil.GetFlagString(cmd, "output") + if outputFormat != "" { + return f.PrintObject(cmd, object, out) + } + cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "exposed") + return nil } diff --git a/pkg/kubectl/cmd/label.go b/pkg/kubectl/cmd/label.go index b5618ba25fe..44255881358 100644 --- a/pkg/kubectl/cmd/label.go +++ b/pkg/kubectl/cmd/label.go @@ -242,11 +242,10 @@ func RunLabel(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri } } outputFormat := cmdutil.GetFlagString(cmd, "output") - if outputFormat == "" { - cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "labeled") - } else { + if outputFormat != "" { return f.PrintObject(cmd, outputObj, out) } + cmdutil.PrintSuccess(mapper, false, out, info.Mapping.Resource, info.Name, "labeled") return nil }) } From de9614af37d3681f3978b5a3b68caa83b433e5e7 Mon Sep 17 00:00:00 2001 From: gmarek Date: Fri, 4 Sep 2015 16:26:31 +0200 Subject: [PATCH 074/101] Add an hack/update-all.sh script that run all update scripts --- hack/update-all.sh | 57 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100755 hack/update-all.sh diff --git a/hack/update-all.sh b/hack/update-all.sh new file mode 100755 index 00000000000..db7d59d6673 --- /dev/null +++ b/hack/update-all.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A single sciprt that runs a predefined set of update-* scripts, as they often go together. +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +source "${KUBE_ROOT}/cluster/kube-env.sh" + +SILENT=true + +while getopts ":v" opt; do + case $opt in + v) + SILENT=false + ;; + \?) + echo "Invalid flag: -$OPTARG" >&2 + exit 1 + ;; + esac +done + +if $SILENT ; then + echo "Running in the silent mode, run with -v if you want to see script logs." 
+fi + +BASH_TARGETS="generated-conversions + generated-deep-copies + generated-docs + generated-swagger-docs + swagger-spec" + +for t in $BASH_TARGETS +do + echo -e "Updating $t" + if $SILENT ; then + bash "$KUBE_ROOT/hack/update-$t.sh" 1> /dev/null || echo -e "${color_red}FAILED${color_norm}" + else + bash "$KUBE_ROOT/hack/update-$t.sh" + fi +done From e232840673fe9aa75889aa4d1f91748381945db7 Mon Sep 17 00:00:00 2001 From: gmarek Date: Thu, 3 Sep 2015 15:57:09 +0200 Subject: [PATCH 075/101] Add a verify-all script that runs most of 'verify' scripts --- hack/verify-all.sh | 82 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100755 hack/verify-all.sh diff --git a/hack/verify-all.sh b/hack/verify-all.sh new file mode 100755 index 00000000000..c097fe531c4 --- /dev/null +++ b/hack/verify-all.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +source "${KUBE_ROOT}/cluster/kube-env.sh" + +SILENT=true + +function is-excluded { + for e in $EXCLUDE; do + if [[ $1 -ef ${BASH_SOURCE} ]]; then + return + fi + if [[ $1 -ef "$KUBE_ROOT/hack/$e" ]]; then + return + fi + done + return 1 +} + +while getopts ":v" opt; do + case $opt in + v) + SILENT=false + ;; + \?) + echo "Invalid flag: -$OPTARG" >&2 + exit 1 + ;; + esac +done + +if $SILENT ; then + echo "Running in the silent mode, run with -v if you want to see script logs." 
+fi + +EXCLUDE="verify-godeps.sh" + +for t in `ls $KUBE_ROOT/hack/verify-*.sh` +do + if is-excluded $t ; then + echo "Skipping $t" + continue + fi + if $SILENT ; then + echo -e "Verifying $t" + bash "$t" &> /dev/null && echo -e "${color_green}SUCCESS${color_norm}" || echo -e "${color_red}FAILED${color_norm}" + else + bash "$t" || true + fi +done + +for t in `ls $KUBE_ROOT/hack/verify-*.py` +do + if is-excluded $t ; then + echo "Skipping $t" + continue + fi + if $SILENT ; then + echo -e "Verifying $t" + python "$t" &> /dev/null && echo -e "${color_green}SUCCESS${color_norm}" || echo -e "${color_red}FAILED${color_norm}" + else + python "$t" || true + fi +done \ No newline at end of file From d713826aa99d0c59b4ea769b983e22cdf6a0f988 Mon Sep 17 00:00:00 2001 From: feihujiang Date: Mon, 7 Sep 2015 14:28:26 +0800 Subject: [PATCH 076/101] Add pod name to error message when getting logs of a container --- pkg/kubelet/kubelet.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 630967784c7..243ccc8b446 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -2050,11 +2050,11 @@ func (kl *Kubelet) validateContainerStatus(podStatus *api.PodStatus, containerNa cStatus, found := api.GetContainerStatus(podStatus.ContainerStatuses, containerName) if !found { - return "", fmt.Errorf("container %q not found in pod", containerName) + return "", fmt.Errorf("container %q not found", containerName) } if previous { if cStatus.LastTerminationState.Terminated == nil { - return "", fmt.Errorf("previous terminated container %q not found in pod", containerName) + return "", fmt.Errorf("previous terminated container %q not found", containerName) } cID = cStatus.LastTerminationState.Terminated.ContainerID } else { @@ -2081,23 +2081,23 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName, tail stri pod, ok := kl.GetPodByName(namespace, name) if !ok { - return fmt.Errorf("unable to get logs for container %q in pod %q: unable to find pod", containerName, podFullName) + return fmt.Errorf("unable to get logs for container %q in pod %q namespace %q: unable to find pod", containerName, name, namespace) } podStatus, found := kl.statusManager.GetPodStatus(pod.UID) if !found { - return fmt.Errorf("failed to get status for pod %q", podFullName) + return fmt.Errorf("failed to get status for pod %q in namespace %q", name, namespace) } if err := kl.validatePodPhase(&podStatus); err != nil { // No log is available if pod is not in a "known" phase (e.g. Unknown). - return err + return fmt.Errorf("Pod %q in namespace %q : %v", name, namespace, err) } containerID, err := kl.validateContainerStatus(&podStatus, containerName, previous) if err != nil { // No log is available if the container status is missing or is in the // waiting state. - return err + return fmt.Errorf("Pod %q in namespace %q: %v", name, namespace, err) } return kl.containerRuntime.GetContainerLogs(pod, containerID, tail, follow, stdout, stderr) } From 41708fb0af125e33824e89f039abe7b433693d21 Mon Sep 17 00:00:00 2001 From: qiaolei Date: Sun, 6 Sep 2015 10:45:46 +0800 Subject: [PATCH 077/101] Add 'svc.' before 'cluster.local' and fix some typos 1. Add 'svc.' before 'cluster.local' 2. ```Terminating`` to `Terminating` 3. 
'DNS entry1' to 'DNS entry' --- docs/admin/namespaces.md | 6 +++--- docs/user-guide/namespaces.md | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/admin/namespaces.md b/docs/admin/namespaces.md index 7f597df8106..5ef8d286e9c 100644 --- a/docs/admin/namespaces.md +++ b/docs/admin/namespaces.md @@ -122,7 +122,7 @@ See [Admission control: Limit Range](../design/admission_control_limit_range.md) A namespace can be in one of two phases: * `Active` the namespace is in use - * ```Terminating`` the namespace is being deleted, and can not be used for new objects + * `Terminating` the namespace is being deleted, and can not be used for new objects See the [design doc](../design/namespaces.md#phases) for more details. @@ -166,8 +166,8 @@ This delete is asynchronous, so for a time you will see the namespace in the `Te ## Namespaces and DNS -When you create a [Service](../../docs/user-guide/services.md), it creates a corresponding [DNS entry](dns.md)1. -This entry is of the form `..cluster.local`, which means +When you create a [Service](../../docs/user-guide/services.md), it creates a corresponding [DNS entry](dns.md). +This entry is of the form `..svc.cluster.local`, which means that if a container just uses `` it will resolve to the service which is local to a namespace. This is useful for using the same configuration across multiple namespaces such as Development, Staging and Production. If you want to reach diff --git a/docs/user-guide/namespaces.md b/docs/user-guide/namespaces.md index ff0e94d6369..c1a63d14fbd 100644 --- a/docs/user-guide/namespaces.md +++ b/docs/user-guide/namespaces.md @@ -105,7 +105,7 @@ $ kubectl config set-context $(CONTEXT) --namespace= ## Namespaces and DNS When you create a [Service](services.md), it creates a corresponding [DNS entry](../admin/dns.md). -This entry is of the form `..cluster.local`, which means +This entry is of the form `..svc.cluster.local`, which means that if a container just uses `` it will resolve to the service which is local to a namespace. This is useful for using the same configuration across multiple namespaces such as Development, Staging and Production. If you want to reach From 02c30c5d61bb282a1e9faf4734ddf1437444f102 Mon Sep 17 00:00:00 2001 From: Marcin Wielgus Date: Mon, 7 Sep 2015 12:25:04 +0200 Subject: [PATCH 078/101] Update for scaling rules in HorizontalPodAutoscaler --- docs/proposals/horizontal-pod-autoscaler.md | 22 ++++---- .../horizontalpodautoscaler_controller.go | 53 +++++++++++++------ 2 files changed, 49 insertions(+), 26 deletions(-) diff --git a/docs/proposals/horizontal-pod-autoscaler.md b/docs/proposals/horizontal-pod-autoscaler.md index 6ae84532fc7..924988d29d6 100644 --- a/docs/proposals/horizontal-pod-autoscaler.md +++ b/docs/proposals/horizontal-pod-autoscaler.md @@ -200,16 +200,20 @@ and adjust the count of the Scale if needed to match the target The target number of pods will be calculated from the following formula: ``` -TargetNumOfPods = sum(CurrentPodsConsumption) / Target +TargetNumOfPods =ceil(sum(CurrentPodsConsumption) / Target) ``` -To make scaling more stable, scale-up will happen only when the floor of ```TargetNumOfPods``` is higher than -the current number, while scale-down will happen only when the ceiling of ```TargetNumOfPods``` is lower than -the current number. 
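For concreteness, with three pods consuming 700m, 650m and 600m CPU against a 500m target (illustrative numbers, not taken from the proposal), `sum(CurrentPodsConsumption) / Target = 1950m / 500m = 3.9`, so the formula yields `ceil(3.9) = 4` replicas; the per-pod average relative to the target is 1.3, which is the ratio the tolerance check introduced below is applied to. A minimal standalone sketch of that calculation, using names of its own rather than the controller's:

```go
package main

import (
	"fmt"
	"math"
)

// desiredReplicas is a hypothetical helper mirroring the proposal's rule:
// desired = ceil(sum(consumption) / target). It also returns the average
// per-pod usage ratio that the tolerance band is checked against.
func desiredReplicas(consumptionMilli []int64, targetMilli int64) (int, float64) {
	var sum int64
	for _, c := range consumptionMilli {
		sum += c
	}
	usageRatio := float64(sum) / float64(int64(len(consumptionMilli))*targetMilli)
	return int(math.Ceil(float64(sum) / float64(targetMilli))), usageRatio
}

func main() {
	// Three pods at 700m, 650m and 600m CPU against a 500m target:
	// desired = ceil(1950/500) = 4, usageRatio = 1950/1500 = 1.3.
	desired, ratio := desiredReplicas([]int64{700, 650, 600}, 500)
	fmt.Printf("desired=%d usageRatio=%.2f\n", desired, ratio)
}
```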
+Starting and stopping pods may introduce noise to the metrics (for instance starting may temporarily increase +CPU and decrease average memory consumption) so, after each action, the autoscaler should wait some time for reliable data. -The decision to scale-up will be executed instantly. -However, we will execute scale-down only if the sufficient time has passed from the last scale-up (e.g.: 10 minutes). -Such approach has two benefits: +Scale-up will happen if there was no rescaling within the last 3 minutes. +Scale-down will wait for 10 minutes from the last rescaling. Moreover any scaling will only be made if + +``` +avg(CurrentPodsConsumption) / Target +``` + +drops below 0.9 or increases above 1.1 (10% tolerance). Such approach has two benefits: * Autoscaler works in a conservative way. If new user load appears, it is important for us to rapidly increase the number of pods, @@ -218,10 +222,6 @@ Such approach has two benefits: * Autoscaler avoids thrashing, i.e.: prevents rapid execution of conflicting decision if the load is not stable. - -As the CPU consumption of a pod immediately after start may be highly variable due to initialization/startup, -autoscaler will skip metrics from the first minute of pod lifecycle. - ## Relative vs. absolute metrics The question arises whether the values of the target metrics should be absolute (e.g.: 0.6 core, 100MB of RAM) diff --git a/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go b/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go index a3cce83833b..abdb061e99d 100644 --- a/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go +++ b/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go @@ -18,6 +18,7 @@ package autoscalercontroller import ( "fmt" + "math" "time" "github.com/golang/glog" @@ -30,6 +31,15 @@ import ( "k8s.io/kubernetes/pkg/util" ) +const ( + heapsterNamespace = "kube-system" + heapsterService = "monitoring-heapster" + + // Usage shoud exceed the tolerance before we start downscale or upscale the pods. + // TODO: make it a flag or HPA spec element. + tolerance = 0.1 +) + type HorizontalPodAutoscalerController struct { client client.Interface expClient client.ExperimentalInterface @@ -79,38 +89,51 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error { continue } - // if the ratio is 1.2 we want to have 2 replicas - desiredReplicas := 1 + int((currentConsumption.Quantity.MilliValue()*int64(currentReplicas))/hpa.Spec.Target.Quantity.MilliValue()) + usageRatio := float64(currentConsumption.Quantity.MilliValue()) / float64(hpa.Spec.Target.Quantity.MilliValue()) + desiredReplicas := int(math.Ceil(usageRatio * float64(currentReplicas))) if desiredReplicas < hpa.Spec.MinCount { desiredReplicas = hpa.Spec.MinCount } + + // TODO: remove when pod ideling is done. + if desiredReplicas == 0 { + desiredReplicas = 1 + } + if desiredReplicas > hpa.Spec.MaxCount { desiredReplicas = hpa.Spec.MaxCount } now := time.Now() rescale := false + if desiredReplicas != currentReplicas { - // Going down - if desiredReplicas < currentReplicas && (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil || - hpa.Status.LastScaleTimestamp.Add(downscaleForbiddenWindow).Before(now)) { + // Going down only if the usageRatio dropped significantly below the target + // and there was no rescaling in the last downscaleForbiddenWindow. 
+ if desiredReplicas < currentReplicas && usageRatio < (1-tolerance) && + (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil || + hpa.Status.LastScaleTimestamp.Add(downscaleForbiddenWindow).Before(now)) { rescale = true } - // Going up - if desiredReplicas > currentReplicas && (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil || - hpa.Status.LastScaleTimestamp.Add(upscaleForbiddenWindow).Before(now)) { + // Going up only if the usage ratio increased significantly above the target + // and there was no rescaling in the last upscaleForbiddenWindow. + if desiredReplicas > currentReplicas && usageRatio > (1+tolerance) && + (hpa.Status == nil || hpa.Status.LastScaleTimestamp == nil || + hpa.Status.LastScaleTimestamp.Add(upscaleForbiddenWindow).Before(now)) { rescale = true } + } - if rescale { - scale.Spec.Replicas = desiredReplicas - _, err = a.expClient.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale) - if err != nil { - glog.Warningf("Failed to rescale %s: %v", reference, err) - continue - } + if rescale { + scale.Spec.Replicas = desiredReplicas + _, err = a.expClient.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale) + if err != nil { + glog.Warningf("Failed to rescale %s: %v", reference, err) + continue } + } else { + desiredReplicas = currentReplicas } status := expapi.HorizontalPodAutoscalerStatus{ From 98b954c74cb2035388ddcb6356422e95f4ebc7a7 Mon Sep 17 00:00:00 2001 From: gmarek Date: Mon, 7 Sep 2015 15:04:15 +0200 Subject: [PATCH 079/101] Add a mutex to assure atomicity of reat_limited queue operations and remove 'leaky' version of it --- pkg/controller/node/nodecontroller.go | 20 ++++++++++++++++--- pkg/controller/node/rate_limited_queue.go | 16 +++++---------- .../node/rate_limited_queue_test.go | 14 ++++++------- 3 files changed, 29 insertions(+), 21 deletions(-) diff --git a/pkg/controller/node/nodecontroller.go b/pkg/controller/node/nodecontroller.go index 596606ec517..2da83b9ab3d 100644 --- a/pkg/controller/node/nodecontroller.go +++ b/pkg/controller/node/nodecontroller.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "net" + "sync" "time" "github.com/golang/glog" @@ -88,7 +89,9 @@ type NodeController struct { // to aviod the problem with time skew across the cluster. nodeStatusMap map[string]nodeStatusData now func() util.Time - // worker that evicts pods from unresponsive nodes. + // Lock to access evictor workers + evictorLock *sync.Mutex + // workers that evicts pods from unresponsive nodes. podEvictor *RateLimitedTimedQueue terminationEvictor *RateLimitedTimedQueue podEvictionTimeout time.Duration @@ -120,6 +123,7 @@ func NewNodeController( if allocateNodeCIDRs && clusterCIDR == nil { glog.Fatal("NodeController: Must specify clusterCIDR if allocateNodeCIDRs == true.") } + evictorLock := sync.Mutex{} return &NodeController{ cloud: cloud, knownNodeSet: make(util.StringSet), @@ -127,8 +131,9 @@ func NewNodeController( recorder: recorder, podEvictionTimeout: podEvictionTimeout, maximumGracePeriod: 5 * time.Minute, - podEvictor: NewRateLimitedTimedQueue(podEvictionLimiter, false), - terminationEvictor: NewRateLimitedTimedQueue(podEvictionLimiter, false), + evictorLock: &evictorLock, + podEvictor: NewRateLimitedTimedQueue(podEvictionLimiter), + terminationEvictor: NewRateLimitedTimedQueue(podEvictionLimiter), nodeStatusMap: make(map[string]nodeStatusData), nodeMonitorGracePeriod: nodeMonitorGracePeriod, nodeMonitorPeriod: nodeMonitorPeriod, @@ -162,6 +167,8 @@ func (nc *NodeController) Run(period time.Duration) { // c. 
If there are pods still terminating, wait for their estimated completion // before retrying go util.Until(func() { + nc.evictorLock.Lock() + defer nc.evictorLock.Unlock() nc.podEvictor.Try(func(value TimedValue) (bool, time.Duration) { remaining, err := nc.deletePods(value.Value) if err != nil { @@ -178,6 +185,8 @@ func (nc *NodeController) Run(period time.Duration) { // TODO: replace with a controller that ensures pods that are terminating complete // in a particular time period go util.Until(func() { + nc.evictorLock.Lock() + defer nc.evictorLock.Unlock() nc.terminationEvictor.Try(func(value TimedValue) (bool, time.Duration) { completed, remaining, err := nc.terminatePods(value.Value, value.AddedAt) if err != nil { @@ -551,12 +560,17 @@ func (nc *NodeController) hasPods(nodeName string) (bool, error) { // evictPods queues an eviction for the provided node name, and returns false if the node is already // queued for eviction. func (nc *NodeController) evictPods(nodeName string) bool { + nc.evictorLock.Lock() + defer nc.evictorLock.Unlock() return nc.podEvictor.Add(nodeName) } // cancelPodEviction removes any queued evictions, typically because the node is available again. It // returns true if an eviction was queued. func (nc *NodeController) cancelPodEviction(nodeName string) bool { + glog.V(2).Infof("Cancelling pod Eviction on Node: %v", nodeName) + nc.evictorLock.Lock() + defer nc.evictorLock.Unlock() wasDeleting := nc.podEvictor.Remove(nodeName) wasTerminating := nc.terminationEvictor.Remove(nodeName) return wasDeleting || wasTerminating diff --git a/pkg/controller/node/rate_limited_queue.go b/pkg/controller/node/rate_limited_queue.go index 550b646941f..2fcd0963d1b 100644 --- a/pkg/controller/node/rate_limited_queue.go +++ b/pkg/controller/node/rate_limited_queue.go @@ -136,19 +136,16 @@ func (q *UniqueQueue) Head() (TimedValue, bool) { type RateLimitedTimedQueue struct { queue UniqueQueue limiter util.RateLimiter - leak bool } -// Creates new queue which will use given RateLimiter to oversee execution. If leak is true, -// items which are rate limited will be leakped. Otherwise, rate limited items will be requeued. -func NewRateLimitedTimedQueue(limiter util.RateLimiter, leak bool) *RateLimitedTimedQueue { +// Creates new queue which will use given RateLimiter to oversee execution. 
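The node controller changes above serialize every operation on the two eviction queues behind the shared `evictorLock`, so `evictPods`/`cancelPodEviction` can no longer interleave with the `Try` loops started from `Run`. A minimal standalone sketch of that locking pattern (the string slice merely stands in for the real `RateLimitedTimedQueue`):

```go
package main

import (
	"fmt"
	"sync"
)

// evictor sketches the pattern applied to NodeController: one mutex guards
// every access to the shared queue, whether adding, removing or draining.
type evictor struct {
	mu    sync.Mutex
	queue []string // stand-in for the RateLimitedTimedQueue
}

func (e *evictor) Add(name string) {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.queue = append(e.queue, name)
}

func (e *evictor) Try(fn func(string)) {
	e.mu.Lock()
	defer e.mu.Unlock()
	for _, name := range e.queue {
		fn(name)
	}
}

func main() {
	ev := &evictor{}
	ev.Add("node-1")
	ev.Try(func(n string) { fmt.Println("evicting pods from", n) })
}
```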
+func NewRateLimitedTimedQueue(limiter util.RateLimiter) *RateLimitedTimedQueue { return &RateLimitedTimedQueue{ queue: UniqueQueue{ queue: TimedQueue{}, set: util.NewStringSet(), }, limiter: limiter, - leak: leak, } } @@ -164,12 +161,9 @@ func (q *RateLimitedTimedQueue) Try(fn ActionFunc) { val, ok := q.queue.Head() for ok { // rate limit the queue checking - if q.leak { - if !q.limiter.CanAccept() { - break - } - } else { - q.limiter.Accept() + if !q.limiter.CanAccept() { + // Try again later + break } now := now() diff --git a/pkg/controller/node/rate_limited_queue_test.go b/pkg/controller/node/rate_limited_queue_test.go index a3a49905255..5dc8a4d81a4 100644 --- a/pkg/controller/node/rate_limited_queue_test.go +++ b/pkg/controller/node/rate_limited_queue_test.go @@ -38,7 +38,7 @@ func CheckSetEq(lhs, rhs util.StringSet) bool { } func TestAddNode(t *testing.T) { - evictor := NewRateLimitedTimedQueue(util.NewFakeRateLimiter(), true) + evictor := NewRateLimitedTimedQueue(util.NewFakeRateLimiter()) evictor.Add("first") evictor.Add("second") evictor.Add("third") @@ -61,7 +61,7 @@ func TestAddNode(t *testing.T) { } func TestDelNode(t *testing.T) { - evictor := NewRateLimitedTimedQueue(util.NewFakeRateLimiter(), true) + evictor := NewRateLimitedTimedQueue(util.NewFakeRateLimiter()) evictor.Add("first") evictor.Add("second") evictor.Add("third") @@ -83,7 +83,7 @@ func TestDelNode(t *testing.T) { t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern) } - evictor = NewRateLimitedTimedQueue(util.NewFakeRateLimiter(), true) + evictor = NewRateLimitedTimedQueue(util.NewFakeRateLimiter()) evictor.Add("first") evictor.Add("second") evictor.Add("third") @@ -105,7 +105,7 @@ func TestDelNode(t *testing.T) { t.Errorf("Invalid map. Got %v, expected %v", evictor.queue.set, setPattern) } - evictor = NewRateLimitedTimedQueue(util.NewFakeRateLimiter(), true) + evictor = NewRateLimitedTimedQueue(util.NewFakeRateLimiter()) evictor.Add("first") evictor.Add("second") evictor.Add("third") @@ -129,7 +129,7 @@ func TestDelNode(t *testing.T) { } func TestTry(t *testing.T) { - evictor := NewRateLimitedTimedQueue(util.NewFakeRateLimiter(), true) + evictor := NewRateLimitedTimedQueue(util.NewFakeRateLimiter()) evictor.Add("first") evictor.Add("second") evictor.Add("third") @@ -151,7 +151,7 @@ func TestTry(t *testing.T) { } func TestTryOrdering(t *testing.T) { - evictor := NewRateLimitedTimedQueue(util.NewFakeRateLimiter(), false) + evictor := NewRateLimitedTimedQueue(util.NewFakeRateLimiter()) evictor.Add("first") evictor.Add("second") evictor.Add("third") @@ -183,7 +183,7 @@ func TestTryOrdering(t *testing.T) { } func TestTryRemovingWhileTry(t *testing.T) { - evictor := NewRateLimitedTimedQueue(util.NewFakeRateLimiter(), false) + evictor := NewRateLimitedTimedQueue(util.NewFakeRateLimiter()) evictor.Add("first") evictor.Add("second") evictor.Add("third") From e1e3101e75179e4597411a75f5c0ffa4f7d3b898 Mon Sep 17 00:00:00 2001 From: gmarek Date: Mon, 7 Sep 2015 15:04:48 +0200 Subject: [PATCH 080/101] Add a sleep in resize_nodes test to allow NodeController's Watch to catch up --- test/e2e/resize_nodes.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/e2e/resize_nodes.go b/test/e2e/resize_nodes.go index 7ae33f1ad75..c3674726a01 100644 --- a/test/e2e/resize_nodes.go +++ b/test/e2e/resize_nodes.go @@ -522,10 +522,13 @@ var _ = Describe("Nodes", func() { By(fmt.Sprintf("block network traffic from node %s", node.Name)) performTemporaryNetworkFailure(c, ns, name, replicas, 
pods.Items[0].Name, node) Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) - if !waitForNodeToBe(c, node.Name, true, resizeNodeReadyTimeout) { + if !waitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } + // sleep a bit, to allow Watch in NodeController to catch up. + time.Sleep(5 * time.Second) + By("verify whether new pods can be created on the re-attached node") // increasing the RC size is not a valid way to test this // since we have no guarantees the pod will be scheduled on our node. From c9644b3e5b8b9cba7beff912d25c66d8ea90dea6 Mon Sep 17 00:00:00 2001 From: harry Date: Mon, 7 Sep 2015 11:38:04 +0800 Subject: [PATCH 081/101] Add mount dirs to multi-node scripts --- .../docker-multinode/master.sh | 28 +++++++++++++++++-- .../docker-multinode/worker.sh | 27 ++++++++++++++++-- 2 files changed, 51 insertions(+), 4 deletions(-) diff --git a/docs/getting-started-guides/docker-multinode/master.sh b/docs/getting-started-guides/docker-multinode/master.sh index 74e41324d0a..5ac9b4c8043 100755 --- a/docs/getting-started-guides/docker-multinode/master.sh +++ b/docs/getting-started-guides/docker-multinode/master.sh @@ -135,8 +135,32 @@ start_k8s(){ sleep 5 # Start kubelet & proxy, then start master components as pods - docker run --net=host --privileged --restart=always -d -v /sys:/sys:ro -v /var/run/docker.sock:/var/run/docker.sock gcr.io/google_containers/hyperkube:v${K8S_VERSION} /hyperkube kubelet --api-servers=http://localhost:8080 --v=2 --address=0.0.0.0 --enable-server --hostname-override=127.0.0.1 --config=/etc/kubernetes/manifests-multi --cluster-dns=10.0.0.10 --cluster-domain=cluster.local - docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v${K8S_VERSION} /hyperkube proxy --master=http://127.0.0.1:8080 --v=2 + docker run \ + --net=host \ + --privileged \ + --restart=always \ + -d \ + -v /sys:/sys:ro \ + -v /var/run:/var/run:rw \ + -v /:/rootfs:ro \ + -v /dev:/dev \ + -v /var/lib/docker/:/var/lib/docker:ro \ + -v /var/lib/kubelet/:/var/lib/kubelet:rw \ + gcr.io/google_containers/hyperkube:v${K8S_VERSION} \ + /hyperkube kubelet \ + --api-servers=http://localhost:8080 \ + --v=2 --address=0.0.0.0 --enable-server \ + --hostname-override=127.0.0.1 \ + --config=/etc/kubernetes/manifests-multi \ + --cluster-dns=10.0.0.10 \ + --cluster-domain=cluster.local + + docker run \ + -d \ + --net=host \ + --privileged \ + gcr.io/google_containers/hyperkube:v${K8S_VERSION} \ + /hyperkube proxy --master=http://127.0.0.1:8080 --v=2 } echo "Detecting your OS distro ..." 
diff --git a/docs/getting-started-guides/docker-multinode/worker.sh b/docs/getting-started-guides/docker-multinode/worker.sh index 14b1310d00c..2788ef011dd 100755 --- a/docs/getting-started-guides/docker-multinode/worker.sh +++ b/docs/getting-started-guides/docker-multinode/worker.sh @@ -133,8 +133,31 @@ start_k8s() { sleep 5 # Start kubelet & proxy in container - sudo docker run --net=host --privileged --restart=always -d -v /sys:/sys:ro -v /var/run/docker.sock:/var/run/docker.sock gcr.io/google_containers/hyperkube:v${K8S_VERSION} /hyperkube kubelet --api-servers=http://${MASTER_IP}:8080 --v=2 --address=0.0.0.0 --enable-server --hostname-override=$(hostname -i) --cluster-dns=10.0.0.10 --cluster-domain=cluster.local - sudo docker run -d --net=host --privileged --restart=always gcr.io/google_containers/hyperkube:v${K8S_VERSION} /hyperkube proxy --master=http://${MASTER_IP}:8080 --v=2 + docker run \ + --net=host \ + --privileged \ + --restart=always \ + -d \ + -v /sys:/sys:ro \ + -v /var/run:/var/run:rw \ + -v /dev:/dev \ + -v /var/lib/docker/:/var/lib/docker:ro \ + -v /var/lib/kubelet/:/var/lib/kubelet:rw \ + gcr.io/google_containers/hyperkube:v${K8S_VERSION} \ + /hyperkube kubelet --api-servers=http://${MASTER_IP}:8080 \ + --v=2 --address=0.0.0.0 --enable-server \ + --hostname-override=$(hostname -i) \ + --cluster-dns=10.0.0.10 \ + --cluster-domain=cluster.local + + docker run \ + -d \ + --net=host \ + --privileged \ + --restart=always \ + gcr.io/google_containers/hyperkube:v${K8S_VERSION} \ + /hyperkube proxy --master=http://${MASTER_IP}:8080 \ + --v=2 } echo "Detecting your OS distro ..." From 84e94e39cd958b9ae9925e75170b370e4ec88e9b Mon Sep 17 00:00:00 2001 From: feihujiang Date: Tue, 1 Sep 2015 11:03:29 +0800 Subject: [PATCH 082/101] Support setting env vars in kubectl run --- contrib/completions/bash/kubectl | 1 + docs/man/man1/kubectl-run.1 | 10 +++ docs/user-guide/docker-cli-to-kubectl.md | 4 +- docs/user-guide/kubectl/kubectl_run.md | 11 ++- pkg/kubectl/cmd/run.go | 12 +++- pkg/kubectl/cmd/run_test.go | 19 ++++++ pkg/kubectl/run.go | 59 ++++++++++++++++ pkg/kubectl/run_test.go | 87 ++++++++++++++++++++++++ 8 files changed, 198 insertions(+), 5 deletions(-) diff --git a/contrib/completions/bash/kubectl b/contrib/completions/bash/kubectl index 34bd8d3c7a0..0413c2a5260 100644 --- a/contrib/completions/bash/kubectl +++ b/contrib/completions/bash/kubectl @@ -681,6 +681,7 @@ _kubectl_run() flags+=("--attach") flags+=("--command") flags+=("--dry-run") + flags+=("--env=") flags+=("--generator=") flags+=("--hostport=") flags+=("--image=") diff --git a/docs/man/man1/kubectl-run.1 b/docs/man/man1/kubectl-run.1 index c332c0adfd0..1ba98e3898b 100644 --- a/docs/man/man1/kubectl-run.1 +++ b/docs/man/man1/kubectl-run.1 @@ -30,6 +30,10 @@ Creates a replication controller to manage the created container(s). \fB\-\-dry\-run\fP=false If true, only print the object that would be sent, without sending it. +.PP +\fB\-\-env\fP=[] + Environment variables to set in the container + .PP \fB\-\-generator\fP="" The name of the API generator to use. Default is 'run/v1' if \-\-restart=Always, otherwise the default is 'run\-pod/v1'. @@ -200,6 +204,12 @@ Creates a replication controller to manage the created container(s). # Start a single instance of nginx. $ kubectl run nginx \-\-image=nginx +# Start a single instance of hazelcast and let the container expose port 5701 . 
+$ kubectl run hazelcast \-\-image=hazelcast \-\-port=5701 + +# Start a single instance of hazelcast and set environment variables "DNS\_DOMAIN=cluster" and "POD\_NAMESPACE=default" in the container. +$ kubectl run hazelcast \-\-image=hazelcast \-\-env="DNS\_DOMAIN=local" \-\-env="POD\_NAMESPACE=default" + # Start a replicated instance of nginx. $ kubectl run nginx \-\-image=nginx \-\-replicas=5 diff --git a/docs/user-guide/docker-cli-to-kubectl.md b/docs/user-guide/docker-cli-to-kubectl.md index ad9feb29e17..77152a9b1fe 100644 --- a/docs/user-guide/docker-cli-to-kubectl.md +++ b/docs/user-guide/docker-cli-to-kubectl.md @@ -58,7 +58,7 @@ How do I run an nginx container and expose it to the world? Checkout [kubectl ru With docker: ```console -$ docker run -d --restart=always --name nginx-app -p 80:80 nginx +$ docker run -d --restart=always -e DOMAIN=cluster --name nginx-app -p 80:80 nginx a9ec34d9878748d2f33dc20cb25c714ff21da8d40558b45bfaec9955859075d0 $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES @@ -69,7 +69,7 @@ With kubectl: ```console # start the pod running nginx -$ kubectl run --image=nginx nginx-app +$ kubectl run --image=nginx nginx-app --port=80 --env="DOMAIN=local" CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS nginx-app nginx-app nginx run=nginx-app 1 # expose a port through with a service diff --git a/docs/user-guide/kubectl/kubectl_run.md b/docs/user-guide/kubectl/kubectl_run.md index b9a664dd750..c664afd417c 100644 --- a/docs/user-guide/kubectl/kubectl_run.md +++ b/docs/user-guide/kubectl/kubectl_run.md @@ -42,7 +42,7 @@ Create and run a particular image, possibly replicated. Creates a replication controller to manage the created container(s). ``` -kubectl run NAME --image=image [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json] +kubectl run NAME --image=image [--env="key=value"] [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json] ``` ### Examples @@ -51,6 +51,12 @@ kubectl run NAME --image=image [--port=port] [--replicas=replicas] [--dry-run=bo # Start a single instance of nginx. $ kubectl run nginx --image=nginx +# Start a single instance of hazelcast and let the container expose port 5701 . +$ kubectl run hazelcast --image=hazelcast --port=5701 + +# Start a single instance of hazelcast and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default" in the container. +$ kubectl run hazelcast --image=hazelcast --env="DNS_DOMAIN=local" --env="POD_NAMESPACE=default" + # Start a replicated instance of nginx. $ kubectl run nginx --image=nginx --replicas=5 @@ -76,6 +82,7 @@ $ kubectl run nginx --image=nginx --command -- ... --attach[=false]: If true, wait for the Pod to start running, and then attach to the Pod as if 'kubectl attach ...' were called. Default false, unless '-i/--interactive' is set, in which case the default is true. --command[=false]: If true and extra arguments are present, use them as the 'command' field in the container, rather than the 'args' field which is the default. --dry-run[=false]: If true, only print the object that would be sent, without sending it. + --env=[]: Environment variables to set in the container --generator="": The name of the API generator to use. Default is 'run/v1' if --restart=Always, otherwise the default is 'run-pod/v1'. --hostport=-1: The host port mapping for the container port. To demonstrate a single-machine container. --image="": The image for the container to run. @@ -126,7 +133,7 @@ $ kubectl run nginx --image=nginx --command -- ... 
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.772003236 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-07 06:40:12.142439604 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_run.md?pixel)]() diff --git a/pkg/kubectl/cmd/run.go b/pkg/kubectl/cmd/run.go index b02931a22e1..5d2730967e1 100644 --- a/pkg/kubectl/cmd/run.go +++ b/pkg/kubectl/cmd/run.go @@ -38,6 +38,12 @@ Creates a replication controller to manage the created container(s).` run_example = `# Start a single instance of nginx. $ kubectl run nginx --image=nginx +# Start a single instance of hazelcast and let the container expose port 5701 . +$ kubectl run hazelcast --image=hazelcast --port=5701 + +# Start a single instance of hazelcast and set environment variables "DNS_DOMAIN=cluster" and "POD_NAMESPACE=default" in the container. +$ kubectl run hazelcast --image=hazelcast --env="DNS_DOMAIN=local" --env="POD_NAMESPACE=default" + # Start a replicated instance of nginx. $ kubectl run nginx --image=nginx --replicas=5 @@ -59,7 +65,7 @@ $ kubectl run nginx --image=nginx --command -- ... ` func NewCmdRun(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) *cobra.Command { cmd := &cobra.Command{ - Use: "run NAME --image=image [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json]", + Use: "run NAME --image=image [--env=\"key=value\"] [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json]", // run-container is deprecated Aliases: []string{"run-container"}, Short: "Run a particular image on the cluster.", @@ -77,6 +83,7 @@ func NewCmdRun(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) *c cmd.Flags().IntP("replicas", "r", 1, "Number of replicas to create for this container. Default is 1.") cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without sending it.") cmd.Flags().String("overrides", "", "An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.") + cmd.Flags().StringSlice("env", []string{}, "Environment variables to set in the container") cmd.Flags().Int("port", -1, "The port that this container exposes.") cmd.Flags().Int("hostport", -1, "The host port mapping for the container port. To demonstrate a single-machine container.") cmd.Flags().StringP("labels", "l", "", "Labels to apply to the pod(s).") @@ -137,6 +144,9 @@ func Run(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer, cmd *cob if len(args) > 1 { params["args"] = args[1:] } + + params["env"] = cmdutil.GetFlagStringSlice(cmd, "env") + err = kubectl.ValidateParams(names, params) if err != nil { return err diff --git a/pkg/kubectl/cmd/run_test.go b/pkg/kubectl/cmd/run_test.go index 56185a5f1ae..c40ab85ea11 100644 --- a/pkg/kubectl/cmd/run_test.go +++ b/pkg/kubectl/cmd/run_test.go @@ -17,10 +17,12 @@ limitations under the License. 
package cmd import ( + "reflect" "testing" "github.com/spf13/cobra" "k8s.io/kubernetes/pkg/api" + cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" ) func TestGetRestartPolicy(t *testing.T) { @@ -78,3 +80,20 @@ func TestGetRestartPolicy(t *testing.T) { } } } + +func TestGetEnv(t *testing.T) { + test := struct { + input []string + expected []string + }{ + input: []string{"a=b", "c=d"}, + expected: []string{"a=b", "c=d"}, + } + cmd := &cobra.Command{} + cmd.Flags().StringSlice("env", test.input, "") + + envStrings := cmdutil.GetFlagStringSlice(cmd, "env") + if len(envStrings) != 2 || !reflect.DeepEqual(envStrings, test.expected) { + t.Errorf("expected: %s, saw: %s", test.expected, envStrings) + } +} diff --git a/pkg/kubectl/run.go b/pkg/kubectl/run.go index 6c2c5dd7a44..233cbbe9ba9 100644 --- a/pkg/kubectl/run.go +++ b/pkg/kubectl/run.go @@ -19,9 +19,11 @@ package kubectl import ( "fmt" "strconv" + "strings" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util" ) type BasicReplicationController struct{} @@ -39,6 +41,7 @@ func (BasicReplicationController) ParamNames() []GeneratorParam { {"tty", false}, {"command", false}, {"args", false}, + {"env", false}, } } @@ -77,6 +80,23 @@ func (BasicReplicationController) Generate(genericParams map[string]interface{}) } delete(genericParams, "args") } + + // TODO: abstract this logic so that multiple generators can handle env in the same way. Same for parse envs. + var envs []api.EnvVar + envStrings, found := genericParams["env"] + if found { + if envStringArray, isArray := envStrings.([]string); isArray { + var err error + envs, err = parseEnvs(envStringArray) + if err != nil { + return nil, err + } + delete(genericParams, "env") + } else { + return nil, fmt.Errorf("expected []string, found: %v", envStrings) + } + } + params := map[string]string{} for key, value := range genericParams { strVal, isString := value.(string) @@ -127,6 +147,10 @@ func (BasicReplicationController) Generate(genericParams map[string]interface{}) } } + if len(envs) > 0 { + podSpec.Containers[0].Env = envs + } + controller := api.ReplicationController{ ObjectMeta: api.ObjectMeta{ Name: name, @@ -198,6 +222,7 @@ func (BasicPod) ParamNames() []GeneratorParam { {"restart", false}, {"command", false}, {"args", false}, + {"env", false}, } } @@ -212,6 +237,22 @@ func (BasicPod) Generate(genericParams map[string]interface{}) (runtime.Object, } delete(genericParams, "args") } + // TODO: abstract this logic so that multiple generators can handle env in the same way. Same for parse envs. 
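The TODO above notes that this env handling should eventually be shared between generators. As a reference for what that shared helper has to do, here is a self-contained sketch of the `--env=NAME=value` parsing rule the patch implements; the local `EnvVar` type and `cIdentifier` regexp are stand-ins for `api.EnvVar` and `util.IsCIdentifier`, which are not reproduced here:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// EnvVar stands in for api.EnvVar so the sketch runs on its own.
type EnvVar struct {
	Name  string
	Value string
}

// cIdentifier approximates util.IsCIdentifier from the patch.
var cIdentifier = regexp.MustCompile(`^[A-Za-z_][A-Za-z0-9_]*$`)

// parseEnvSketch mirrors the "--env" handling added by this patch: each flag
// value must be NAME=value with a C-identifier name and a non-empty value.
func parseEnvSketch(envArray []string) ([]EnvVar, error) {
	envs := []EnvVar{}
	for _, env := range envArray {
		parts := strings.Split(env, "=")
		if len(parts) != 2 || !cIdentifier.MatchString(parts[0]) || len(parts[1]) == 0 {
			return nil, fmt.Errorf("invalid env: %v", env)
		}
		envs = append(envs, EnvVar{Name: parts[0], Value: parts[1]})
	}
	return envs, nil
}

func main() {
	envs, err := parseEnvSketch([]string{"DNS_DOMAIN=local", "POD_NAMESPACE=default"})
	fmt.Println(envs, err) // [{DNS_DOMAIN local} {POD_NAMESPACE default}] <nil>
}
```

Note that a value containing a further `=` character is rejected by this rule, since the split must produce exactly two parts.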
+ var envs []api.EnvVar + envStrings, found := genericParams["env"] + if found { + if envStringArray, isArray := envStrings.([]string); isArray { + var err error + envs, err = parseEnvs(envStringArray) + if err != nil { + return nil, err + } + delete(genericParams, "env") + } else { + return nil, fmt.Errorf("expected []string, found: %v", envStrings) + } + } + params := map[string]string{} for key, value := range genericParams { strVal, isString := value.(string) @@ -281,8 +322,26 @@ func (BasicPod) Generate(genericParams map[string]interface{}) (runtime.Object, pod.Spec.Containers[0].Args = args } } + + if len(envs) > 0 { + pod.Spec.Containers[0].Env = envs + } + if err := updatePodPorts(params, &pod.Spec); err != nil { return nil, err } return &pod, nil } + +func parseEnvs(envArray []string) ([]api.EnvVar, error) { + envs := []api.EnvVar{} + for _, env := range envArray { + parts := strings.Split(env, "=") + if len(parts) != 2 || !util.IsCIdentifier(parts[0]) || len(parts[1]) == 0 { + return nil, fmt.Errorf("invalid env: %v", env) + } + envVar := api.EnvVar{Name: parts[0], Value: parts[1]} + envs = append(envs, envVar) + } + return envs, nil +} diff --git a/pkg/kubectl/run_test.go b/pkg/kubectl/run_test.go index 66138f25f77..3b187eba801 100644 --- a/pkg/kubectl/run_test.go +++ b/pkg/kubectl/run_test.go @@ -60,6 +60,50 @@ func TestGenerate(t *testing.T) { }, }, }, + + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "replicas": "1", + "port": "-1", + "env": []string{"a=b", "c=d"}, + }, + expected: &api.ReplicationController{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Labels: map[string]string{"run": "foo"}, + }, + Spec: api.ReplicationControllerSpec{ + Replicas: 1, + Selector: map[string]string{"run": "foo"}, + Template: &api.PodTemplateSpec{ + ObjectMeta: api.ObjectMeta{ + Labels: map[string]string{"run": "foo"}, + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + Env: []api.EnvVar{ + { + Name: "a", + Value: "b", + }, + { + Name: "c", + Value: "d", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { params: map[string]interface{}{ "name": "foo", @@ -287,6 +331,49 @@ func TestGeneratePod(t *testing.T) { }, }, }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "env": []string{"a", "c"}, + }, + + expected: nil, + expectErr: true, + }, + { + params: map[string]interface{}{ + "name": "foo", + "image": "someimage", + "env": []string{"a=b", "c=d"}, + }, + expected: &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "foo", + Image: "someimage", + ImagePullPolicy: api.PullIfNotPresent, + Env: []api.EnvVar{ + { + Name: "a", + Value: "b", + }, + { + Name: "c", + Value: "d", + }, + }, + }, + }, + DNSPolicy: api.DNSClusterFirst, + RestartPolicy: api.RestartPolicyAlways, + }, + }, + }, { params: map[string]interface{}{ "name": "foo", From b85d0557b4bcd4eb20511394fa1758eb6250e195 Mon Sep 17 00:00:00 2001 From: Wojciech Tyczynski Date: Tue, 8 Sep 2015 09:34:10 +0200 Subject: [PATCH 083/101] Revert #13052 --- pkg/kubelet/dockertools/manager.go | 98 +++++++--------------- pkg/kubelet/dockertools/manager_test.go | 59 ------------- test/e2e/docker_containers.go | 8 +- test/e2e/downward_api.go | 105 ++++++++---------------- test/e2e/downwardapi_volume.go | 2 +- test/e2e/framework.go | 7 +- test/e2e/host_path.go | 4 +- test/e2e/secrets.go | 2 +- test/e2e/util.go | 30 +------ 9 files changed, 77 insertions(+), 238 deletions(-) 
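The commit message gives no detail beyond the PR number; the diffs that follow undo the kubelet-side plumbing that resolved the pod IP for the downward API (`podDependsOnPodIP`, `determineContainerIP` and the early `pod.Status.PodIP` assignment in `SyncPod`) along with the regexp-based container-output matcher in the e2e framework. For reference, the kind of environment variable that plumbing served looks like the following sketch, built from the `api` types used in the removed tests (it assumes this repository's `pkg/api` package):

```go
package example

import "k8s.io/kubernetes/pkg/api"

// podIPEnv shows a downward-API environment variable that selects the pod IP
// via the status.podIP field path -- the selector whose kubelet handling is
// removed again by this revert.
func podIPEnv() api.EnvVar {
	return api.EnvVar{
		Name: "POD_IP",
		ValueFrom: &api.EnvVarSource{
			FieldRef: &api.ObjectFieldSelector{
				APIVersion: "v1",
				FieldPath:  "status.podIP",
			},
		},
	}
}
```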
diff --git a/pkg/kubelet/dockertools/manager.go b/pkg/kubelet/dockertools/manager.go index feb42fc8786..f1b8a6da4d0 100644 --- a/pkg/kubelet/dockertools/manager.go +++ b/pkg/kubelet/dockertools/manager.go @@ -290,58 +290,16 @@ type containerStatusResult struct { err error } -const podIPDownwardAPISelector = "status.podIP" - -// podDependsOnIP returns whether any containers in a pod depend on using the pod IP via -// the downward API. -func podDependsOnPodIP(pod *api.Pod) bool { - for _, container := range pod.Spec.Containers { - for _, env := range container.Env { - if env.ValueFrom != nil && - env.ValueFrom.FieldRef != nil && - env.ValueFrom.FieldRef.FieldPath == podIPDownwardAPISelector { - return true - } - } - } - - return false -} - -// determineContainerIP determines the IP address of the given container. It is expected -// that the container passed is the infrastructure container of a pod and the responsibility -// of the caller to ensure that the correct container is passed. -func (dm *DockerManager) determineContainerIP(podNamespace, podName string, container *docker.Container) string { - result := "" - - if container.NetworkSettings != nil { - result = container.NetworkSettings.IPAddress - } - - if dm.networkPlugin.Name() != network.DefaultPluginName { - netStatus, err := dm.networkPlugin.Status(podNamespace, podName, kubeletTypes.DockerID(container.ID)) - if err != nil { - glog.Errorf("NetworkPlugin %s failed on the status hook for pod '%s' - %v", dm.networkPlugin.Name(), podName, err) - } else if netStatus != nil { - result = netStatus.IP.String() - } - } - - return result -} - func (dm *DockerManager) inspectContainer(dockerID, containerName, tPath string, pod *api.Pod) *containerStatusResult { result := containerStatusResult{api.ContainerStatus{}, "", nil} inspectResult, err := dm.client.InspectContainer(dockerID) + if err != nil { result.err = err return &result } - // NOTE (pmorie): this is a seriously fishy if statement. A nil result from InspectContainer seems like it should - // always be paired with a non-nil error in the result of InspectContainer. if inspectResult == nil { - glog.Error("Received a nil result from InspectContainer without receiving an error") // Why did we not get an error? return &result } @@ -359,7 +317,18 @@ func (dm *DockerManager) inspectContainer(dockerID, containerName, tPath string, StartedAt: util.NewTime(inspectResult.State.StartedAt), } if containerName == PodInfraContainerName { - result.ip = dm.determineContainerIP(pod.Namespace, pod.Name, inspectResult) + if inspectResult.NetworkSettings != nil { + result.ip = inspectResult.NetworkSettings.IPAddress + } + // override the above if a network plugin exists + if dm.networkPlugin.Name() != network.DefaultPluginName { + netStatus, err := dm.networkPlugin.Status(pod.Namespace, pod.Name, kubeletTypes.DockerID(dockerID)) + if err != nil { + glog.Errorf("NetworkPlugin %s failed on the status hook for pod '%s' - %v", dm.networkPlugin.Name(), pod.Name, err) + } else if netStatus != nil { + result.ip = netStatus.IP.String() + } + } } } else if !inspectResult.State.FinishedAt.IsZero() { reason := "" @@ -1391,7 +1360,7 @@ func containerAndPodFromLabels(inspect *docker.Container) (pod *api.Pod, contain } // Run a single container from a pod. 
Returns the docker container ID -func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Container, netMode, ipcMode string) (kubeletTypes.DockerID, *docker.Container, error) { +func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Container, netMode, ipcMode string) (kubeletTypes.DockerID, error) { start := time.Now() defer func() { metrics.ContainerManagerLatency.WithLabelValues("runContainerInPod").Observe(metrics.SinceInMicroseconds(start)) @@ -1404,7 +1373,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe opts, err := dm.generator.GenerateRunContainerOptions(pod, container) if err != nil { - return "", nil, err + return "", err } utsMode := "" @@ -1413,7 +1382,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe } id, err := dm.runContainer(pod, container, opts, ref, netMode, ipcMode, utsMode) if err != nil { - return "", nil, err + return "", err } // Remember this reference so we can report events about this container @@ -1425,7 +1394,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe handlerErr := dm.runner.Run(id, pod, container, container.Lifecycle.PostStart) if handlerErr != nil { dm.KillContainerInPod(types.UID(id), container, pod) - return kubeletTypes.DockerID(""), nil, fmt.Errorf("failed to call event handler: %v", handlerErr) + return kubeletTypes.DockerID(""), fmt.Errorf("failed to call event handler: %v", handlerErr) } } @@ -1443,11 +1412,11 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe // Container information is used in adjusting OOM scores and adding ndots. containerInfo, err := dm.client.InspectContainer(string(id)) if err != nil { - return "", nil, err + return "", err } // Ensure the PID actually exists, else we'll move ourselves. if containerInfo.State.Pid == 0 { - return "", nil, fmt.Errorf("failed to get init PID for Docker container %q", string(id)) + return "", fmt.Errorf("failed to get init PID for Docker container %q", string(id)) } // Set OOM score of the container based on the priority of the container. @@ -1462,10 +1431,10 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe } cgroupName, err := dm.procFs.GetFullContainerName(containerInfo.State.Pid) if err != nil { - return "", nil, err + return "", err } if err = dm.oomAdjuster.ApplyOomScoreAdjContainer(cgroupName, oomScoreAdj, 5); err != nil { - return "", nil, err + return "", err } // currently, Docker does not have a flag by which the ndots option can be passed. @@ -1478,7 +1447,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe err = addNDotsOption(containerInfo.ResolvConfPath) } - return kubeletTypes.DockerID(id), containerInfo, err + return kubeletTypes.DockerID(id), err } func addNDotsOption(resolvFilePath string) error { @@ -1512,7 +1481,7 @@ func appendToFile(filePath, stringToAppend string) error { } // createPodInfraContainer starts the pod infra container for a pod. Returns the docker container ID of the newly created container. 
-func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubeletTypes.DockerID, *docker.Container, error) { +func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubeletTypes.DockerID, error) { start := time.Now() defer func() { metrics.ContainerManagerLatency.WithLabelValues("createPodInfraContainer").Observe(metrics.SinceInMicroseconds(start)) @@ -1540,15 +1509,15 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubeletTypes.Doc // No pod secrets for the infra container. if err := dm.imagePuller.PullImage(pod, container, nil); err != nil { - return "", nil, err + return "", err } - id, dockerContainer, err := dm.runContainerInPod(pod, container, netNamespace, "") + id, err := dm.runContainerInPod(pod, container, netNamespace, "") if err != nil { - return "", nil, err + return "", err } - return id, dockerContainer, nil + return id, nil } // TODO(vmarmol): This will soon be made non-public when its only use is internal. @@ -1748,21 +1717,16 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod podInfraContainerID := containerChanges.InfraContainerId if containerChanges.StartInfraContainer && (len(containerChanges.ContainersToStart) > 0) { glog.V(4).Infof("Creating pod infra container for %q", podFullName) - podInfraContainerID, podInfraContainer, err := dm.createPodInfraContainer(pod) + podInfraContainerID, err = dm.createPodInfraContainer(pod) + + // Call the networking plugin if err == nil { - // Call the networking plugin err = dm.networkPlugin.SetUpPod(pod.Namespace, pod.Name, podInfraContainerID) } if err != nil { glog.Errorf("Failed to create pod infra container: %v; Skipping pod %q", err, podFullName) return err } - - if podDependsOnPodIP(pod) { - // Find the pod IP after starting the infra container in order to expose - // it safely via the downward API without a race. - pod.Status.PodIP = dm.determineContainerIP(pod.Name, pod.Namespace, podInfraContainer) - } } // Start everything @@ -1794,7 +1758,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, runningPod kubecontainer.Pod, pod // TODO(dawnchen): Check RestartPolicy.DelaySeconds before restart a container namespaceMode := fmt.Sprintf("container:%v", podInfraContainerID) - _, _, err = dm.runContainerInPod(pod, container, namespaceMode, namespaceMode) + _, err = dm.runContainerInPod(pod, container, namespaceMode, namespaceMode) dm.updateReasonCache(pod, container, err) if err != nil { // TODO(bburns) : Perhaps blacklist a container after N failures? 
diff --git a/pkg/kubelet/dockertools/manager_test.go b/pkg/kubelet/dockertools/manager_test.go index a6d54e71eba..ef289f4e309 100644 --- a/pkg/kubelet/dockertools/manager_test.go +++ b/pkg/kubelet/dockertools/manager_test.go @@ -2334,62 +2334,3 @@ func TestGetUidFromUser(t *testing.T) { } } } - -func TestPodDependsOnPodIP(t *testing.T) { - tests := []struct { - name string - expected bool - env api.EnvVar - }{ - { - name: "depends on pod IP", - expected: true, - env: api.EnvVar{ - Name: "POD_IP", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Default.Version(), - FieldPath: "status.podIP", - }, - }, - }, - }, - { - name: "literal value", - expected: false, - env: api.EnvVar{ - Name: "SOME_VAR", - Value: "foo", - }, - }, - { - name: "other downward api field", - expected: false, - env: api.EnvVar{ - Name: "POD_NAME", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: testapi.Default.Version(), - FieldPath: "metadata.name", - }, - }, - }, - }, - } - - for _, tc := range tests { - pod := &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ - {Env: []api.EnvVar{tc.env}}, - }, - }, - } - - result := podDependsOnPodIP(pod) - if e, a := tc.expected, result; e != a { - t.Errorf("%v: Unexpected result; expected %v, got %v", tc.name, e, a) - } - } - -} diff --git a/test/e2e/docker_containers.go b/test/e2e/docker_containers.go index dfb585a70f9..cae777a9842 100644 --- a/test/e2e/docker_containers.go +++ b/test/e2e/docker_containers.go @@ -45,7 +45,7 @@ var _ = Describe("Docker Containers", func() { }) It("should use the image defaults if command and args are blank", func() { - testContainerOutput("use defaults", c, entrypointTestPod(), 0, []string{ + testContainerOutputInNamespace("use defaults", c, entrypointTestPod(), 0, []string{ "[/ep default arguments]", }, ns) }) @@ -54,7 +54,7 @@ var _ = Describe("Docker Containers", func() { pod := entrypointTestPod() pod.Spec.Containers[0].Args = []string{"override", "arguments"} - testContainerOutput("override arguments", c, pod, 0, []string{ + testContainerOutputInNamespace("override arguments", c, pod, 0, []string{ "[/ep override arguments]", }, ns) }) @@ -65,7 +65,7 @@ var _ = Describe("Docker Containers", func() { pod := entrypointTestPod() pod.Spec.Containers[0].Command = []string{"/ep-2"} - testContainerOutput("override command", c, pod, 0, []string{ + testContainerOutputInNamespace("override command", c, pod, 0, []string{ "[/ep-2]", }, ns) }) @@ -75,7 +75,7 @@ var _ = Describe("Docker Containers", func() { pod.Spec.Containers[0].Command = []string{"/ep-2"} pod.Spec.Containers[0].Args = []string{"override", "arguments"} - testContainerOutput("override all", c, pod, 0, []string{ + testContainerOutputInNamespace("override all", c, pod, 0, []string{ "[/ep-2 override arguments]", }, ns) }) diff --git a/test/e2e/downward_api.go b/test/e2e/downward_api.go index 49364096310..e5ade3f0fa7 100644 --- a/test/e2e/downward_api.go +++ b/test/e2e/downward_api.go @@ -30,85 +30,46 @@ var _ = Describe("Downward API", func() { It("should provide pod name and namespace as env vars", func() { podName := "downward-api-" + string(util.NewUUID()) - env := []api.EnvVar{ - { - Name: "POD_NAME", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "metadata.name", - }, - }, + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: podName, + Labels: map[string]string{"name": podName}, }, - { - Name: "POD_NAMESPACE", - ValueFrom: 
&api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "metadata.namespace", - }, - }, - }, - { - Name: "POD_IP", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "status.podIP", + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "dapi-container", + Image: "gcr.io/google_containers/busybox", + Command: []string{"sh", "-c", "env"}, + Env: []api.EnvVar{ + { + Name: "POD_NAME", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "POD_NAMESPACE", + ValueFrom: &api.EnvVarSource{ + FieldRef: &api.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "metadata.namespace", + }, + }, + }, + }, }, }, + RestartPolicy: api.RestartPolicyNever, }, } - expectations := []string{ + framework.TestContainerOutput("downward api env vars", pod, 0, []string{ fmt.Sprintf("POD_NAME=%v", podName), fmt.Sprintf("POD_NAMESPACE=%v", framework.Namespace.Name), - } - - testDownwardAPI(framework, podName, env, expectations) + }) }) - - It("should provide pod IP as an env var", func() { - podName := "downward-api-" + string(util.NewUUID()) - env := []api.EnvVar{ - { - Name: "POD_IP", - ValueFrom: &api.EnvVarSource{ - FieldRef: &api.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "status.podIP", - }, - }, - }, - } - - expectations := []string{ - "POD_IP=(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)", - } - - testDownwardAPI(framework, podName, env, expectations) - }) - }) - -func testDownwardAPI(framework *Framework, podName string, env []api.EnvVar, expectations []string) { - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ - Name: podName, - Labels: map[string]string{"name": podName}, - }, - Spec: api.PodSpec{ - Containers: []api.Container{ - { - Name: "dapi-container", - Image: "gcr.io/google_containers/busybox", - Command: []string{"sh", "-c", "env"}, - Env: env, - }, - }, - RestartPolicy: api.RestartPolicyNever, - }, - } - - framework.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations) -} diff --git a/test/e2e/downwardapi_volume.go b/test/e2e/downwardapi_volume.go index 8f55ae9a764..7bb05b75016 100644 --- a/test/e2e/downwardapi_volume.go +++ b/test/e2e/downwardapi_volume.go @@ -86,7 +86,7 @@ var _ = Describe("Downwar dAPI volume", func() { RestartPolicy: api.RestartPolicyNever, }, } - testContainerOutput("downward API volume plugin", f.Client, pod, 0, []string{ + testContainerOutputInNamespace("downward API volume plugin", f.Client, pod, 0, []string{ fmt.Sprintf("cluster=\"rack10\"\n"), fmt.Sprintf("builder=\"john-doe\"\n"), fmt.Sprintf("%s\n", podName), diff --git a/test/e2e/framework.go b/test/e2e/framework.go index bdc6383db63..136b42ab7ae 100644 --- a/test/e2e/framework.go +++ b/test/e2e/framework.go @@ -112,12 +112,7 @@ func (f *Framework) WaitForPodRunning(podName string) error { // Runs the given pod and verifies that the output of exact container matches the desired output. func (f *Framework) TestContainerOutput(scenarioName string, pod *api.Pod, containerIndex int, expectedOutput []string) { - testContainerOutput(scenarioName, f.Client, pod, containerIndex, expectedOutput, f.Namespace.Name) -} - -// Runs the given pod and verifies that the output of exact container matches the desired regexps. 
-func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *api.Pod, containerIndex int, expectedOutput []string) { - testContainerOutputRegexp(scenarioName, f.Client, pod, containerIndex, expectedOutput, f.Namespace.Name) + testContainerOutputInNamespace(scenarioName, f.Client, pod, containerIndex, expectedOutput, f.Namespace.Name) } // WaitForAnEndpoint waits for at least one endpoint to become available in the diff --git a/test/e2e/host_path.go b/test/e2e/host_path.go index 6bc4ccdc46b..6e55c62bd5a 100644 --- a/test/e2e/host_path.go +++ b/test/e2e/host_path.go @@ -67,7 +67,7 @@ var _ = Describe("hostPath", func() { fmt.Sprintf("--fs_type=%v", volumePath), fmt.Sprintf("--file_mode=%v", volumePath), } - testContainerOutput("hostPath mode", c, pod, 0, []string{ + testContainerOutputInNamespace("hostPath mode", c, pod, 0, []string{ "mode of file \"/test-volume\": dtrwxrwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir }, namespace.Name) @@ -93,7 +93,7 @@ var _ = Describe("hostPath", func() { } //Read the content of the file with the second container to //verify volumes being shared properly among continers within the pod. - testContainerOutput("hostPath r/w", c, pod, 1, []string{ + testContainerOutputInNamespace("hostPath r/w", c, pod, 1, []string{ "content of file \"/test-volume/test-file\": mount-tester new file", }, namespace.Name, ) diff --git a/test/e2e/secrets.go b/test/e2e/secrets.go index 26a57438ec9..17f21e3e601 100644 --- a/test/e2e/secrets.go +++ b/test/e2e/secrets.go @@ -92,7 +92,7 @@ var _ = Describe("Secrets", func() { }, } - testContainerOutput("consume secrets", f.Client, pod, 0, []string{ + testContainerOutputInNamespace("consume secrets", f.Client, pod, 0, []string{ "content of file \"/etc/secret-volume/data-1\": value-1", "mode of file \"/etc/secret-volume/data-1\": -r--r--r--", }, f.Namespace.Name) diff --git a/test/e2e/util.go b/test/e2e/util.go index 59f4157abf6..d148d2d97a8 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -55,8 +55,6 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - - gomegatypes "github.com/onsi/gomega/types" ) const ( @@ -1024,29 +1022,9 @@ func tryKill(cmd *exec.Cmd) { } // testContainerOutputInNamespace runs the given pod in the given namespace and waits -// for all of the containers in the podSpec to move into the 'Success' status, and tests -// the specified container log against the given expected output using a substring matcher. -func testContainerOutput(scenarioName string, c *client.Client, pod *api.Pod, containerIndex int, expectedOutput []string, ns string) { - testContainerOutputMatcher(scenarioName, c, pod, containerIndex, expectedOutput, ns, ContainSubstring) -} - -// testContainerOutputInNamespace runs the given pod in the given namespace and waits -// for all of the containers in the podSpec to move into the 'Success' status, and tests -// the specified container log against the given expected output using a regexp matcher. -func testContainerOutputRegexp(scenarioName string, c *client.Client, pod *api.Pod, containerIndex int, expectedOutput []string, ns string) { - testContainerOutputMatcher(scenarioName, c, pod, containerIndex, expectedOutput, ns, MatchRegexp) -} - -// testContainerOutputInNamespace runs the given pod in the given namespace and waits -// for all of the containers in the podSpec to move into the 'Success' status, and tests -// the specified container log against the given expected output using the given matcher. 
-func testContainerOutputMatcher(scenarioName string, - c *client.Client, - pod *api.Pod, - containerIndex int, - expectedOutput []string, ns string, - matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) { - +// for all of the containers in the podSpec to move into the 'Success' status. It retrieves +// the exact container log and searches for lines of expected output. +func testContainerOutputInNamespace(scenarioName string, c *client.Client, pod *api.Pod, containerIndex int, expectedOutput []string, ns string) { By(fmt.Sprintf("Creating a pod to test %v", scenarioName)) defer c.Pods(ns).Delete(pod.Name, api.NewDeleteOptions(0)) @@ -1102,7 +1080,7 @@ func testContainerOutputMatcher(scenarioName string, } for _, m := range expectedOutput { - Expect(string(logs)).To(matcher(m), "%q in container output", m) + Expect(string(logs)).To(ContainSubstring(m), "%q in container output", m) } } From 72c7d879735c95d81cbcf42ca5a9a63c1fbff548 Mon Sep 17 00:00:00 2001 From: qiaolei Date: Mon, 7 Sep 2015 22:34:03 +0800 Subject: [PATCH 084/101] fixed a markdown error in the title --- docs/user-guide/getting-into-containers.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/user-guide/getting-into-containers.md b/docs/user-guide/getting-into-containers.md index 5007880f445..541de932527 100644 --- a/docs/user-guide/getting-into-containers.md +++ b/docs/user-guide/getting-into-containers.md @@ -30,7 +30,9 @@ Documentation for other releases can be found at -#Getting into containers: kubectl exec + +# Getting into containers: kubectl exec + Developers can use `kubectl exec` to run commands in a container. This guide demonstrates two use cases. ## Using kubectl exec to check the environment variables of a container From 3081f88929c1cd22606df4be44038d17e9172f02 Mon Sep 17 00:00:00 2001 From: Jian Huang Date: Tue, 8 Sep 2015 17:27:44 +0800 Subject: [PATCH 085/101] Update the KubeConfig Doc Correct the document. --- docs/user-guide/kubeconfig-file.md | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/docs/user-guide/kubeconfig-file.md b/docs/user-guide/kubeconfig-file.md index c84842359ef..4c3d3183d37 100644 --- a/docs/user-guide/kubeconfig-file.md +++ b/docs/user-guide/kubeconfig-file.md @@ -137,20 +137,25 @@ $ kubectl config view produces this output ```yaml +apiVersion: v1 clusters: - local-server: +- cluster: server: http://localhost:8080 + name: local-server contexts: - default-context: +- context: cluster: local-server namespace: the-right-prefix user: myself + name: default-context current-context: default-context +kind: Config preferences: {} users: - myself: - username: admin +- name: myself + user: password: secret + username: admin ``` and a kubeconfig file that looks like this @@ -173,8 +178,8 @@ preferences: {} users: - name: myself user: - username: admin password: secret + username: admin ``` #### Commands for the example file From bd8227b92cd63345a9245b16c2b129b6c786daaf Mon Sep 17 00:00:00 2001 From: Jerzy Szczepkowski Date: Tue, 8 Sep 2015 12:12:28 +0200 Subject: [PATCH 086/101] Memory based horizontal pod autoscaling. Implemented horizontal autoscaling of pods based on memory consumption. 
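The aggregation follows the CPU path that already exists in the Heapster metrics client: take the latest
sample reported for each pod, sum them, and divide by the number of samples. The sketch below only
illustrates that averaging step with made-up names (`metricPoint` and `averageLatest` are not the client's
real types); the actual wiring is in the hunks that follow.

```go
package main

import "fmt"

// metricPoint stands in for a Heapster sample: one raw value per pod at the
// latest timestamp. The real client works with heapster.MetricPoint values.
type metricPoint struct {
	value uint64
}

// averageLatest sums the latest sample from every pod and averages them,
// which is how a single memory ResourceConsumption figure is produced.
func averageLatest(latestPerPod []metricPoint) int64 {
	if len(latestPerPod) == 0 {
		return 0
	}
	var sum uint64
	for _, p := range latestPerPod {
		sum += p.value
	}
	return int64(sum) / int64(len(latestPerPod))
}

func main() {
	// Three pods reporting roughly 20 MB each; the average is what ends up in
	// resource.NewQuantity(value, resource.DecimalSI).
	pods := []metricPoint{{20000000}, {21000000}, {19000000}}
	fmt.Println(averageLatest(pods)) // 20000000
}
```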
--- .../autoscaler/metrics/metrics_client.go | 11 +++- .../autoscaler/metrics/metrics_client_test.go | 57 +++++++++++++------ 2 files changed, 49 insertions(+), 19 deletions(-) diff --git a/pkg/controller/autoscaler/metrics/metrics_client.go b/pkg/controller/autoscaler/metrics/metrics_client.go index b061a0c209c..cae98e33ee2 100644 --- a/pkg/controller/autoscaler/metrics/metrics_client.go +++ b/pkg/controller/autoscaler/metrics/metrics_client.go @@ -75,7 +75,6 @@ func NewHeapsterMetricsClient(client client.Interface) *HeapsterMetricsClient { } var heapsterMetricDefinitions = map[api.ResourceName]metricDefinition{ - //TODO: add memory api.ResourceCPU: {"cpu-usage", func(metrics heapster.MetricResultList) (expapi.ResourceConsumption, int) { sum, count := calculateSumFromLatestSample(metrics) @@ -86,6 +85,15 @@ var heapsterMetricDefinitions = map[api.ResourceName]metricDefinition{ } return expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(value)}, count }}, + api.ResourceMemory: {"memory-usage", + func(metrics heapster.MetricResultList) (expapi.ResourceConsumption, int) { + sum, count := calculateSumFromLatestSample(metrics) + value := int64(0) + if count > 0 { + value = int64(sum) / int64(count) + } + return expapi.ResourceConsumption{Resource: api.ResourceMemory, Quantity: *resource.NewQuantity(value, resource.DecimalSI)}, count + }}, } func (h *HeapsterMetricsClient) ResourceConsumption(namespace string) ResourceConsumptionClient { @@ -111,7 +119,6 @@ func (h *HeapsterResourceConsumptionClient) Get(resourceName api.ResourceName, s } func (h *HeapsterResourceConsumptionClient) getForPods(resourceName api.ResourceName, podNames []string) (*expapi.ResourceConsumption, error) { - metricSpec, metricDefined := h.resourceDefinitions[resourceName] if !metricDefined { return nil, fmt.Errorf("heapster metric not defined for %v", resourceName) diff --git a/pkg/controller/autoscaler/metrics/metrics_client_test.go b/pkg/controller/autoscaler/metrics/metrics_client_test.go index 155842965cf..38211856c14 100644 --- a/pkg/controller/autoscaler/metrics/metrics_client_test.go +++ b/pkg/controller/autoscaler/metrics/metrics_client_test.go @@ -38,10 +38,13 @@ import ( ) const ( - namespace = "test-namespace" - podName = "pod1" - podListHandler = "podlisthandler" - heapsterHandler = "heapsterhandler" + namespace = "test-namespace" + podName = "pod1" + podListHandler = "podlisthandler" + heapsterCpuHandler = "heapstercpuhandler" + heapsterMemHandler = "heapstermemhandler" + cpu = 650 + memory = 20000000 ) type serverResponse struct { @@ -50,7 +53,6 @@ type serverResponse struct { } func makeTestServer(t *testing.T, responses map[string]*serverResponse) (*httptest.Server, map[string]*util.FakeHandler) { - handlers := map[string]*util.FakeHandler{} mux := http.NewServeMux() @@ -78,10 +80,16 @@ func makeTestServer(t *testing.T, responses map[string]*serverResponse) (*httpte handlers[podListHandler] = mkHandler(fmt.Sprintf("/api/v1/namespaces/%s/pods", namespace), *responses[podListHandler]) } - if responses[heapsterHandler] != nil { - handlers[heapsterHandler] = mkRawHandler( + if responses[heapsterCpuHandler] != nil { + handlers[heapsterCpuHandler] = mkRawHandler( fmt.Sprintf("/api/v1/proxy/namespaces/kube-system/services/monitoring-heapster/api/v1/model/namespaces/%s/pod-list/%s/metrics/cpu-usage", - namespace, podName), *responses[heapsterHandler]) + namespace, podName), *responses[heapsterCpuHandler]) + } + + if responses[heapsterMemHandler] != nil { + 
handlers[heapsterMemHandler] = mkRawHandler( + fmt.Sprintf("/api/v1/proxy/namespaces/kube-system/services/monitoring-heapster/api/v1/model/namespaces/%s/pod-list/%s/metrics/memory-usage", + namespace, podName), *responses[heapsterMemHandler]) } mux.HandleFunc("/", func(res http.ResponseWriter, req *http.Request) { @@ -92,7 +100,6 @@ func makeTestServer(t *testing.T, responses map[string]*serverResponse) (*httpte } func TestHeapsterResourceConsumptionGet(t *testing.T) { - podListResponse := serverResponse{http.StatusOK, &api.PodList{ Items: []api.Pod{ { @@ -103,19 +110,29 @@ func TestHeapsterResourceConsumptionGet(t *testing.T) { }}}} timestamp := time.Now() - metrics := heapster.MetricResultList{ + metricsCpu := heapster.MetricResultList{ Items: []heapster.MetricResult{{ - Metrics: []heapster.MetricPoint{{timestamp, 650}}, + Metrics: []heapster.MetricPoint{{timestamp, cpu}}, LatestTimestamp: timestamp, }}} - heapsterRawResponse, _ := json.Marshal(&metrics) - heapsterStrResponse := string(heapsterRawResponse) - heapsterResponse := serverResponse{http.StatusOK, &heapsterStrResponse} + heapsterRawCpuResponse, _ := json.Marshal(&metricsCpu) + heapsterStrCpuResponse := string(heapsterRawCpuResponse) + heapsterCpuResponse := serverResponse{http.StatusOK, &heapsterStrCpuResponse} + + metricsMem := heapster.MetricResultList{ + Items: []heapster.MetricResult{{ + Metrics: []heapster.MetricPoint{{timestamp, memory}}, + LatestTimestamp: timestamp, + }}} + heapsterRawMemResponse, _ := json.Marshal(&metricsMem) + heapsterStrMemResponse := string(heapsterRawMemResponse) + heapsterMemResponse := serverResponse{http.StatusOK, &heapsterStrMemResponse} testServer, _ := makeTestServer(t, map[string]*serverResponse{ - heapsterHandler: &heapsterResponse, - podListHandler: &podListResponse, + heapsterCpuHandler: &heapsterCpuResponse, + heapsterMemHandler: &heapsterMemResponse, + podListHandler: &podListResponse, }) defer testServer.Close() @@ -127,5 +144,11 @@ func TestHeapsterResourceConsumptionGet(t *testing.T) { if err != nil { t.Fatalf("Error while getting consumption: %v", err) } - assert.Equal(t, int64(650), val.Quantity.MilliValue()) + assert.Equal(t, int64(cpu), val.Quantity.MilliValue()) + + val, err = metricsClient.ResourceConsumption(namespace).Get(api.ResourceMemory, map[string]string{"app": "test"}) + if err != nil { + t.Fatalf("Error while getting consumption: %v", err) + } + assert.Equal(t, int64(memory), val.Quantity.Value()) } From 3ddbb7d96b60ebe25c870d502090a11323ba2dcf Mon Sep 17 00:00:00 2001 From: Ewa Socala Date: Mon, 7 Sep 2015 12:26:23 +0200 Subject: [PATCH 087/101] Horizontal Pod Autoscaling e2e tests --- test/e2e/autoscaling_utils.go | 55 ++++++++--- test/e2e/horizontal_pod_autoscaling.go | 123 +++++++++++++++++++++++++ 2 files changed, 163 insertions(+), 15 deletions(-) create mode 100644 test/e2e/horizontal_pod_autoscaling.go diff --git a/test/e2e/autoscaling_utils.go b/test/e2e/autoscaling_utils.go index fc7dbeea22e..89e5755fb7e 100644 --- a/test/e2e/autoscaling_utils.go +++ b/test/e2e/autoscaling_utils.go @@ -35,43 +35,45 @@ const ( targetPort = 8080 timeoutRC = 120 * time.Second image = "gcr.io/google_containers/resource_consumer:alpha" + rcIsNil = "ERROR: replicationController = nil" ) /* -ConsumingRC is a tool for testing. It helps create specified usage of CPU or memory (Warnig: memory not supported) +ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warnig: memory not supported) typical use case: rc.ConsumeCPU(600) // ... 
check your assumption here rc.ConsumeCPU(300) // ... check your assumption here */ -type ConsumingRC struct { +type ResourceConsumer struct { name string framework *Framework channel chan int stop chan int } -// NewConsumingRC creates new ConsumingRC -func NewConsumingRC(name string, replicas int, framework *Framework) *ConsumingRC { - startService(framework.Client, framework.Namespace.Name, name, replicas) - rc := &ConsumingRC{ +// NewResourceConsumer creates new ResourceConsumer +// cpu argument is in milicores +func NewResourceConsumer(name string, replicas int, cpu int, framework *Framework) *ResourceConsumer { + runServiceAndRCForResourceConsumer(framework.Client, framework.Namespace.Name, name, replicas) + rc := &ResourceConsumer{ name: name, framework: framework, channel: make(chan int), stop: make(chan int), } go rc.makeConsumeCPURequests() - rc.ConsumeCPU(0) + rc.ConsumeCPU(cpu) return rc } // ConsumeCPU consumes given number of CPU -func (rc *ConsumingRC) ConsumeCPU(milicores int) { +func (rc *ResourceConsumer) ConsumeCPU(milicores int) { rc.channel <- milicores } -func (rc *ConsumingRC) makeConsumeCPURequests() { +func (rc *ResourceConsumer) makeConsumeCPURequests() { defer GinkgoRecover() var count int var rest int @@ -93,14 +95,14 @@ func (rc *ConsumingRC) makeConsumeCPURequests() { } } -func (rc *ConsumingRC) sendConsumeCPUrequests(requests, milicores, durationSec int) { +func (rc *ResourceConsumer) sendConsumeCPUrequests(requests, milicores, durationSec int) { for i := 0; i < requests; i++ { go rc.sendOneConsumeCPUrequest(milicores, durationSec) } } // sendOneConsumeCPUrequest sends POST request for cpu consumption -func (rc *ConsumingRC) sendOneConsumeCPUrequest(milicores int, durationSec int) { +func (rc *ResourceConsumer) sendOneConsumeCPUrequest(milicores int, durationSec int) { _, err := rc.framework.Client.Post(). Prefix("proxy"). Namespace(rc.framework.Namespace.Name). 
@@ -114,14 +116,37 @@ func (rc *ConsumingRC) sendOneConsumeCPUrequest(milicores int, durationSec int) expectNoError(err) } -func (rc *ConsumingRC) CleanUp() { +func (rc *ResourceConsumer) GetReplicas() int { + replicationController, err := rc.framework.Client.ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name) + expectNoError(err) + if replicationController == nil { + Failf(rcIsNil) + } + return replicationController.Status.Replicas +} + +func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int) { + timeout := 10 * time.Minute + for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) { + if desiredReplicas == rc.GetReplicas() { + Logf("Replication Controller current replicas number is equal to desired replicas number: %d", desiredReplicas) + return + } else { + Logf("Replication Controller current replicas number %d waiting to be %d", rc.GetReplicas(), desiredReplicas) + } + } + Failf("timeout waiting %v for pods size to be %d", timeout, desiredReplicas) +} + +func (rc *ResourceConsumer) CleanUp() { rc.stop <- 0 expectNoError(DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.name)) expectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name)) + expectNoError(rc.framework.Client.Experimental().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Delete(rc.name, api.NewDeleteOptions(0))) } -func startService(c *client.Client, ns, name string, replicas int) { - c.Services(ns).Create(&api.Service{ +func runServiceAndRCForResourceConsumer(c *client.Client, ns, name string, replicas int) { + _, err := c.Services(ns).Create(&api.Service{ ObjectMeta: api.ObjectMeta{ Name: name, }, @@ -136,7 +161,7 @@ func startService(c *client.Client, ns, name string, replicas int) { }, }, }) - + expectNoError(err) config := RCConfig{ Client: c, Image: image, diff --git a/test/e2e/horizontal_pod_autoscaling.go b/test/e2e/horizontal_pod_autoscaling.go new file mode 100644 index 00000000000..771e05e1ba2 --- /dev/null +++ b/test/e2e/horizontal_pod_autoscaling.go @@ -0,0 +1,123 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "time" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/expapi" + + . 
"github.com/onsi/ginkgo" +) + +const ( + sleep = 10 * time.Minute +) + +var _ = Describe("Horizontal pod autoscaling", func() { + var rc *ResourceConsumer + f := NewFramework("horizontal-pod-autoscaling") + + BeforeEach(func() { + Skip("Skipped Horizontal pod autoscaling test") + }) + + AfterEach(func() { + }) + + It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods (scale resource: CPU)", func() { + rc = NewResourceConsumer("rc", 1, 700, f) + createHorizontalPodAutoscaler(rc, "0.3") + rc.WaitForReplicas(3) + rc.CleanUp() + }) + + It("[Skipped][Horizontal pod autoscaling Suite] should scale from 3 pods to 1 pod (scale resource: CPU)", func() { + rc = NewResourceConsumer("rc", 3, 0, f) + createHorizontalPodAutoscaler(rc, "0.7") + rc.WaitForReplicas(1) + rc.CleanUp() + }) + + It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to maximum 5 pods (scale resource: CPU)", func() { + rc = NewResourceConsumer("rc", 1, 700, f) + createHorizontalPodAutoscaler(rc, "0.1") + rc.WaitForReplicas(5) + rc.CleanUp() + }) + + It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 1 (scale resource: CPU)", func() { + rc = NewResourceConsumer("rc", 1, 700, f) + createHorizontalPodAutoscaler(rc, "0.3") + rc.WaitForReplicas(3) + rc.ConsumeCPU(300) + rc.WaitForReplicas(1) + rc.CleanUp() + }) + + It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: CPU)", func() { + rc = NewResourceConsumer("rc", 1, 300, f) + createHorizontalPodAutoscaler(rc, "0.1") + rc.WaitForReplicas(3) + rc.ConsumeCPU(700) + rc.WaitForReplicas(5) + rc.CleanUp() + }) + + It("[Skipped][Horizontal pod autoscaling Suite] should scale from 3 pods to 1 pod and from 1 to 3 (scale resource: CPU)", func() { + rc = NewResourceConsumer("rc", 3, 0, f) + createHorizontalPodAutoscaler(rc, "0.3") + rc.WaitForReplicas(1) + rc.ConsumeCPU(700) + rc.WaitForReplicas(3) + rc.CleanUp() + }) + + It("[Skipped][Horizontal pod autoscaling Suite] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: CPU)", func() { + rc = NewResourceConsumer("rc", 5, 700, f) + createHorizontalPodAutoscaler(rc, "0.3") + rc.WaitForReplicas(3) + rc.ConsumeCPU(100) + rc.WaitForReplicas(1) + rc.CleanUp() + }) + +}) + +func createHorizontalPodAutoscaler(rc *ResourceConsumer, cpu string) { + hpa := &expapi.HorizontalPodAutoscaler{ + ObjectMeta: api.ObjectMeta{ + Name: rc.name, + Namespace: rc.framework.Namespace.Name, + }, + Spec: expapi.HorizontalPodAutoscalerSpec{ + ScaleRef: &expapi.SubresourceReference{ + Kind: "replicationController", + Name: rc.name, + Namespace: rc.framework.Namespace.Name, + Subresource: "scale", + }, + MinCount: 1, + MaxCount: 5, + Target: expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(cpu)}, + }, + } + _, errHPA := rc.framework.Client.Experimental().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa) + expectNoError(errHPA) +} From 6cdeb7a711d9a277e234c905b6f2b105b212de6f Mon Sep 17 00:00:00 2001 From: Wojciech Tyczynski Date: Tue, 8 Sep 2015 12:05:14 +0200 Subject: [PATCH 088/101] Test whether pod startup latency is good enough. 
--- test/e2e/density.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/test/e2e/density.go b/test/e2e/density.go index c2fc23761b4..00c7ad44ad2 100644 --- a/test/e2e/density.go +++ b/test/e2e/density.go @@ -148,7 +148,6 @@ var _ = Describe("Density", func() { expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after")) // Verify latency metrics - // TODO: We should reset metrics before the test. Currently previous tests influence latency metrics. highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second, util.NewStringSet("events")) expectNoError(err) Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests") @@ -386,6 +385,16 @@ var _ = Describe("Density", func() { printLatencies(schedToWatchLag, "worst scheduled-to-end total latencies") printLatencies(e2eLag, "worst e2e total latencies") + // Test whether e2e pod startup time is acceptable. + // TODO: Switch it to 5 seconds once we are sure our tests are passing. + podStartupThreshold := 8 * time.Second + e2ePodStartupTime50perc := e2eLag[len(e2eLag)/2].Latency + e2ePodStartupTime90perc := e2eLag[len(e2eLag)*9/10].Latency + e2ePodStartupTime99perc := e2eLag[len(e2eLag)*99/100].Latency + Expect(e2ePodStartupTime50perc < podStartupThreshold).To(Equal(true), "Too high pod startup time 50th percentile") + Expect(e2ePodStartupTime90perc < podStartupThreshold).To(Equal(true), "Too high pod startup time 90th percentile") + Expect(e2ePodStartupTime99perc < podStartupThreshold).To(Equal(true), "Too high pod startup time 99th percentile") + // Log suspicious latency metrics/docker errors from all nodes that had slow startup times for _, l := range startupLag { if l.Latency > NodeStartupThreshold { From 4936b9d5d2fd61cc135cf2b5b0fa60d90a48af5c Mon Sep 17 00:00:00 2001 From: qiaolei Date: Tue, 8 Sep 2015 21:14:30 +0800 Subject: [PATCH 089/101] Add PodPhase 'Unknown' Add PodPhase 'Unknown' and its corresponding description. --- docs/user-guide/pod-states.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/user-guide/pod-states.md b/docs/user-guide/pod-states.md index 16b32d6e27a..35b0824e45f 100644 --- a/docs/user-guide/pod-states.md +++ b/docs/user-guide/pod-states.md @@ -47,6 +47,7 @@ The number and meanings of `PodPhase` values are tightly guarded. Other than wh * Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. * Succeeded: All containers in the pod have terminated in success, and will not be restarted. * Failed: All containers in the pod have terminated, at least one container has terminated in failure (exited with non-zero exit status or was terminated by the system). +* Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod. ## Pod Conditions From 599100dc647ec1303a98457cf4b127ec05f4624e Mon Sep 17 00:00:00 2001 From: Avesh Agarwal Date: Tue, 8 Sep 2015 09:33:42 -0400 Subject: [PATCH 090/101] Fixed kube-proxy birthCry event as it seemed to start too early and had a possibility to not start successfully after the event was already posted. 
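The fix is purely about ordering: post the event only after the proxier has actually been constructed, so a
failure during setup can no longer leave a birth cry behind for a proxy that never came up. A minimal sketch
of that ordering (the types and helpers here are illustrative, not the proxy server's real fields):

```go
package main

import (
	"errors"
	"fmt"
)

type proxyServer struct{}

func (s *proxyServer) birthCry() { fmt.Println("event: Starting kube-proxy") }

// buildProxier stands in for the setup work that can fail before the proxy is usable.
func buildProxier() error { return errors.New("unable to create proxier") }

// run announces the birth only after construction succeeds, mirroring the move below.
func run(s *proxyServer) error {
	if err := buildProxier(); err != nil {
		return err // no event is posted for a proxy that never started
	}
	s.birthCry()
	return nil
}

func main() {
	if err := run(&proxyServer{}); err != nil {
		fmt.Println("failed:", err)
	}
}
```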
--- cmd/kube-proxy/app/server.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index fbe7d02262b..306a88ccf5b 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -159,9 +159,6 @@ func (s *ProxyServer) Run(_ []string) error { Namespace: "", } - // Birth Cry - s.birthCry() - serviceConfig := config.NewServiceConfig() endpointsConfig := config.NewEndpointsConfig() @@ -208,6 +205,9 @@ func (s *ProxyServer) Run(_ []string) error { iptables.CleanupLeftovers(ipt) } + // Birth Cry after the birth is successful + s.birthCry() + // Wire proxier to handle changes to services serviceConfig.RegisterHandler(proxier) // And wire endpointsHandler to handle changes to endpoints to services From 4283201aea57ecbe2d8df231d9f18c8f35bb571a Mon Sep 17 00:00:00 2001 From: jay vyas Date: Tue, 8 Sep 2015 09:50:15 -0400 Subject: [PATCH 091/101] [minor] cert file cmd line string fix --- cmd/kube-apiserver/app/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 954d11364d9..99601f47e67 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -233,7 +233,7 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) { fs.BoolVar(&s.KubeletConfig.EnableHttps, "kubelet-https", s.KubeletConfig.EnableHttps, "Use https for kubelet connections") fs.UintVar(&s.KubeletConfig.Port, "kubelet-port", s.KubeletConfig.Port, "Kubelet port") fs.DurationVar(&s.KubeletConfig.HTTPTimeout, "kubelet-timeout", s.KubeletConfig.HTTPTimeout, "Timeout for kubelet operations") - fs.StringVar(&s.KubeletConfig.CertFile, "kubelet-client-certificate", s.KubeletConfig.CertFile, "Path to a client key file for TLS.") + fs.StringVar(&s.KubeletConfig.CertFile, "kubelet-client-certificate", s.KubeletConfig.CertFile, "Path to a client cert file for TLS.") fs.StringVar(&s.KubeletConfig.KeyFile, "kubelet-client-key", s.KubeletConfig.KeyFile, "Path to a client key file for TLS.") fs.StringVar(&s.KubeletConfig.CAFile, "kubelet-certificate-authority", s.KubeletConfig.CAFile, "Path to a cert. file for the certificate authority.") } From 9fdc2510061cf90d111d16e642f1c909f039d055 Mon Sep 17 00:00:00 2001 From: qiaolei Date: Tue, 8 Sep 2015 22:40:45 +0800 Subject: [PATCH 092/101] Fixed a markdown error in rackspace.md --- docs/getting-started-guides/rackspace.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/getting-started-guides/rackspace.md b/docs/getting-started-guides/rackspace.md index c0066c582bb..a4ecae09b17 100644 --- a/docs/getting-started-guides/rackspace.md +++ b/docs/getting-started-guides/rackspace.md @@ -79,6 +79,7 @@ The current cluster design is inspired by: ## Cluster There is a specific `cluster/rackspace` directory with the scripts for the following steps: + 1. A cloud network will be created and all instances will be attached to this network. - flanneld uses this network for next hop routing. These routes allow the containers running on each node to communicate with one another on this private network. 2. A SSH key will be created and uploaded if needed. This key must be used to ssh into the machines (we do not capture the password). 
From 7afec6b225400599f9fc49a356df39f4b1a80f94 Mon Sep 17 00:00:00 2001 From: derekwaynecarr Date: Wed, 2 Sep 2015 13:42:27 -0400 Subject: [PATCH 093/101] Improve reliability of salt-minion on vagrant master --- cluster/vagrant/util.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh index f02097912e4..10cae2dd2f8 100644 --- a/cluster/vagrant/util.sh +++ b/cluster/vagrant/util.sh @@ -199,6 +199,9 @@ function verify-cluster { local machine="master" local -a required_daemon=("salt-master" "salt-minion" "kubelet") local validated="1" + # This is a hack, but sometimes the salt-minion gets stuck on the master, so we just restart it + # to ensure that users never wait forever + vagrant ssh "$machine" -c "sudo systemctl restart salt-minion" until [[ "$validated" == "0" ]]; do validated="0" local daemon From 10c81790300d73ac938dc0b5b1f17edbfef15a9a Mon Sep 17 00:00:00 2001 From: Chao Xu Date: Fri, 4 Sep 2015 17:28:32 -0700 Subject: [PATCH 094/101] enable verify-description.sh for pkg/expapi/v1/types.go --- hack/after-build/verify-description.sh | 5 ++- pkg/expapi/v1/types.go | 33 ++++++++++++----- pkg/expapi/v1/types_swagger_doc_generated.go | 39 +++++++++++++------- 3 files changed, 53 insertions(+), 24 deletions(-) diff --git a/hack/after-build/verify-description.sh b/hack/after-build/verify-description.sh index b2c2b0fbc2c..f23e2b23245 100755 --- a/hack/after-build/verify-description.sh +++ b/hack/after-build/verify-description.sh @@ -38,7 +38,10 @@ find_files() { -o -wholename '*/third_party/*' \ -o -wholename '*/Godeps/*' \ \) -prune \ - \) -wholename '*pkg/api/v*/types.go' + \) \ + \( -wholename '*pkg/api/v*/types.go' \ + -o -wholename '*pkg/expapi/v*/types.go' \ + \) } if [[ $# -eq 0 ]]; then diff --git a/pkg/expapi/v1/types.go b/pkg/expapi/v1/types.go index 4d4af72a088..19fb02b15a1 100644 --- a/pkg/expapi/v1/types.go +++ b/pkg/expapi/v1/types.go @@ -71,7 +71,9 @@ type SubresourceReference struct { // ResourceConsumption is an object for specifying average resource consumption of a particular resource. type ResourceConsumption struct { - Resource v1.ResourceName `json:"resource,omitempty"` + // Resource specifies either the name of the target resource when present in the spec, or the name of the observed resource when present in the status. + Resource v1.ResourceName `json:"resource,omitempty"` + // Quantity specifies either the target average consumption of the resource when present in the spec, or the observed average consumption when present in the status. Quantity resource.Quantity `json:"quantity,omitempty"` } @@ -109,7 +111,8 @@ type HorizontalPodAutoscalerStatus struct { // HorizontalPodAutoscaler represents the configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { - v1.TypeMeta `json:",inline"` + v1.TypeMeta `json:",inline"` + // Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty"` // Spec defines the behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status. @@ -119,12 +122,13 @@ type HorizontalPodAutoscaler struct { Status *HorizontalPodAutoscalerStatus `json:"status,omitempty"` } -// HorizontalPodAutoscaler is a collection of pod autoscalers. +// HorizontalPodAutoscalerList is a list of HorizontalPodAutoscalers. type HorizontalPodAutoscalerList struct { v1.TypeMeta `json:",inline"` + // Standard list metadata. 
v1.ListMeta `json:"metadata,omitempty"` - // Items is the list of horizontal pod autoscalers. + // Items is the list of HorizontalPodAutoscalers. Items []HorizontalPodAutoscaler `json:"items"` } @@ -143,13 +147,14 @@ type ThirdPartyResource struct { Versions []APIVersion `json:"versions,omitempty"` } +// ThirdPartyResourceList is a list of ThirdPartyResources. type ThirdPartyResourceList struct { v1.TypeMeta `json:",inline"` // Standard list metadata. v1.ListMeta `json:"metadata,omitempty"` - // Items is the list of horizontal pod autoscalers. + // Items is the list of ThirdPartyResources. Items []ThirdPartyResource `json:"items"` } @@ -172,8 +177,10 @@ type ThirdPartyResourceData struct { Data []byte `json:"name,omitempty"` } +// Deployment enables declarative updates for Pods and ReplicationControllers. type Deployment struct { - v1.TypeMeta `json:",inline"` + v1.TypeMeta `json:",inline"` + // Standard object metadata. v1.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the Deployment. @@ -183,6 +190,7 @@ type Deployment struct { Status DeploymentStatus `json:"status,omitempty"` } +// DeploymentSpec is the specification of the desired behavior of the Deployment. type DeploymentSpec struct { // Number of desired pods. This is a pointer to distinguish between explicit // zero and not specified. Defaults to 1. @@ -209,6 +217,7 @@ type DeploymentSpec struct { UniqueLabelKey *string `json:"uniqueLabelKey,omitempty"` } +// DeploymentStrategy describes how to replace existing pods with new ones. type DeploymentStrategy struct { // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. Type DeploymentType `json:"type,omitempty"` @@ -263,6 +272,7 @@ type RollingUpdateDeployment struct { MinReadySeconds int `json:"minReadySeconds,omitempty"` } +// DeploymentStatus is the most recently observed status of the Deployment. type DeploymentStatus struct { // Total number of ready pods targeted by this deployment (this // includes both the old and new pods). @@ -272,11 +282,13 @@ type DeploymentStatus struct { UpdatedReplicas int `json:"updatedReplicas,omitempty"` } +// DeploymentList is a list of Deployments. type DeploymentList struct { v1.TypeMeta `json:",inline"` + // Standard list metadata. v1.ListMeta `json:"metadata,omitempty"` - // Items is the list of deployments. + // Items is the list of Deployments. Items []Deployment `json:"items"` } @@ -330,23 +342,24 @@ type Daemon struct { Status DaemonStatus `json:"status,omitempty"` } -// DaemonList is a collection of daemon. +// DaemonList is a list of Daemons. type DaemonList struct { v1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata v1.ListMeta `json:"metadata,omitempty"` - // Items is a list of daemons. + // Items is the list of Daemons. Items []Daemon `json:"items"` } +// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. type ThirdPartyResourceDataList struct { v1.TypeMeta `json:",inline"` // Standard list metadata // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata v1.ListMeta `json:"metadata,omitempty"` - // Items is a list of third party objects + // Items is the list of ThirdpartyResourceData. 
Items []ThirdPartyResourceData `json:"items"` } diff --git a/pkg/expapi/v1/types_swagger_doc_generated.go b/pkg/expapi/v1/types_swagger_doc_generated.go index 40497a50a9c..584d693f198 100644 --- a/pkg/expapi/v1/types_swagger_doc_generated.go +++ b/pkg/expapi/v1/types_swagger_doc_generated.go @@ -49,9 +49,9 @@ func (Daemon) SwaggerDoc() map[string]string { } var map_DaemonList = map[string]string{ - "": "DaemonList is a collection of daemon.", + "": "DaemonList is a list of Daemons.", "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "items": "Items is a list of daemons.", + "items": "Items is the list of Daemons.", } func (DaemonList) SwaggerDoc() map[string]string { @@ -80,8 +80,10 @@ func (DaemonStatus) SwaggerDoc() map[string]string { } var map_Deployment = map[string]string{ - "spec": "Specification of the desired behavior of the Deployment.", - "status": "Most recently observed status of the Deployment.", + "": "Deployment enables declarative updates for Pods and ReplicationControllers.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", } func (Deployment) SwaggerDoc() map[string]string { @@ -89,7 +91,9 @@ func (Deployment) SwaggerDoc() map[string]string { } var map_DeploymentList = map[string]string{ - "items": "Items is the list of deployments.", + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", } func (DeploymentList) SwaggerDoc() map[string]string { @@ -97,6 +101,7 @@ func (DeploymentList) SwaggerDoc() map[string]string { } var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", "selector": "Label selector for pods. Existing ReplicationControllers whose pods are selected by this will be scaled down.", "template": "Template describes the pods that will be created.", @@ -109,6 +114,7 @@ func (DeploymentSpec) SwaggerDoc() map[string]string { } var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", "replicas": "Total number of ready pods targeted by this deployment (this includes both the old and new pods).", "updatedReplicas": "Total number of new ready pods with the desired template spec.", } @@ -118,6 +124,7 @@ func (DeploymentStatus) SwaggerDoc() map[string]string { } var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", "rollingUpdate": "Rolling update config params. Present only if DeploymentType = RollingUpdate.", } @@ -127,9 +134,10 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { } var map_HorizontalPodAutoscaler = map[string]string{ - "": "HorizontalPodAutoscaler represents the configuration of a horizontal pod autoscaler.", - "spec": "Spec defines the behaviour of autoscaler. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", - "status": "Status represents the current information about the autoscaler.", + "": "HorizontalPodAutoscaler represents the configuration of a horizontal pod autoscaler.", + "metadata": "Standard object metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", + "spec": "Spec defines the behaviour of autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.", + "status": "Status represents the current information about the autoscaler.", } func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { @@ -137,8 +145,9 @@ func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { } var map_HorizontalPodAutoscalerList = map[string]string{ - "": "HorizontalPodAutoscaler is a collection of pod autoscalers.", - "items": "Items is the list of horizontal pod autoscalers.", + "": "HorizontalPodAutoscalerList is a list of HorizontalPodAutoscalers.", + "metadata": "Standard list metadata.", + "items": "Items is the list of HorizontalPodAutoscalers.", } func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { @@ -178,7 +187,9 @@ func (ReplicationControllerDummy) SwaggerDoc() map[string]string { } var map_ResourceConsumption = map[string]string{ - "": "ResourceConsumption is an object for specifying average resource consumption of a particular resource.", + "": "ResourceConsumption is an object for specifying average resource consumption of a particular resource.", + "resource": "Resource specifies either the name of the target resource when present in the spec, or the name of the observed resource when present in the status.", + "quantity": "Quantity specifies either the target average consumption of the resource when present in the spec, or the observed average consumption when present in the status.", } func (ResourceConsumption) SwaggerDoc() map[string]string { @@ -261,8 +272,9 @@ func (ThirdPartyResourceData) SwaggerDoc() map[string]string { } var map_ThirdPartyResourceDataList = map[string]string{ + "": "ThirdPartyResrouceDataList is a list of ThirdPartyResourceData.", "metadata": "Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "items": "Items is a list of third party objects", + "items": "Items is the list of ThirdpartyResourceData.", } func (ThirdPartyResourceDataList) SwaggerDoc() map[string]string { @@ -270,8 +282,9 @@ func (ThirdPartyResourceDataList) SwaggerDoc() map[string]string { } var map_ThirdPartyResourceList = map[string]string{ + "": "ThirdPartyResourceList is a list of ThirdPartyResources.", "metadata": "Standard list metadata.", - "items": "Items is the list of horizontal pod autoscalers.", + "items": "Items is the list of ThirdPartyResources.", } func (ThirdPartyResourceList) SwaggerDoc() map[string]string { From 502492f3ed368a6797c045fc41910ea8c7c72a1b Mon Sep 17 00:00:00 2001 From: Paul Morie Date: Tue, 8 Sep 2015 13:18:28 -0400 Subject: [PATCH 095/101] Make serialization tests easier to debug --- pkg/api/serialization_test.go | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/pkg/api/serialization_test.go b/pkg/api/serialization_test.go index 6eed0666be9..fd69b276c1e 100644 --- a/pkg/api/serialization_test.go +++ b/pkg/api/serialization_test.go @@ -69,7 +69,7 @@ func roundTrip(t *testing.T, codec runtime.Codec, item runtime.Object) { return } if !api.Semantic.DeepEqual(item, obj2) { - 
t.Errorf("1: %v: diff: %v\nCodec: %v\nData: %s\nSource: %#v\nFinal: %#v", name, util.ObjectGoPrintDiff(item, obj2), codec, string(data), printer.Sprintf("%#v", item), printer.Sprintf("%#v", obj2)) + t.Errorf("1: %v: diff: %v\nCodec: %v\nSource:\n\n%#v\n\nEncoded:\n\n%s\n\nFinal:\n\n%#v", name, util.ObjectGoPrintDiff(item, obj2), codec, printer.Sprintf("%#v", item), string(data), printer.Sprintf("%#v", obj2)) return } @@ -103,12 +103,7 @@ func TestSpecificKind(t *testing.T) { defer api.Scheme.Log(nil) kind := "PodList" - item, err := api.Scheme.New("", kind) - if err != nil { - t.Errorf("Couldn't make a %v? %v", kind, err) - return - } - roundTripSame(t, item) + doRoundTripTest(kind, t) } func TestList(t *testing.T) { @@ -138,21 +133,25 @@ func TestRoundTripTypes(t *testing.T) { } // Try a few times, since runTest uses random values. for i := 0; i < *fuzzIters; i++ { - item, err := api.Scheme.New("", kind) - if err != nil { - t.Fatalf("Couldn't make a %v? %v", kind, err) - } - if _, err := meta.TypeAccessor(item); err != nil { - t.Fatalf("%q is not a TypeMeta and cannot be tested - add it to nonRoundTrippableTypes: %v", kind, err) - } - roundTripSame(t, item, nonRoundTrippableTypesByVersion[kind]...) - if !nonInternalRoundTrippableTypes.Has(kind) { - roundTrip(t, api.Codec, fuzzInternalObject(t, "", item, rand.Int63())) - } + doRoundTripTest(kind, t) } } } +func doRoundTripTest(kind string, t *testing.T) { + item, err := api.Scheme.New("", kind) + if err != nil { + t.Fatalf("Couldn't make a %v? %v", kind, err) + } + if _, err := meta.TypeAccessor(item); err != nil { + t.Fatalf("%q is not a TypeMeta and cannot be tested - add it to nonRoundTrippableTypes: %v", kind, err) + } + roundTripSame(t, item, nonRoundTrippableTypesByVersion[kind]...) + if !nonInternalRoundTrippableTypes.Has(kind) { + roundTrip(t, api.Codec, fuzzInternalObject(t, "", item, rand.Int63())) + } +} + func TestEncode_Ptr(t *testing.T) { grace := int64(30) pod := &api.Pod{ From 6d96b63e628ac78d5205964d8e00a49a36b9cc16 Mon Sep 17 00:00:00 2001 From: Paul Morie Date: Tue, 8 Sep 2015 13:37:12 -0400 Subject: [PATCH 096/101] Update api change docs --- docs/devel/api_changes.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/devel/api_changes.md b/docs/devel/api_changes.md index d26fdda9db7..45f0dd4c9e7 100644 --- a/docs/devel/api_changes.md +++ b/docs/devel/api_changes.md @@ -399,6 +399,10 @@ The conversion code resides with each versioned API. There are two files: functions - `pkg/api//conversion_generated.go` containing auto-generated conversion functions + - `pkg/expapi//conversion.go` containing manually written conversion + functions + - `pkg/expapi//conversion_generated.go` containing auto-generated + conversion functions Since auto-generated conversion functions are using manually written ones, those manually written should be named with a defined convention, i.e. a function @@ -433,6 +437,7 @@ of your versioned api objects. 
The deep copy code resides with each versioned API: - `pkg/api//deep_copy_generated.go` containing auto-generated copy functions + - `pkg/expapi//deep_copy_generated.go` containing auto-generated copy functions To regenerate them: - run From 7f5e70022b505a4de6cb8aed3ea0ed20c4785ba9 Mon Sep 17 00:00:00 2001 From: Paul Morie Date: Tue, 8 Sep 2015 14:03:45 -0400 Subject: [PATCH 097/101] Fix typo in kube-proxy server.go --- cmd/kube-proxy/app/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index fbe7d02262b..c0d573493a0 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -200,7 +200,7 @@ func (s *ProxyServer) Run(_ []string) error { ipt := utiliptables.New(execer, protocol) proxierUserspace, err := userspace.NewProxier(loadBalancer, s.BindAddress, ipt, s.PortRange, s.SyncPeriod) if err != nil { - glog.Fatalf("Unable to create proxer: %v", err) + glog.Fatalf("Unable to create proxier: %v", err) } proxier = proxierUserspace // Remove artifacts from the pure-iptables Proxier. From b05c11edac93d869ae9669f9d6dfd05f1372b3df Mon Sep 17 00:00:00 2001 From: derekwaynecarr Date: Tue, 8 Sep 2015 14:12:08 -0400 Subject: [PATCH 098/101] Fix typo for milicores to millicores --- test/e2e/autoscaling_utils.go | 20 +++++++++---------- .../consume-cpu/consume_cpu.go | 8 ++++---- test/images/resource-consumer/utils.go | 6 +++--- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/test/e2e/autoscaling_utils.go b/test/e2e/autoscaling_utils.go index 89e5755fb7e..e60503a32ff 100644 --- a/test/e2e/autoscaling_utils.go +++ b/test/e2e/autoscaling_utils.go @@ -54,7 +54,7 @@ type ResourceConsumer struct { } // NewResourceConsumer creates new ResourceConsumer -// cpu argument is in milicores +// cpu argument is in millicores func NewResourceConsumer(name string, replicas int, cpu int, framework *Framework) *ResourceConsumer { runServiceAndRCForResourceConsumer(framework.Client, framework.Namespace.Name, name, replicas) rc := &ResourceConsumer{ @@ -69,8 +69,8 @@ func NewResourceConsumer(name string, replicas int, cpu int, framework *Framewor } // ConsumeCPU consumes given number of CPU -func (rc *ResourceConsumer) ConsumeCPU(milicores int) { - rc.channel <- milicores +func (rc *ResourceConsumer) ConsumeCPU(millicores int) { + rc.channel <- millicores } func (rc *ResourceConsumer) makeConsumeCPURequests() { @@ -79,9 +79,9 @@ func (rc *ResourceConsumer) makeConsumeCPURequests() { var rest int for { select { - case milicores := <-rc.channel: - count = milicores / requestSizeInMilicores - rest = milicores - count*requestSizeInMilicores + case millicores := <-rc.channel: + count = millicores / requestSizeInMilicores + rest = millicores - count*requestSizeInMilicores case <-time.After(sleepTime): if count > 0 { rc.sendConsumeCPUrequests(count, requestSizeInMilicores, consumptionTimeInSeconds) @@ -95,21 +95,21 @@ func (rc *ResourceConsumer) makeConsumeCPURequests() { } } -func (rc *ResourceConsumer) sendConsumeCPUrequests(requests, milicores, durationSec int) { +func (rc *ResourceConsumer) sendConsumeCPUrequests(requests, millicores, durationSec int) { for i := 0; i < requests; i++ { - go rc.sendOneConsumeCPUrequest(milicores, durationSec) + go rc.sendOneConsumeCPUrequest(millicores, durationSec) } } // sendOneConsumeCPUrequest sends POST request for cpu consumption -func (rc *ResourceConsumer) sendOneConsumeCPUrequest(milicores int, durationSec int) { +func (rc *ResourceConsumer) 
sendOneConsumeCPUrequest(millicores int, durationSec int) { _, err := rc.framework.Client.Post(). Prefix("proxy"). Namespace(rc.framework.Namespace.Name). Resource("services"). Name(rc.name). Suffix("ConsumeCPU"). - Param("milicores", strconv.Itoa(milicores)). + Param("millicores", strconv.Itoa(millicores)). Param("durationSec", strconv.Itoa(durationSec)). Do(). Raw() diff --git a/test/images/resource-consumer/consume-cpu/consume_cpu.go b/test/images/resource-consumer/consume-cpu/consume_cpu.go index 6f7c6c6233d..d859d9e24d1 100644 --- a/test/images/resource-consumer/consume-cpu/consume_cpu.go +++ b/test/images/resource-consumer/consume-cpu/consume_cpu.go @@ -34,20 +34,20 @@ func doSomething() { } var ( - milicores = flag.Int("milicores", 0, "milicores number") + millicores = flag.Int("millicores", 0, "millicores number") durationSec = flag.Int("duration-sec", 0, "duration time in seconds") ) func main() { flag.Parse() - // converte milicores to percentage - milicoresPct := float64(*milicores) / float64(10) + // convert millicores to percentage + millicoresPct := float64(*millicores) / float64(10) duration := time.Duration(*durationSec) * time.Second start := time.Now() first := systemstat.GetProcCPUSample() for time.Now().Sub(start) < duration { cpu := systemstat.GetProcCPUAverage(first, systemstat.GetProcCPUSample(), systemstat.GetUptime().Uptime) - if cpu.TotalPct < milicoresPct { + if cpu.TotalPct < millicoresPct { doSomething() } else { time.Sleep(sleep) diff --git a/test/images/resource-consumer/utils.go b/test/images/resource-consumer/utils.go index 96f3a7e7e9f..9557e49c952 100644 --- a/test/images/resource-consumer/utils.go +++ b/test/images/resource-consumer/utils.go @@ -24,10 +24,10 @@ import ( const consumeCPUBinary = "./consume-cpu/consume-cpu" -func ConsumeCPU(milicores int, durationSec int) { - log.Printf("ConsumeCPU milicores: %v, durationSec: %v", milicores, durationSec) +func ConsumeCPU(millicores int, durationSec int) { + log.Printf("ConsumeCPU millicores: %v, durationSec: %v", millicores, durationSec) // creating new consume cpu process - arg1 := fmt.Sprintf("-milicores=%d", milicores) + arg1 := fmt.Sprintf("-millicores=%d", millicores) arg2 := fmt.Sprintf("-duration-sec=%d", durationSec) consumeCPU := exec.Command(consumeCPUBinary, arg1, arg2) consumeCPU.Start() From bec4c10dc464d8afd5655f66acfc3ea6c9fdc22e Mon Sep 17 00:00:00 2001 From: goltermann Date: Wed, 2 Sep 2015 14:51:19 -0700 Subject: [PATCH 099/101] Replace IRC with Slack in docs. --- README.md | 2 +- docs/devel/writing-a-getting-started-guide.md | 2 +- docs/getting-started-guides/docker-multinode/master.md | 2 +- docs/getting-started-guides/docker-multinode/testing.md | 3 +-- docs/getting-started-guides/gce.md | 2 +- docs/getting-started-guides/scratch.md | 2 +- docs/troubleshooting.md | 4 ++-- docs/user-guide/debugging-services.md | 2 +- docs/user-guide/monitoring.md | 2 +- docs/user-guide/persistent-volumes/README.md | 3 +-- examples/k8petstore/README.md | 2 +- 11 files changed, 12 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index e79d6789c28..d94a9a2e831 100644 --- a/README.md +++ b/README.md @@ -114,7 +114,7 @@ You can also view recordings of past events and presentations on our [Media page For Q&A, our threads are at: * [Stack Overflow](http://stackoverflow.com/questions/tagged/kubernetes) - * [BotBot.me (IRC)](https://botbot.me/freenode/google-containers/) + * [Slack](/docs/troubleshooting.md#slack) #### Want to do more than just 'discuss' Kubernetes? 
diff --git a/docs/devel/writing-a-getting-started-guide.md b/docs/devel/writing-a-getting-started-guide.md index 7441474abb0..c9d4e2ca69c 100644 --- a/docs/devel/writing-a-getting-started-guide.md +++ b/docs/devel/writing-a-getting-started-guide.md @@ -76,7 +76,7 @@ These guidelines say *what* to do. See the Rationale section for *why*. If you have a cluster partially working, but doing all the above steps seems like too much work, we still want to hear from you. We suggest you write a blog post or a Gist, and we will link to it on our wiki page. -Just file an issue or chat us on IRC and one of the committers will link to it from the wiki. +Just file an issue or chat us on [Slack](../troubleshooting.md#slack) and one of the committers will link to it from the wiki. ## Development Distro Guidelines diff --git a/docs/getting-started-guides/docker-multinode/master.md b/docs/getting-started-guides/docker-multinode/master.md index 3f760c51ca8..8b355c138fb 100644 --- a/docs/getting-started-guides/docker-multinode/master.md +++ b/docs/getting-started-guides/docker-multinode/master.md @@ -193,7 +193,7 @@ NAME LABELS STATUS ``` If the status of the node is `NotReady` or `Unknown` please check that all of the containers you created are successfully running. -If all else fails, ask questions on IRC at [#google-containers](http://webchat.freenode.net/?channels=google-containers). +If all else fails, ask questions on [Slack](../../troubleshooting.md#slack). ### Next steps diff --git a/docs/getting-started-guides/docker-multinode/testing.md b/docs/getting-started-guides/docker-multinode/testing.md index f0692e4e6b4..1f77cb38661 100644 --- a/docs/getting-started-guides/docker-multinode/testing.md +++ b/docs/getting-started-guides/docker-multinode/testing.md @@ -47,8 +47,7 @@ NAME LABELS STATUS 127.0.0.1 kubernetes.io/hostname=127.0.0.1 Ready ``` -If the status of any node is `Unknown` or `NotReady` your cluster is broken, double check that all containers are running properly, and if all else fails, contact us on IRC at -[`#google-containers`](http://webchat.freenode.net/?channels=google-containers) for advice. +If the status of any node is `Unknown` or `NotReady` your cluster is broken, double check that all containers are running properly, and if all else fails, contact us on [Slack](../../troubleshooting.md#slack). ### Run an application diff --git a/docs/getting-started-guides/gce.md b/docs/getting-started-guides/gce.md index cf542d42de9..8353ac90299 100644 --- a/docs/getting-started-guides/gce.md +++ b/docs/getting-started-guides/gce.md @@ -100,7 +100,7 @@ cluster/kube-up.sh If you want more than one cluster running in your project, want to use a different name, or want a different number of worker nodes, see the `/cluster/gce/config-default.sh` file for more fine-grained configuration before you start up your cluster. If you run into trouble, please see the section on [troubleshooting](gce.md#troubleshooting), post to the -[google-containers group](https://groups.google.com/forum/#!forum/google-containers), or come ask questions on IRC at [#google-containers](http://webchat.freenode.net/?channels=google-containers) on freenode. +[google-containers group](https://groups.google.com/forum/#!forum/google-containers), or come ask questions on [Slack](../troubleshooting.md#slack). 
The next few steps will show you: diff --git a/docs/getting-started-guides/scratch.md b/docs/getting-started-guides/scratch.md index 1a74a3eef21..b9841bff754 100644 --- a/docs/getting-started-guides/scratch.md +++ b/docs/getting-started-guides/scratch.md @@ -875,7 +875,7 @@ pinging or SSH-ing from one node to another. ### Getting Help If you run into trouble, please see the section on [troubleshooting](gce.md#troubleshooting), post to the -[google-containers group](https://groups.google.com/forum/#!forum/google-containers), or come ask questions on IRC at [#google-containers](http://webchat.freenode.net/?channels=google-containers) on freenode. +[google-containers group](https://groups.google.com/forum/#!forum/google-containers), or come ask questions on [Slack](../troubleshooting.md#slack). diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 999ef9dfbc1..f241fbba62f 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -62,9 +62,9 @@ You may also find the Stack Overflow topics relevant: Someone else from the community may have already asked a similar question or may be able to help with your problem. The Kubernetes team will also monitor [posts tagged kubernetes](http://stackoverflow.com/questions/tagged/kubernetes). If there aren't any existing questions that help, please [ask a new one](http://stackoverflow.com/questions/ask?tags=kubernetes)! -## IRC +## Slack -The Kubernetes team hangs out on IRC at [`#google-containers`](https://botbot.me/freenode/google-containers/) on freenode. Feel free to come and ask any and all questions there. +The Kubernetes team hangs out on Slack in the `#kubernetes-users` channel. You can participate in the Kubernetes team [here](https://kubernetes.slack.com). Slack requires registration, but the Kubernetes team is open invitation to anyone to register [here](http://slack.kubernetes.io). Feel free to come and ask any and all questions. ## Mailing List diff --git a/docs/user-guide/debugging-services.md b/docs/user-guide/debugging-services.md index a439f6cff38..8399878a476 100644 --- a/docs/user-guide/debugging-services.md +++ b/docs/user-guide/debugging-services.md @@ -546,7 +546,7 @@ misbehaving. And yet your `Service` is not working. You should probably let us know, so we can help investigate! Contact us on -[IRC](http://webchat.freenode.net/?channels=google-containers) or +[Slack](../troubleshooting.md#slack) or [email](https://groups.google.com/forum/#!forum/google-containers) or [GitHub](https://github.com/kubernetes/kubernetes). diff --git a/docs/user-guide/monitoring.md b/docs/user-guide/monitoring.md index 67ba0420586..b54bd21bf2b 100644 --- a/docs/user-guide/monitoring.md +++ b/docs/user-guide/monitoring.md @@ -85,7 +85,7 @@ Here is a snapshot of the a Google Cloud Monitoring dashboard showing cluster-wi ## Try it out! -Now that you’ve learned a bit about Heapster, feel free to try it out on your own clusters! The [Heapster repository](https://github.com/GoogleCloudPlatform/heapster) is available on GitHub. It contains detailed instructions to setup Heapster and its storage backends. Heapster runs by default on most Kubernetes clusters, so you may already have it! Feedback is always welcome. Please let us know if you run into any issues. Heapster and Kubernetes developers hang out in the [#google-containers](http://webchat.freenode.net/?channels=google-containers) IRC channel on freenode.net. 
You can also reach us on the [google-containers Google Groups mailing list](https://groups.google.com/forum/#!forum/google-containers). +Now that you’ve learned a bit about Heapster, feel free to try it out on your own clusters! The [Heapster repository](https://github.com/kubernetes/heapster) is available on GitHub. It contains detailed instructions to setup Heapster and its storage backends. Heapster runs by default on most Kubernetes clusters, so you may already have it! Feedback is always welcome. Please let us know if you run into any issues via the troubleshooting [channels](../troubleshooting.md). *** *Authors: Vishnu Kannan and Victor Marmol, Google Software Engineers.* diff --git a/docs/user-guide/persistent-volumes/README.md b/docs/user-guide/persistent-volumes/README.md index e999e3ecae9..97a8e3623ef 100644 --- a/docs/user-guide/persistent-volumes/README.md +++ b/docs/user-guide/persistent-volumes/README.md @@ -122,8 +122,7 @@ $ curl 10.0.0.241:3000 I love Kubernetes storage! ``` -Hopefully this simple guide is enough to get you started with PersistentVolumes. If you have any questions, join -[`#google-containers`](https://botbot.me/freenode/google-containers/) on IRC and ask! +Hopefully this simple guide is enough to get you started with PersistentVolumes. If you have any questions, join the team on [Slack](../../troubleshooting.md#slack) and ask! Enjoy! diff --git a/examples/k8petstore/README.md b/examples/k8petstore/README.md index be59840d9e5..e8e2854bc56 100644 --- a/examples/k8petstore/README.md +++ b/examples/k8petstore/README.md @@ -137,7 +137,7 @@ Thus we plan to add another tier of queueing, which empties the REDIS transactio ## Questions -For questions on running this app, you can ask on the google containers group (freenode ~ google-containers@googlegroups.com or #google-containers on IRC) +For questions on running this app, you can ask on [Slack](../../docs/troubleshooting.md#slack). For questions about bigpetstore, and how the data is generated, ask on the apache bigtop mailing list. From d4a3e30edd005c98b20a299b2a065397b865c5b4 Mon Sep 17 00:00:00 2001 From: feihujiang Date: Wed, 9 Sep 2015 11:40:36 +0800 Subject: [PATCH 100/101] Remove the redundant definite article --- pkg/registry/pod/strategy.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/registry/pod/strategy.go b/pkg/registry/pod/strategy.go index 622c689bdba..7d4c44f6307 100644 --- a/pkg/registry/pod/strategy.go +++ b/pkg/registry/pod/strategy.go @@ -220,7 +220,7 @@ func ResourceLocation(getter ResourceGetter, ctx api.Context, id string) (*url.U return loc, nil, nil } -// LogLocation returns a the log URL for a pod container. If opts.Container is blank +// LogLocation returns the log URL for a pod container. If opts.Container is blank // and only one container is present in the pod, that container is used. func LogLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, ctx api.Context, name string, opts *api.PodLogOptions) (*url.URL, http.RoundTripper, error) { pod, err := getPod(getter, ctx, name) @@ -347,7 +347,7 @@ func streamLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, return loc, nodeTransport, nil } -// PortForwardLocation returns a the port-forward URL for a pod. +// PortForwardLocation returns the port-forward URL for a pod. 
 func PortForwardLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, ctx api.Context, name string) (*url.URL, http.RoundTripper, error) {
 	pod, err := getPod(getter, ctx, name)
 	if err != nil {

From 61f272c8cdb01335e8cc9d1b189c18e61f69eb75 Mon Sep 17 00:00:00 2001
From: Brendan Burns
Date: Wed, 2 Sep 2015 17:13:38 -0700
Subject: [PATCH 101/101] Update the third party data codec to automatically
 include everything in api.ObjectMeta. Add more tests.

---
 pkg/master/master_test.go                    | 21 ++++--
 pkg/registry/thirdpartyresourcedata/codec.go | 64 +++++++++----------
 .../thirdpartyresourcedata/codec_test.go     | 12 ++++
 3 files changed, 57 insertions(+), 40 deletions(-)

diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go
index 5934983c15f..f7ac4ebc381 100644
--- a/pkg/master/master_test.go
+++ b/pkg/master/master_test.go
@@ -237,9 +237,10 @@ func testInstallThirdPartyAPIGetVersion(t *testing.T, version string) {
 	if err := decodeResponse(resp, &item); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-
+	// Fill in data that the apiserver injects
+	expectedObj.SelfLink = item.SelfLink
 	if !reflect.DeepEqual(item, expectedObj) {
-		t.Errorf("expected:\n%v\nsaw:\n%v\n", expectedObj, item)
+		t.Errorf("expected:\n%#v\nsaw:\n%#v\n", expectedObj, item)
 	}
 }
 
@@ -285,8 +286,14 @@ func testInstallThirdPartyAPIPostForVersion(t *testing.T, version string) {
 		t.Errorf("unexpected error: %v", err)
 	}
 
-	if !reflect.DeepEqual(item, inputObj) {
-		t.Errorf("expected:\n%v\nsaw:\n%v\n", inputObj, item)
+	// fill in fields set by the apiserver
+	expectedObj := inputObj
+	expectedObj.SelfLink = item.SelfLink
+	expectedObj.Namespace = item.Namespace
+	expectedObj.UID = item.UID
+	expectedObj.CreationTimestamp = item.CreationTimestamp
+	if !reflect.DeepEqual(item, expectedObj) {
+		t.Errorf("expected:\n%v\nsaw:\n%v\n", expectedObj, item)
 	}
 
 	etcdResp, err := fakeClient.Get(etcdtest.PathPrefix()+"/ThirdPartyResourceData/company.com/foos/default/test", false, false)
@@ -324,7 +331,8 @@ func testInstallThirdPartyAPIDeleteVersion(t *testing.T, version string) {
 	expectedObj := Foo{
 		ObjectMeta: api.ObjectMeta{
-			Name: "test",
+			Name:      "test",
+			Namespace: "default",
 		},
 		TypeMeta: api.TypeMeta{
 			Kind: "Foo",
@@ -353,6 +361,9 @@ func testInstallThirdPartyAPIDeleteVersion(t *testing.T, version string) {
 		t.Errorf("unexpected error: %v", err)
 	}
 
+	// Fill in fields set by the apiserver
+	expectedObj.SelfLink = item.SelfLink
+	expectedObj.Namespace = item.Namespace
 	if !reflect.DeepEqual(item, expectedObj) {
 		t.Errorf("expected:\n%v\nsaw:\n%v\n", expectedObj, item)
 	}
diff --git a/pkg/registry/thirdpartyresourcedata/codec.go b/pkg/registry/thirdpartyresourcedata/codec.go
index bc190a1b9fd..5ecf9aaa963 100644
--- a/pkg/registry/thirdpartyresourcedata/codec.go
+++ b/pkg/registry/thirdpartyresourcedata/codec.go
@@ -100,12 +100,12 @@ func (t *thirdPartyResourceDataCodec) populate(objIn *expapi.ThirdPartyResourceD
 }
 
 func (t *thirdPartyResourceDataCodec) populateFromObject(objIn *expapi.ThirdPartyResourceData, mapObj map[string]interface{}, data []byte) error {
-	kind, ok := mapObj["kind"].(string)
-	if !ok {
-		return fmt.Errorf("unexpected object for kind: %#v", mapObj["kind"])
+	typeMeta := api.TypeMeta{}
+	if err := json.Unmarshal(data, &typeMeta); err != nil {
+		return err
 	}
-	if kind != t.kind {
-		return fmt.Errorf("unexpected kind: %s, expected: %s", kind, t.kind)
+	if typeMeta.Kind != t.kind {
+		return fmt.Errorf("unexpected kind: %s, expected %s", typeMeta.Kind, t.kind)
 	}
 	metadata, ok := mapObj["metadata"].(map[string]interface{})
@@ -113,38 +113,15 @@ func (t *thirdPartyResourceDataCodec) populateFromObject(objIn *expapi.ThirdPart
 		return fmt.Errorf("unexpected object for metadata: %#v", mapObj["metadata"])
 	}
 
-	if resourceVersion, ok := metadata["resourceVersion"]; ok {
-		resourceVersionStr, ok := resourceVersion.(string)
-		if !ok {
-			return fmt.Errorf("unexpected object for resourceVersion: %v", resourceVersion)
-		}
-
-		objIn.ResourceVersion = resourceVersionStr
+	metadataData, err := json.Marshal(metadata)
+	if err != nil {
+		return err
 	}
-	name, ok := metadata["name"].(string)
-	if !ok {
-		return fmt.Errorf("unexpected object for name: %#v", metadata)
+	if err := json.Unmarshal(metadataData, &objIn.ObjectMeta); err != nil {
+		return err
 	}
-	if labels, ok := metadata["labels"]; ok {
-		labelMap, ok := labels.(map[string]interface{})
-		if !ok {
-			return fmt.Errorf("unexpected object for labels: %v", labelMap)
-		}
-		for key, value := range labelMap {
-			valueStr, ok := value.(string)
-			if !ok {
-				return fmt.Errorf("unexpected label: %v", value)
-			}
-			if objIn.Labels == nil {
-				objIn.Labels = map[string]string{}
-			}
-			objIn.Labels[key] = valueStr
-		}
-	}
-
-	objIn.Name = name
 	objIn.Data = data
 	return nil
 }
@@ -230,16 +207,33 @@ const template = `{
   "items": [ %s ]
 }`
 
+func encodeToJSON(obj *expapi.ThirdPartyResourceData) ([]byte, error) {
+	var objOut interface{}
+	if err := json.Unmarshal(obj.Data, &objOut); err != nil {
+		return nil, err
+	}
+	objMap, ok := objOut.(map[string]interface{})
+	if !ok {
+		return nil, fmt.Errorf("unexpected type: %v", objOut)
+	}
+	objMap["metadata"] = obj.ObjectMeta
+	return json.Marshal(objMap)
+}
+
 func (t *thirdPartyResourceDataCodec) Encode(obj runtime.Object) (data []byte, err error) {
 	switch obj := obj.(type) {
 	case *expapi.ThirdPartyResourceData:
-		return obj.Data, nil
+		return encodeToJSON(obj)
 	case *expapi.ThirdPartyResourceDataList:
 		// TODO: There must be a better way to do this...
 		buff := &bytes.Buffer{}
 		dataStrings := make([]string, len(obj.Items))
 		for ix := range obj.Items {
-			dataStrings[ix] = string(obj.Items[ix].Data)
+			data, err := encodeToJSON(&obj.Items[ix])
+			if err != nil {
+				return nil, err
+			}
+			dataStrings[ix] = string(data)
 		}
 		fmt.Fprintf(buff, template, t.kind+"List", strings.Join(dataStrings, ","))
 		return buff.Bytes(), nil
diff --git a/pkg/registry/thirdpartyresourcedata/codec_test.go b/pkg/registry/thirdpartyresourcedata/codec_test.go
index 30665cfd7c5..72fc1e5b709 100644
--- a/pkg/registry/thirdpartyresourcedata/codec_test.go
+++ b/pkg/registry/thirdpartyresourcedata/codec_test.go
@@ -20,9 +20,11 @@ import (
 	"encoding/json"
 	"reflect"
 	"testing"
+	"time"
 
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/expapi"
+	"k8s.io/kubernetes/pkg/util"
 )
 
 type Foo struct {
@@ -59,6 +61,16 @@ func TestCodec(t *testing.T) {
 			obj:  &Foo{ObjectMeta: api.ObjectMeta{Name: "bar", ResourceVersion: "baz"}, TypeMeta: api.TypeMeta{Kind: "Foo"}},
 			name: "resource version",
 		},
+		{
+			obj: &Foo{
+				ObjectMeta: api.ObjectMeta{
+					Name:              "bar",
+					CreationTimestamp: util.Time{time.Unix(100, 0)},
+				},
+				TypeMeta: api.TypeMeta{Kind: "Foo"},
+			},
+			name: "creation time",
+		},
 		{
 			obj: &Foo{
 				ObjectMeta: api.ObjectMeta{