diff --git a/.gitignore b/.gitignore index aa8b207dbbb..43253f23283 100644 --- a/.gitignore +++ b/.gitignore @@ -50,3 +50,6 @@ network_closure.sh .kubeconfig .tags* + +# Web UI +www/master/node_modules/ diff --git a/CHANGELOG.md b/CHANGELOG.md index d4318babfa6..e84f786ec5e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,117 @@ # Changelog +## 0.15.0 +* Enables v1beta3 API and sets it to the default API version (#6098) + * See the [v1beta3 conversion guide](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api.md#v1beta3-conversion-tips) +* Added multi-port Services (#6182) +* New Getting Started Guides + * Multi-node local startup guide (#6505) + * JUJU (#5414) + * Mesos on Google Cloud Platform (#5442) + * Ansible Setup instructions (#6237) +* Added a controller framework (#5270, #5473) +* The Kubelet now listens on a secure HTTPS port (#6380) +* Made kubectl errors more user-friendly (#6338) +* The apiserver now supports client cert authentication (#6190) +* The apiserver now limits the number of concurrent requests it processes (#6207) +* Added rate limiting to pod deleting (#6355) +* Implement Balanced Resource Allocation algorithm as a PriorityFunction in scheduler package (#6150) +* Enabled log collection from master (#6396) +* Added an api endpoint to pull logs from Pods (#6497) +* Added latency metrics to scheduler (#6368) +* Added latency metrics to REST client (#6409) +* etcd now runs in a pod on the master (#6221) +* nginx now runs in a container on the master (#6334) +* Began creating Docker images for master components (#6326) +* Updated GCE provider to work with gcloud 0.9.54 (#6270) +* Updated AWS provider to fix Region vs Zone semantics (#6011) +* Record event when image GC fails (#6091) +* Add a QPS limiter to the kubernetes client (#6203) +* Decrease the time it takes to run make release (#6196) +* New volume support + * Added iscsi volume plugin (#5506) + * Added glusterfs volume plugin (#6174) + * AWS EBS volume support 
(#5138) +* Updated to heapster version to v0.10.0 (#6331) +* Updated to etcd 2.0.9 (#6544) +* Updated to Kibana to v1.2 (#6426) +* Bug Fixes + * Kube-proxy now updates iptables rules if a service's public IPs change (#6123) + * Retry kube-addons creation if the initial creation fails (#6200) + * Make kube-proxy more resiliant to running out of file descriptors (#6727) + +## 0.14.2 + * Fix a regression in service port handling validation + * Add a work around for etcd bugs in watch + +## 0.14.1 + * Fixed an issue where containers with hostPort would sometimes go pending forever. (#6110) + +## 0.14.0 + * Add HostNetworking container option to the API. + * PersistentVolume API + * NFS volume plugin fixed/re-added + * Upgraded to etcd 2.0.5 on Salt configs + * .kubeconfig changes + * Kubelet now posts pod status to master, versus master polling. + * All cluster add-on images are pulled from gcr.io + +## 0.13.2 + * Fixes possible cluster bring-up flakiness on GCE/Salt based clusters + + +## 0.12.2 + * #5348 - Health check the docker socket and Docker generally + * #5395 - Garbage collect unknown containers + +## 0.12.1 + * DockerCache doesn't get containers at startup (#5115) + * Update version of kube2sky to 1.1 (#5127) + * Monit health check kubelet and restart unhealthy one (#5120) + +## 0.12.0 + * Hide the infrastructure pod from users + * Configure scheduler via JSON + * Improved object validation + * Improved messages on scheduler failure + * Improved messages on port conflicts + * Move to thread-per-pod in the kubelet + * Misc. 
kubectl improvements + * Update etcd used by SkyDNS to 2.0.3 + * Fixes to GCE PD support + * Improved support for secrets in the API + * Improved OOM behavior + +## 0.11 +* Secret API Resources +* Better error handling in various places +* Improved RackSpace support +* Fix ```kubectl``` patch behavior +* Health check failures fire events +* Don't delete the pod infrastructure container on health check failures +* Improvements to Pod Status detection and reporting +* Reduce the size of scheduled pods in etcd +* Fix some bugs in namespace clashing +* More detailed info on failed image pulls +* Remove pods from a failed node +* Safe format and mount of GCE PDs +* Make events more resilient to etcd watch failures +* Upgrade to container-vm 01-29-2015 + +## 0.10 + * Improvements to swagger API documentation. + * Upgrade container VM to 20150129 + * Start to move e2e tests to Ginkgo + * Fix apiserver proxy path rewriting + * Upgrade to etcd 2.0.0 + * Add a wordpress/mysql example + * Improve responsiveness of the master when creating new pods + * Improve api object validation in numerous small ways + * Add support for IPC namespaces + * Improve GCE PD support + * Make replica controllers with node selectors work correctly + * Lots of improvements to e2e tests (more to come) + ## 0.9 ### Features - Various improvements to kubectl diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index a9470def833..44b7b87b758 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -62,20 +62,10 @@ "Comment": "v0.1-62-g8d75e11", "Rev": "8d75e11374a1928608c906fe745b538483e7aeb2" }, - { - "ImportPath": "github.com/coreos/etcd/etcdserver/etcdhttp/httptypes", - "Comment": "v2.0.4-288-g866a9d4", - "Rev": "866a9d4e41401657ea44bf539b2c5561d6fdcd67" - }, - { - "ImportPath": "github.com/coreos/etcd/pkg/types", - "Comment": "v2.0.4-288-g866a9d4", - "Rev": "866a9d4e41401657ea44bf539b2c5561d6fdcd67" - }, { "ImportPath": "github.com/coreos/go-etcd/etcd", - "Comment": "v0.4.6-8-g60e12ca", - 
"Rev": "60e12cac3db8ffce00b576b4af0e7b0a968f1003" + "Comment": "v2.0.0-3-g0424b5f", + "Rev": "0424b5f86ef0ca57a5309c599f74bbb3e97ecd9d" }, { "ImportPath": "github.com/coreos/go-systemd/dbus", @@ -372,8 +362,8 @@ }, { "ImportPath": "github.com/rackspace/gophercloud", - "Comment": "v1.0.0-490-g32d0a89", - "Rev": "32d0a893a8ef70abe76dc5153e2925b39cbea7f7" + "Comment": "v1.0.0-569-gf3ced00", + "Rev": "f3ced00552c1c7d4a6184500af9062cfb4ff4463" }, { "ImportPath": "github.com/russross/blackfriday", diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/doc.go b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/doc.go deleted file mode 100644 index fa0158020e7..00000000000 --- a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package httptypes defines how etcd's HTTP API entities are serialized to and deserialized from JSON. -*/ - -package httptypes diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/errors.go b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/errors.go deleted file mode 100644 index 7e0d275ebbb..00000000000 --- a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/errors.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 CoreOS, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httptypes - -import ( - "encoding/json" - "log" - "net/http" -) - -type HTTPError struct { - Message string `json:"message"` - // HTTP return code - Code int `json:"-"` -} - -func (e HTTPError) Error() string { - return e.Message -} - -// TODO(xiangli): handle http write errors -func (e HTTPError) WriteTo(w http.ResponseWriter) { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(e.Code) - b, err := json.Marshal(e) - if err != nil { - log.Panicf("marshal HTTPError should never fail: %v", err) - } - w.Write(b) -} - -func NewHTTPError(code int, m string) *HTTPError { - return &HTTPError{ - Message: m, - Code: code, - } -} diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/errors_test.go b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/errors_test.go deleted file mode 100644 index f5cec6d4579..00000000000 --- a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/errors_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httptypes - -import ( - "net/http" - "net/http/httptest" - "reflect" - "testing" -) - -func TestHTTPErrorWriteTo(t *testing.T) { - err := NewHTTPError(http.StatusBadRequest, "what a bad request you made!") - rr := httptest.NewRecorder() - err.WriteTo(rr) - - wcode := http.StatusBadRequest - wheader := http.Header(map[string][]string{ - "Content-Type": []string{"application/json"}, - }) - wbody := `{"message":"what a bad request you made!"}` - - if wcode != rr.Code { - t.Errorf("HTTP status code %d, want %d", rr.Code, wcode) - } - - if !reflect.DeepEqual(wheader, rr.HeaderMap) { - t.Errorf("HTTP headers %v, want %v", rr.HeaderMap, wheader) - } - - gbody := rr.Body.String() - if wbody != gbody { - t.Errorf("HTTP body %q, want %q", gbody, wbody) - } -} diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/member.go b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/member.go deleted file mode 100644 index 30ecbb53939..00000000000 --- a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/member.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httptypes - -import ( - "encoding/json" - - "github.com/coreos/etcd/pkg/types" -) - -type Member struct { - ID string `json:"id"` - Name string `json:"name"` - PeerURLs []string `json:"peerURLs"` - ClientURLs []string `json:"clientURLs"` -} - -type MemberCreateRequest struct { - PeerURLs types.URLs -} - -type MemberUpdateRequest struct { - MemberCreateRequest -} - -func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error { - s := struct { - PeerURLs []string `json:"peerURLs"` - }{} - - err := json.Unmarshal(data, &s) - if err != nil { - return err - } - - urls, err := types.NewURLs(s.PeerURLs) - if err != nil { - return err - } - - m.PeerURLs = urls - return nil -} - -type MemberCollection []Member - -func (c *MemberCollection) MarshalJSON() ([]byte, error) { - d := struct { - Members []Member `json:"members"` - }{ - Members: []Member(*c), - } - - return json.Marshal(d) -} diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/member_test.go b/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/member_test.go deleted file mode 100644 index e0b29d88311..00000000000 --- a/Godeps/_workspace/src/github.com/coreos/etcd/etcdserver/etcdhttp/httptypes/member_test.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package httptypes - -import ( - "encoding/json" - "net/url" - "reflect" - "testing" - - "github.com/coreos/etcd/pkg/types" -) - -func TestMemberUnmarshal(t *testing.T) { - tests := []struct { - body []byte - wantMember Member - wantError bool - }{ - // no URLs, just check ID & Name - { - body: []byte(`{"id": "c", "name": "dungarees"}`), - wantMember: Member{ID: "c", Name: "dungarees", PeerURLs: nil, ClientURLs: nil}, - }, - - // both client and peer URLs - { - body: []byte(`{"peerURLs": ["http://127.0.0.1:4001"], "clientURLs": ["http://127.0.0.1:4001"]}`), - wantMember: Member{ - PeerURLs: []string{ - "http://127.0.0.1:4001", - }, - ClientURLs: []string{ - "http://127.0.0.1:4001", - }, - }, - }, - - // multiple peer URLs - { - body: []byte(`{"peerURLs": ["http://127.0.0.1:4001", "https://example.com"]}`), - wantMember: Member{ - PeerURLs: []string{ - "http://127.0.0.1:4001", - "https://example.com", - }, - ClientURLs: nil, - }, - }, - - // multiple client URLs - { - body: []byte(`{"clientURLs": ["http://127.0.0.1:4001", "https://example.com"]}`), - wantMember: Member{ - PeerURLs: nil, - ClientURLs: []string{ - "http://127.0.0.1:4001", - "https://example.com", - }, - }, - }, - - // invalid JSON - { - body: []byte(`{"peerU`), - wantError: true, - }, - } - - for i, tt := range tests { - got := Member{} - err := json.Unmarshal(tt.body, &got) - if tt.wantError != (err != nil) { - t.Errorf("#%d: want error %t, got %v", i, tt.wantError, err) - continue - } - - if !reflect.DeepEqual(tt.wantMember, got) { - t.Errorf("#%d: incorrect output: want=%#v, 
got=%#v", i, tt.wantMember, got) - } - } -} - -func TestMemberCreateRequestUnmarshal(t *testing.T) { - body := []byte(`{"peerURLs": ["http://127.0.0.1:8081", "https://127.0.0.1:8080"]}`) - want := MemberCreateRequest{ - PeerURLs: types.URLs([]url.URL{ - url.URL{Scheme: "http", Host: "127.0.0.1:8081"}, - url.URL{Scheme: "https", Host: "127.0.0.1:8080"}, - }), - } - - var req MemberCreateRequest - if err := json.Unmarshal(body, &req); err != nil { - t.Fatalf("Unmarshal returned unexpected err=%v", err) - } - - if !reflect.DeepEqual(want, req) { - t.Fatalf("Failed to unmarshal MemberCreateRequest: want=%#v, got=%#v", want, req) - } -} - -func TestMemberCreateRequestUnmarshalFail(t *testing.T) { - tests := [][]byte{ - // invalid JSON - []byte(``), - []byte(`{`), - - // spot-check validation done in types.NewURLs - []byte(`{"peerURLs": "foo"}`), - []byte(`{"peerURLs": ["."]}`), - []byte(`{"peerURLs": []}`), - []byte(`{"peerURLs": ["http://127.0.0.1:4001/foo"]}`), - []byte(`{"peerURLs": ["http://127.0.0.1"]}`), - } - - for i, tt := range tests { - var req MemberCreateRequest - if err := json.Unmarshal(tt, &req); err == nil { - t.Errorf("#%d: expected err, got nil", i) - } - } -} diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/id.go b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/id.go deleted file mode 100644 index 88cb9e63494..00000000000 --- a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/id.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "strconv" -) - -// ID represents a generic identifier which is canonically -// stored as a uint64 but is typically represented as a -// base-16 string for input/output -type ID uint64 - -func (i ID) String() string { - return strconv.FormatUint(uint64(i), 16) -} - -// IDFromString attempts to create an ID from a base-16 string. -func IDFromString(s string) (ID, error) { - i, err := strconv.ParseUint(s, 16, 64) - return ID(i), err -} - -// IDSlice implements the sort interface -type IDSlice []ID - -func (p IDSlice) Len() int { return len(p) } -func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } -func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/id_test.go b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/id_test.go deleted file mode 100644 index 97d168f58e2..00000000000 --- a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/id_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "reflect" - "sort" - "testing" -) - -func TestIDString(t *testing.T) { - tests := []struct { - input ID - want string - }{ - { - input: 12, - want: "c", - }, - { - input: 4918257920282737594, - want: "444129853c343bba", - }, - } - - for i, tt := range tests { - got := tt.input.String() - if tt.want != got { - t.Errorf("#%d: ID.String failure: want=%v, got=%v", i, tt.want, got) - } - } -} - -func TestIDFromString(t *testing.T) { - tests := []struct { - input string - want ID - }{ - { - input: "17", - want: 23, - }, - { - input: "612840dae127353", - want: 437557308098245459, - }, - } - - for i, tt := range tests { - got, err := IDFromString(tt.input) - if err != nil { - t.Errorf("#%d: IDFromString failure: err=%v", i, err) - continue - } - if tt.want != got { - t.Errorf("#%d: IDFromString failure: want=%v, got=%v", i, tt.want, got) - } - } -} - -func TestIDFromStringFail(t *testing.T) { - tests := []string{ - "", - "XXX", - "612840dae127353612840dae127353", - } - - for i, tt := range tests { - _, err := IDFromString(tt) - if err == nil { - t.Fatalf("#%d: IDFromString expected error, but err=nil", i) - } - } -} - -func TestIDSlice(t *testing.T) { - g := []ID{10, 500, 5, 1, 100, 25} - w := []ID{1, 5, 10, 25, 100, 500} - sort.Sort(IDSlice(g)) - if !reflect.DeepEqual(g, w) { - t.Errorf("slice after sort = %#v, want %#v", g, w) - } -} diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/set.go b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/set.go deleted file mode 100644 index 32287522b11..00000000000 --- a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/set.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "sync" -) - -type Set interface { - Add(string) - Remove(string) - Contains(string) bool - Equals(Set) bool - Length() int - Values() []string - Copy() Set - Sub(Set) Set -} - -func NewUnsafeSet(values ...string) *unsafeSet { - set := &unsafeSet{make(map[string]struct{})} - for _, v := range values { - set.Add(v) - } - return set -} - -func NewThreadsafeSet(values ...string) *tsafeSet { - us := NewUnsafeSet(values...) - return &tsafeSet{us, sync.RWMutex{}} -} - -type unsafeSet struct { - d map[string]struct{} -} - -// Add adds a new value to the set (no-op if the value is already present) -func (us *unsafeSet) Add(value string) { - us.d[value] = struct{}{} -} - -// Remove removes the given value from the set -func (us *unsafeSet) Remove(value string) { - delete(us.d, value) -} - -// Contains returns whether the set contains the given value -func (us *unsafeSet) Contains(value string) (exists bool) { - _, exists = us.d[value] - return -} - -// ContainsAll returns whether the set contains all given values -func (us *unsafeSet) ContainsAll(values []string) bool { - for _, s := range values { - if !us.Contains(s) { - return false - } - } - return true -} - -// Equals returns whether the contents of two sets are identical -func (us *unsafeSet) Equals(other Set) bool { - v1 := sort.StringSlice(us.Values()) - v2 := sort.StringSlice(other.Values()) - v1.Sort() - v2.Sort() - return reflect.DeepEqual(v1, v2) -} - -// Length returns the number of elements in the set -func (us *unsafeSet) Length() int { - 
return len(us.d) -} - -// Values returns the values of the Set in an unspecified order. -func (us *unsafeSet) Values() (values []string) { - values = make([]string, 0) - for val, _ := range us.d { - values = append(values, val) - } - return -} - -// Copy creates a new Set containing the values of the first -func (us *unsafeSet) Copy() Set { - cp := NewUnsafeSet() - for val, _ := range us.d { - cp.Add(val) - } - - return cp -} - -// Sub removes all elements in other from the set -func (us *unsafeSet) Sub(other Set) Set { - oValues := other.Values() - result := us.Copy().(*unsafeSet) - - for _, val := range oValues { - if _, ok := result.d[val]; !ok { - continue - } - delete(result.d, val) - } - - return result -} - -type tsafeSet struct { - us *unsafeSet - m sync.RWMutex -} - -func (ts *tsafeSet) Add(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Add(value) -} - -func (ts *tsafeSet) Remove(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Remove(value) -} - -func (ts *tsafeSet) Contains(value string) (exists bool) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Contains(value) -} - -func (ts *tsafeSet) Equals(other Set) bool { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Equals(other) -} - -func (ts *tsafeSet) Length() int { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Length() -} - -func (ts *tsafeSet) Values() (values []string) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Values() -} - -func (ts *tsafeSet) Copy() Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Copy().(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} - -func (ts *tsafeSet) Sub(other Set) Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Sub(other).(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/set_test.go b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/set_test.go deleted file mode 100644 index 
ff1ecc68d3c..00000000000 --- a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/set_test.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "testing" -) - -func TestUnsafeSet(t *testing.T) { - driveSetTests(t, NewUnsafeSet()) -} - -func TestThreadsafeSet(t *testing.T) { - driveSetTests(t, NewThreadsafeSet()) -} - -// Check that two slices contents are equal; order is irrelevant -func equal(a, b []string) bool { - as := sort.StringSlice(a) - bs := sort.StringSlice(b) - as.Sort() - bs.Sort() - return reflect.DeepEqual(as, bs) -} - -func driveSetTests(t *testing.T, s Set) { - // Verify operations on an empty set - eValues := []string{} - values := s.Values() - if !reflect.DeepEqual(values, eValues) { - t.Fatalf("Expect values=%v got %v", eValues, values) - } - if l := s.Length(); l != 0 { - t.Fatalf("Expected length=0, got %d", l) - } - for _, v := range []string{"foo", "bar", "baz"} { - if s.Contains(v) { - t.Fatalf("Expect s.Contains(%q) to be fale, got true", v) - } - } - - // Add three items, ensure they show up - s.Add("foo") - s.Add("bar") - s.Add("baz") - - eValues = []string{"foo", "bar", "baz"} - values = s.Values() - if !equal(values, eValues) { - t.Fatalf("Expect values=%v got %v", eValues, values) - } - - for _, v := range eValues { - if !s.Contains(v) { - t.Fatalf("Expect s.Contains(%q) to be true, got false", v) - } - } - - if 
l := s.Length(); l != 3 { - t.Fatalf("Expected length=3, got %d", l) - } - - // Add the same item a second time, ensuring it is not duplicated - s.Add("foo") - - values = s.Values() - if !equal(values, eValues) { - t.Fatalf("Expect values=%v got %v", eValues, values) - } - if l := s.Length(); l != 3 { - t.Fatalf("Expected length=3, got %d", l) - } - - // Remove all items, ensure they are gone - s.Remove("foo") - s.Remove("bar") - s.Remove("baz") - - eValues = []string{} - values = s.Values() - if !equal(values, eValues) { - t.Fatalf("Expect values=%v got %v", eValues, values) - } - - if l := s.Length(); l != 0 { - t.Fatalf("Expected length=0, got %d", l) - } - - // Create new copies of the set, and ensure they are unlinked to the - // original Set by making modifications - s.Add("foo") - s.Add("bar") - cp1 := s.Copy() - cp2 := s.Copy() - s.Remove("foo") - cp3 := s.Copy() - cp1.Add("baz") - - for i, tt := range []struct { - want []string - got []string - }{ - {[]string{"bar"}, s.Values()}, - {[]string{"foo", "bar", "baz"}, cp1.Values()}, - {[]string{"foo", "bar"}, cp2.Values()}, - {[]string{"bar"}, cp3.Values()}, - } { - if !equal(tt.want, tt.got) { - t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got) - } - } - - for i, tt := range []struct { - want bool - got bool - }{ - {true, s.Equals(cp3)}, - {true, cp3.Equals(s)}, - {false, s.Equals(cp2)}, - {false, s.Equals(cp1)}, - {false, cp1.Equals(s)}, - {false, cp2.Equals(s)}, - {false, cp2.Equals(cp1)}, - } { - if tt.got != tt.want { - t.Fatalf("case %d: want %t, got %t", i, tt.want, tt.got) - - } - } - - // Subtract values from a Set, ensuring a new Set is created and - // the original Sets are unmodified - sub1 := cp1.Sub(s) - sub2 := cp2.Sub(cp1) - - for i, tt := range []struct { - want []string - got []string - }{ - {[]string{"foo", "bar", "baz"}, cp1.Values()}, - {[]string{"foo", "bar"}, cp2.Values()}, - {[]string{"bar"}, s.Values()}, - {[]string{"foo", "baz"}, sub1.Values()}, - {[]string{}, 
sub2.Values()}, - } { - if !equal(tt.want, tt.got) { - t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got) - } - } -} - -func TestUnsafeSetContainsAll(t *testing.T) { - vals := []string{"foo", "bar", "baz"} - s := NewUnsafeSet(vals...) - - tests := []struct { - strs []string - wcontain bool - }{ - {[]string{}, true}, - {vals[:1], true}, - {vals[:2], true}, - {vals, true}, - {[]string{"cuz"}, false}, - {[]string{vals[0], "cuz"}, false}, - } - for i, tt := range tests { - if g := s.ContainsAll(tt.strs); g != tt.wcontain { - t.Errorf("#%d: ok = %v, want %v", i, g, tt.wcontain) - } - } -} diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/slice.go b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/slice.go deleted file mode 100644 index 0327950f706..00000000000 --- a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/slice.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -// Uint64Slice implements sort interface -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/slice_test.go b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/slice_test.go deleted file mode 100644 index 95e37e04d20..00000000000 --- a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/slice_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "testing" -) - -func TestUint64Slice(t *testing.T) { - g := Uint64Slice{10, 500, 5, 1, 100, 25} - w := Uint64Slice{1, 5, 10, 25, 100, 500} - sort.Sort(g) - if !reflect.DeepEqual(g, w) { - t.Errorf("slice after sort = %#v, want %#v", g, w) - } -} diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/urls.go b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/urls.go deleted file mode 100644 index ce2483ffaaa..00000000000 --- a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/urls.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "errors" - "fmt" - "net" - "net/url" - "sort" - "strings" -) - -type URLs []url.URL - -func NewURLs(strs []string) (URLs, error) { - all := make([]url.URL, len(strs)) - if len(all) == 0 { - return nil, errors.New("no valid URLs given") - } - for i, in := range strs { - in = strings.TrimSpace(in) - u, err := url.Parse(in) - if err != nil { - return nil, err - } - if u.Scheme != "http" && u.Scheme != "https" { - return nil, fmt.Errorf("URL scheme must be http or https: %s", in) - } - if _, _, err := net.SplitHostPort(u.Host); err != nil { - return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) - } - if u.Path != "" { - return nil, fmt.Errorf("URL must not contain a path: %s", in) - } - all[i] = *u - } - us := URLs(all) - us.Sort() - - return us, nil -} - -func (us URLs) String() string { - return strings.Join(us.StringSlice(), ",") -} - -func (us *URLs) Sort() { - sort.Sort(us) -} -func (us URLs) Len() int { return len(us) } -func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } -func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } - -func (us URLs) StringSlice() []string { - out := make([]string, len(us)) - for i := range us { - out[i] = us[i].String() - } - - return out -} diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/urls_test.go b/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/urls_test.go deleted file mode 100644 index 41caa5d68a5..00000000000 --- a/Godeps/_workspace/src/github.com/coreos/etcd/pkg/types/urls_test.go +++ 
/dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "testing" - - "github.com/coreos/etcd/pkg/testutil" -) - -func TestNewURLs(t *testing.T) { - tests := []struct { - strs []string - wurls URLs - }{ - { - []string{"http://127.0.0.1:4001"}, - testutil.MustNewURLs(t, []string{"http://127.0.0.1:4001"}), - }, - // it can trim space - { - []string{" http://127.0.0.1:4001 "}, - testutil.MustNewURLs(t, []string{"http://127.0.0.1:4001"}), - }, - // it does sort - { - []string{ - "http://127.0.0.2:4001", - "http://127.0.0.1:4001", - }, - testutil.MustNewURLs(t, []string{ - "http://127.0.0.1:4001", - "http://127.0.0.2:4001", - }), - }, - } - for i, tt := range tests { - urls, _ := NewURLs(tt.strs) - if !reflect.DeepEqual(urls, tt.wurls) { - t.Errorf("#%d: urls = %+v, want %+v", i, urls, tt.wurls) - } - } -} - -func TestURLsString(t *testing.T) { - tests := []struct { - us URLs - wstr string - }{ - { - URLs{}, - "", - }, - { - testutil.MustNewURLs(t, []string{"http://127.0.0.1:4001"}), - "http://127.0.0.1:4001", - }, - { - testutil.MustNewURLs(t, []string{ - "http://127.0.0.1:4001", - "http://127.0.0.2:4001", - }), - "http://127.0.0.1:4001,http://127.0.0.2:4001", - }, - { - testutil.MustNewURLs(t, []string{ - "http://127.0.0.2:4001", - "http://127.0.0.1:4001", - }), - "http://127.0.0.2:4001,http://127.0.0.1:4001", - }, - } - for i, tt := range tests { - g := 
tt.us.String() - if g != tt.wstr { - t.Errorf("#%d: string = %s, want %s", i, g, tt.wstr) - } - } -} - -func TestURLsSort(t *testing.T) { - g := testutil.MustNewURLs(t, []string{ - "http://127.0.0.4:4001", - "http://127.0.0.2:4001", - "http://127.0.0.1:4001", - "http://127.0.0.3:4001", - }) - w := testutil.MustNewURLs(t, []string{ - "http://127.0.0.1:4001", - "http://127.0.0.2:4001", - "http://127.0.0.3:4001", - "http://127.0.0.4:4001", - }) - gurls := URLs(g) - gurls.Sort() - if !reflect.DeepEqual(g, w) { - t.Errorf("URLs after sort = %#v, want %#v", g, w) - } -} - -func TestURLsStringSlice(t *testing.T) { - tests := []struct { - us URLs - wstr []string - }{ - { - URLs{}, - []string{}, - }, - { - testutil.MustNewURLs(t, []string{"http://127.0.0.1:4001"}), - []string{"http://127.0.0.1:4001"}, - }, - { - testutil.MustNewURLs(t, []string{ - "http://127.0.0.1:4001", - "http://127.0.0.2:4001", - }), - []string{"http://127.0.0.1:4001", "http://127.0.0.2:4001"}, - }, - { - testutil.MustNewURLs(t, []string{ - "http://127.0.0.2:4001", - "http://127.0.0.1:4001", - }), - []string{"http://127.0.0.2:4001", "http://127.0.0.1:4001"}, - }, - } - for i, tt := range tests { - g := tt.us.StringSlice() - if !reflect.DeepEqual(g, tt.wstr) { - t.Errorf("#%d: string slice = %+v, want %+v", i, g, tt.wstr) - } - } -} - -func TestNewURLsFail(t *testing.T) { - tests := [][]string{ - // no urls given - {}, - // missing protocol scheme - {"://127.0.0.1:4001"}, - // unsupported scheme - {"mailto://127.0.0.1:4001"}, - // not conform to host:port - {"http://127.0.0.1"}, - // contain a path - {"http://127.0.0.1:4001/path"}, - } - for i, tt := range tests { - _, err := NewURLs(tt) - if err == nil { - t.Errorf("#%d: err = nil, but error", i) - } - } -} diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go index 8ecb50ee53f..c6cf3341bac 100644 --- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go 
+++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client.go @@ -15,8 +15,6 @@ import ( "path" "strings" "time" - - "github.com/coreos/etcd/etcdserver/etcdhttp/httptypes" ) // See SetConsistency for how to use these constants. @@ -44,10 +42,17 @@ type Config struct { Consistency string `json:"consistency"` } +type credentials struct { + username string + password string +} + type Client struct { config Config `json:"config"` cluster *Cluster `json:"cluster"` httpClient *http.Client + credentials *credentials + transport *http.Transport persistence io.Writer cURLch chan string // CheckRetry can be used to control the policy for failed requests @@ -172,17 +177,27 @@ func NewClientFromReader(reader io.Reader) (*Client, error) { // Override the Client's HTTP Transport object func (c *Client) SetTransport(tr *http.Transport) { c.httpClient.Transport = tr + c.transport = tr +} + +func (c *Client) SetCredentials(username, password string) { + c.credentials = &credentials{username, password} +} + +func (c *Client) Close() { + c.transport.DisableKeepAlives = true + c.transport.CloseIdleConnections() } // initHTTPClient initializes a HTTP client for etcd client func (c *Client) initHTTPClient() { - tr := &http.Transport{ + c.transport = &http.Transport{ Dial: c.dial, TLSClientConfig: &tls.Config{ InsecureSkipVerify: true, }, } - c.httpClient = &http.Client{Transport: tr} + c.httpClient = &http.Client{Transport: c.transport} } // initHTTPClient initializes a HTTPS client for etcd client @@ -305,31 +320,49 @@ func (c *Client) internalSyncCluster(machines []string) bool { continue } - b, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - // try another machine in the cluster - continue - } + if resp.StatusCode != http.StatusOK { // fall-back to old endpoint + httpPath := c.createHttpPath(machine, path.Join(version, "machines")) + resp, err := c.httpClient.Get(httpPath) + if err != nil { + // try another machine in the cluster + continue + } + b, 
err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + // try another machine in the cluster + continue + } + // update Machines List + c.cluster.updateFromStr(string(b)) + } else { + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + // try another machine in the cluster + continue + } - var mCollection httptypes.MemberCollection - if err := json.Unmarshal(b, &mCollection); err != nil { - // try another machine - continue - } + var mCollection memberCollection + if err := json.Unmarshal(b, &mCollection); err != nil { + // try another machine + continue + } - urls := make([]string, 0) - for _, m := range mCollection { - urls = append(urls, m.ClientURLs...) - } + urls := make([]string, 0) + for _, m := range mCollection { + urls = append(urls, m.ClientURLs...) + } - // update Machines List - c.cluster.updateFromStr(strings.Join(urls, ",")) + // update Machines List + c.cluster.updateFromStr(strings.Join(urls, ",")) + } logger.Debug("sync.machines ", c.cluster.Machines) c.saveConfig() return true } + return false } diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go index 66d79d73320..4720d8d693e 100644 --- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go +++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/client_test.go @@ -94,3 +94,15 @@ func TestPersistence(t *testing.T) { t.Fatalf("The two configs should be equal!") } } + +func TestClientRetry(t *testing.T) { + c := NewClient([]string{"http://strange", "http://127.0.0.1:4001"}) + // use first endpoint as the picked url + c.cluster.picked = 0 + if _, err := c.Set("foo", "bar", 5); err != nil { + t.Fatal(err) + } + if _, err := c.Delete("foo", true); err != nil { + t.Fatal(err) + } +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go index 
787cf753ba2..1ad3e155be5 100644 --- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go +++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/cluster.go @@ -30,5 +30,8 @@ func (cl *Cluster) pick() string { return cl.Machines[cl.picked] } func (cl *Cluster) updateFromStr(machines string) { cl.Machines = strings.Split(machines, ",") + for i := range cl.Machines { + cl.Machines[i] = strings.TrimSpace(cl.Machines[i]) + } cl.picked = rand.Intn(len(cl.Machines)) } diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member.go new file mode 100644 index 00000000000..5b13b28e1a3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member.go @@ -0,0 +1,30 @@ +package etcd + +import "encoding/json" + +type Member struct { + ID string `json:"id"` + Name string `json:"name"` + PeerURLs []string `json:"peerURLs"` + ClientURLs []string `json:"clientURLs"` +} + +type memberCollection []Member + +func (c *memberCollection) UnmarshalJSON(data []byte) error { + d := struct { + Members []Member + }{} + + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + if d.Members == nil { + *c = make([]Member, 0) + return nil + } + + *c = d.Members + return nil +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member_test.go new file mode 100644 index 00000000000..53ebdd4bfde --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/member_test.go @@ -0,0 +1,71 @@ +package etcd + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestMemberCollectionUnmarshal(t *testing.T) { + tests := []struct { + body []byte + want memberCollection + }{ + { + body: []byte(`{"members":[]}`), + want: memberCollection([]Member{}), + }, + { + body: 
[]byte(`{"members":[{"id":"2745e2525fce8fe","peerURLs":["http://127.0.0.1:7003"],"name":"node3","clientURLs":["http://127.0.0.1:4003"]},{"id":"42134f434382925","peerURLs":["http://127.0.0.1:2380","http://127.0.0.1:7001"],"name":"node1","clientURLs":["http://127.0.0.1:2379","http://127.0.0.1:4001"]},{"id":"94088180e21eb87b","peerURLs":["http://127.0.0.1:7002"],"name":"node2","clientURLs":["http://127.0.0.1:4002"]}]}`), + want: memberCollection( + []Member{ + { + ID: "2745e2525fce8fe", + Name: "node3", + PeerURLs: []string{ + "http://127.0.0.1:7003", + }, + ClientURLs: []string{ + "http://127.0.0.1:4003", + }, + }, + { + ID: "42134f434382925", + Name: "node1", + PeerURLs: []string{ + "http://127.0.0.1:2380", + "http://127.0.0.1:7001", + }, + ClientURLs: []string{ + "http://127.0.0.1:2379", + "http://127.0.0.1:4001", + }, + }, + { + ID: "94088180e21eb87b", + Name: "node2", + PeerURLs: []string{ + "http://127.0.0.1:7002", + }, + ClientURLs: []string{ + "http://127.0.0.1:4002", + }, + }, + }, + ), + }, + } + + for i, tt := range tests { + var got memberCollection + err := json.Unmarshal(tt.body, &got) + if err != nil { + t.Errorf("#%d: unexpected error: %v", i, err) + continue + } + + if !reflect.DeepEqual(tt.want, got) { + t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.want, got) + } + } +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go index 70d9db2defb..c4d2267da79 100644 --- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go +++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests.go @@ -5,7 +5,6 @@ import ( "fmt" "io" "io/ioutil" - "net" "net/http" "net/url" "path" @@ -189,7 +188,10 @@ func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) { logger.Debug("Connecting to etcd: attempt ", attempt+1, " for ", rr.RelativePath) - httpPath = c.getHttpPath(rr.RelativePath) + // get httpPath if not set + if httpPath == "" { + 
httpPath = c.getHttpPath(rr.RelativePath) + } // Return a cURL command if curlChan is set if c.cURLch != nil { @@ -197,6 +199,9 @@ func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) { for key, value := range rr.Values { command += fmt.Sprintf(" -d %s=%s", key, value[0]) } + if c.credentials != nil { + command += fmt.Sprintf(" -u %s", c.credentials.username) + } c.sendCURL(command) } @@ -226,7 +231,13 @@ func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) { return nil, err } + if c.credentials != nil { + req.SetBasicAuth(c.credentials.username, c.credentials.password) + } + resp, err = c.httpClient.Do(req) + // clear previous httpPath + httpPath = "" defer func() { if resp != nil { resp.Body.Close() @@ -281,6 +292,19 @@ func (c *Client) SendRequest(rr *RawRequest) (*RawResponse, error) { } } + if resp.StatusCode == http.StatusTemporaryRedirect { + u, err := resp.Location() + + if err != nil { + logger.Warning(err) + } else { + // set httpPath for following redirection + httpPath = u.String() + } + resp.Body.Close() + continue + } + if checkErr := checkRetry(c.cluster, numReqs, *resp, errors.New("Unexpected HTTP status code")); checkErr != nil { return nil, checkErr @@ -304,9 +328,8 @@ func DefaultCheckRetry(cluster *Cluster, numReqs int, lastResp http.Response, err error) error { if isEmptyResponse(lastResp) { - if !isConnectionError(err) { - return err - } + // always retry if it failed to get response from one machine + return nil } else if !shouldRetry(lastResp) { body := []byte("nil") if lastResp.Body != nil { @@ -333,11 +356,6 @@ func DefaultCheckRetry(cluster *Cluster, numReqs int, lastResp http.Response, func isEmptyResponse(r http.Response) bool { return r.StatusCode == 0 } -func isConnectionError(err error) bool { - _, ok := err.(*net.OpError) - return ok -} - // shouldRetry returns whether the reponse deserves retry. 
func shouldRetry(r http.Response) bool { // TODO: only retry when the cluster is in leader election diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests_test.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests_test.go new file mode 100644 index 00000000000..7a2bd190a10 --- /dev/null +++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/requests_test.go @@ -0,0 +1,22 @@ +package etcd + +import "testing" + +func TestKeyToPath(t *testing.T) { + tests := []struct { + key string + wpath string + }{ + {"", "keys/"}, + {"foo", "keys/foo"}, + {"foo/bar", "keys/foo/bar"}, + {"%z", "keys/%25z"}, + {"/", "keys/"}, + } + for i, tt := range tests { + path := keyToPath(tt.key) + if path != tt.wpath { + t.Errorf("#%d: path = %s, want %s", i, path, tt.wpath) + } + } +} diff --git a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go index b3d05df70bc..b1e9ed2713d 100644 --- a/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go +++ b/Godeps/_workspace/src/github.com/coreos/go-etcd/etcd/version.go @@ -1,3 +1,6 @@ package etcd -const version = "v2" +const ( + version = "v2" + packageVersion = "v2.0.0+git" +) diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/.travis.yml b/Godeps/_workspace/src/github.com/rackspace/gophercloud/.travis.yml index 946f98cb3da..0882a569501 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/.travis.yml +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/.travis.yml @@ -9,7 +9,7 @@ go: - tip script: script/cibuild after_success: - - go get code.google.com/p/go.tools/cmd/cover + - go get golang.org/x/tools/cmd/cover - go get github.com/axw/gocov/gocov - go get github.com/mattn/goveralls - export PATH=$PATH:$HOME/gopath/bin/ diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/CONTRIBUTING.md b/Godeps/_workspace/src/github.com/rackspace/gophercloud/CONTRIBUTING.md index 
93b798e5a2b..9748c1ad2b1 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/CONTRIBUTING.md +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/CONTRIBUTING.md @@ -205,7 +205,7 @@ On of the easiest ways to get readily involved in our project is to let us know about your experiences using our SDK. Feedback like this is incredibly useful to us, because it allows us to refine and change features based on what our users want and expect of us. There are a bunch of ways to get in contact! You -can [ping us](mailto:sdk-support@rackspace.com) via e-mail, talk to us on irc +can [ping us](https://developer.rackspace.com/support/) via e-mail, talk to us on irc (#rackspace-dev on freenode), [tweet us](https://twitter.com/rackspace), or submit an issue on our [bug tracker](/issues). Things you might like to tell us are: diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/README.md b/Godeps/_workspace/src/github.com/rackspace/gophercloud/README.md index 9f7552b0d2d..19e90e0f4d5 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/README.md +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/README.md @@ -25,7 +25,7 @@ export GOPATH=$HOME/go ``` To protect yourself against changes in your dependencies, we highly recommend choosing a -[dependency management solution](https://code.google.com/p/go-wiki/wiki/PackageManagementTools) for +[dependency management solution](https://github.com/golang/go/wiki/PackageManagementTools) for your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install Gophercloud as a dependency like so: @@ -151,11 +151,10 @@ Engaging the community and lowering barriers for contributors is something we care a lot about. For this reason, we've taken the time to write a [contributing guide](./CONTRIBUTING.md) for folks interested in getting involved in our project. 
If you're not sure how you can get involved, feel free to submit an issue or -[e-mail us](mailto:sdk-support@rackspace.com) privately. You don't need to be a +[contact us](https://developer.rackspace.com/support/). You don't need to be a Go expert - all members of the community are welcome! ## Help and feedback If you're struggling with something or have spotted a potential bug, feel free -to submit an issue to our [bug tracker](/issues) or e-mail us directly at -[sdk-support@rackspace.com](mailto:sdk-support@rackspace.com). +to submit an issue to our [bug tracker](/issues) or [contact us directly](https://developer.rackspace.com/support/). diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/UPGRADING.md b/Godeps/_workspace/src/github.com/rackspace/gophercloud/UPGRADING.md index a702cfc5095..76a94d57032 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/UPGRADING.md +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/UPGRADING.md @@ -8,7 +8,7 @@ extensible, maintainable and easy-to-use. Below we've compiled upgrade instructions for the various services that existed before. If you have a specific issue that is not addressed below, please [submit an issue](/issues/new) or -[e-mail our support team](mailto:sdk-support@rackspace.com). +[e-mail our support team](https://developer.rackspace.com/support/). 
* [Authentication](#authentication) * [Servers](#servers) diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/compute_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/compute_test.go index 33e49fea9bb..c1bbf7961f4 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/compute_test.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/compute_test.go @@ -56,6 +56,9 @@ type ComputeChoices struct { // FlavorIDResize contains the ID of a different flavor available on the same OpenStack installation, that is distinct // from FlavorID. FlavorIDResize string + + // NetworkName is the name of a network to launch the instance on. + NetworkName string } // ComputeChoicesFromEnv populates a ComputeChoices struct from environment variables. @@ -64,6 +67,7 @@ func ComputeChoicesFromEnv() (*ComputeChoices, error) { imageID := os.Getenv("OS_IMAGE_ID") flavorID := os.Getenv("OS_FLAVOR_ID") flavorIDResize := os.Getenv("OS_FLAVOR_ID_RESIZE") + networkName := os.Getenv("OS_NETWORK_NAME") missing := make([]string, 0, 3) if imageID == "" { @@ -75,6 +79,9 @@ func ComputeChoicesFromEnv() (*ComputeChoices, error) { if flavorIDResize == "" { missing = append(missing, "OS_FLAVOR_ID_RESIZE") } + if networkName == "" { + networkName = "public" + } notDistinct := "" if flavorID == flavorIDResize { @@ -93,5 +100,5 @@ func ComputeChoicesFromEnv() (*ComputeChoices, error) { return nil, fmt.Errorf(text) } - return &ComputeChoices{ImageID: imageID, FlavorID: flavorID, FlavorIDResize: flavorIDResize}, nil + return &ComputeChoices{ImageID: imageID, FlavorID: flavorID, FlavorIDResize: flavorIDResize, NetworkName: networkName}, nil } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/keypairs_test.go 
b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/keypairs_test.go index 3e12d6b3cd4..a4fe8db2d0d 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/keypairs_test.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/keypairs_test.go @@ -12,7 +12,7 @@ import ( "github.com/rackspace/gophercloud/openstack/compute/v2/servers" th "github.com/rackspace/gophercloud/testhelper" - "code.google.com/p/go.crypto/ssh" + "golang.org/x/crypto/ssh" ) const keyName = "gophercloud_test_key_pair" diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servergroup_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servergroup_test.go new file mode 100644 index 00000000000..80015e143f5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servergroup_test.go @@ -0,0 +1,58 @@ +// +build acceptance compute servers + +package v2 + +import ( + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups" +) + +func createServerGroup(t *testing.T, computeClient *gophercloud.ServiceClient) (*servergroups.ServerGroup, error) { + sg, err := servergroups.Create(computeClient, &servergroups.CreateOpts{ + Name: "test", + Policies: []string{"affinity"}, + }).Extract() + + if err != nil { + t.Fatalf("Unable to create server group: %v", err) + } + + t.Logf("Created server group: %v", sg.ID) + t.Logf("It has policies: %v", sg.Policies) + + return sg, nil +} + +func getServerGroup(t *testing.T, computeClient *gophercloud.ServiceClient, sgID string) error { + sg, err := servergroups.Get(computeClient, sgID).Extract() + if err != nil { + t.Fatalf("Unable to get server group: %v", err) + } + + t.Logf("Got server group: %v", sg.Name) + + return nil +} + +func 
TestServerGroups(t *testing.T) { + computeClient, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + sg, err := createServerGroup(t, computeClient) + if err != nil { + t.Fatalf("Unable to create server group: %v", err) + } + defer func() { + servergroups.Delete(computeClient, sg.ID) + t.Logf("ServerGroup deleted.") + }() + + err = getServerGroup(t, computeClient, sg.ID) + if err != nil { + t.Fatalf("Unable to get server group: %v", err) + } +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servers_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servers_test.go index d52a9d35376..7b928e9ef50 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servers_test.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/servers_test.go @@ -57,7 +57,6 @@ func networkingClient() (*gophercloud.ServiceClient, error) { } return openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{ - Name: "neutron", Region: os.Getenv("OS_REGION_NAME"), }) } @@ -74,7 +73,10 @@ func createServer(t *testing.T, client *gophercloud.ServiceClient, choices *Comp t.Fatalf("Unable to create a networking client: %v", err) } - pager := networks.List(networkingClient, networks.ListOpts{Name: "public", Limit: 1}) + pager := networks.List(networkingClient, networks.ListOpts{ + Name: choices.NetworkName, + Limit: 1, + }) pager.EachPage(func(page pagination.Page) (bool, error) { networks, err := networks.ExtractNetworks(page) if err != nil { @@ -138,6 +140,32 @@ func TestCreateDestroyServer(t *testing.T) { if err = waitForStatus(client, server, "ACTIVE"); err != nil { t.Fatalf("Unable to wait for server: %v", err) } + + pager := servers.ListAddresses(client, server.ID) + pager.EachPage(func(page pagination.Page) (bool, error) { + networks, err := 
servers.ExtractAddresses(page) + if err != nil { + return false, err + } + + for n, a := range networks { + t.Logf("%s: %+v\n", n, a) + } + return true, nil + }) + + pager = servers.ListAddressesByNetwork(client, server.ID, choices.NetworkName) + pager.EachPage(func(page pagination.Page) (bool, error) { + addresses, err := servers.ExtractNetworkAddresses(page) + if err != nil { + return false, err + } + + for _, a := range addresses { + t.Logf("%+v\n", a) + } + return true, nil + }) } func TestUpdateServer(t *testing.T) { diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/tenantnetworks_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/tenantnetworks_test.go new file mode 100644 index 00000000000..a92e8bf5314 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/openstack/compute/v2/tenantnetworks_test.go @@ -0,0 +1,109 @@ +// +build acceptance compute servers + +package v2 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/acceptance/tools" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + th "github.com/rackspace/gophercloud/testhelper" +) + +func getNetworkID(t *testing.T, client *gophercloud.ServiceClient, networkName string) (string, error) { + allPages, err := tenantnetworks.List(client).AllPages() + if err != nil { + t.Fatalf("Unable to list networks: %v", err) + } + + networkList, err := tenantnetworks.ExtractNetworks(allPages) + if err != nil { + t.Fatalf("Unable to list networks: %v", err) + } + + networkID := "" + for _, network := range networkList { + t.Logf("Network: %v", network) + if network.Name == networkName { + networkID = network.ID + } + } + + t.Logf("Found network ID for %s: %s\n", networkName, networkID) + + return networkID, nil +} + +func createNetworkServer(t 
*testing.T, client *gophercloud.ServiceClient, choices *ComputeChoices, networkID string) (*servers.Server, error) { + if testing.Short() { + t.Skip("Skipping test that requires server creation in short mode.") + } + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create server: %s\n", name) + + pwd := tools.MakeNewPassword("") + + networks := make([]servers.Network, 1) + networks[0] = servers.Network{ + UUID: networkID, + } + + server, err := servers.Create(client, servers.CreateOpts{ + Name: name, + FlavorRef: choices.FlavorID, + ImageRef: choices.ImageID, + AdminPass: pwd, + Networks: networks, + }).Extract() + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + + th.AssertEquals(t, pwd, server.AdminPass) + + return server, err +} + +func TestTenantNetworks(t *testing.T) { + networkName := os.Getenv("OS_NETWORK_NAME") + if networkName == "" { + t.Fatalf("OS_NETWORK_NAME must be set") + } + + choices, err := ComputeChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + client, err := newClient() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + networkID, err := getNetworkID(t, client, networkName) + if err != nil { + t.Fatalf("Unable to get network ID: %v", err) + } + + server, err := createNetworkServer(t, client, choices, networkID) + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + defer func() { + servers.Delete(client, server.ID) + t.Logf("Server deleted.") + }() + + if err = waitForStatus(client, server, "ACTIVE"); err != nil { + t.Fatalf("Unable to wait for server: %v", err) + } + + allPages, err := tenantnetworks.List(client).AllPages() + allNetworks, err := tenantnetworks.ExtractNetworks(allPages) + th.AssertNoErr(t, err) + t.Logf("Retrieved all %d networks: %+v", len(allNetworks), allNetworks) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/security_test.go 
b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/security_test.go new file mode 100644 index 00000000000..ec029913e3b --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/networking/v2/security_test.go @@ -0,0 +1,165 @@ +// +build acceptance networking security + +package v2 + +import ( + "testing" + + osGroups "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups" + osRules "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules" + osNetworks "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + osPorts "github.com/rackspace/gophercloud/openstack/networking/v2/ports" + "github.com/rackspace/gophercloud/pagination" + rsNetworks "github.com/rackspace/gophercloud/rackspace/networking/v2/networks" + rsPorts "github.com/rackspace/gophercloud/rackspace/networking/v2/ports" + rsGroups "github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups" + rsRules "github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestSecurityGroups(t *testing.T) { + Setup(t) + defer Teardown() + + // create security group + groupID := createSecGroup(t) + + // delete security group + defer deleteSecGroup(t, groupID) + + // list security group + listSecGroups(t) + + // get security group + getSecGroup(t, groupID) +} + +func TestSecurityGroupRules(t *testing.T) { + Setup(t) + defer Teardown() + + // create security group + groupID := createSecGroup(t) + + defer deleteSecGroup(t, groupID) + + // create security group rule + ruleID := createSecRule(t, groupID) + + // delete security group rule + defer deleteSecRule(t, ruleID) + + // list security group rule + listSecRules(t) + + // get security group rule + getSecRule(t, ruleID) +} + +func createSecGroup(t *testing.T) string { + sg, err := rsGroups.Create(Client, osGroups.CreateOpts{ + Name: 
"new-webservers", + Description: "security group for webservers", + }).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Created security group %s", sg.ID) + + return sg.ID +} + +func listSecGroups(t *testing.T) { + err := rsGroups.List(Client, osGroups.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + list, err := osGroups.ExtractGroups(page) + if err != nil { + t.Errorf("Failed to extract secgroups: %v", err) + return false, err + } + + for _, sg := range list { + t.Logf("Listing security group: ID [%s] Name [%s]", sg.ID, sg.Name) + } + + return true, nil + }) + + th.AssertNoErr(t, err) +} + +func getSecGroup(t *testing.T, id string) { + sg, err := rsGroups.Get(Client, id).Extract() + th.AssertNoErr(t, err) + t.Logf("Getting security group: ID [%s] Name [%s] Description [%s]", sg.ID, sg.Name, sg.Description) +} + +func createSecGroupPort(t *testing.T, groupID string) (string, string) { + n, err := rsNetworks.Create(Client, osNetworks.CreateOpts{Name: "tmp_network"}).Extract() + th.AssertNoErr(t, err) + t.Logf("Created network %s", n.ID) + + opts := osPorts.CreateOpts{ + NetworkID: n.ID, + Name: "my_port", + SecurityGroups: []string{groupID}, + } + p, err := rsPorts.Create(Client, opts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created port %s with security group %s", p.ID, groupID) + + return n.ID, p.ID +} + +func deleteSecGroup(t *testing.T, groupID string) { + res := rsGroups.Delete(Client, groupID) + th.AssertNoErr(t, res.Err) + t.Logf("Deleted security group %s", groupID) +} + +func createSecRule(t *testing.T, groupID string) string { + r, err := rsRules.Create(Client, osRules.CreateOpts{ + Direction: "ingress", + PortRangeMin: 80, + EtherType: "IPv4", + PortRangeMax: 80, + Protocol: "tcp", + SecGroupID: groupID, + }).Extract() + + th.AssertNoErr(t, err) + + t.Logf("Created security group rule %s", r.ID) + + return r.ID +} + +func listSecRules(t *testing.T) { + err := rsRules.List(Client, osRules.ListOpts{}).EachPage(func(page 
pagination.Page) (bool, error) { + list, err := osRules.ExtractRules(page) + if err != nil { + t.Errorf("Failed to extract sec rules: %v", err) + return false, err + } + + for _, r := range list { + t.Logf("Listing security rule: ID [%s]", r.ID) + } + + return true, nil + }) + + th.AssertNoErr(t, err) +} + +func getSecRule(t *testing.T, id string) { + r, err := rsRules.Get(Client, id).Extract() + th.AssertNoErr(t, err) + t.Logf("Getting security rule: ID [%s] Direction [%s] EtherType [%s] Protocol [%s]", + r.ID, r.Direction, r.EtherType, r.Protocol) +} + +func deleteSecRule(t *testing.T, id string) { + res := rsRules.Delete(Client, id) + th.AssertNoErr(t, res.Err) + t.Logf("Deleted security rule %s", id) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/cloudnetworks_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/cloudnetworks_test.go new file mode 100644 index 00000000000..2c6287e9f77 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/cloudnetworks_test.go @@ -0,0 +1,36 @@ +// +build acceptance + +package v3 + +import ( + "fmt" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestCloudNetworks(t *testing.T) { + c := newClient(t) + cnID := testListNetworks(t, c) + testGetNetworks(t, c, cnID) +} + +func testListNetworks(t *testing.T, c *gophercloud.ServiceClient) string { + allPages, err := cloudnetworks.List(c).AllPages() + th.AssertNoErr(t, err) + allcn, err := cloudnetworks.ExtractCloudNetworks(allPages) + fmt.Printf("Listing all cloud networks: %+v\n\n", allcn) + var cnID string + if len(allcn) > 0 { + cnID = allcn[0].ID + } + return cnID +} + +func testGetNetworks(t *testing.T, c *gophercloud.ServiceClient, id string) { + cn, err := cloudnetworks.Get(c, 
id).Extract() + th.AssertNoErr(t, err) + fmt.Printf("Retrieved cloud network: %+v\n\n", cn) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/common.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/common.go new file mode 100644 index 00000000000..8c753141745 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/common.go @@ -0,0 +1,26 @@ +// +build acceptance + +package v3 + +import ( + "os" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace" + th "github.com/rackspace/gophercloud/testhelper" +) + +func newClient(t *testing.T) *gophercloud.ServiceClient { + ao, err := rackspace.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + client, err := rackspace.AuthenticatedClient(ao) + th.AssertNoErr(t, err) + + c, err := rackspace.NewRackConnectV3(client, gophercloud.EndpointOpts{ + Region: os.Getenv("RS_REGION_NAME"), + }) + th.AssertNoErr(t, err) + return c +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/lbpools_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/lbpools_test.go new file mode 100644 index 00000000000..85ac931b9ca --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/lbpools_test.go @@ -0,0 +1,71 @@ +// +build acceptance + +package v3 + +import ( + "fmt" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestLBPools(t *testing.T) { + c := newClient(t) + pID := testListPools(t, c) + testGetPools(t, c, pID) + nID := testListNodes(t, c, pID) + testListNodeDetails(t, c, pID) + testGetNode(t, c, pID, nID) + testGetNodeDetails(t, c, pID, nID) +} + +func 
testListPools(t *testing.T, c *gophercloud.ServiceClient) string { + allPages, err := lbpools.List(c).AllPages() + th.AssertNoErr(t, err) + allp, err := lbpools.ExtractPools(allPages) + fmt.Printf("Listing all LB pools: %+v\n\n", allp) + var pID string + if len(allp) > 0 { + pID = allp[0].ID + } + return pID +} + +func testGetPools(t *testing.T, c *gophercloud.ServiceClient, pID string) { + p, err := lbpools.Get(c, pID).Extract() + th.AssertNoErr(t, err) + fmt.Printf("Retrieved LB pool: %+v\n\n", p) +} + +func testListNodes(t *testing.T, c *gophercloud.ServiceClient, pID string) string { + allPages, err := lbpools.ListNodes(c, pID).AllPages() + th.AssertNoErr(t, err) + alln, err := lbpools.ExtractNodes(allPages) + fmt.Printf("Listing all LB pool nodes for pool (%s): %+v\n\n", pID, alln) + var nID string + if len(alln) > 0 { + nID = alln[0].ID + } + return nID +} + +func testListNodeDetails(t *testing.T, c *gophercloud.ServiceClient, pID string) { + allPages, err := lbpools.ListNodesDetails(c, pID).AllPages() + th.AssertNoErr(t, err) + alln, err := lbpools.ExtractNodesDetails(allPages) + fmt.Printf("Listing all LB pool nodes details for pool (%s): %+v\n\n", pID, alln) +} + +func testGetNode(t *testing.T, c *gophercloud.ServiceClient, pID, nID string) { + n, err := lbpools.GetNode(c, pID, nID).Extract() + th.AssertNoErr(t, err) + fmt.Printf("Retrieved LB node: %+v\n\n", n) +} + +func testGetNodeDetails(t *testing.T, c *gophercloud.ServiceClient, pID, nID string) { + n, err := lbpools.GetNodeDetails(c, pID, nID).Extract() + th.AssertNoErr(t, err) + fmt.Printf("Retrieved LB node details: %+v\n\n", n) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/publicips_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/publicips_test.go new file mode 100644 index 00000000000..8dc62703ba7 --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/rackspace/gophercloud/acceptance/rackspace/rackconnect/v3/publicips_test.go @@ -0,0 +1,45 @@ +// +build acceptance + +package v3 + +import ( + "fmt" + "testing" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestPublicIPs(t *testing.T) { + c := newClient(t) + ipID := testListIPs(t, c) + sID := testGetIP(t, c, ipID) + testListIPsForServer(t, c, sID) +} + +func testListIPs(t *testing.T, c *gophercloud.ServiceClient) string { + allPages, err := publicips.List(c).AllPages() + th.AssertNoErr(t, err) + allip, err := publicips.ExtractPublicIPs(allPages) + fmt.Printf("Listing all public IPs: %+v\n\n", allip) + var ipID string + if len(allip) > 0 { + ipID = allip[0].ID + } + return ipID +} + +func testGetIP(t *testing.T, c *gophercloud.ServiceClient, ipID string) string { + ip, err := publicips.Get(c, ipID).Extract() + th.AssertNoErr(t, err) + fmt.Printf("Retrieved public IP (%s): %+v\n\n", ipID, ip) + return ip.CloudServer.ID +} + +func testListIPsForServer(t *testing.T, c *gophercloud.ServiceClient, sID string) { + allPages, err := publicips.ListForServer(c, sID).AllPages() + th.AssertNoErr(t, err) + allip, err := publicips.ExtractPublicIPs(allPages) + fmt.Printf("Listing all public IPs for server (%s): %+v\n\n", sID, allip) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests.go index f5a793c35ce..bb2c2591584 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/apiversions/requests.go @@ -16,9 +16,6 @@ func List(c *gophercloud.ServiceClient) pagination.Pager { // type from the result, call the 
Extract method on the GetResult. func Get(client *gophercloud.ServiceClient, v string) GetResult { var res GetResult - _, res.Err = client.Request("GET", getURL(client, v), gophercloud.RequestOpts{ - OkCodes: []int{200}, - JSONResponse: &res.Body, - }) + _, res.Err = client.Get(getURL(client, v), &res.Body, nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests.go index 1b313a6080e..d2f10aa6b59 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/snapshots/requests.go @@ -67,10 +67,8 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes return res } - _, res.Err = client.Request("POST", createURL(client), gophercloud.RequestOpts{ - OkCodes: []int{200, 201}, - JSONBody: &reqBody, - JSONResponse: &res.Body, + _, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, }) return res } @@ -78,9 +76,7 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes // Delete will delete the existing Snapshot with the provided ID. func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = client.Request("DELETE", deleteURL(client, id), gophercloud.RequestOpts{ - OkCodes: []int{202, 204}, - }) + _, res.Err = client.Delete(deleteURL(client, id), nil) return res } @@ -88,10 +84,7 @@ func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { // object from the response, call the Extract method on the GetResult. 
func Get(client *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = client.Request("GET", getURL(client, id), gophercloud.RequestOpts{ - OkCodes: []int{200}, - JSONResponse: &res.Body, - }) + _, res.Err = client.Get(getURL(client, id), &res.Body, nil) return res } @@ -173,10 +166,8 @@ func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMet return res } - _, res.Err = client.Request("PUT", updateMetadataURL(client, id), gophercloud.RequestOpts{ - OkCodes: []int{200}, - JSONBody: &reqBody, - JSONResponse: &res.Body, + _, res.Err = client.Put(updateMetadataURL(client, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go index e67ba105817..253aaf7c545 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go @@ -83,10 +83,8 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes return res } - _, res.Err = client.Request("POST", createURL(client), gophercloud.RequestOpts{ - OkCodes: []int{200, 201}, - JSONBody: &reqBody, - JSONResponse: &res.Body, + _, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, }) return res } @@ -94,9 +92,7 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes // Delete will delete the existing Volume with the provided ID. 
func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = client.Request("DELETE", deleteURL(client, id), gophercloud.RequestOpts{ - OkCodes: []int{202, 204}, - }) + _, res.Err = client.Delete(deleteURL(client, id), nil) return res } @@ -104,10 +100,7 @@ func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { // from the response, call the Extract method on the GetResult. func Get(client *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = client.Request("GET", getURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = client.Get(getURL(client, id), &res.Body, nil) return res } @@ -203,10 +196,8 @@ func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder return res } - _, res.Err = client.Request("PUT", updateURL(client, id), gophercloud.RequestOpts{ - OkCodes: []int{200}, - JSONBody: &reqBody, - JSONResponse: &res.Body, + _, res.Err = client.Put(updateURL(client, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go index 6fedaa68968..1673d13aaf3 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go @@ -44,11 +44,8 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes return res } - _, res.Err = client.Request("POST", createURL(client), gophercloud.RequestOpts{ - MoreHeaders: client.AuthenticatedHeaders(), - OkCodes: []int{200, 201}, - JSONBody: &reqBody, - JSONResponse: &res.Body, + _, res.Err = client.Post(createURL(client), 
reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, }) return res } @@ -56,10 +53,7 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes // Delete will delete the volume type with the provided ID. func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = client.Request("DELETE", deleteURL(client, id), gophercloud.RequestOpts{ - MoreHeaders: client.AuthenticatedHeaders(), - OkCodes: []int{202}, - }) + _, res.Err = client.Delete(deleteURL(client, id), nil) return res } @@ -67,11 +61,7 @@ func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { // type from the result, call the Extract method on the GetResult. func Get(client *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, err := client.Request("GET", getURL(client, id), gophercloud.RequestOpts{ - MoreHeaders: client.AuthenticatedHeaders(), - OkCodes: []int{200}, - JSONResponse: &res.Body, - }) + _, err := client.Get(getURL(client, id), &res.Body, nil) res.Err = err return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests.go index b63dc95a71a..dd221bc983b 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests.go @@ -6,17 +6,14 @@ import "github.com/rackspace/gophercloud" // entire API. func Get(c *gophercloud.ServiceClient) GetResult { var res GetResult - _, res.Err = c.Request("GET", getURL(c), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(getURL(c), &res.Body, nil) return res } // Ping retrieves a ping to the server. 
func Ping(c *gophercloud.ServiceClient) PingResult { var res PingResult - _, res.Err = c.Request("GET", pingURL(c), gophercloud.RequestOpts{ + _, res.Err = c.Get(pingURL(c), nil, &gophercloud.RequestOpts{ OkCodes: []int{204}, MoreHeaders: map[string]string{"Accept": ""}, }) diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests_test.go index a8d95f9ace9..2c20a71103f 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests_test.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/base/requests_test.go @@ -17,18 +17,18 @@ func TestGetHomeDocument(t *testing.T) { expected := HomeDocument{ "rel/cdn": map[string]interface{}{ - "href-template": "services{?marker,limit}", - "href-vars": map[string]interface{}{ - "marker": "param/marker", - "limit": "param/limit", - }, - "hints": map[string]interface{}{ - "allow": []string{"GET"}, - "formats": map[string]interface{}{ - "application/json": map[string]interface{}{}, - }, - }, - }, + "href-template": "services{?marker,limit}", + "href-vars": map[string]interface{}{ + "marker": "param/marker", + "limit": "param/limit", + }, + "hints": map[string]interface{}{ + "allow": []string{"GET"}, + "formats": map[string]interface{}{ + "application/json": map[string]interface{}{}, + }, + }, + }, } th.CheckDeepEquals(t, expected, *actual) } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/fixtures.go index f413b6bc41b..d7ec1a00d53 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/fixtures.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/fixtures.go @@ -1,24 +1,24 @@ package flavors import ( - "fmt" - "net/http" - 
"testing" + "fmt" + "net/http" + "testing" - th "github.com/rackspace/gophercloud/testhelper" - fake "github.com/rackspace/gophercloud/testhelper/client" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" ) // HandleListCDNFlavorsSuccessfully creates an HTTP handler at `/flavors` on the test handler mux // that responds with a `List` response. func HandleListCDNFlavorsSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/flavors", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.Mux.HandleFunc("/flavors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, ` + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` { "flavors": [ { @@ -44,19 +44,19 @@ func HandleListCDNFlavorsSuccessfully(t *testing.T) { ] } `) - }) + }) } // HandleGetCDNFlavorSuccessfully creates an HTTP handler at `/flavors/{id}` on the test handler mux // that responds with a `Get` response. 
func HandleGetCDNFlavorSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/flavors/asia", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.Mux.HandleFunc("/flavors/asia", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, ` + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` { "id" : "asia", "providers" : [ @@ -78,5 +78,5 @@ func HandleGetCDNFlavorSuccessfully(t *testing.T) { ] } `) - }) + }) } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests.go index 138fd976319..8755a95b8f9 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests.go @@ -17,9 +17,6 @@ func List(c *gophercloud.ServiceClient) pagination.Pager { // Get retrieves a specific flavor based on its unique ID. 
func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", getURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests_test.go index 7ddf1b1c660..f7317382797 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests_test.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/flavors/requests_test.go @@ -1,90 +1,89 @@ package flavors import ( - "testing" + "testing" - "github.com/rackspace/gophercloud" - "github.com/rackspace/gophercloud/pagination" - th "github.com/rackspace/gophercloud/testhelper" - fake "github.com/rackspace/gophercloud/testhelper/client" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" ) func TestList(t *testing.T) { - th.SetupHTTP() - defer th.TeardownHTTP() + th.SetupHTTP() + defer th.TeardownHTTP() - HandleListCDNFlavorsSuccessfully(t) + HandleListCDNFlavorsSuccessfully(t) - count := 0 + count := 0 - err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { - count++ - actual, err := ExtractFlavors(page) - if err != nil { - t.Errorf("Failed to extract flavors: %v", err) - return false, err - } + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractFlavors(page) + if err != nil { + t.Errorf("Failed to extract flavors: %v", err) + return false, err + } - expected := []Flavor{ - Flavor{ - ID: "europe", - Providers: []Provider{ - Provider{ - Provider: "Fastly", - Links: []gophercloud.Link{ - 
gophercloud.Link{ - Href: "http://www.fastly.com", - Rel: "provider_url", - }, - }, - }, - }, - Links: []gophercloud.Link{ - gophercloud.Link{ - Href: "https://www.poppycdn.io/v1.0/flavors/europe", - Rel: "self", - }, - }, - }, - } + expected := []Flavor{ + Flavor{ + ID: "europe", + Providers: []Provider{ + Provider{ + Provider: "Fastly", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://www.fastly.com", + Rel: "provider_url", + }, + }, + }, + }, + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/flavors/europe", + Rel: "self", + }, + }, + }, + } - th.CheckDeepEquals(t, expected, actual) + th.CheckDeepEquals(t, expected, actual) - return true, nil - }) - th.AssertNoErr(t, err) - th.CheckEquals(t, 1, count) + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) } func TestGet(t *testing.T) { - th.SetupHTTP() - defer th.TeardownHTTP() + th.SetupHTTP() + defer th.TeardownHTTP() - HandleGetCDNFlavorSuccessfully(t) + HandleGetCDNFlavorSuccessfully(t) - expected := &Flavor{ - ID: "asia", - Providers: []Provider{ - Provider{ - Provider: "ChinaCache", - Links: []gophercloud.Link{ - gophercloud.Link{ - Href: "http://www.chinacache.com", - Rel: "provider_url", - }, - }, - }, - }, - Links: []gophercloud.Link{ - gophercloud.Link{ - Href: "https://www.poppycdn.io/v1.0/flavors/asia", - Rel: "self", - }, - }, - } + expected := &Flavor{ + ID: "asia", + Providers: []Provider{ + Provider{ + Provider: "ChinaCache", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://www.chinacache.com", + Rel: "provider_url", + }, + }, + }, + }, + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/flavors/asia", + Rel: "self", + }, + }, + } - - actual, err := Get(fake.ServiceClient(), "asia").Extract() - th.AssertNoErr(t, err) - th.AssertDeepEquals(t, expected, actual) + actual, err := Get(fake.ServiceClient(), "asia").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, 
expected, actual) } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/fixtures.go index 38e7fc55f8d..5c6b5d00e49 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/fixtures.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/fixtures.go @@ -1,19 +1,19 @@ package serviceassets import ( - "net/http" - "testing" + "net/http" + "testing" - th "github.com/rackspace/gophercloud/testhelper" - fake "github.com/rackspace/gophercloud/testhelper/client" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" ) // HandleDeleteCDNAssetSuccessfully creates an HTTP handler at `/services/{id}/assets` on the test handler mux // that responds with a `Delete` response. func HandleDeleteCDNAssetSuccessfully(t *testing.T) { - th.Mux.HandleFunc("/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0/assets", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "DELETE") - th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) - w.WriteHeader(http.StatusAccepted) - }) + th.Mux.HandleFunc("/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0/assets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests.go index a80aa0db28b..1ddc65fafd3 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests.go @@ -43,8 +43,6 @@ func Delete(c 
*gophercloud.ServiceClient, idOrURL string, opts DeleteOptsBuilder } var res DeleteResult - _, res.Err = c.Request("DELETE", url, gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) + _, res.Err = c.Delete(url, nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests_test.go index 32896eef7a8..dde7bc171d2 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests_test.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/serviceassets/requests_test.go @@ -1,18 +1,18 @@ package serviceassets import ( - "testing" + "testing" - th "github.com/rackspace/gophercloud/testhelper" - fake "github.com/rackspace/gophercloud/testhelper/client" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" ) func TestDelete(t *testing.T) { - th.SetupHTTP() - defer th.TeardownHTTP() + th.SetupHTTP() + defer th.TeardownHTTP() - HandleDeleteCDNAssetSuccessfully(t) + HandleDeleteCDNAssetSuccessfully(t) - err := Delete(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", nil).ExtractErr() - th.AssertNoErr(t, err) + err := Delete(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", nil).ExtractErr() + th.AssertNoErr(t, err) } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/services/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/services/requests.go index 78a3087c98b..8b37928e219 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/services/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/cdn/v1/services/requests.go @@ -177,10 +177,7 @@ func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { } // Send request 
to API - resp, err := c.Request("POST", createURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - OkCodes: []int{202}, - }) + resp, err := c.Post(createURL(c), &reqBody, nil, nil) res.Header = resp.Header res.Err = err return res @@ -199,10 +196,7 @@ func Get(c *gophercloud.ServiceClient, idOrURL string) GetResult { } var res GetResult - _, res.Err = c.Request("GET", url, gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(url, &res.Body, nil) return res } @@ -379,8 +373,6 @@ func Delete(c *gophercloud.ServiceClient, idOrURL string) DeleteResult { } var res DeleteResult - _, res.Err = c.Request("DELETE", url, gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) + _, res.Err = c.Delete(url, nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/client.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/client.go index 6818d9d734b..1193b19a7af 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/client.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/client.go @@ -109,6 +109,7 @@ func v2auth(client *gophercloud.ProviderClient, endpoint string, options gopherc if options.AllowReauth { client.ReauthFunc = func() error { + client.TokenID = "" return AuthenticateV2(client, options) } } @@ -132,10 +133,36 @@ func v3auth(client *gophercloud.ProviderClient, endpoint string, options gopherc v3Client.Endpoint = endpoint } - token, err := tokens3.Create(v3Client, options, nil).Extract() + var scope *tokens3.Scope + if options.TenantID != "" { + scope = &tokens3.Scope{ + ProjectID: options.TenantID, + } + options.TenantID = "" + options.TenantName = "" + } else { + if options.TenantName != "" { + scope = &tokens3.Scope{ + ProjectName: options.TenantName, + DomainID: options.DomainID, + DomainName: options.DomainName, + } + options.TenantName = "" + } + } + + result := tokens3.Create(v3Client, options, scope) + + 
token, err := result.ExtractToken() if err != nil { return err } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + client.TokenID = token.ID if options.AllowReauth { @@ -144,7 +171,7 @@ func v3auth(client *gophercloud.ProviderClient, endpoint string, options gopherc } } client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return V3EndpointURL(v3Client, opts) + return V3EndpointURL(catalog, opts) } return nil diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go index dfd81c99a0d..0b710850156 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/common/extensions/requests.go @@ -8,10 +8,7 @@ import ( // Get retrieves information for a specific extension using its alias. 
func Get(c *gophercloud.ServiceClient, alias string) GetResult { var res GetResult - _, res.Err = c.Request("GET", ExtensionURL(c, alias), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(ExtensionURL(c, alias), &res.Body, nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go index b64014f36b5..c0ba368db75 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go @@ -99,10 +99,8 @@ func Create(client *gophercloud.ServiceClient, opts servers.CreateOptsBuilder) s return res } - _, res.Err = client.Request("POST", createURL(client), gophercloud.RequestOpts{ - JSONBody: reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200, 202}, + _, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, }) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/requests.go index 294bae35783..9f27ef172c1 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/defsecrules/requests.go @@ -73,10 +73,8 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes return result } - _, result.Err = client.Request("POST", rootURL(client), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - JSONBody: &reqBody, - OkCodes: 
[]int{200}, + _, result.Err = client.Post(rootURL(client), reqBody, &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return result @@ -85,22 +83,13 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes // Get will return details for a particular default rule. func Get(client *gophercloud.ServiceClient, id string) GetResult { var result GetResult - - _, result.Err = client.Request("GET", resourceURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - OkCodes: []int{200}, - }) - + _, result.Err = client.Get(resourceURL(client, id), &result.Body, nil) return result } // Delete will permanently delete a default rule from the project. func Delete(client *gophercloud.ServiceClient, id string) gophercloud.ErrResult { var result gophercloud.ErrResult - - _, result.Err = client.Request("DELETE", resourceURL(client, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) - + _, result.Err = client.Delete(resourceURL(client, id), nil) return result } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/requests.go index d1540380108..8abb72dcdec 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip/requests.go @@ -45,10 +45,8 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes return res } - _, res.Err = client.Request("POST", createURL(client), gophercloud.RequestOpts{ - JSONBody: reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -56,19 +54,14 @@ func Create(client *gophercloud.ServiceClient, opts 
CreateOptsBuilder) CreateRes // Get returns data about a previously created FloatingIP. func Get(client *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = client.Request("GET", getURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = client.Get(getURL(client, id), &res.Body, nil) return res } // Delete requests the deletion of a previous allocated FloatingIP. func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = client.Request("DELETE", deleteURL(client, id), gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) + _, res.Err = client.Delete(deleteURL(client, id), nil) return res } @@ -82,10 +75,7 @@ func Associate(client *gophercloud.ServiceClient, serverId, fip string) Associat addFloatingIp["address"] = fip reqBody := map[string]interface{}{"addFloatingIp": addFloatingIp} - _, res.Err = client.Request("POST", associateURL(client, serverId), gophercloud.RequestOpts{ - JSONBody: reqBody, - OkCodes: []int{202}, - }) + _, res.Err = client.Post(associateURL(client, serverId), reqBody, nil, nil) return res } @@ -97,9 +87,6 @@ func Disassociate(client *gophercloud.ServiceClient, serverId, fip string) Disas removeFloatingIp["address"] = fip reqBody := map[string]interface{}{"removeFloatingIp": removeFloatingIp} - _, res.Err = client.Request("POST", disassociateURL(client, serverId), gophercloud.RequestOpts{ - JSONBody: reqBody, - OkCodes: []int{202}, - }) + _, res.Err = client.Post(disassociateURL(client, serverId), reqBody, nil, nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go index 287e4127c5a..c56ee67ea2d 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go +++ 
b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go @@ -81,10 +81,8 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes return res } - _, res.Err = client.Request("POST", createURL(client), gophercloud.RequestOpts{ - JSONBody: reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -92,18 +90,13 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes // Get returns public data about a previously uploaded KeyPair. func Get(client *gophercloud.ServiceClient, name string) GetResult { var res GetResult - _, res.Err = client.Request("GET", getURL(client, name), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = client.Get(getURL(client, name), &res.Body, nil) return res } // Delete requests the deletion of a previous stored KeyPair from the server. 
func Delete(client *gophercloud.ServiceClient, name string) DeleteResult { var res DeleteResult - _, res.Err = client.Request("DELETE", deleteURL(client, name), gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) + _, res.Err = client.Delete(deleteURL(client, name), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go index 8f0a7a032fd..4cef4802223 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go @@ -78,10 +78,8 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes return result } - _, result.Err = client.Request("POST", rootURL(client), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - JSONBody: &reqBody, - OkCodes: []int{200}, + _, result.Err = client.Post(rootURL(client), reqBody, &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return result @@ -123,10 +121,8 @@ func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder return result } - _, result.Err = client.Request("PUT", resourceURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - JSONBody: &reqBody, - OkCodes: []int{200}, + _, result.Err = client.Put(resourceURL(client, id), reqBody, &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return result @@ -135,23 +131,14 @@ func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder // Get will return details for a particular security group. 
func Get(client *gophercloud.ServiceClient, id string) GetResult { var result GetResult - - _, result.Err = client.Request("GET", resourceURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - OkCodes: []int{200}, - }) - + _, result.Err = client.Get(resourceURL(client, id), &result.Body, nil) return result } // Delete will permanently delete a security group from the project. func Delete(client *gophercloud.ServiceClient, id string) gophercloud.ErrResult { var result gophercloud.ErrResult - - _, result.Err = client.Request("DELETE", resourceURL(client, id), gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - + _, result.Err = client.Delete(resourceURL(client, id), nil) return result } @@ -234,10 +221,8 @@ func CreateRule(client *gophercloud.ServiceClient, opts CreateRuleOptsBuilder) C return result } - _, result.Err = client.Request("POST", rootRuleURL(client), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - JSONBody: &reqBody, - OkCodes: []int{200}, + _, result.Err = client.Post(rootRuleURL(client), reqBody, &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return result @@ -246,11 +231,7 @@ func CreateRule(client *gophercloud.ServiceClient, opts CreateRuleOptsBuilder) C // DeleteRule will permanently delete a rule from a security group. func DeleteRule(client *gophercloud.ServiceClient, id string) gophercloud.ErrResult { var result gophercloud.ErrResult - - _, result.Err = client.Request("DELETE", resourceRuleURL(client, id), gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - + _, result.Err = client.Delete(resourceRuleURL(client, id), nil) return result } @@ -264,25 +245,13 @@ func actionMap(prefix, groupName string) map[string]map[string]string { // rules of the group on the server. 
func AddServerToGroup(client *gophercloud.ServiceClient, serverID, groupName string) gophercloud.ErrResult { var result gophercloud.ErrResult - - _, result.Err = client.Request("POST", serverActionURL(client, serverID), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - JSONBody: actionMap("add", groupName), - OkCodes: []int{202}, - }) - + _, result.Err = client.Post(serverActionURL(client, serverID), actionMap("add", groupName), &result.Body, nil) return result } // RemoveServerFromGroup will disassociate a server from a security group. func RemoveServerFromGroup(client *gophercloud.ServiceClient, serverID, groupName string) gophercloud.ErrResult { var result gophercloud.ErrResult - - _, result.Err = client.Request("POST", serverActionURL(client, serverID), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - JSONBody: actionMap("remove", groupName), - OkCodes: []int{202}, - }) - + _, result.Err = client.Post(serverActionURL(client, serverID), actionMap("remove", groupName), &result.Body, nil) return result } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go new file mode 100644 index 00000000000..1e5ed568daa --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go @@ -0,0 +1,2 @@ +// Package servergroups provides the ability to manage server groups +package servergroups diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/fixtures.go new file mode 100644 index 00000000000..133fd85ced1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/fixtures.go @@ -0,0 +1,161 @@ +// 
+build fixtures + +package servergroups + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +// ListOutput is a sample response to a List call. +const ListOutput = ` +{ + "server_groups": [ + { + "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", + "name": "test", + "policies": [ + "anti-affinity" + ], + "members": [], + "metadata": {} + }, + { + "id": "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + "name": "test2", + "policies": [ + "affinity" + ], + "members": [], + "metadata": {} + } + ] +} +` + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "server_group": { + "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", + "name": "test", + "policies": [ + "anti-affinity" + ], + "members": [], + "metadata": {} + } +} +` + +// CreateOutput is a sample response to a Post call +const CreateOutput = ` +{ + "server_group": { + "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", + "name": "test", + "policies": [ + "anti-affinity" + ], + "members": [], + "metadata": {} + } +} +` + +// FirstServerGroup is the first result in ListOutput. +var FirstServerGroup = ServerGroup{ + ID: "616fb98f-46ca-475e-917e-2563e5a8cd19", + Name: "test", + Policies: []string{ + "anti-affinity", + }, + Members: []string{}, + Metadata: map[string]interface{}{}, +} + +// SecondServerGroup is the second result in ListOutput. +var SecondServerGroup = ServerGroup{ + ID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + Name: "test2", + Policies: []string{ + "affinity", + }, + Members: []string{}, + Metadata: map[string]interface{}{}, +} + +// ExpectedServerGroupSlice is the slice of results that should be parsed +// from ListOutput, in the expected order. +var ExpectedServerGroupSlice = []ServerGroup{FirstServerGroup, SecondServerGroup} + +// CreatedServerGroup is the parsed result from CreateOutput. 
+var CreatedServerGroup = ServerGroup{ + ID: "616fb98f-46ca-475e-917e-2563e5a8cd19", + Name: "test", + Policies: []string{ + "anti-affinity", + }, + Members: []string{}, + Metadata: map[string]interface{}{}, +} + +// HandleListSuccessfully configures the test server to respond to a List request. +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-server-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request +// for an existing server group +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-server-groups/4d8c3732-a248-40ed-bebc-539a6ffd25c0", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleCreateSuccessfully configures the test server to respond to a Create request +// for a new server group +func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-server-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` +{ + "server_group": { + "name": "test", + "policies": [ + "anti-affinity" + ] + } +} +`) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, CreateOutput) + }) +} + +// HandleDeleteSuccessfully configures the test server to respond to a Delete request for a +// an existing server group +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-server-groups/616fb98f-46ca-475e-917e-2563e5a8cd19", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + 
w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go new file mode 100644 index 00000000000..1597b43eb22 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go @@ -0,0 +1,77 @@ +package servergroups + +import ( + "errors" + + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of ServerGroups. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return ServerGroupsPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder describes struct types that can be accepted by the Create call. Notably, the +// CreateOpts struct in this package does. +type CreateOptsBuilder interface { + ToServerGroupCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies a Server Group allocation request +type CreateOpts struct { + // Name is the name of the server group + Name string + + // Policies are the server group policies + Policies []string +} + +// ToServerGroupCreateMap constructs a request body from CreateOpts. 
+func (opts CreateOpts) ToServerGroupCreateMap() (map[string]interface{}, error) { + if opts.Name == "" { + return nil, errors.New("Missing field required for server group creation: Name") + } + + if len(opts.Policies) < 1 { + return nil, errors.New("Missing field required for server group creation: Policies") + } + + serverGroup := make(map[string]interface{}) + serverGroup["name"] = opts.Name + serverGroup["policies"] = opts.Policies + + return map[string]interface{}{"server_group": serverGroup}, nil +} + +// Create requests the creation of a new Server Group +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { + var res CreateResult + + reqBody, err := opts.ToServerGroupCreateMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = client.Post(createURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return res +} + +// Get returns data about a previously created ServerGroup. +func Get(client *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = client.Get(getURL(client, id), &res.Body, nil) + return res +} + +// Delete requests the deletion of a previously allocated ServerGroup. 
+func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = client.Delete(deleteURL(client, id), nil) + return res +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests_test.go new file mode 100644 index 00000000000..07fec51b1b8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/requests_test.go @@ -0,0 +1,59 @@ +package servergroups + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + count := 0 + err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractServerGroups(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedServerGroupSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t) + + actual, err := Create(client.ServiceClient(), CreateOpts{ + Name: "test", + Policies: []string{"anti-affinity"}, + }).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &CreatedServerGroup, actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := Get(client.ServiceClient(), "4d8c3732-a248-40ed-bebc-539a6ffd25c0").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &FirstServerGroup, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + err := 
Delete(client.ServiceClient(), "616fb98f-46ca-475e-917e-2563e5a8cd19").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/results.go new file mode 100644 index 00000000000..d74ee5dbb00 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/results.go @@ -0,0 +1,87 @@ +package servergroups + +import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// A ServerGroup creates a policy for instance placement in the cloud +type ServerGroup struct { + // ID is the unique ID of the Server Group. + ID string `mapstructure:"id"` + + // Name is the common name of the server group. + Name string `mapstructure:"name"` + + // Polices are the group policies. + Policies []string `mapstructure:"policies"` + + // Members are the members of the server group. + Members []string `mapstructure:"members"` + + // Metadata includes a list of all user-specified key-value pairs attached to the Server Group. + Metadata map[string]interface{} +} + +// ServerGroupsPage stores a single, only page of ServerGroups +// results from a List call. +type ServerGroupsPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a ServerGroupsPage is empty. +func (page ServerGroupsPage) IsEmpty() (bool, error) { + va, err := ExtractServerGroups(page) + return len(va) == 0, err +} + +// ExtractServerGroups interprets a page of results as a slice of +// ServerGroups. 
+func ExtractServerGroups(page pagination.Page) ([]ServerGroup, error) { + casted := page.(ServerGroupsPage).Body + var response struct { + ServerGroups []ServerGroup `mapstructure:"server_groups"` + } + + err := mapstructure.WeakDecode(casted, &response) + + return response.ServerGroups, err +} + +type ServerGroupResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any Server Group resource +// response as a ServerGroup struct. +func (r ServerGroupResult) Extract() (*ServerGroup, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + ServerGroup *ServerGroup `json:"server_group" mapstructure:"server_group"` + } + + err := mapstructure.WeakDecode(r.Body, &res) + return res.ServerGroup, err +} + +// CreateResult is the response from a Create operation. Call its Extract method to interpret it +// as a ServerGroup. +type CreateResult struct { + ServerGroupResult +} + +// GetResult is the response from a Get operation. Call its Extract method to interpret it +// as a ServerGroup. +type GetResult struct { + ServerGroupResult +} + +// DeleteResult is the response from a Delete operation. Call its Extract method to determine if +// the call succeeded or failed. 
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go new file mode 100644 index 00000000000..074a16c67f2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go @@ -0,0 +1,25 @@ +package servergroups + +import "github.com/rackspace/gophercloud" + +const resourcePath = "os-server-groups" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func createURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return getURL(c, id) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls_test.go new file mode 100644 index 00000000000..bff4dfc7205 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups/urls_test.go @@ -0,0 +1,42 @@ +package servergroups + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + + th.CheckEquals(t, c.Endpoint+"os-server-groups", listURL(c)) +} + +func TestCreateURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + + th.CheckEquals(t, c.Endpoint+"os-server-groups", 
createURL(c)) +} + +func TestGetURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + id := "1" + + th.CheckEquals(t, c.Endpoint+"os-server-groups/"+id, getURL(c, id)) +} + +func TestDeleteURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + id := "1" + + th.CheckEquals(t, c.Endpoint+"os-server-groups/"+id, deleteURL(c, id)) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/requests.go index 04b5909372e..0e090e69f26 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/startstop/requests.go @@ -9,27 +9,15 @@ func actionURL(client *gophercloud.ServiceClient, id string) string { // Start is the operation responsible for starting a Compute server. func Start(client *gophercloud.ServiceClient, id string) gophercloud.ErrResult { var res gophercloud.ErrResult - reqBody := map[string]interface{}{"os-start": nil} - - _, res.Err = client.Request("POST", actionURL(client, id), gophercloud.RequestOpts{ - JSONBody: reqBody, - OkCodes: []int{202}, - }) - + _, res.Err = client.Post(actionURL(client, id), reqBody, nil, nil) return res } // Stop is the operation responsible for stopping a Compute server. 
func Stop(client *gophercloud.ServiceClient, id string) gophercloud.ErrResult { var res gophercloud.ErrResult - reqBody := map[string]interface{}{"os-stop": nil} - - _, res.Err = client.Request("POST", actionURL(client, id), gophercloud.RequestOpts{ - JSONBody: reqBody, - OkCodes: []int{202}, - }) - + _, res.Err = client.Post(actionURL(client, id), reqBody, nil, nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go new file mode 100644 index 00000000000..65c46ff5078 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go @@ -0,0 +1,2 @@ +// Package tenantnetworks provides the ability for tenants to see information about the networks they have access to +package tenantnetworks diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/fixtures.go new file mode 100644 index 00000000000..0cfa72ab061 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/fixtures.go @@ -0,0 +1,84 @@ +// +build fixtures + +package tenantnetworks + +import ( + "fmt" + "net/http" + "testing" + "time" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +// ListOutput is a sample response to a List call. +const ListOutput = ` +{ + "networks": [ + { + "cidr": "10.0.0.0/29", + "id": "20c8acc0-f747-4d71-a389-46d078ebf047", + "label": "mynet_0" + }, + { + "cidr": "10.0.0.10/29", + "id": "20c8acc0-f747-4d71-a389-46d078ebf000", + "label": "mynet_1" + } + ] +} +` + +// GetOutput is a sample response to a Get call. 
+const GetOutput = ` +{ + "network": { + "cidr": "10.0.0.10/29", + "id": "20c8acc0-f747-4d71-a389-46d078ebf000", + "label": "mynet_1" + } +} +` + +// FirstNetwork is the first result in ListOutput. +var nilTime time.Time +var FirstNetwork = Network{ + CIDR: "10.0.0.0/29", + ID: "20c8acc0-f747-4d71-a389-46d078ebf047", + Name: "mynet_0", +} + +// SecondNetwork is the second result in ListOutput. +var SecondNetwork = Network{ + CIDR: "10.0.0.10/29", + ID: "20c8acc0-f747-4d71-a389-46d078ebf000", + Name: "mynet_1", +} + +// ExpectedNetworkSlice is the slice of results that should be parsed +// from ListOutput, in the expected order. +var ExpectedNetworkSlice = []Network{FirstNetwork, SecondNetwork} + +// HandleListSuccessfully configures the test server to respond to a List request. +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-tenant-networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request +// for an existing network. 
+func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-tenant-networks/20c8acc0-f747-4d71-a389-46d078ebf000", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go new file mode 100644 index 00000000000..3ec13d384b0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go @@ -0,0 +1,22 @@ +package tenantnetworks + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of Network. +func List(client *gophercloud.ServiceClient) pagination.Pager { + url := listURL(client) + createPage := func(r pagination.PageResult) pagination.Page { + return NetworkPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(client, url, createPage) +} + +// Get returns data about a previously created Network. 
+func Get(client *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = client.Get(getURL(client, id), &res.Body, nil) + return res +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests_test.go new file mode 100644 index 00000000000..fc4ee4f4bab --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests_test.go @@ -0,0 +1,37 @@ +package tenantnetworks + +import ( + "testing" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + count := 0 + err := List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractNetworks(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedNetworkSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := Get(client.ServiceClient(), "20c8acc0-f747-4d71-a389-46d078ebf000").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &SecondNetwork, actual) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go new file mode 100644 index 00000000000..805009247a9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go @@ -0,0 +1,68 @@ +package tenantnetworks + 
+import ( + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// A Network represents a nova-network that an instance communicates on +type Network struct { + // CIDR is the IPv4 subnet. + CIDR string `mapstructure:"cidr"` + + // ID is the UUID of the network. + ID string `mapstructure:"id"` + + // Name is the common name that the network has. + Name string `mapstructure:"label"` +} + +// NetworkPage stores a single, only page of Networks +// results from a List call. +type NetworkPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a NetworkPage is empty. +func (page NetworkPage) IsEmpty() (bool, error) { + va, err := ExtractNetworks(page) + return len(va) == 0, err +} + +// ExtractNetworks interprets a page of results as a slice of Networks +func ExtractNetworks(page pagination.Page) ([]Network, error) { + networks := page.(NetworkPage).Body + var res struct { + Networks []Network `mapstructure:"networks"` + } + + err := mapstructure.WeakDecode(networks, &res) + + return res.Networks, err +} + +type NetworkResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any Network resource +// response as a Network struct. +func (r NetworkResult) Extract() (*Network, error) { + if r.Err != nil { + return nil, r.Err + } + + var res struct { + Network *Network `json:"network" mapstructure:"network"` + } + + err := mapstructure.Decode(r.Body, &res) + return res.Network, err +} + +// GetResult is the response from a Get operation. Call its Extract method to interpret it +// as a Network. 
+type GetResult struct { + NetworkResult +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go new file mode 100644 index 00000000000..2401a5d038f --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go @@ -0,0 +1,17 @@ +package tenantnetworks + +import "github.com/rackspace/gophercloud" + +const resourcePath = "os-tenant-networks" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls_test.go new file mode 100644 index 00000000000..39c464e9fbb --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls_test.go @@ -0,0 +1,25 @@ +package tenantnetworks + +import ( + "testing" + + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + + th.CheckEquals(t, c.Endpoint+"os-tenant-networks", listURL(c)) +} + +func TestGetURL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + c := client.ServiceClient() + id := "1" + + th.CheckEquals(t, c.Endpoint+"os-tenant-networks/"+id, getURL(c, id)) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go 
b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go index 79709fdbe40..b4ebedea86f 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go @@ -54,10 +54,8 @@ func Create(client *gophercloud.ServiceClient, serverId string, opts CreateOptsB return res } - _, res.Err = client.Request("POST", createURL(client, serverId), gophercloud.RequestOpts{ - JSONBody: reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = client.Post(createURL(client, serverId), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -65,18 +63,13 @@ func Create(client *gophercloud.ServiceClient, serverId string, opts CreateOptsB // Get returns public data about a previously created VolumeAttachment. func Get(client *gophercloud.ServiceClient, serverId, aId string) GetResult { var res GetResult - _, res.Err = client.Request("GET", getURL(client, serverId, aId), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = client.Get(getURL(client, serverId, aId), &res.Body, nil) return res } // Delete requests the deletion of a previous stored VolumeAttachment from the server. 
func Delete(client *gophercloud.ServiceClient, serverId, aId string) DeleteResult { var res DeleteResult - _, res.Err = client.Request("DELETE", deleteURL(client, serverId, aId), gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) + _, res.Err = client.Delete(deleteURL(client, serverId, aId), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go index 1d33f58ad56..586be67ac25 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/flavors/requests.go @@ -62,9 +62,7 @@ func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) paginat // Get instructs OpenStack to provide details on a single flavor, identified by its ID. // Use ExtractFlavor to convert its result into a Flavor. func Get(client *gophercloud.ServiceClient, id string) GetResult { - var gr GetResult - _, gr.Err = client.Request("GET", getURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &gr.Body, - }) - return gr + var res GetResult + _, res.Err = client.Get(getURL(client, id), &res.Body, nil) + return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go index 9e9c3b1d4e8..7ce5139519c 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests.go @@ -60,9 +60,13 @@ func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) paginat // Use ExtractImage() to interpret the result as an openstack Image. 
func Get(client *gophercloud.ServiceClient, id string) GetResult { var result GetResult - _, result.Err = client.Request("GET", getURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - OkCodes: []int{200}, - }) + _, result.Err = client.Get(getURL(client, id), &result.Body, nil) + return result +} + +// Delete deletes the specified image ID. +func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { + var result DeleteResult + _, result.Err = client.Delete(deleteURL(client, id), nil) return result } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests_test.go index 9a05f97ec0d..93a97bdc65b 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests_test.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/requests_test.go @@ -173,3 +173,19 @@ func TestNextPageURL(t *testing.T) { th.AssertNoErr(t, err) th.CheckEquals(t, expected, actual) } + +// Test Image delete +func TestDeleteImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/images/12345678", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "12345678") + th.AssertNoErr(t, res.Err) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go index 493d51192c0..40e814d1de0 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/results.go @@ -11,6 +11,11 @@ type 
GetResult struct { gophercloud.Result } +// DeleteResult represents the result of an image.Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + // Extract interprets a GetResult as an Image. func (gr GetResult) Extract() (*Image, error) { if gr.Err != nil { diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go index 9b3c86d435f..b1bf1038fb0 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/images/urls.go @@ -9,3 +9,7 @@ func listDetailURL(client *gophercloud.ServiceClient) string { func getURL(client *gophercloud.ServiceClient, id string) string { return client.ServiceURL("images", id) } + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("images", id) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go index 6125d530b30..4339a16d440 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/fixtures.go @@ -567,3 +567,98 @@ func HandleMetadataUpdateSuccessfully(t *testing.T) { w.Write([]byte(`{ "metadata": {"foo":"baz", "this":"those"}}`)) }) } + +// ListAddressesExpected represents an expected repsonse from a ListAddresses request. 
+var ListAddressesExpected = map[string][]Address{
+	"public": []Address{
+		Address{
+			Version: 4,
+			Address: "50.56.176.35",
+		},
+		Address{
+			Version: 6,
+			Address: "2001:4800:780e:510:be76:4eff:fe04:84a8",
+		},
+	},
+	"private": []Address{
+		Address{
+			Version: 4,
+			Address: "10.180.3.155",
+		},
+	},
+}
+
+// HandleAddressListSuccessfully sets up the test server to respond to a ListAddresses request.
+func HandleAddressListSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/asdfasdfasdf/ips", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, `{
+			"addresses": {
+				"public": [
+					{
+						"version": 4,
+						"addr": "50.56.176.35"
+					},
+					{
+						"version": 6,
+						"addr": "2001:4800:780e:510:be76:4eff:fe04:84a8"
+					}
+				],
+				"private": [
+					{
+						"version": 4,
+						"addr": "10.180.3.155"
+					}
+				]
+			}
+		}`)
+	})
+}
+
+// ListNetworkAddressesExpected represents an expected response from a ListAddressesByNetwork request.
+var ListNetworkAddressesExpected = []Address{
+	Address{
+		Version: 4,
+		Address: "50.56.176.35",
+	},
+	Address{
+		Version: 6,
+		Address: "2001:4800:780e:510:be76:4eff:fe04:84a8",
+	},
+}
+
+// HandleNetworkAddressListSuccessfully sets up the test server to respond to a ListAddressesByNetwork request.
+func HandleNetworkAddressListSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/servers/asdfasdfasdf/ips/public", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Add("Content-Type", "application/json")
+		fmt.Fprintf(w, `{
+			"public": [
+				{
+					"version": 4,
+					"addr": "50.56.176.35"
+				},
+				{
+					"version": 6,
+					"addr": "2001:4800:780e:510:be76:4eff:fe04:84a8"
+				}
+			]
+		}`)
+	})
+}
+
+// HandleCreateServerImageSuccessfully sets up the test server to respond to a TestCreateServerImage request.
+func HandleCreateServerImageSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/serverimage/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + w.Header().Add("Location", "https://0.0.0.0/images/xxxx-xxxxx-xxxxx-xxxx") + w.WriteHeader(http.StatusAccepted) + }) +} + diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go index b7c1611d3c3..aa8c1a87b27 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests.go @@ -14,7 +14,6 @@ import ( type ListOptsBuilder interface { ToServerListQuery() (string, error) } - // ListOpts allows the filtering and sorting of paginated collections through // the API. Filtering is achieved by passing in struct field values that map to // the server attributes you want to see returned. Marker and Limit are used @@ -216,29 +215,22 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes return res } - _, res.Err = client.Request("POST", listURL(client), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - JSONBody: reqBody, - OkCodes: []int{202}, - }) + _, res.Err = client.Post(listURL(client), reqBody, &res.Body, nil) return res } // Delete requests that a server previously provisioned be removed from your account. func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = client.Request("DELETE", deleteURL(client, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = client.Delete(deleteURL(client, id), nil) return res } // Get requests details on a single server, by ID. 
func Get(client *gophercloud.ServiceClient, id string) GetResult { var result GetResult - _, result.Err = client.Request("GET", getURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - OkCodes: []int{200, 203}, + _, result.Err = client.Get(getURL(client, id), &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, }) return result } @@ -280,9 +272,9 @@ func (opts UpdateOpts) ToServerUpdateMap() map[string]interface{} { // Update requests that various attributes of the indicated server be changed. func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult { var result UpdateResult - _, result.Err = client.Request("PUT", updateURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - JSONBody: opts.ToServerUpdateMap(), + reqBody := opts.ToServerUpdateMap() + _, result.Err = client.Put(updateURL(client, id), reqBody, &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return result } @@ -298,12 +290,7 @@ func ChangeAdminPassword(client *gophercloud.ServiceClient, id, newPassword stri req.ChangePassword.AdminPass = newPassword var res ActionResult - - _, res.Err = client.Request("POST", actionURL(client, id), gophercloud.RequestOpts{ - JSONBody: req, - OkCodes: []int{202}, - }) - + _, res.Err = client.Post(actionURL(client, id), req, nil, nil) return res } @@ -367,15 +354,13 @@ func Reboot(client *gophercloud.ServiceClient, id string, how RebootMethod) Acti return res } - _, res.Err = client.Request("POST", actionURL(client, id), gophercloud.RequestOpts{ - JSONBody: struct { - C map[string]string `json:"reboot"` - }{ - map[string]string{"type": string(how)}, - }, - OkCodes: []int{202}, - }) + reqBody := struct { + C map[string]string `json:"reboot"` + }{ + map[string]string{"type": string(how)}, + } + _, res.Err = client.Post(actionURL(client, id), reqBody, nil, nil) return res } @@ -468,12 +453,7 @@ func Rebuild(client *gophercloud.ServiceClient, id string, opts 
RebuildOptsBuild return result } - _, result.Err = client.Request("POST", actionURL(client, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &result.Body, - OkCodes: []int{202}, - }) - + _, result.Err = client.Post(actionURL(client, id), reqBody, &result.Body, nil) return result } @@ -514,11 +494,7 @@ func Resize(client *gophercloud.ServiceClient, id string, opts ResizeOptsBuilder return res } - _, res.Err = client.Request("POST", actionURL(client, id), gophercloud.RequestOpts{ - JSONBody: reqBody, - OkCodes: []int{202}, - }) - + _, res.Err = client.Post(actionURL(client, id), reqBody, nil, nil) return res } @@ -527,11 +503,10 @@ func Resize(client *gophercloud.ServiceClient, id string, opts ResizeOptsBuilder func ConfirmResize(client *gophercloud.ServiceClient, id string) ActionResult { var res ActionResult - _, res.Err = client.Request("POST", actionURL(client, id), gophercloud.RequestOpts{ - JSONBody: map[string]interface{}{"confirmResize": nil}, - OkCodes: []int{204}, + reqBody := map[string]interface{}{"confirmResize": nil} + _, res.Err = client.Post(actionURL(client, id), reqBody, nil, &gophercloud.RequestOpts{ + OkCodes: []int{201, 202, 204}, }) - return res } @@ -539,12 +514,8 @@ func ConfirmResize(client *gophercloud.ServiceClient, id string) ActionResult { // See Resize() for more details. 
func RevertResize(client *gophercloud.ServiceClient, id string) ActionResult { var res ActionResult - - _, res.Err = client.Request("POST", actionURL(client, id), gophercloud.RequestOpts{ - JSONBody: map[string]interface{}{"revertResize": nil}, - OkCodes: []int{202}, - }) - + reqBody := map[string]interface{}{"revertResize": nil} + _, res.Err = client.Post(actionURL(client, id), reqBody, nil, nil) return res } @@ -586,10 +557,8 @@ func Rescue(client *gophercloud.ServiceClient, id string, opts RescueOptsBuilder return result } - _, result.Err = client.Request("POST", actionURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - JSONBody: &reqBody, - OkCodes: []int{200}, + _, result.Err = client.Post(actionURL(client, id), reqBody, &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return result @@ -625,9 +594,8 @@ func ResetMetadata(client *gophercloud.ServiceClient, id string, opts ResetMetad res.Err = err return res } - _, res.Err = client.Request("PUT", metadataURL(client, id), gophercloud.RequestOpts{ - JSONBody: metadata, - JSONResponse: &res.Body, + _, res.Err = client.Put(metadataURL(client, id), metadata, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -635,9 +603,7 @@ func ResetMetadata(client *gophercloud.ServiceClient, id string, opts ResetMetad // Metadata requests all the metadata for the given server ID. 
func Metadata(client *gophercloud.ServiceClient, id string) GetMetadataResult { var res GetMetadataResult - _, res.Err = client.Request("GET", metadataURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - }) + _, res.Err = client.Get(metadataURL(client, id), &res.Body, nil) return res } @@ -657,9 +623,8 @@ func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMet res.Err = err return res } - _, res.Err = client.Request("POST", metadataURL(client, id), gophercloud.RequestOpts{ - JSONBody: metadata, - JSONResponse: &res.Body, + _, res.Err = client.Post(metadataURL(client, id), metadata, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -695,9 +660,8 @@ func CreateMetadatum(client *gophercloud.ServiceClient, id string, opts Metadatu return res } - _, res.Err = client.Request("PUT", metadatumURL(client, id, key), gophercloud.RequestOpts{ - JSONBody: metadatum, - JSONResponse: &res.Body, + _, res.Err = client.Put(metadatumURL(client, id, key), metadatum, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -714,8 +678,68 @@ func Metadatum(client *gophercloud.ServiceClient, id, key string) GetMetadatumRe // DeleteMetadatum will delete the key-value pair with the given key for the given server ID. func DeleteMetadatum(client *gophercloud.ServiceClient, id, key string) DeleteMetadatumResult { var res DeleteMetadatumResult - _, res.Err = client.Request("DELETE", metadatumURL(client, id, key), gophercloud.RequestOpts{ + _, res.Err = client.Delete(metadatumURL(client, id, key), &gophercloud.RequestOpts{ JSONResponse: &res.Body, }) return res } + +// ListAddresses makes a request against the API to list the servers IP addresses. 
+func ListAddresses(client *gophercloud.ServiceClient, id string) pagination.Pager { + createPageFn := func(r pagination.PageResult) pagination.Page { + return AddressPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(client, listAddressesURL(client, id), createPageFn) +} + +// ListAddressesByNetwork makes a request against the API to list the servers IP addresses +// for the given network. +func ListAddressesByNetwork(client *gophercloud.ServiceClient, id, network string) pagination.Pager { + createPageFn := func(r pagination.PageResult) pagination.Page { + return NetworkAddressPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(client, listAddressesByNetworkURL(client, id, network), createPageFn) +} + +type CreateImageOpts struct { + // Name [required] of the image/snapshot + Name string + // Metadata [optional] contains key-value pairs (up to 255 bytes each) to attach to the created image. + Metadata map[string]string +} + +type CreateImageOptsBuilder interface { + ToServerCreateImageMap() (map[string]interface{}, error) +} + +// ToServerCreateImageMap formats a CreateImageOpts structure into a request body. 
+func (opts CreateImageOpts) ToServerCreateImageMap() (map[string]interface{}, error) { + var err error + img := make(map[string]interface{}) + if opts.Name == "" { + return nil, fmt.Errorf("Cannot create a server image without a name") + } + img["name"] = opts.Name + if opts.Metadata != nil { + img["metadata"] = opts.Metadata + } + createImage := make(map[string]interface{}) + createImage["createImage"] = img + return createImage, err +} + +// CreateImage makes a request against the nova API to schedule an image to be created of the server +func CreateImage(client *gophercloud.ServiceClient, serverId string, opts CreateImageOptsBuilder) CreateImageResult { + var res CreateImageResult + reqBody, err := opts.ToServerCreateImageMap() + if err != nil { + res.Err = err + return res + } + response, err := client.Post(actionURL(client, serverId), reqBody, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + res.Err = err + res.Header = response.Header + return res +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests_test.go index 253a179191d..1f39fe143bd 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests_test.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/requests_test.go @@ -277,3 +277,60 @@ func TestUpdateMetadata(t *testing.T) { th.AssertNoErr(t, err) th.AssertDeepEquals(t, expected, actual) } + +func TestListAddresses(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAddressListSuccessfully(t) + + expected := ListAddressesExpected + pages := 0 + err := ListAddresses(client.ServiceClient(), "asdfasdfasdf").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractAddresses(page) + th.AssertNoErr(t, err) + + if len(actual) != 2 { + t.Fatalf("Expected 2 
networks, got %d", len(actual)) + } + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, pages) +} + +func TestListAddressesByNetwork(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleNetworkAddressListSuccessfully(t) + + expected := ListNetworkAddressesExpected + pages := 0 + err := ListAddressesByNetwork(client.ServiceClient(), "asdfasdfasdf", "public").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractNetworkAddresses(page) + th.AssertNoErr(t, err) + + if len(actual) != 2 { + t.Fatalf("Expected 2 addresses, got %d", len(actual)) + } + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, pages) +} + +func TestCreateServerImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateServerImageSuccessfully(t) + + _, err := CreateImage(client.ServiceClient(), "serverimage", CreateImageOpts{Name: "test"}).ExtractImageID() + th.AssertNoErr(t, err) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go index 1b22f219b21..f27870984ae 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/results.go @@ -2,6 +2,9 @@ package servers import ( "reflect" + "fmt" + "path" + "net/url" "github.com/mitchellh/mapstructure" "github.com/rackspace/gophercloud" @@ -74,6 +77,28 @@ type RescueResult struct { ActionResult } +// CreateImageResult represents the result of an image creation operation +type CreateImageResult struct { + gophercloud.Result +} + +// ExtractImageID gets the ID of the newly created server image from the header +func (res CreateImageResult) ExtractImageID() (string, error) { + if 
res.Err != nil { + return "", res.Err + } + // Get the image id from the header + u, err := url.ParseRequestURI(res.Header.Get("Location")) + if err != nil { + return "", fmt.Errorf("Failed to parse the image id: %s", err.Error()) + } + imageId := path.Base(u.Path) + if imageId == "." || imageId == "/" { + return "", fmt.Errorf("Failed to parse the ID of newly created image: %s", u) + } + return imageId, nil +} + // Extract interprets any RescueResult as an AdminPass, if possible. func (r RescueResult) Extract() (string, error) { if r.Err != nil { @@ -194,7 +219,6 @@ func ExtractServers(page pagination.Page) ([]Server, error) { err = decoder.Decode(casted) - //err := mapstructure.Decode(casted, &response) return response.Servers, err } @@ -272,3 +296,77 @@ func toMapFromString(from reflect.Kind, to reflect.Kind, data interface{}) (inte } return data, nil } + +// Address represents an IP address. +type Address struct { + Version int `mapstructure:"version"` + Address string `mapstructure:"addr"` +} + +// AddressPage abstracts the raw results of making a ListAddresses() request against the API. +// As OpenStack extensions may freely alter the response bodies of structures returned +// to the client, you may only safely access the data provided through the ExtractAddresses call. +type AddressPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if an AddressPage contains no addresses. +func (r AddressPage) IsEmpty() (bool, error) { + addresses, err := ExtractAddresses(r) + if err != nil { + return true, err + } + return len(addresses) == 0, nil +} + +// ExtractAddresses interprets the results of a single page from a ListAddresses() call, +// producing a map of addresses. 
+func ExtractAddresses(page pagination.Page) (map[string][]Address, error) { + casted := page.(AddressPage).Body + + var response struct { + Addresses map[string][]Address `mapstructure:"addresses"` + } + + err := mapstructure.Decode(casted, &response) + if err != nil { + return nil, err + } + + return response.Addresses, err +} + +// NetworkAddressPage abstracts the raw results of making a ListAddressesByNetwork() request against the API. +// As OpenStack extensions may freely alter the response bodies of structures returned +// to the client, you may only safely access the data provided through the ExtractAddresses call. +type NetworkAddressPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a NetworkAddressPage contains no addresses. +func (r NetworkAddressPage) IsEmpty() (bool, error) { + addresses, err := ExtractNetworkAddresses(r) + if err != nil { + return true, err + } + return len(addresses) == 0, nil +} + +// ExtractNetworkAddresses interprets the results of a single page from a ListAddressesByNetwork() call, +// producing a slice of addresses. 
+func ExtractNetworkAddresses(page pagination.Page) ([]Address, error) { + casted := page.(NetworkAddressPage).Body + + var response map[string][]Address + err := mapstructure.Decode(casted, &response) + if err != nil { + return nil, err + } + + var key string + for k := range response { + key = k + } + + return response[key], err +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go index 4bc6586a507..8998354939a 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/compute/v2/servers/urls.go @@ -37,3 +37,11 @@ func metadatumURL(client *gophercloud.ServiceClient, id, key string) string { func metadataURL(client *gophercloud.ServiceClient, id string) string { return client.ServiceURL("servers", id, "metadata") } + +func listAddressesURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "ips") +} + +func listAddressesByNetworkURL(client *gophercloud.ServiceClient, id, network string) string { + return client.ServiceURL("servers", id, "ips", network) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location.go index 5a311e40855..29d02c43f92 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location.go @@ -5,9 +5,7 @@ import ( "github.com/rackspace/gophercloud" tokens2 "github.com/rackspace/gophercloud/openstack/identity/v2/tokens" - endpoints3 "github.com/rackspace/gophercloud/openstack/identity/v3/endpoints" - services3 "github.com/rackspace/gophercloud/openstack/identity/v3/services" - 
"github.com/rackspace/gophercloud/pagination" + tokens3 "github.com/rackspace/gophercloud/openstack/identity/v3/tokens" ) // V2EndpointURL discovers the endpoint URL for a specific service from a ServiceCatalog acquired @@ -52,73 +50,42 @@ func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpt return "", gophercloud.ErrEndpointNotFound } -// V3EndpointURL discovers the endpoint URL for a specific service using multiple calls against -// an identity v3 service endpoint. The specified EndpointOpts are used to identify a unique, +// V3EndpointURL discovers the endpoint URL for a specific service from a Catalog acquired +// during the v3 identity service. The specified EndpointOpts are used to identify a unique, // unambiguous endpoint to return. It's an error both when multiple endpoints match the provided // criteria and when none do. The minimum that can be specified is a Type, but you will also often // need to specify a Name and/or a Region depending on what's available on your OpenStack // deployment. -func V3EndpointURL(v3Client *gophercloud.ServiceClient, opts gophercloud.EndpointOpts) (string, error) { - // Discover the service we're interested in. - var services = make([]services3.Service, 0, 1) - servicePager := services3.List(v3Client, services3.ListOpts{ServiceType: opts.Type}) - err := servicePager.EachPage(func(page pagination.Page) (bool, error) { - part, err := services3.ExtractServices(page) - if err != nil { - return false, err - } - - for _, service := range part { - if service.Name == opts.Name { - services = append(services, service) +func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + // Extract Endpoints from the catalog entries that match the requested Type, Interface, + // Name if provided, and Region if provided. 
+ var endpoints = make([]tokens3.Endpoint, 0, 1) + for _, entry := range catalog.Entries { + if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Availability != gophercloud.AvailabilityAdmin && + opts.Availability != gophercloud.AvailabilityPublic && + opts.Availability != gophercloud.AvailabilityInternal { + return "", fmt.Errorf("Unexpected availability in endpoint query: %s", opts.Availability) + } + if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && + (opts.Region == "" || endpoint.Region == opts.Region) { + endpoints = append(endpoints, endpoint) + } } } - - return true, nil - }) - if err != nil { - return "", err } - if len(services) == 0 { - return "", gophercloud.ErrServiceNotFound - } - if len(services) > 1 { - return "", fmt.Errorf("Discovered %d matching services: %#v", len(services), services) - } - service := services[0] - - // Enumerate the endpoints available for this service. - var endpoints []endpoints3.Endpoint - endpointPager := endpoints3.List(v3Client, endpoints3.ListOpts{ - Availability: opts.Availability, - ServiceID: service.ID, - }) - err = endpointPager.EachPage(func(page pagination.Page) (bool, error) { - part, err := endpoints3.ExtractEndpoints(page) - if err != nil { - return false, err - } - - for _, endpoint := range part { - if opts.Region == "" || endpoint.Region == opts.Region { - endpoints = append(endpoints, endpoint) - } - } - - return true, nil - }) - if err != nil { - return "", err - } - - if len(endpoints) == 0 { - return "", gophercloud.ErrEndpointNotFound - } + // Report an error if the options were ambiguous. if len(endpoints) > 1 { return "", fmt.Errorf("Discovered %d matching endpoints: %#v", len(endpoints), endpoints) } - endpoint := endpoints[0] - return gophercloud.NormalizeURL(endpoint.URL), nil + // Extract the URL from the matching Endpoint. 
+ for _, endpoint := range endpoints { + return gophercloud.NormalizeURL(endpoint.URL), nil + } + + // Report an error if there were no matching endpoints. + return "", gophercloud.ErrEndpointNotFound } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location_test.go index 4e0569ac1f8..8e65918abfe 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location_test.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/endpoint_location_test.go @@ -1,15 +1,13 @@ package openstack import ( - "fmt" - "net/http" "strings" "testing" "github.com/rackspace/gophercloud" tokens2 "github.com/rackspace/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/rackspace/gophercloud/openstack/identity/v3/tokens" th "github.com/rackspace/gophercloud/testhelper" - fake "github.com/rackspace/gophercloud/testhelper/client" ) // Service catalog fixtures take too much vertical space! @@ -107,119 +105,124 @@ func TestV2EndpointBadAvailability(t *testing.T) { Region: "same", Availability: "wat", }) - th.CheckEquals(t, err.Error(), "Unexpected availability in endpoint query: wat") + th.CheckEquals(t, "Unexpected availability in endpoint query: wat", err.Error()) } -func setupV3Responses(t *testing.T) { - // Mock the service query. 
- th.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) - - w.Header().Add("Content-Type", "application/json") - fmt.Fprintf(w, ` - { - "links": { - "next": null, - "previous": null +var catalog3 = tokens3.ServiceCatalog{ + Entries: []tokens3.CatalogEntry{ + tokens3.CatalogEntry{ + Type: "same", + Name: "same", + Endpoints: []tokens3.Endpoint{ + tokens3.Endpoint{ + ID: "1", + Region: "same", + Interface: "public", + URL: "https://public.correct.com/", }, - "services": [ - { - "description": "Correct", - "id": "1234", - "name": "same", - "type": "same" - }, - { - "description": "Bad Name", - "id": "9876", - "name": "different", - "type": "same" - } - ] - } - `) - }) - - // Mock the endpoint query. - th.Mux.HandleFunc("/endpoints", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) - th.TestFormValues(t, r, map[string]string{ - "service_id": "1234", - "interface": "public", - }) - - w.Header().Add("Content-Type", "application/json") - fmt.Fprintf(w, ` - { - "endpoints": [ - { - "id": "12", - "interface": "public", - "name": "the-right-one", - "region": "same", - "service_id": "1234", - "url": "https://correct:9000/" - }, - { - "id": "14", - "interface": "public", - "name": "bad-region", - "region": "different", - "service_id": "1234", - "url": "https://bad-region:9001/" - } - ], - "links": { - "next": null, - "previous": null - } - } - `) - }) + tokens3.Endpoint{ + ID: "2", + Region: "same", + Interface: "admin", + URL: "https://admin.correct.com/", + }, + tokens3.Endpoint{ + ID: "3", + Region: "same", + Interface: "internal", + URL: "https://internal.correct.com/", + }, + tokens3.Endpoint{ + ID: "4", + Region: "different", + Interface: "public", + URL: "https://badregion.com/", + }, + }, + }, + tokens3.CatalogEntry{ + Type: "same", + Name: "different", + Endpoints: []tokens3.Endpoint{ + 
tokens3.Endpoint{ + ID: "5", + Region: "same", + Interface: "public", + URL: "https://badname.com/", + }, + tokens3.Endpoint{ + ID: "6", + Region: "different", + Interface: "public", + URL: "https://badname.com/+badregion", + }, + }, + }, + tokens3.CatalogEntry{ + Type: "different", + Name: "different", + Endpoints: []tokens3.Endpoint{ + tokens3.Endpoint{ + ID: "7", + Region: "same", + Interface: "public", + URL: "https://badtype.com/+badname", + }, + tokens3.Endpoint{ + ID: "8", + Region: "different", + Interface: "public", + URL: "https://badtype.com/+badregion+badname", + }, + }, + }, + }, } func TestV3EndpointExact(t *testing.T) { - th.SetupHTTP() - defer th.TeardownHTTP() - setupV3Responses(t) + expectedURLs := map[gophercloud.Availability]string{ + gophercloud.AvailabilityPublic: "https://public.correct.com/", + gophercloud.AvailabilityAdmin: "https://admin.correct.com/", + gophercloud.AvailabilityInternal: "https://internal.correct.com/", + } - actual, err := V3EndpointURL(fake.ServiceClient(), gophercloud.EndpointOpts{ + for availability, expected := range expectedURLs { + actual, err := V3EndpointURL(&catalog3, gophercloud.EndpointOpts{ + Type: "same", + Name: "same", + Region: "same", + Availability: availability, + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, expected, actual) + } +} + +func TestV3EndpointNone(t *testing.T) { + _, err := V3EndpointURL(&catalog3, gophercloud.EndpointOpts{ + Type: "nope", + Availability: gophercloud.AvailabilityPublic, + }) + th.CheckEquals(t, gophercloud.ErrEndpointNotFound, err) +} + +func TestV3EndpointMultiple(t *testing.T) { + _, err := V3EndpointURL(&catalog3, gophercloud.EndpointOpts{ + Type: "same", + Region: "same", + Availability: gophercloud.AvailabilityPublic, + }) + if !strings.HasPrefix(err.Error(), "Discovered 2 matching endpoints:") { + t.Errorf("Received unexpected error: %v", err) + } +} + +func TestV3EndpointBadAvailability(t *testing.T) { + _, err := V3EndpointURL(&catalog3, gophercloud.EndpointOpts{ 
Type: "same", Name: "same", Region: "same", - Availability: gophercloud.AvailabilityPublic, + Availability: "wat", }) - th.AssertNoErr(t, err) - th.CheckEquals(t, actual, "https://correct:9000/") -} - -func TestV3EndpointNoService(t *testing.T) { - th.SetupHTTP() - defer th.TeardownHTTP() - - th.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) { - th.TestMethod(t, r, "GET") - th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) - - w.Header().Add("Content-Type", "application/json") - fmt.Fprintf(w, ` - { - "links": { - "next": null, - "previous": null - }, - "services": [] - } - `) - }) - - _, err := V3EndpointURL(fake.ServiceClient(), gophercloud.EndpointOpts{ - Type: "nope", - Name: "same", - Region: "same", - Availability: gophercloud.AvailabilityPublic, - }) - th.CheckEquals(t, gophercloud.ErrServiceNotFound, err) + th.CheckEquals(t, "Unexpected availability in endpoint query: wat", err.Error()) } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/requests.go index bbdf76a46ce..9a333140b2b 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/admin/roles/requests.go @@ -19,11 +19,7 @@ func List(client *gophercloud.ServiceClient) pagination.Pager { // ID is a required argument. 
func AddUserRole(client *gophercloud.ServiceClient, tenantID, userID, roleID string) UserRoleResult { var result UserRoleResult - - _, result.Err = client.Request("PUT", userRoleURL(client, tenantID, userID, roleID), gophercloud.RequestOpts{ - OkCodes: []int{200, 201}, - }) - + _, result.Err = client.Put(userRoleURL(client, tenantID, userID, roleID), nil, nil, nil) return result } @@ -32,10 +28,6 @@ func AddUserRole(client *gophercloud.ServiceClient, tenantID, userID, roleID str // tenant ID is a required argument. func DeleteUserRole(client *gophercloud.ServiceClient, tenantID, userID, roleID string) UserRoleResult { var result UserRoleResult - - _, result.Err = client.Request("DELETE", userRoleURL(client, tenantID, userID, roleID), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) - + _, result.Err = client.Delete(userRoleURL(client, tenantID, userID, roleID), nil) return result } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go index db1ac8284fb..efa054fb399 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/tokens/requests.go @@ -75,10 +75,8 @@ func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) CreateRe } var result CreateResult - _, result.Err = client.Request("POST", CreateURL(client), gophercloud.RequestOpts{ - JSONBody: &request, - JSONResponse: &result.Body, - OkCodes: []int{200, 203}, + _, result.Err = client.Post(CreateURL(client), request, &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, }) return result } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/users/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/users/requests.go index 
2afe62a6134..88be45ecc01 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/users/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/users/requests.go @@ -90,10 +90,8 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes return res } - _, res.Err = client.Request("POST", rootURL(client), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - JSONBody: reqBody, - OkCodes: []int{200, 201}, + _, res.Err = client.Post(rootURL(client), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, }) return res @@ -102,12 +100,7 @@ func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateRes // Get requests details on a single user, either by ID. func Get(client *gophercloud.ServiceClient, id string) GetResult { var result GetResult - - _, result.Err = client.Request("GET", ResourceURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - OkCodes: []int{200}, - }) - + _, result.Err = client.Get(ResourceURL(client, id), &result.Body, nil) return result } @@ -145,24 +138,17 @@ func (opts UpdateOpts) ToUserUpdateMap() map[string]interface{} { // Update is the operation responsible for updating exist users by their UUID. func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) UpdateResult { var result UpdateResult - - _, result.Err = client.Request("PUT", ResourceURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - JSONBody: opts.ToUserUpdateMap(), - OkCodes: []int{200}, + reqBody := opts.ToUserUpdateMap() + _, result.Err = client.Put(ResourceURL(client, id), reqBody, &result.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) - return result } // Delete is the operation responsible for permanently deleting an API user. 
func Delete(client *gophercloud.ServiceClient, id string) DeleteResult { var result DeleteResult - - _, result.Err = client.Request("DELETE", ResourceURL(client, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) - + _, result.Err = client.Delete(ResourceURL(client, id), nil) return result } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests.go index 3e09b2aef0d..99a495d594e 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/endpoints/requests.go @@ -56,11 +56,7 @@ func Create(client *gophercloud.ServiceClient, opts EndpointOpts) CreateResult { reqBody.Endpoint.Region = gophercloud.MaybeString(opts.Region) var result CreateResult - _, result.Err = client.Request("POST", listURL(client), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &result.Body, - OkCodes: []int{201}, - }) + _, result.Err = client.Post(listURL(client), reqBody, &result.Body, nil) return result } @@ -122,8 +118,6 @@ func Update(client *gophercloud.ServiceClient, endpointID string, opts EndpointO // Delete removes an endpoint from the service catalog. 
func Delete(client *gophercloud.ServiceClient, endpointID string) DeleteResult { var res DeleteResult - _, res.Err = client.Request("DELETE", endpointURL(client, endpointID), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = client.Delete(endpointURL(client, endpointID), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests.go index c6820c700b2..3ee924f3ee6 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/services/requests.go @@ -18,11 +18,7 @@ func Create(client *gophercloud.ServiceClient, serviceType string) CreateResult req := request{Type: serviceType} var result CreateResult - _, result.Err = client.Request("POST", listURL(client), gophercloud.RequestOpts{ - JSONBody: &req, - JSONResponse: &result.Body, - OkCodes: []int{201}, - }) + _, result.Err = client.Post(listURL(client), req, &result.Body, nil) return result } @@ -51,10 +47,7 @@ func List(client *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { // Get returns additional information about a service, given its ID. func Get(client *gophercloud.ServiceClient, serviceID string) GetResult { var result GetResult - _, result.Err = client.Request("GET", serviceURL(client, serviceID), gophercloud.RequestOpts{ - JSONResponse: &result.Body, - OkCodes: []int{200}, - }) + _, result.Err = client.Get(serviceURL(client, serviceID), &result.Body, nil) return result } @@ -79,8 +72,6 @@ func Update(client *gophercloud.ServiceClient, serviceID string, serviceType str // It either deletes all associated endpoints, or fails until all endpoints are deleted. 
func Delete(client *gophercloud.ServiceClient, serviceID string) DeleteResult { var res DeleteResult - _, res.Err = client.Request("DELETE", serviceURL(client, serviceID), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = client.Delete(serviceURL(client, serviceID), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go index bbd3c56231b..d449ca36e89 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/requests.go @@ -235,11 +235,7 @@ func Create(c *gophercloud.ServiceClient, options gophercloud.AuthOptions, scope var result CreateResult var response *http.Response - response, result.Err = c.Request("POST", tokenURL(c), gophercloud.RequestOpts{ - JSONBody: &req, - JSONResponse: &result.Body, - OkCodes: []int{201}, - }) + response, result.Err = c.Post(tokenURL(c), req, &result.Body, nil) if result.Err != nil { return result } @@ -251,10 +247,9 @@ func Create(c *gophercloud.ServiceClient, options gophercloud.AuthOptions, scope func Get(c *gophercloud.ServiceClient, token string) GetResult { var result GetResult var response *http.Response - response, result.Err = c.Request("GET", tokenURL(c), gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(c, token), - JSONResponse: &result.Body, - OkCodes: []int{200, 203}, + response, result.Err = c.Get(tokenURL(c), &result.Body, &gophercloud.RequestOpts{ + MoreHeaders: subjectTokenHeaders(c, token), + OkCodes: []int{200, 203}, }) if result.Err != nil { return result @@ -279,9 +274,8 @@ func Validate(c *gophercloud.ServiceClient, token string) (bool, error) { // Revoke immediately makes specified token invalid. 
func Revoke(c *gophercloud.ServiceClient, token string) RevokeResult { var res RevokeResult - _, res.Err = c.Request("DELETE", tokenURL(c), gophercloud.RequestOpts{ + _, res.Err = c.Delete(tokenURL(c), &gophercloud.RequestOpts{ MoreHeaders: subjectTokenHeaders(c, token), - OkCodes: []int{204}, }) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go index d1fff4c2a5f..d134f7d4d07 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v3/tokens/results.go @@ -7,13 +7,58 @@ import ( "github.com/rackspace/gophercloud" ) +// Endpoint represents a single API endpoint offered by a service. +// It matches either a public, internal or admin URL. +// If supported, it contains a region specifier, again if provided. +// The significance of the Region field will depend upon your provider. +type Endpoint struct { + ID string `mapstructure:"id"` + Region string `mapstructure:"region"` + Interface string `mapstructure:"interface"` + URL string `mapstructure:"url"` +} + +// CatalogEntry provides a type-safe interface to an Identity API V3 service catalog listing. +// Each class of service, such as cloud DNS or block storage services, could have multiple +// CatalogEntry representing it (one by interface type, e.g public, admin or internal). +// +// Note: when looking for the desired service, try, whenever possible, to key off the type field. +// Otherwise, you'll tie the representation of the service to a specific provider. +type CatalogEntry struct { + + // Service ID + ID string `mapstructure:"id"` + + // Name will contain the provider-specified name for the service. + Name string `mapstructure:"name"` + + // Type will contain a type string if OpenStack defines a type for the service. 
+ // Otherwise, for provider-specific services, the provider may assign their own type strings. + Type string `mapstructure:"type"` + + // Endpoints will let the caller iterate over all the different endpoints that may exist for + // the service. + Endpoints []Endpoint `mapstructure:"endpoints"` +} + +// ServiceCatalog provides a view into the service catalog from a previous, successful authentication. +type ServiceCatalog struct { + Entries []CatalogEntry +} + // commonResult is the deferred result of a Create or a Get call. type commonResult struct { gophercloud.Result } -// Extract interprets a commonResult as a Token. +// Extract is a shortcut for ExtractToken. +// This function is deprecated and still present for backward compatibility. func (r commonResult) Extract() (*Token, error) { + return r.ExtractToken() +} + +// ExtractToken interprets a commonResult as a Token. +func (r commonResult) ExtractToken() (*Token, error) { if r.Err != nil { return nil, r.Err } @@ -40,7 +85,28 @@ func (r commonResult) Extract() (*Token, error) { return &token, err } -// CreateResult is the deferred response from a Create call. +// ExtractServiceCatalog returns the ServiceCatalog that was generated along with the user's Token. +func (result CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) { + if result.Err != nil { + return nil, result.Err + } + + var response struct { + Token struct { + Entries []CatalogEntry `mapstructure:"catalog"` + } `mapstructure:"token"` + } + + err := mapstructure.Decode(result.Body, &response) + if err != nil { + return nil, err + } + + return &ServiceCatalog{Entries: response.Token.Entries}, nil +} + +// CreateResult defers the interpretation of a created token. +// Use ExtractToken() to interpret it as a Token, or ExtractServiceCatalog() to interpret it as a service catalog. 
type CreateResult struct { commonResult } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/requests.go index 2f04593db95..097ae37f243 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/requests.go @@ -1,6 +1,10 @@ package external -import "github.com/rackspace/gophercloud/openstack/networking/v2/networks" +import ( + "time" + + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" +) // AdminState gives users a solid type to work with for create and update // operations. It is recommended that users use the `Up` and `Down` enums. @@ -25,6 +29,15 @@ type CreateOpts struct { // ToNetworkCreateMap casts a CreateOpts struct to a map. func (o CreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) { + + // DO NOT REMOVE. Though this line seemingly does nothing of value, it is a + // splint to prevent the unit test from failing on Go Tip. We suspect it is a + // compiler issue that will hopefully be worked out prior to our next release. + // Again, for all the unit tests to pass, this line is necessary and sufficient + // at the moment. We should reassess after the Go 1.5 release to determine + // if this line is still needed. 
+ time.Sleep(0 * time.Millisecond) + outer, err := o.Parent.ToNetworkCreateMap() if err != nil { return nil, err diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results.go index 1c173c07a34..54dbf4bb69e 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/external/results.go @@ -68,7 +68,7 @@ func ExtractUpdate(r networks.UpdateResult) (*NetworkExternal, error) { } // ExtractList accepts a Page struct, specifically a NetworkPage struct, and -// extracts the elements into a slice of NetworkExtAttrs structs. In other +// extracts the elements into a slice of NetworkExternal structs. In other // words, a generic collection is mapped into a relevant slice. 
func ExtractList(page pagination.Page) ([]NetworkExternal, error) { var resp struct { diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go index 69f3dcad134..12d587f389b 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go @@ -139,21 +139,14 @@ func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { return res } - _, res.Err = c.Request("POST", rootURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) return res } // Get retrieves a particular firewall based on its unique ID. func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", resourceURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) return res } @@ -209,10 +202,8 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) Upd } // Send request to API - _, res.Err = c.Request("PUT", resourceURL(c, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -220,8 +211,6 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) Upd // Delete will permanently delete a particular firewall based on its unique ID. 
func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(resourceURL(c, id), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go index 95081dfa031..fe07d9abb14 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go @@ -128,21 +128,14 @@ func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { return res } - _, res.Err = c.Request("POST", rootURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) return res } // Get retrieves a particular firewall policy based on its unique ID. 
func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", resourceURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) return res } @@ -198,10 +191,8 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) Upd } // Send request to API - _, res.Err = c.Request("PUT", resourceURL(c, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -209,9 +200,7 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) Upd // Delete will permanently delete a particular firewall policy based on its unique ID. func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(resourceURL(c, id), nil) return res } @@ -230,10 +219,8 @@ func InsertRule(c *gophercloud.ServiceClient, policyID, ruleID, beforeID, afterI // Send request to API var res commonResult - _, res.Err = c.Request("PUT", insertURL(c, policyID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Put(insertURL(c, policyID), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res.Err } @@ -249,10 +236,8 @@ func RemoveRule(c *gophercloud.ServiceClient, policyID, ruleID string) error { // Send request to API var res commonResult - _, res.Err = c.Request("PUT", removeURL(c, policyID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Put(removeURL(c, policyID), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res.Err } diff 
--git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go index 37801068bc6..57a0e8baffc 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go @@ -163,21 +163,14 @@ func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { return res } - _, res.Err = c.Request("POST", rootURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) return res } // Get retrieves a particular firewall rule based on its unique ID. func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", resourceURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) return res } @@ -277,10 +270,8 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) Upd } // Send request to API - _, res.Err = c.Request("PUT", resourceURL(c, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res @@ -289,8 +280,6 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) Upd // Delete will permanently delete a particular firewall rule based on its unique ID. 
func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(resourceURL(c, id), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go index 46f2b22cb16..49d6f0b7a51 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go @@ -107,23 +107,14 @@ func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult { TenantID: opts.TenantID, }} - // Send request to API - _, res.Err = c.Request("POST", rootURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) - + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) return res } // Get retrieves a particular floating IP resource based on its unique ID. 
func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", resourceURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) return res } @@ -159,10 +150,8 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResu // Send request to API var res UpdateResult - _, res.Err = c.Request("PUT", resourceURL(c, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res @@ -173,8 +162,6 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResu // internal ports. func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(resourceURL(c, id), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go index 12640dee1e5..077a71755a8 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go @@ -81,21 +81,14 @@ func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult { } var res CreateResult - _, res.Err = c.Request("POST", rootURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) return res } // Get retrieves a particular router based on its 
unique ID. func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", resourceURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) return res } @@ -133,10 +126,8 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResu // Send request to API var res UpdateResult - _, res.Err = c.Request("PUT", resourceURL(c, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res @@ -145,9 +136,7 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResu // Delete will permanently delete a particular router based on its unique ID. func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(resourceURL(c, id), nil) return res } @@ -197,10 +186,8 @@ func AddInterface(c *gophercloud.ServiceClient, id string, opts InterfaceOpts) I body := request{SubnetID: opts.SubnetID, PortID: opts.PortID} - _, res.Err = c.Request("PUT", addInterfaceURL(c, id), gophercloud.RequestOpts{ - JSONBody: &body, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Put(addInterfaceURL(c, id), body, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res @@ -229,10 +216,8 @@ func RemoveInterface(c *gophercloud.ServiceClient, id string, opts InterfaceOpts body := request{SubnetID: opts.SubnetID, PortID: opts.PortID} - _, res.Err = c.Request("PUT", removeInterfaceURL(c, id), gophercloud.RequestOpts{ - JSONBody: &body, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Put(removeInterfaceURL(c, id), body, &res.Body, &gophercloud.RequestOpts{ + 
OkCodes: []int{200}, }) return res diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go index 023a04dc04a..848938f9837 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go @@ -79,21 +79,14 @@ func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult { }} var res CreateResult - _, res.Err = c.Request("POST", rootURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) return res } // Get retrieves a particular pool member based on its unique ID. func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", resourceURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) return res } @@ -116,10 +109,8 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResu // Send request to API var res UpdateResult - _, res.Err = c.Request("PUT", resourceURL(c, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -127,8 +118,6 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResu // Delete will permanently delete a particular member based on its unique ID. 
func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(resourceURL(c, id), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go index de6f68862fc..71b21ef16ea 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go @@ -176,22 +176,14 @@ func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult { AdminStateUp: opts.AdminStateUp, }} - _, res.Err = c.Request("POST", rootURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) - + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) return res } // Get retrieves a particular health monitor based on its unique ID. 
func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", resourceURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) return res } @@ -258,10 +250,8 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResu AdminStateUp: opts.AdminStateUp, }} - _, res.Err = c.Request("PUT", resourceURL(c, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200, 202}, + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, }) return res @@ -270,8 +260,6 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResu // Delete will permanently delete a particular monitor based on its unique ID. func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(resourceURL(c, id), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go index e7e6d944d05..2bb0acc447f 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go @@ -99,21 +99,14 @@ func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult { }} var res CreateResult - _, res.Err = c.Request("POST", rootURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) return res } 
// Get retrieves a particular pool based on its unique ID. func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", resourceURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) return res } @@ -145,10 +138,8 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResu // Send request to API var res UpdateResult - _, res.Err = c.Request("PUT", resourceURL(c, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -156,9 +147,7 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResu // Delete will permanently delete a particular pool based on its unique ID. func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(resourceURL(c, id), nil) return res } @@ -178,11 +167,7 @@ func AssociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) As reqBody := request{hm{ID: monitorID}} var res AssociateResult - _, res.Err = c.Request("POST", associateURL(c, poolID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) + _, res.Err = c.Post(associateURL(c, poolID), reqBody, &res.Body, nil) return res } @@ -191,8 +176,6 @@ func AssociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) As // check for the health of the members of the pool. 
func DisassociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) AssociateResult { var res AssociateResult - _, res.Err = c.Request("DELETE", disassociateURL(c, poolID, monitorID), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(disassociateURL(c, poolID, monitorID), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go index 5b0bfd9a0bf..6216f873e31 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go @@ -178,22 +178,14 @@ func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult { reqBody.VirtualIP.Persistence = opts.Persistence } - _, res.Err = c.Request("POST", rootURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) - + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) return res } // Get retrieves a particular virtual IP based on its unique ID. 
func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", resourceURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) return res } @@ -249,10 +241,8 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResu } var res UpdateResult - _, res.Err = c.Request("PUT", resourceURL(c, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200, 202}, + _, res.Err = c.Put(resourceURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, }) return res @@ -261,8 +251,6 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) UpdateResu // Delete will permanently delete a particular virtual IP based on its unique ID. func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(resourceURL(c, id), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go index 34535845876..f07d6285dba 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results.go @@ -73,7 +73,7 @@ func ExtractGet(r networks.GetResult) (*NetworkExtAttrs, error) { Network *NetworkExtAttrs `json:"network"` } - err := mapstructure.Decode(r.Body, &res) + err := mapstructure.WeakDecode(r.Body, &res) return res.Network, err } @@ -89,7 +89,7 @@ func ExtractCreate(r networks.CreateResult) (*NetworkExtAttrs, error) { Network *NetworkExtAttrs 
`json:"network"` } - err := mapstructure.Decode(r.Body, &res) + err := mapstructure.WeakDecode(r.Body, &res) return res.Network, err } @@ -105,7 +105,7 @@ func ExtractUpdate(r networks.UpdateResult) (*NetworkExtAttrs, error) { Network *NetworkExtAttrs `json:"network"` } - err := mapstructure.Decode(r.Body, &res) + err := mapstructure.WeakDecode(r.Body, &res) return res.Network, err } @@ -118,7 +118,7 @@ func ExtractList(page pagination.Page) ([]NetworkExtAttrs, error) { Networks []NetworkExtAttrs `mapstructure:"networks" json:"networks"` } - err := mapstructure.Decode(page.(networks.NetworkPage).Body, &resp) + err := mapstructure.WeakDecode(page.(networks.NetworkPage).Body, &resp) return resp.Networks, err } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results_test.go index 9801b2e5e3a..80816926da6 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results_test.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/provider/results_test.go @@ -49,7 +49,7 @@ func TestList(t *testing.T) { "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", "shared": true, "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", - "provider:segmentation_id": null, + "provider:segmentation_id": 1234567890, "provider:physical_network": null, "provider:network_type": "local" } @@ -91,7 +91,7 @@ func TestList(t *testing.T) { ID: "db193ab3-96e3-4cb3-8fc5-05f4296d0324", NetworkType: "local", PhysicalNetwork: "", - SegmentationID: "", + SegmentationID: "1234567890", }, } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/doc.go index 
8ef455ffb39..31f744ccd7a 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/doc.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/doc.go @@ -15,13 +15,13 @@ // // For ingress traffic (to an instance) // - Only traffic matched with security group rules are allowed. -// - When there is no rule defined, all traffic are dropped. +// - When there is no rule defined, all traffic is dropped. // // For egress traffic (from an instance) // - Only traffic matched with security group rules are allowed. // - When there is no rule defined, all egress traffic are dropped. // - When a new security group is created, rules to allow all egress traffic // are automatically added. // // "default security group" is defined for each tenant. // - For the default security group a rule which allows intercommunication diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go index c07508bd7bc..55e4b3b8043 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go @@ -74,30 +74,20 @@ func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult { Description: opts.Description, }} - _, res.Err = c.Request("POST", rootURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) - + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) return res } // Get retrieves a particular security group based on its unique ID. 
func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", resourceURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) return res } // Delete will permanently delete a particular security group based on its unique ID. func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(resourceURL(c, id), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go index 108acf670dc..0b2d10b0efe 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go @@ -150,30 +150,20 @@ func Create(c *gophercloud.ServiceClient, opts CreateOpts) CreateResult { RemoteIPPrefix: opts.RemoteIPPrefix, }} - _, res.Err = c.Request("POST", rootURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) - + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) return res } // Get retrieves a particular security group based on its unique ID. func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", resourceURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(resourceURL(c, id), &res.Body, nil) return res } // Delete will permanently delete a particular security group based on its unique ID. 
func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(resourceURL(c, id), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests.go index b0db67e7d0f..7be32274006 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/networks/requests.go @@ -79,10 +79,7 @@ func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { // Get retrieves a specific network based on its unique ID. func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", getURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) return res } @@ -134,12 +131,7 @@ func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { return res } - // Send request to API - _, res.Err = c.Request("POST", createURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) + _, res.Err = c.Post(createURL(c), reqBody, &res.Body, nil) return res } @@ -184,10 +176,8 @@ func Update(c *gophercloud.ServiceClient, networkID string, opts UpdateOptsBuild } // Send request to API - _, res.Err = c.Request("PUT", updateURL(c, networkID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200, 201}, + _, res.Err = c.Put(updateURL(c, networkID), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, }) return res @@ -196,8 +186,6 @@ func Update(c 
*gophercloud.ServiceClient, networkID string, opts UpdateOptsBuild // Delete accepts a unique ID and deletes the network associated with it. func Delete(c *gophercloud.ServiceClient, networkID string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", deleteURL(c, networkID), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(deleteURL(c, networkID), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests.go index 01d550fc1af..781a3c3e745 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/ports/requests.go @@ -79,10 +79,7 @@ func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { // Get retrieves a specific port based on its unique ID. 
func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", getURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) return res } @@ -155,13 +152,7 @@ func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { return res } - // Response - _, res.Err = c.Request("POST", createURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) - + _, res.Err = c.Post(createURL(c), reqBody, &res.Body, nil) return res } @@ -220,10 +211,8 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) Upd return res } - _, res.Err = c.Request("PUT", updateURL(c, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200, 201}, + _, res.Err = c.Put(updateURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, }) return res } @@ -231,8 +220,6 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) Upd // Delete accepts a unique ID and deletes the port associated with it. 
func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", deleteURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(deleteURL(c, id), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests.go index 63ac2901b7b..6e01f059d75 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/subnets/requests.go @@ -78,10 +78,7 @@ func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { // Get retrieves a specific subnet based on its unique ID. func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", getURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) return res } @@ -171,12 +168,7 @@ func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { return res } - _, res.Err = c.Request("POST", createURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) - + _, res.Err = c.Post(createURL(c), reqBody, &res.Body, nil) return res } @@ -229,10 +221,8 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) Upd return res } - _, res.Err = c.Request("PUT", updateURL(c, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200, 201}, + _, res.Err = c.Put(updateURL(c, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, }) return res @@ -241,8 +231,6 @@ func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) Upd // Delete accepts 
a unique ID and deletes the subnet associated with it. func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", deleteURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(deleteURL(c, id), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests.go index 3e404c3b6bf..a6451157050 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/accounts/requests.go @@ -95,7 +95,7 @@ func Update(c *gophercloud.ServiceClient, opts UpdateOptsBuilder) UpdateResult { resp, err := c.Request("POST", updateURL(c), gophercloud.RequestOpts{ MoreHeaders: h, - OkCodes: []int{204}, + OkCodes: []int{201, 202, 204}, }) res.Header = resp.Header res.Err = err diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests.go index a29d7da5d68..bbf8cdb952c 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers/requests.go @@ -122,9 +122,7 @@ func Create(c *gophercloud.ServiceClient, containerName string, opts CreateOptsB // Delete is a function that deletes a container. 
func Delete(c *gophercloud.ServiceClient, containerName string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", deleteURL(c, containerName), gophercloud.RequestOpts{ - OkCodes: []int{202, 204}, - }) + _, res.Err = c.Delete(deleteURL(c, containerName), nil) return res } @@ -180,7 +178,7 @@ func Update(c *gophercloud.ServiceClient, containerName string, opts UpdateOptsB resp, err := c.Request("POST", updateURL(c, containerName), gophercloud.RequestOpts{ MoreHeaders: h, - OkCodes: []int{202, 204}, + OkCodes: []int{201, 202, 204}, }) res.Header = resp.Header res.Err = err diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests.go index 30ea94cc2d7..7eedde25509 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/objectstorage/v1/objects/requests.go @@ -211,7 +211,6 @@ func Create(c *gophercloud.ServiceClient, containerName, objectName string, cont ropts := gophercloud.RequestOpts{ RawBody: content, MoreHeaders: h, - OkCodes: []int{201, 202}, } resp, err := c.Request("PUT", url, ropts) @@ -310,9 +309,7 @@ func Delete(c *gophercloud.ServiceClient, containerName, objectName string, opts url += query } - resp, err := c.Request("DELETE", url, gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + resp, err := c.Delete(url, nil) res.Header = resp.Header res.Err = err return res @@ -412,7 +409,6 @@ func Update(c *gophercloud.ServiceClient, containerName, objectName string, opts url := updateURL(c, containerName, objectName) resp, err := c.Request("POST", url, gophercloud.RequestOpts{ MoreHeaders: h, - OkCodes: []int{202}, }) res.Header = resp.Header res.Err = err diff --git 
a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/requests.go index 379f34f30ba..9e03e5cc85b 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/buildinfo/requests.go @@ -5,9 +5,6 @@ import "github.com/rackspace/gophercloud" // Get retreives data for the given stack template. func Get(c *gophercloud.ServiceClient) GetResult { var res GetResult - _, res.Err = c.Request("GET", getURL(c), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(getURL(c), &res.Body, nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/requests.go index 37eab1e763e..53c39160206 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/requests.go @@ -11,7 +11,6 @@ func Find(c *gophercloud.ServiceClient, stackName string) FindResult { _, res.Err = c.Request("GET", findURL(c, stackName), gophercloud.RequestOpts{ JSONResponse: &res.Body, - OkCodes: []int{200}, }) return res } @@ -197,9 +196,8 @@ func ListResourceEvents(client *gophercloud.ServiceClient, stackName, stackID, r // Get retreives data for the given stack resource. 
func Get(c *gophercloud.ServiceClient, stackName, stackID, resourceName, eventID string) GetResult { var res GetResult - _, res.Err = c.Request("GET", getURL(c, stackName, stackID, resourceName, eventID), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Get(getURL(c, stackName, stackID, resourceName, eventID), &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/results.go index bf233ae2ad4..3c8f1da4910 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/results.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackevents/results.go @@ -1,6 +1,8 @@ package stackevents import ( + "fmt" + "reflect" "time" "github.com/mitchellh/mapstructure" @@ -106,7 +108,15 @@ func ExtractEvents(page pagination.Page) ([]Event, error) { return nil, err } - events := casted.(map[string]interface{})["events"].([]interface{}) + var events []interface{} + switch casted.(type) { + case map[string]interface{}: + events = casted.(map[string]interface{})["events"].([]interface{}) + case map[string][]interface{}: + events = casted.(map[string][]interface{})["events"] + default: + return res.Res, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } for i, eventRaw := range events { event := eventRaw.(map[string]interface{}) diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/requests.go index 2a66edc8c25..ee9c3c250cc 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/requests.go +++ 
b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/requests.go @@ -12,7 +12,6 @@ func Find(c *gophercloud.ServiceClient, stackName string) FindResult { // Send request to API _, res.Err = c.Request("GET", findURL(c, stackName), gophercloud.RequestOpts{ JSONResponse: &res.Body, - OkCodes: []int{200}, }) return res } @@ -71,9 +70,8 @@ func Get(c *gophercloud.ServiceClient, stackName, stackID, resourceName string) var res GetResult // Send request to API - _, res.Err = c.Request("GET", getURL(c, stackName, stackID, resourceName), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Get(getURL(c, stackName, stackID, resourceName), &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -83,9 +81,8 @@ func Metadata(c *gophercloud.ServiceClient, stackName, stackID, resourceName str var res MetadataResult // Send request to API - _, res.Err = c.Request("GET", metadataURL(c, stackName, stackID, resourceName), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Get(metadataURL(c, stackName, stackID, resourceName), &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -106,9 +103,8 @@ func Schema(c *gophercloud.ServiceClient, resourceType string) SchemaResult { var res SchemaResult // Send request to API - _, res.Err = c.Request("GET", schemaURL(c, resourceType), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Get(schemaURL(c, resourceType), &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -118,9 +114,8 @@ func Template(c *gophercloud.ServiceClient, resourceType string) TemplateResult var res TemplateResult // Send request to API - _, res.Err = c.Request("GET", templateURL(c, resourceType), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Get(templateURL(c, resourceType), &res.Body, 
&gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/results.go index 13f5dd21fbe..69f21daef39 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/results.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stackresources/results.go @@ -1,6 +1,8 @@ package stackresources import ( + "fmt" + "reflect" "time" "github.com/mitchellh/mapstructure" @@ -94,7 +96,15 @@ func ExtractResources(page pagination.Page) ([]Resource, error) { } err := mapstructure.Decode(casted, &response) - resources := casted.(map[string]interface{})["resources"].([]interface{}) + var resources []interface{} + switch casted.(type) { + case map[string]interface{}: + resources = casted.(map[string]interface{})["resources"].([]interface{}) + case map[string][]interface{}: + resources = casted.(map[string][]interface{})["resources"] + default: + return response.Resources, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } for i, resourceRaw := range resources { resource := resourceRaw.(map[string]interface{}) diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/requests.go index c0388c366d1..0dd6af2cfa8 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/requests.go @@ -111,12 +111,7 @@ func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { return res } - // Send request to API - _, res.Err = c.Request("POST", createURL(c), 
gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) + _, res.Err = c.Post(createURL(c), reqBody, &res.Body, nil) return res } @@ -221,12 +216,7 @@ func Adopt(c *gophercloud.ServiceClient, opts AdoptOptsBuilder) AdoptResult { return res } - // Send request to API - _, res.Err = c.Request("POST", adoptURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{201}, - }) + _, res.Err = c.Post(adoptURL(c), reqBody, &res.Body, nil) return res } @@ -302,12 +292,7 @@ func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { // Get retreives a stack based on the stack name and stack ID. func Get(c *gophercloud.ServiceClient, stackName, stackID string) GetResult { var res GetResult - - // Send request to API - _, res.Err = c.Request("GET", getURL(c, stackName, stackID), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(getURL(c, stackName, stackID), &res.Body, nil) return res } @@ -388,22 +373,14 @@ func Update(c *gophercloud.ServiceClient, stackName, stackID string, opts Update return res } - // Send request to API - _, res.Err = c.Request("PUT", updateURL(c, stackName, stackID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - OkCodes: []int{202}, - }) + _, res.Err = c.Put(updateURL(c, stackName, stackID), reqBody, nil, nil) return res } // Delete deletes a stack based on the stack name and stack ID. 
func Delete(c *gophercloud.ServiceClient, stackName, stackID string) DeleteResult { var res DeleteResult - - // Send request to API - _, res.Err = c.Request("DELETE", deleteURL(c, stackName, stackID), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(deleteURL(c, stackName, stackID), nil) return res } @@ -498,10 +475,8 @@ func Preview(c *gophercloud.ServiceClient, opts PreviewOptsBuilder) PreviewResul } // Send request to API - _, res.Err = c.Request("POST", previewURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Post(previewURL(c), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } @@ -510,9 +485,7 @@ func Preview(c *gophercloud.ServiceClient, opts PreviewOptsBuilder) PreviewResul // resources intact, and returns data describing the stack and its resources. func Abandon(c *gophercloud.ServiceClient, stackName, stackID string) AbandonResult { var res AbandonResult - - // Send request to API - _, res.Err = c.Request("DELETE", abandonURL(c, stackName, stackID), gophercloud.RequestOpts{ + _, res.Err = c.Delete(abandonURL(c, stackName, stackID), &gophercloud.RequestOpts{ JSONResponse: &res.Body, OkCodes: []int{200}, }) diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/results.go index ff971e8b8bd..04d3f8ea964 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/results.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacks/results.go @@ -2,6 +2,8 @@ package stacks import ( "encoding/json" + "fmt" + "reflect" "time" "github.com/mitchellh/mapstructure" @@ -73,6 +75,8 @@ type ListedStack struct { // ExtractStacks extracts and returns a slice of ListedStack. 
It is used while iterating // over a stacks.List call. func ExtractStacks(page pagination.Page) ([]ListedStack, error) { + casted := page.(StackPage).Body + var res struct { Stacks []ListedStack `mapstructure:"stacks"` } @@ -82,7 +86,16 @@ func ExtractStacks(page pagination.Page) ([]ListedStack, error) { return nil, err } - rawStacks := (((page.(StackPage).Body).(map[string]interface{}))["stacks"]).([]interface{}) + var rawStacks []interface{} + switch casted.(type) { + case map[string]interface{}: + rawStacks = casted.(map[string]interface{})["stacks"].([]interface{}) + case map[string][]interface{}: + rawStacks = casted.(map[string][]interface{})["stacks"] + default: + return res.Stacks, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + for i := range rawStacks { thisStack := (rawStacks[i]).(map[string]interface{}) diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/requests.go index f57e226efbf..ad1e468d199 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/orchestration/v1/stacktemplates/requests.go @@ -11,7 +11,6 @@ func Get(c *gophercloud.ServiceClient, stackName, stackID string) GetResult { var res GetResult _, res.Err = c.Request("GET", getURL(c, stackName, stackID), gophercloud.RequestOpts{ JSONResponse: &res.Body, - OkCodes: []int{200}, }) return res } @@ -52,10 +51,8 @@ func Validate(c *gophercloud.ServiceClient, opts ValidateOptsBuilder) ValidateRe return res } - _, res.Err = c.Request("POST", validateURL(c), gophercloud.RequestOpts{ - JSONBody: reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Post(validateURL(c), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res } diff --git 
a/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client.go index f342a5e54e0..0dff2cfc303 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client.go @@ -11,7 +11,7 @@ import ( ) // DefaultUserAgent is the default User-Agent string set in the request header. -const DefaultUserAgent = "gophercloud/v1.0" +const DefaultUserAgent = "gophercloud/1.0.0" // UserAgent represents a User-Agent header. type UserAgent struct { @@ -196,25 +196,28 @@ func (client *ProviderClient) Request(method, url string, options RequestOpts) ( } } - // Validate the response code, if requested to do so. - if options.OkCodes != nil { - var ok bool - for _, code := range options.OkCodes { - if resp.StatusCode == code { - ok = true - break - } + // Allow default OkCodes if none explicitly set + if options.OkCodes == nil { + options.OkCodes = defaultOkCodes(method) + } + + // Validate the HTTP response status. 
+ var ok bool + for _, code := range options.OkCodes { + if resp.StatusCode == code { + ok = true + break } - if !ok { - body, _ := ioutil.ReadAll(resp.Body) - resp.Body.Close() - return resp, &UnexpectedResponseCodeError{ - URL: url, - Method: method, - Expected: options.OkCodes, - Actual: resp.StatusCode, - Body: body, - } + } + if !ok { + body, _ := ioutil.ReadAll(resp.Body) + resp.Body.Close() + return resp, &UnexpectedResponseCodeError{ + URL: url, + Method: method, + Expected: options.OkCodes, + Actual: resp.StatusCode, + Body: body, } } @@ -226,3 +229,72 @@ func (client *ProviderClient) Request(method, url string, options RequestOpts) ( return resp, nil } + +func defaultOkCodes(method string) []int { + switch { + case method == "GET": + return []int{200} + case method == "POST": + return []int{201, 202} + case method == "PUT": + return []int{201, 202} + case method == "DELETE": + return []int{202, 204} + } + + return []int{} +} + +func (client *ProviderClient) Get(url string, JSONResponse *interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = &RequestOpts{} + } + if JSONResponse != nil { + opts.JSONResponse = JSONResponse + } + return client.Request("GET", url, *opts) +} + +func (client *ProviderClient) Post(url string, JSONBody interface{}, JSONResponse *interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = &RequestOpts{} + } + + if v, ok := (JSONBody).(io.Reader); ok { + opts.RawBody = v + } else if JSONBody != nil { + opts.JSONBody = JSONBody + } + + if JSONResponse != nil { + opts.JSONResponse = JSONResponse + } + + return client.Request("POST", url, *opts) +} + +func (client *ProviderClient) Put(url string, JSONBody interface{}, JSONResponse *interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = &RequestOpts{} + } + + if v, ok := (JSONBody).(io.Reader); ok { + opts.RawBody = v + } else if JSONBody != nil { + opts.JSONBody = JSONBody + } + + if JSONResponse != 
nil { + opts.JSONResponse = JSONResponse + } + + return client.Request("PUT", url, *opts) +} + +func (client *ProviderClient) Delete(url string, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = &RequestOpts{} + } + + return client.Request("DELETE", url, *opts) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client_test.go index 8fd20f8f812..d79d862b2cf 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client_test.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/provider_client_test.go @@ -18,18 +18,18 @@ func TestAuthenticatedHeaders(t *testing.T) { func TestUserAgent(t *testing.T) { p := &ProviderClient{} - p.UserAgent.Prepend("custom-user-agent/v2.4") - expected := "custom-user-agent/v2.4 gophercloud/v1.0" + p.UserAgent.Prepend("custom-user-agent/2.4.0") + expected := "custom-user-agent/2.4.0 gophercloud/1.0.0" actual := p.UserAgent.Join() th.CheckEquals(t, expected, actual) - p.UserAgent.Prepend("another-custom-user-agent/v0.3", "a-third-ua/v5.9") - expected = "another-custom-user-agent/v0.3 a-third-ua/v5.9 custom-user-agent/v2.4 gophercloud/v1.0" + p.UserAgent.Prepend("another-custom-user-agent/0.3.0", "a-third-ua/5.9.0") + expected = "another-custom-user-agent/0.3.0 a-third-ua/5.9.0 custom-user-agent/2.4.0 gophercloud/1.0.0" actual = p.UserAgent.Join() th.CheckEquals(t, expected, actual) p.UserAgent = UserAgent{} - expected = "gophercloud/v1.0" + expected = "gophercloud/1.0.0" actual = p.UserAgent.Join() th.CheckEquals(t, expected, actual) } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/delegate_test.go index 3c058016a06..731fc6dd00c 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/delegate_test.go +++ 
b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/cdn/v1/base/delegate_test.go @@ -21,7 +21,7 @@ func TestGetHomeDocument(t *testing.T) { "href-template": "services{?marker,limit}", "href-vars": map[string]interface{}{ "marker": "param/marker", - "limit": "param/limit", + "limit": "param/limit", }, "hints": map[string]interface{}{ "allow": []string{"GET"}, diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/client.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/client.go index 8f1f34f7b8d..db3f305b52a 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/client.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/client.go @@ -202,3 +202,13 @@ func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.Endpo } return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil } + +// NewRackConnectV3 creates a ServiceClient that may be used to access the v3 RackConnect service. +func NewRackConnectV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + eo.ApplyDefaults("rax:rackconnect") + url, err := client.EndpointLocator(eo) + if err != nil { + return nil, err + } + return &gophercloud.ServiceClient{ProviderClient: client, Endpoint: url}, nil +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests.go index 3aefb0cdca1..cebbffd36a6 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/networks/requests.go @@ -21,10 +21,7 @@ func List(c *gophercloud.ServiceClient) pagination.Pager { // Get retrieves a specific network based on its unique ID. 
func Get(c *gophercloud.ServiceClient, id string) GetResult { var res GetResult - _, res.Err = c.Request("GET", getURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) return res } @@ -78,10 +75,8 @@ func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { } // Send request to API - _, res.Err = c.Request("POST", createURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200, 201, 202}, + _, res.Err = c.Post(createURL(c), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, }) return res } @@ -89,8 +84,6 @@ func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { // Delete accepts a unique ID and deletes the network associated with it. func Delete(c *gophercloud.ServiceClient, networkID string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", deleteURL(c, networkID), gophercloud.RequestOpts{ - OkCodes: []int{204}, - }) + _, res.Err = c.Delete(deleteURL(c, networkID), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go index 173868edf1b..7810d156a0f 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate.go @@ -54,6 +54,23 @@ func Rebuild(client *gophercloud.ServiceClient, id string, opts os.RebuildOptsBu return os.Rebuild(client, id, opts) } +// Resize instructs the provider to change the flavor of the server. +// Note that this implies rebuilding it. +// Unfortunately, one cannot pass rebuild parameters to the resize function. +// When the resize completes, the server will be in RESIZE_VERIFY state. 
+// While in this state, you can explore the use of the new server's configuration. +// If you like it, call ConfirmResize() to commit the resize permanently. +// Otherwise, call RevertResize() to restore the old configuration. +func Resize(client *gophercloud.ServiceClient, id string, opts os.ResizeOptsBuilder) os.ActionResult { + return os.Resize(client, id, opts) +} + +// ConfirmResize confirms a previous resize operation on a server. +// See Resize() for more details. +func ConfirmResize(client *gophercloud.ServiceClient, id string) os.ActionResult { + return os.ConfirmResize(client, id) +} + // WaitForStatus will continually poll a server until it successfully transitions to a specified // status. It will do this for at most the number of seconds specified. func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { @@ -64,3 +81,36 @@ func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) er func ExtractServers(page pagination.Page) ([]os.Server, error) { return os.ExtractServers(page) } + +// ListAddresses makes a request against the API to list the servers IP addresses. +func ListAddresses(client *gophercloud.ServiceClient, id string) pagination.Pager { + return os.ListAddresses(client, id) +} + +// ExtractAddresses interprets the results of a single page from a ListAddresses() call, producing a map of Address slices. +func ExtractAddresses(page pagination.Page) (map[string][]os.Address, error) { + return os.ExtractAddresses(page) +} + +// ListAddressesByNetwork makes a request against the API to list the servers IP addresses +// for the given network. +func ListAddressesByNetwork(client *gophercloud.ServiceClient, id, network string) pagination.Pager { + return os.ListAddressesByNetwork(client, id, network) +} + +// ExtractNetworkAddresses interprets the results of a single page from a ListAddressesByNetwork() call, producing a map of Address slices. 
+func ExtractNetworkAddresses(page pagination.Page) ([]os.Address, error) { + return os.ExtractNetworkAddresses(page) +} + +// Metadata requests all the metadata for the given server ID. +func Metadata(client *gophercloud.ServiceClient, id string) os.GetMetadataResult { + return os.Metadata(client, id) +} + +// UpdateMetadata updates (or creates) all the metadata specified by opts for the given server ID. +// This operation does not affect already-existing metadata that is not specified +// by opts. +func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts os.UpdateMetadataOptsBuilder) os.UpdateMetadataResult { + return os.UpdateMetadata(client, id, opts) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate_test.go index c3d9cc0897e..03e7acea846 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate_test.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/servers/delegate_test.go @@ -132,3 +132,51 @@ func TestRebuildServer(t *testing.T) { th.AssertNoErr(t, err) th.CheckDeepEquals(t, &GophercloudServer, actual) } + +func TestListAddresses(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + os.HandleAddressListSuccessfully(t) + + expected := os.ListAddressesExpected + pages := 0 + err := ListAddresses(client.ServiceClient(), "asdfasdfasdf").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractAddresses(page) + th.AssertNoErr(t, err) + + if len(actual) != 2 { + t.Fatalf("Expected 2 networks, got %d", len(actual)) + } + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, pages) +} + +func TestListAddressesByNetwork(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + 
os.HandleNetworkAddressListSuccessfully(t) + + expected := os.ListNetworkAddressesExpected + pages := 0 + err := ListAddressesByNetwork(client.ServiceClient(), "asdfasdfasdf", "public").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := ExtractNetworkAddresses(page) + th.AssertNoErr(t, err) + + if len(actual) != 2 { + t.Fatalf("Expected 2 addresses, got %d", len(actual)) + } + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, pages) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests.go index 3c81ef80f8b..1ff7c5ae55f 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/compute/v2/virtualinterfaces/requests.go @@ -28,10 +28,8 @@ func Create(c *gophercloud.ServiceClient, instanceID, networkID string) CreateRe } // Send request to API - _, res.Err = c.Request("POST", createURL(c, instanceID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200, 201, 202}, + _, res.Err = c.Post(createURL(c, instanceID), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, }) return res } @@ -40,7 +38,7 @@ func Create(c *gophercloud.ServiceClient, instanceID, networkID string) CreateRe // instanceID. 
func Delete(c *gophercloud.ServiceClient, instanceID, interfaceID string) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", deleteURL(c, instanceID, interfaceID), gophercloud.RequestOpts{ + _, res.Err = c.Delete(deleteURL(c, instanceID, interfaceID), &gophercloud.RequestOpts{ OkCodes: []int{200, 204}, }) return res diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/requests.go index 94d98e34cb1..d4ce7c01f44 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/acl/requests.go @@ -74,11 +74,7 @@ func Create(client *gophercloud.ServiceClient, loadBalancerID int, opts CreateOp return res } - _, res.Err = client.Request("POST", rootURL(client, loadBalancerID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - OkCodes: []int{202}, - }) - + _, res.Err = client.Post(rootURL(client, loadBalancerID), reqBody, nil, nil) return res } @@ -95,19 +91,14 @@ func BulkDelete(c *gophercloud.ServiceClient, loadBalancerID int, itemIDs []int) url := rootURL(c, loadBalancerID) url += gophercloud.IDSliceToQueryString("id", itemIDs) - _, res.Err = c.Request("DELETE", url, gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - + _, res.Err = c.Delete(url, nil) return res } // Delete will remove a single network item from a load balancer's access list. func Delete(c *gophercloud.ServiceClient, lbID, itemID int) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, lbID, itemID), gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) + _, res.Err = c.Delete(resourceURL(c, lbID, itemID), nil) return res } @@ -115,8 +106,6 @@ func Delete(c *gophercloud.ServiceClient, lbID, itemID int) DeleteResult { // effectively resetting it and allowing all traffic. 
func DeleteAll(c *gophercloud.ServiceClient, lbID int) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", rootURL(c, lbID), gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) + _, res.Err = c.Delete(rootURL(c, lbID), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/requests.go index 49a46f6d4e5..46f5f02a43a 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/lbs/requests.go @@ -227,12 +227,7 @@ func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { return res } - _, res.Err = c.Request("POST", rootURL(c), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{202}, - }) - + _, res.Err = c.Post(rootURL(c), reqBody, &res.Body, nil) return res } @@ -243,9 +238,8 @@ func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) CreateResult { func Get(c *gophercloud.ServiceClient, id int) GetResult { var res GetResult - _, res.Err = c.Request("GET", resourceURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Get(resourceURL(c, id), &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res @@ -269,21 +263,14 @@ func BulkDelete(c *gophercloud.ServiceClient, ids []int) DeleteResult { url := rootURL(c) url += gophercloud.IDSliceToQueryString("id", ids) - _, res.Err = c.Request("DELETE", url, gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - + _, res.Err = c.Delete(url, nil) return res } // Delete removes a single load balancer. 
func Delete(c *gophercloud.ServiceClient, id int) DeleteResult { var res DeleteResult - - _, res.Err = c.Request("DELETE", resourceURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - + _, res.Err = c.Delete(resourceURL(c, id), nil) return res } @@ -363,11 +350,7 @@ func Update(c *gophercloud.ServiceClient, id int, opts UpdateOptsBuilder) Update return res } - _, res.Err = c.Request("PUT", resourceURL(c, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - OkCodes: []int{202}, - }) - + _, res.Err = c.Put(resourceURL(c, id), reqBody, nil, nil) return res } @@ -394,10 +377,7 @@ func ListAlgorithms(client *gophercloud.ServiceClient) pagination.Pager { func IsLoggingEnabled(client *gophercloud.ServiceClient, id int) (bool, error) { var body interface{} - _, err := client.Request("GET", loggingURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &body, - OkCodes: []int{200}, - }) + _, err := client.Get(loggingURL(client, id), &body, nil) if err != nil { return false, err } @@ -420,39 +400,22 @@ func toConnLoggingMap(state bool) map[string]map[string]bool { // EnableLogging will enable connection logging for a specified load balancer. func EnableLogging(client *gophercloud.ServiceClient, id int) gophercloud.ErrResult { - reqBody := toConnLoggingMap(true) var res gophercloud.ErrResult - - _, res.Err = client.Request("PUT", loggingURL(client, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - OkCodes: []int{202}, - }) - + _, res.Err = client.Put(loggingURL(client, id), toConnLoggingMap(true), nil, nil) return res } // DisableLogging will disable connection logging for a specified load balancer. 
func DisableLogging(client *gophercloud.ServiceClient, id int) gophercloud.ErrResult { - reqBody := toConnLoggingMap(false) var res gophercloud.ErrResult - - _, res.Err = client.Request("PUT", loggingURL(client, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - OkCodes: []int{202}, - }) - + _, res.Err = client.Put(loggingURL(client, id), toConnLoggingMap(false), nil, nil) return res } // GetErrorPage will retrieve the current error page for the load balancer. func GetErrorPage(client *gophercloud.ServiceClient, id int) ErrorPageResult { var res ErrorPageResult - - _, res.Err = client.Request("GET", errorPageURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) - + _, res.Err = client.Get(errorPageURL(client, id), &res.Body, nil) return res } @@ -464,10 +427,8 @@ func SetErrorPage(client *gophercloud.ServiceClient, id int, html string) ErrorP type stringMap map[string]string reqBody := map[string]stringMap{"errorpage": stringMap{"content": html}} - _, res.Err = client.Request("PUT", errorPageURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - JSONBody: &reqBody, - OkCodes: []int{200}, + _, res.Err = client.Put(errorPageURL(client, id), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res @@ -476,23 +437,16 @@ func SetErrorPage(client *gophercloud.ServiceClient, id int, html string) ErrorP // DeleteErrorPage will delete the current error page for the load balancer. func DeleteErrorPage(client *gophercloud.ServiceClient, id int) gophercloud.ErrResult { var res gophercloud.ErrResult - - _, res.Err = client.Request("DELETE", errorPageURL(client, id), gophercloud.RequestOpts{ + _, res.Err = client.Delete(errorPageURL(client, id), &gophercloud.RequestOpts{ OkCodes: []int{200}, }) - return res } // GetStats will retrieve detailed stats related to the load balancer's usage. 
func GetStats(client *gophercloud.ServiceClient, id int) StatsResult { var res StatsResult - - _, res.Err = client.Request("GET", statsURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) - + _, res.Err = client.Get(statsURL(client, id), &res.Body, nil) return res } @@ -507,10 +461,7 @@ func GetStats(client *gophercloud.ServiceClient, id int) StatsResult { func IsContentCached(client *gophercloud.ServiceClient, id int) (bool, error) { var body interface{} - _, err := client.Request("GET", cacheURL(client, id), gophercloud.RequestOpts{ - JSONResponse: &body, - OkCodes: []int{200}, - }) + _, err := client.Get(cacheURL(client, id), &body, nil) if err != nil { return false, err } @@ -533,26 +484,14 @@ func toCachingMap(state bool) map[string]map[string]bool { // EnableCaching will enable content-caching for the specified load balancer. func EnableCaching(client *gophercloud.ServiceClient, id int) gophercloud.ErrResult { - reqBody := toCachingMap(true) var res gophercloud.ErrResult - - _, res.Err = client.Request("PUT", cacheURL(client, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - OkCodes: []int{202}, - }) - + _, res.Err = client.Put(cacheURL(client, id), toCachingMap(true), nil, nil) return res } // DisableCaching will disable content-caching for the specified load balancer. 
func DisableCaching(client *gophercloud.ServiceClient, id int) gophercloud.ErrResult { - reqBody := toCachingMap(false) var res gophercloud.ErrResult - - _, res.Err = client.Request("PUT", cacheURL(client, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - OkCodes: []int{202}, - }) - + _, res.Err = client.Put(cacheURL(client, id), toCachingMap(false), nil, nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/requests.go index 917282c63ba..d4ba27653ca 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/monitors/requests.go @@ -141,33 +141,20 @@ func Update(c *gophercloud.ServiceClient, id int, opts UpdateOptsBuilder) Update return res } - _, res.Err = c.Request("PUT", rootURL(c, id), gophercloud.RequestOpts{ - JSONBody: &reqBody, - OkCodes: []int{202}, - }) - + _, res.Err = c.Put(rootURL(c, id), reqBody, nil, nil) return res } // Get is the operation responsible for showing details of a health monitor. func Get(c *gophercloud.ServiceClient, id int) GetResult { var res GetResult - - _, res.Err = c.Request("GET", rootURL(c, id), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) - + _, res.Err = c.Get(rootURL(c, id), &res.Body, nil) return res } // Delete is the operation responsible for deleting a health monitor. 
func Delete(c *gophercloud.ServiceClient, id int) DeleteResult { var res DeleteResult - - _, res.Err = c.Request("DELETE", rootURL(c, id), gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - + _, res.Err = c.Delete(rootURL(c, id), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/requests.go index 86fe5d7c8ca..02af86b5c1e 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/nodes/requests.go @@ -112,11 +112,8 @@ func Create(client *gophercloud.ServiceClient, loadBalancerID int, opts CreateOp return res } - resp, err := client.Request("POST", rootURL(client, loadBalancerID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{202}, - }) + resp, err := client.Post(rootURL(client, loadBalancerID), reqBody, &res.Body, nil) + if err != nil { res.Err = err return res @@ -145,22 +142,14 @@ func BulkDelete(c *gophercloud.ServiceClient, loadBalancerID int, nodeIDs []int) url := rootURL(c, loadBalancerID) url += gophercloud.IDSliceToQueryString("id", nodeIDs) - _, res.Err = c.Request("DELETE", url, gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - + _, res.Err = c.Delete(url, nil) return res } // Get is the operation responsible for showing details for a single node. 
func Get(c *gophercloud.ServiceClient, lbID, nodeID int) GetResult { var res GetResult - - _, res.Err = c.Request("GET", resourceURL(c, lbID, nodeID), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) - + _, res.Err = c.Get(resourceURL(c, lbID, nodeID), &res.Body, nil) return res } @@ -213,20 +202,14 @@ func Update(c *gophercloud.ServiceClient, lbID, nodeID int, opts UpdateOptsBuild return res } - _, res.Err = c.Request("PUT", resourceURL(c, lbID, nodeID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - OkCodes: []int{202}, - }) - + _, res.Err = c.Put(resourceURL(c, lbID, nodeID), reqBody, nil, nil) return res } // Delete is the operation responsible for permanently deleting a node. func Delete(c *gophercloud.ServiceClient, lbID, nodeID int) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, lbID, nodeID), gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) + _, res.Err = c.Delete(resourceURL(c, lbID, nodeID), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/requests.go index 5572407edda..a93d766cd92 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/sessions/requests.go @@ -42,12 +42,7 @@ func Enable(c *gophercloud.ServiceClient, lbID int, opts CreateOptsBuilder) Enab return res } - _, res.Err = c.Request("PUT", rootURL(c, lbID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{202}, - }) - + _, res.Err = c.Put(rootURL(c, lbID), reqBody, &res.Body, nil) return res } @@ -55,12 +50,7 @@ func Enable(c *gophercloud.ServiceClient, lbID int, opts CreateOptsBuilder) Enab // persistence configuration for a particular load balancer. 
func Get(c *gophercloud.ServiceClient, lbID int) GetResult { var res GetResult - - _, res.Err = c.Request("GET", rootURL(c, lbID), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) - + _, res.Err = c.Get(rootURL(c, lbID), &res.Body, nil) return res } @@ -68,10 +58,6 @@ func Get(c *gophercloud.ServiceClient, lbID int) GetResult { // particular load balancer. func Disable(c *gophercloud.ServiceClient, lbID int) DisableResult { var res DisableResult - - _, res.Err = c.Request("DELETE", rootURL(c, lbID), gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - + _, res.Err = c.Delete(rootURL(c, lbID), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/requests.go index e9c65142862..bb53ef89602 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/ssl/requests.go @@ -85,12 +85,9 @@ func Update(c *gophercloud.ServiceClient, lbID int, opts UpdateOptsBuilder) Upda return res } - _, res.Err = c.Request("PUT", rootURL(c, lbID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Put(rootURL(c, lbID), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) - return res } @@ -98,12 +95,7 @@ func Update(c *gophercloud.ServiceClient, lbID int, opts UpdateOptsBuilder) Upda // Termination configuration for a load balancer. 
func Get(c *gophercloud.ServiceClient, lbID int) GetResult { var res GetResult - - _, res.Err = c.Request("GET", rootURL(c, lbID), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) - + _, res.Err = c.Get(rootURL(c, lbID), &res.Body, nil) return res } @@ -111,11 +103,9 @@ func Get(c *gophercloud.ServiceClient, lbID int) GetResult { // configuration for a load balancer. func Delete(c *gophercloud.ServiceClient, lbID int) DeleteResult { var res DeleteResult - - _, res.Err = c.Request("DELETE", rootURL(c, lbID), gophercloud.RequestOpts{ + _, res.Err = c.Delete(rootURL(c, lbID), &gophercloud.RequestOpts{ OkCodes: []int{200}, }) - return res } @@ -180,10 +170,8 @@ func CreateCert(c *gophercloud.ServiceClient, lbID int, opts CreateCertOptsBuild return res } - _, res.Err = c.Request("POST", certURL(c, lbID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{200}, + _, res.Err = c.Post(certURL(c, lbID), reqBody, &res.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, }) return res @@ -192,12 +180,7 @@ func CreateCert(c *gophercloud.ServiceClient, lbID int, opts CreateCertOptsBuild // GetCert will show the details of an existing SSL certificate. 
func GetCert(c *gophercloud.ServiceClient, lbID, certID int) GetCertResult { var res GetCertResult - - _, res.Err = c.Request("GET", certResourceURL(c, lbID, certID), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) - + _, res.Err = c.Get(certResourceURL(c, lbID, certID), &res.Body, nil) return res } @@ -247,12 +230,7 @@ func UpdateCert(c *gophercloud.ServiceClient, lbID, certID int, opts UpdateCertO return res } - _, res.Err = c.Request("PUT", certResourceURL(c, lbID, certID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{202}, - }) - + _, res.Err = c.Put(certResourceURL(c, lbID, certID), reqBody, &res.Body, nil) return res } @@ -261,7 +239,7 @@ func UpdateCert(c *gophercloud.ServiceClient, lbID, certID int, opts UpdateCertO func DeleteCert(c *gophercloud.ServiceClient, lbID, certID int) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", certResourceURL(c, lbID, certID), gophercloud.RequestOpts{ + _, res.Err = c.Delete(certResourceURL(c, lbID, certID), &gophercloud.RequestOpts{ OkCodes: []int{200}, }) diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/requests.go index 2680a892b68..0446b97a14f 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/throttle/requests.go @@ -55,12 +55,7 @@ func Create(c *gophercloud.ServiceClient, lbID int, opts CreateOptsBuilder) Crea return res } - _, res.Err = c.Request("PUT", rootURL(c, lbID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{202}, - }) - + _, res.Err = c.Put(rootURL(c, lbID), reqBody, &res.Body, nil) return res } @@ -68,12 +63,7 @@ func Create(c *gophercloud.ServiceClient, lbID int, opts CreateOptsBuilder) Crea // 
throttling configuration for a load balancer. func Get(c *gophercloud.ServiceClient, lbID int) GetResult { var res GetResult - - _, res.Err = c.Request("GET", rootURL(c, lbID), gophercloud.RequestOpts{ - JSONResponse: &res.Body, - OkCodes: []int{200}, - }) - + _, res.Err = c.Get(rootURL(c, lbID), &res.Body, nil) return res } @@ -81,10 +71,6 @@ func Get(c *gophercloud.ServiceClient, lbID int) GetResult { // configuration for a load balancer. func Delete(c *gophercloud.ServiceClient, lbID int) DeleteResult { var res DeleteResult - - _, res.Err = c.Request("DELETE", rootURL(c, lbID), gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - + _, res.Err = c.Delete(rootURL(c, lbID), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/requests.go index d52a73afd47..2bc924f293b 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/requests.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/lb/v1/vips/requests.go @@ -67,12 +67,7 @@ func Create(c *gophercloud.ServiceClient, lbID int, opts CreateOptsBuilder) Crea return res } - _, res.Err = c.Request("POST", rootURL(c, lbID), gophercloud.RequestOpts{ - JSONBody: &reqBody, - JSONResponse: &res.Body, - OkCodes: []int{202}, - }) - + _, res.Err = c.Post(rootURL(c, lbID), reqBody, &res.Body, nil) return res } @@ -90,18 +85,13 @@ func BulkDelete(c *gophercloud.ServiceClient, loadBalancerID int, vipIDs []int) url := rootURL(c, loadBalancerID) url += gophercloud.IDSliceToQueryString("id", vipIDs) - _, res.Err = c.Request("DELETE", url, gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) - + _, res.Err = c.Delete(url, nil) return res } // Delete is the operation responsible for permanently deleting a VIP. 
func Delete(c *gophercloud.ServiceClient, lbID, vipID int) DeleteResult { var res DeleteResult - _, res.Err = c.Request("DELETE", resourceURL(c, lbID, vipID), gophercloud.RequestOpts{ - OkCodes: []int{202}, - }) + _, res.Err = c.Delete(resourceURL(c, lbID, vipID), nil) return res } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/ports/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/ports/delegate.go index 091b99e0f4d..95728d18558 100644 --- a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/ports/delegate.go +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/ports/delegate.go @@ -24,6 +24,9 @@ func Get(c *gophercloud.ServiceClient, networkID string) os.GetResult { // Create accepts a CreateOpts struct and creates a new network using the values // provided. You must remember to provide a NetworkID value. +// +// NOTE: Currently the SecurityGroup option is not implemented to work with +// Rackspace. func Create(c *gophercloud.ServiceClient, opts os.CreateOptsBuilder) os.CreateResult { return os.Create(c, opts) } diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/doc.go new file mode 100644 index 00000000000..31f744ccd7a --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/doc.go @@ -0,0 +1,32 @@ +// Package security contains functionality to work with security group and +// security group rules Neutron resources. +// +// Security groups and security group rules allows administrators and tenants +// the ability to specify the type of traffic and direction (ingress/egress) +// that is allowed to pass through a port. A security group is a container for +// security group rules. 
+// +// When a port is created in Networking it is associated with a security group. +// If a security group is not specified the port is associated with a 'default' +// security group. By default, this group drops all ingress traffic and allows +// all egress. Rules can be added to this group in order to change the behaviour. +// +// The basic characteristics of Neutron Security Groups are: +// +// For ingress traffic (to an instance) +// - Only traffic matched with security group rules are allowed. +// - When there is no rule defined, all traffic is dropped. +// +// For egress traffic (from an instance) +// - Only traffic matched with security group rules are allowed. +// - When there is no rule defined, all egress traffic are dropped. +// - When a new security group is created, rules to allow all egress traffic +// is automatically added. +// +// "default security group" is defined for each tenant. +// - For the default security group a rule which allows intercommunication +// among hosts associated with the default security group is defined by default. +// - As a result, all egress traffic and intercommunication in the default +// group are allowed and all ingress from outside of the default group is +// dropped by default (in the default security group). 
+package security diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate.go new file mode 100644 index 00000000000..1e9a23a05a1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate.go @@ -0,0 +1,30 @@ +package groups + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a Pager which allows you to iterate over a collection of +// security groups. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts os.ListOpts) pagination.Pager { + return os.List(c, opts) +} + +// Create is an operation which provisions a new security group with default +// security group rules for the IPv4 and IPv6 ether types. +func Create(c *gophercloud.ServiceClient, opts os.CreateOpts) os.CreateResult { + return os.Create(c, opts) +} + +// Get retrieves a particular security group based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) os.GetResult { + return os.Get(c, id) +} + +// Delete will permanently delete a particular security group based on its unique ID. 
+func Delete(c *gophercloud.ServiceClient, id string) os.DeleteResult { + return os.Delete(c, id) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate_test.go new file mode 100644 index 00000000000..45cd3ba8d4c --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/groups/delegate_test.go @@ -0,0 +1,206 @@ +package groups + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + osGroups "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/groups" + osRules "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "security_groups": [ + { + "description": "default", + "id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "name": "default", + "security_group_rules": [], + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ] + } + `) + }) + + count := 0 + + List(fake.ServiceClient(), osGroups.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := osGroups.ExtractGroups(page) + if err != nil { + t.Errorf("Failed to extract secgroups: %v", err) + return false, err + } + + expected := []osGroups.SecGroup{ + osGroups.SecGroup{ + Description: "default", + ID: "85cc3048-abc3-43cc-89b3-377341426ac5", + Name: "default", + Rules: 
[]osRules.SecGroupRule{}, + TenantID: "e4f50856753b4dc6afee5fa6b9b6c550", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "security_group": { + "name": "new-webservers", + "description": "security group for webservers" + } + } + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` + { + "security_group": { + "description": "security group for webservers", + "id": "2076db17-a522-4506-91de-c6dd8e837028", + "name": "new-webservers", + "security_group_rules": [ + { + "direction": "egress", + "ethertype": "IPv4", + "id": "38ce2d8e-e8f1-48bd-83c2-d33cb9f50c3d", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "2076db17-a522-4506-91de-c6dd8e837028", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + }, + { + "direction": "egress", + "ethertype": "IPv6", + "id": "565b9502-12de-4ffd-91e9-68885cff6ae1", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "2076db17-a522-4506-91de-c6dd8e837028", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ], + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + } + `) + }) + + opts := osGroups.CreateOpts{Name: "new-webservers", Description: "security group for webservers"} + _, err := Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) +} + +func TestGet(t 
*testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups/85cc3048-abc3-43cc-89b3-377341426ac5", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "security_group": { + "description": "default", + "id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "name": "default", + "security_group_rules": [ + { + "direction": "egress", + "ethertype": "IPv6", + "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + }, + { + "direction": "egress", + "ethertype": "IPv4", + "id": "93aa42e5-80db-4581-9391-3a608bd0e448", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ], + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + } + `) + }) + + sg, err := Get(fake.ServiceClient(), "85cc3048-abc3-43cc-89b3-377341426ac5").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "default", sg.Description) + th.AssertEquals(t, "85cc3048-abc3-43cc-89b3-377341426ac5", sg.ID) + th.AssertEquals(t, "default", sg.Name) + th.AssertEquals(t, 2, len(sg.Rules)) + th.AssertEquals(t, "e4f50856753b4dc6afee5fa6b9b6c550", sg.TenantID) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + 
w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate.go new file mode 100644 index 00000000000..23b4b318e2b --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate.go @@ -0,0 +1,30 @@ +package rules + +import ( + "github.com/rackspace/gophercloud" + os "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns a Pager which allows you to iterate over a collection of +// security group rules. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts os.ListOpts) pagination.Pager { + return os.List(c, opts) +} + +// Create is an operation which provisions a new security group with default +// security group rules for the IPv4 and IPv6 ether types. +func Create(c *gophercloud.ServiceClient, opts os.CreateOpts) os.CreateResult { + return os.Create(c, opts) +} + +// Get retrieves a particular security group based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) os.GetResult { + return os.Get(c, id) +} + +// Delete will permanently delete a particular security group based on its unique ID. 
+func Delete(c *gophercloud.ServiceClient, id string) os.DeleteResult { + return os.Delete(c, id) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate_test.go new file mode 100644 index 00000000000..3563fbeaa6a --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/networking/v2/security/rules/delegate_test.go @@ -0,0 +1,236 @@ +package rules + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/rackspace/gophercloud/openstack/networking/v2/common" + osRules "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "security_group_rules": [ + { + "direction": "egress", + "ethertype": "IPv6", + "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + }, + { + "direction": "egress", + "ethertype": "IPv4", + "id": "93aa42e5-80db-4581-9391-3a608bd0e448", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ] + } + `) + }) + + count := 0 + + 
List(fake.ServiceClient(), osRules.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := osRules.ExtractRules(page) + if err != nil { + t.Errorf("Failed to extract secrules: %v", err) + return false, err + } + + expected := []osRules.SecGroupRule{ + osRules.SecGroupRule{ + Direction: "egress", + EtherType: "IPv6", + ID: "3c0e45ff-adaf-4124-b083-bf390e5482ff", + PortRangeMax: 0, + PortRangeMin: 0, + Protocol: "", + RemoteGroupID: "", + RemoteIPPrefix: "", + SecGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", + TenantID: "e4f50856753b4dc6afee5fa6b9b6c550", + }, + osRules.SecGroupRule{ + Direction: "egress", + EtherType: "IPv4", + ID: "93aa42e5-80db-4581-9391-3a608bd0e448", + PortRangeMax: 0, + PortRangeMin: 0, + Protocol: "", + RemoteGroupID: "", + RemoteIPPrefix: "", + SecGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", + TenantID: "e4f50856753b4dc6afee5fa6b9b6c550", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "security_group_rule": { + "direction": "ingress", + "port_range_min": 80, + "ethertype": "IPv4", + "port_range_max": 80, + "protocol": "tcp", + "remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "security_group_id": "a7734e61-b545-452d-a3cd-0189cbd9747a" + } + } + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` + { + "security_group_rule": { + "direction": "ingress", + "ethertype": "IPv4", + "id": "2bc0accf-312e-429a-956e-e4407625eb62", + 
"port_range_max": 80, + "port_range_min": 80, + "protocol": "tcp", + "remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "remote_ip_prefix": null, + "security_group_id": "a7734e61-b545-452d-a3cd-0189cbd9747a", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + } + `) + }) + + opts := osRules.CreateOpts{ + Direction: "ingress", + PortRangeMin: 80, + EtherType: "IPv4", + PortRangeMax: 80, + Protocol: "tcp", + RemoteGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", + SecGroupID: "a7734e61-b545-452d-a3cd-0189cbd9747a", + } + _, err := Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := Create(fake.ServiceClient(), osRules.CreateOpts{Direction: "something"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), osRules.CreateOpts{Direction: osRules.DirIngress, EtherType: "something"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), osRules.CreateOpts{Direction: osRules.DirIngress, EtherType: osRules.Ether4}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = Create(fake.ServiceClient(), osRules.CreateOpts{Direction: osRules.DirIngress, EtherType: osRules.Ether4, SecGroupID: "something", Protocol: "foo"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules/3c0e45ff-adaf-4124-b083-bf390e5482ff", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "security_group_rule": { + "direction": "egress", + "ethertype": "IPv6", + "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + 
"remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + } + `) + }) + + sr, err := Get(fake.ServiceClient(), "3c0e45ff-adaf-4124-b083-bf390e5482ff").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "egress", sr.Direction) + th.AssertEquals(t, "IPv6", sr.EtherType) + th.AssertEquals(t, "3c0e45ff-adaf-4124-b083-bf390e5482ff", sr.ID) + th.AssertEquals(t, 0, sr.PortRangeMax) + th.AssertEquals(t, 0, sr.PortRangeMin) + th.AssertEquals(t, "", sr.Protocol) + th.AssertEquals(t, "", sr.RemoteGroupID) + th.AssertEquals(t, "", sr.RemoteIPPrefix) + th.AssertEquals(t, "85cc3048-abc3-43cc-89b3-377341426ac5", sr.SecGroupID) + th.AssertEquals(t, "e4f50856753b4dc6afee5fa6b9b6c550", sr.TenantID) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests.go new file mode 100644 index 00000000000..58843030aed --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests.go @@ -0,0 +1,24 @@ +package cloudnetworks + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns all cloud networks that are associated with RackConnect. The ID +// returned for each network is the same as the ID returned by the networks package. 
+func List(c *gophercloud.ServiceClient) pagination.Pager { + url := listURL(c) + createPage := func(r pagination.PageResult) pagination.Page { + return CloudNetworkPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// Get retrieves a specific cloud network (that is associated with RackConnect) +// based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) + return res +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests_test.go new file mode 100644 index 00000000000..10d15dd11f7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/requests_test.go @@ -0,0 +1,87 @@ +package cloudnetworks + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListCloudNetworks(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/cloud_networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, `[{ + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "updated": "2014-05-25T02:28:44Z" + }]`) + }) + + expected := []CloudNetwork{ + CloudNetwork{ + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + Name: "RC-CLOUD", + UpdatedAt: time.Date(2014, 5, 
25, 2, 28, 44, 0, time.UTC), + }, + } + + count := 0 + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractCloudNetworks(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestGetCloudNetwork(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/cloud_networks/07426958-1ebf-4c38-b032-d456820ca21a", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "updated": "2014-05-25T02:28:44Z" + }`) + }) + + expected := &CloudNetwork{ + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + Name: "RC-CLOUD", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + } + + actual, err := Get(fake.ServiceClient(), "07426958-1ebf-4c38-b032-d456820ca21a").Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, expected, actual) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/results.go new file mode 100644 index 00000000000..f554a0d75bd --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/results.go @@ -0,0 +1,113 @@ +package cloudnetworks + +import ( + "fmt" + "reflect" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + 
"github.com/rackspace/gophercloud/pagination" +) + +// CloudNetwork represents a network associated with a RackConnect configuration. +type CloudNetwork struct { + // Specifies the ID of the newtork. + ID string `mapstructure:"id"` + // Specifies the user-provided name of the network. + Name string `mapstructure:"name"` + // Specifies the IP range for this network. + CIDR string `mapstructure:"cidr"` + // Specifies the time the network was created. + CreatedAt time.Time `mapstructure:"-"` + // Specifies the time the network was last updated. + UpdatedAt time.Time `mapstructure:"-"` +} + +// CloudNetworkPage is the page returned by a pager when traversing over a +// collection of CloudNetworks. +type CloudNetworkPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a CloudNetworkPage contains no CloudNetworks. +func (r CloudNetworkPage) IsEmpty() (bool, error) { + cns, err := ExtractCloudNetworks(r) + if err != nil { + return true, err + } + return len(cns) == 0, nil +} + +// ExtractCloudNetworks extracts and returns CloudNetworks. It is used while iterating over +// a cloudnetworks.List call. 
+func ExtractCloudNetworks(page pagination.Page) ([]CloudNetwork, error) { + var res []CloudNetwork + casted := page.(CloudNetworkPage).Body + err := mapstructure.Decode(casted, &res) + + var rawNets []interface{} + switch casted.(type) { + case interface{}: + rawNets = casted.([]interface{}) + default: + return res, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i := range rawNets { + thisNet := (rawNets[i]).(map[string]interface{}) + + if t, ok := thisNet["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CreatedAt = creationTime + } + + if t, ok := thisNet["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].UpdatedAt = updatedTime + } + } + + return res, err +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract is a function that extracts a CloudNetwork from a GetResult. 
+func (r GetResult) Extract() (*CloudNetwork, error) { + if r.Err != nil { + return nil, r.Err + } + var res CloudNetwork + + err := mapstructure.Decode(r.Body, &res) + + b := r.Body.(map[string]interface{}) + + if date, ok := b["created"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.CreatedAt = t + } + + if date, ok := b["updated"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.UpdatedAt = t + } + + return &res, err +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/urls.go new file mode 100644 index 00000000000..bd6b098dadc --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/cloudnetworks/urls.go @@ -0,0 +1,11 @@ +package cloudnetworks + +import "github.com/rackspace/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("cloud_networks") +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("cloud_networks", id) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/doc.go new file mode 100644 index 00000000000..3a8279e1091 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/doc.go @@ -0,0 +1,4 @@ +// Package rackconnect allows Rackspace cloud accounts to leverage version 3 of +// RackConnect, Rackspace's hybrid connectivity solution connecting dedicated +// and cloud servers. 
+package rackconnect diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/doc.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/doc.go new file mode 100644 index 00000000000..f4319b8ff37 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/doc.go @@ -0,0 +1,14 @@ +// Package lbpools provides access to load balancer pools associated with a +// RackConnect configuration. Load Balancer Pools must be configured in advance +// by your Network Security team to be eligible for use with RackConnect. +// If you do not see a pool that you expect to see, contact your Support team +// for further assistance. The Load Balancer Pool id returned by these calls is +// automatically generated by the RackConnect automation and will remain constant +// unless the Load Balancer Pool is renamed on your hardware load balancer. +// All Load Balancer Pools will currently return a status of ACTIVE. Future +// features may introduce additional statuses. +// Node status values are ADDING, ACTIVE, REMOVING, ADD_FAILED, and REMOVE_FAILED. +// The cloud_servers node count will only include Cloud Servers from the specified +// cloud account. Any dedicated servers or cloud servers from another cloud account +// on the same RackConnect Configuration will be counted as external nodes. 
+package lbpools diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests.go new file mode 100644 index 00000000000..c300c56c1e9 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests.go @@ -0,0 +1,146 @@ +package lbpools + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns all load balancer pools that are associated with RackConnect. +func List(c *gophercloud.ServiceClient) pagination.Pager { + url := listURL(c) + createPage := func(r pagination.PageResult) pagination.Page { + return PoolPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// Get retrieves a specific load balancer pool (that is associated with RackConnect) +// based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) + return res +} + +// ListNodes returns all load balancer pool nodes that are associated with RackConnect +// for the given LB pool ID. +func ListNodes(c *gophercloud.ServiceClient, id string) pagination.Pager { + url := listNodesURL(c, id) + createPage := func(r pagination.PageResult) pagination.Page { + return NodePage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// CreateNode adds the cloud server with the given serverID to the load balancer +// pool with the given poolID. 
+func CreateNode(c *gophercloud.ServiceClient, poolID, serverID string) CreateNodeResult { + var res CreateNodeResult + reqBody := map[string]interface{}{ + "cloud_server": map[string]string{ + "id": serverID, + }, + } + _, res.Err = c.Post(createNodeURL(c, poolID), reqBody, &res.Body, nil) + return res +} + +// ListNodesDetails returns all load balancer pool nodes that are associated with RackConnect +// for the given LB pool ID with all their details. +func ListNodesDetails(c *gophercloud.ServiceClient, id string) pagination.Pager { + url := listNodesDetailsURL(c, id) + createPage := func(r pagination.PageResult) pagination.Page { + return NodeDetailsPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// GetNode retrieves a specific LB pool node (that is associated with RackConnect) +// based on its unique ID and the LB pool's unique ID. +func GetNode(c *gophercloud.ServiceClient, poolID, nodeID string) GetNodeResult { + var res GetNodeResult + _, res.Err = c.Get(nodeURL(c, poolID, nodeID), &res.Body, nil) + return res +} + +// DeleteNode removes the node with the given nodeID from the LB pool with the +// given poolID. +func DeleteNode(c *gophercloud.ServiceClient, poolID, nodeID string) DeleteNodeResult { + var res DeleteNodeResult + _, res.Err = c.Delete(deleteNodeURL(c, poolID, nodeID), nil) + return res +} + +// GetNodeDetails retrieves a specific LB pool node's details based on its unique +// ID and the LB pool's unique ID. +func GetNodeDetails(c *gophercloud.ServiceClient, poolID, nodeID string) GetNodeDetailsResult { + var res GetNodeDetailsResult + _, res.Err = c.Get(nodeDetailsURL(c, poolID, nodeID), &res.Body, nil) + return res +} + +// NodeOpts are options for bulk adding/deleting nodes to LB pools. +type NodeOpts struct { + ServerID string + PoolID string +} + +// NodesOpts are a slice of NodeOpts, passed as options for bulk operations. 
+type NodesOpts []NodeOpts + +// ToLBPoolCreateNodesMap serializes a NodesOpts into a map to send in the request. +func (o NodesOpts) ToLBPoolCreateNodesMap() ([]map[string]interface{}, error) { + m := make([]map[string]interface{}, len(o)) + for i := range o { + m[i] = map[string]interface{}{ + "cloud_server": map[string]string{ + "id": o[i].ServerID, + }, + "load_balancer_pool": map[string]string{ + "id": o[i].PoolID, + }, + } + } + return m, nil +} + +// CreateNodes adds the cloud servers with the given serverIDs to the corresponding +// load balancer pools with the given poolIDs. +func CreateNodes(c *gophercloud.ServiceClient, opts NodesOpts) CreateNodesResult { + var res CreateNodesResult + reqBody, err := opts.ToLBPoolCreateNodesMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Post(createNodesURL(c), reqBody, &res.Body, nil) + return res +} + +// DeleteNodes removes the cloud servers with the given serverIDs from the corresponding +// load balancer pools with the given poolIDs. +func DeleteNodes(c *gophercloud.ServiceClient, opts NodesOpts) DeleteNodesResult { + var res DeleteNodesResult + reqBody, err := opts.ToLBPoolCreateNodesMap() + if err != nil { + res.Err = err + return res + } + + _, res.Err = c.Request("DELETE", createNodesURL(c), gophercloud.RequestOpts{ + JSONBody: &reqBody, + OkCodes: []int{204}, + }) + return res +} + +// ListNodesDetailsForServer is similar to ListNodesDetails but only returns nodes +// for the given serverID. 
+func ListNodesDetailsForServer(c *gophercloud.ServiceClient, serverID string) pagination.Pager { + url := listNodesForServerURL(c, serverID) + createPage := func(r pagination.PageResult) pagination.Page { + return NodeDetailsForServerPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests_test.go new file mode 100644 index 00000000000..48ebcece131 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/requests_test.go @@ -0,0 +1,876 @@ +package lbpools + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListPools(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, `[ + { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + "name": "RCv3Test", + "node_counts": { + "cloud_servers": 3, + "external": 4, + "total": 7 + }, + "port": 80, + "status": "ACTIVE", + "status_detail": null, + "virtual_ip": "203.0.113.5" + }, + { + "id": "33021100-4abf-4836-9080-465a6d87ab68", + "name": "RCv3Test2", + "node_counts": { + "cloud_servers": 1, + "external": 0, + "total": 1 + }, + "port": 80, + "status": "ACTIVE", + "status_detail": null, + "virtual_ip": "203.0.113.7" + }, + { + "id": "b644350a-301b-47b5-a411-c6e0f933c347", + "name": "RCv3Test3", + 
"node_counts": { + "cloud_servers": 2, + "external": 3, + "total": 5 + }, + "port": 443, + "status": "ACTIVE", + "status_detail": null, + "virtual_ip": "203.0.113.15" + } + ]`) + }) + + expected := []Pool{ + Pool{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + Name: "RCv3Test", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total int `mapstructure:"total"` + }{ + CloudServers: 3, + External: 4, + Total: 7, + }, + Port: 80, + Status: "ACTIVE", + VirtualIP: "203.0.113.5", + }, + Pool{ + ID: "33021100-4abf-4836-9080-465a6d87ab68", + Name: "RCv3Test2", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total int `mapstructure:"total"` + }{ + CloudServers: 1, + External: 0, + Total: 1, + }, + Port: 80, + Status: "ACTIVE", + VirtualIP: "203.0.113.7", + }, + Pool{ + ID: "b644350a-301b-47b5-a411-c6e0f933c347", + Name: "RCv3Test3", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total int `mapstructure:"total"` + }{ + CloudServers: 2, + External: 3, + Total: 5, + }, + Port: 443, + Status: "ACTIVE", + VirtualIP: "203.0.113.15", + }, + } + + count := 0 + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractPools(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestGetLBPool(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + 
"id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + "name": "RCv3Test", + "node_counts": { + "cloud_servers": 3, + "external": 4, + "total": 7 + }, + "port": 80, + "status": "ACTIVE", + "status_detail": null, + "virtual_ip": "203.0.113.5" + }`) + }) + + expected := &Pool{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + Name: "RCv3Test", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total int `mapstructure:"total"` + }{ + CloudServers: 3, + External: 4, + Total: 7, + }, + Port: 80, + Status: "ACTIVE", + VirtualIP: "203.0.113.5", + } + + actual, err := Get(fake.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2").Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, expected, actual) +} + +func TestListNodes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2/nodes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, `[ + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + }, + "status": "ACTIVE", + "updated": "2014-05-30T03:24:18Z" + }, + { + "created": "2014-05-31T08:23:12Z", + "cloud_server": { + "id": "f28b870f-a063-498a-8b12-7025e5b1caa6" + }, + "id": "b70481dd-7edf-4dbb-a44b-41cc7679d4fb", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + }, + "status": "ADDING", + "updated": "2014-05-31T08:23:26Z" + }, + { + "created": "2014-05-31T08:23:18Z", + "cloud_server": { + "id": "a3d3a6b3-e4e4-496f-9a3d-5c987163e458" + }, + "id": "ced9ddc8-6fae-4e72-9457-16ead52b5515", + "load_balancer_pool": { + "id": 
"d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + }, + "status": "ADD_FAILED", + "status_detail": "Unable to communicate with network device", + "updated": "2014-05-31T08:24:36Z" + } + ]`) + }) + + expected := []Node{ + Node{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + }, + ID: "1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + Status: "ACTIVE", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + }, + Node{ + CreatedAt: time.Date(2014, 5, 31, 8, 23, 12, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + }{ + ID: "f28b870f-a063-498a-8b12-7025e5b1caa6", + }, + ID: "b70481dd-7edf-4dbb-a44b-41cc7679d4fb", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + Status: "ADDING", + UpdatedAt: time.Date(2014, 5, 31, 8, 23, 26, 0, time.UTC), + }, + Node{ + CreatedAt: time.Date(2014, 5, 31, 8, 23, 18, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + }{ + ID: "a3d3a6b3-e4e4-496f-9a3d-5c987163e458", + }, + ID: "ced9ddc8-6fae-4e72-9457-16ead52b5515", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + Status: "ADD_FAILED", + StatusDetail: "Unable to communicate with network device", + UpdatedAt: time.Date(2014, 5, 31, 8, 24, 36, 0, time.UTC), + }, + } + + count := 0 + err := ListNodes(fake.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2").EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractNodes(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestCreateNode(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + 
th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2/nodes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + } + } + `) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, ` + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + }, + "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + } + `) + }) + + expected := &Node{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + }, + ID: "1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + Status: "ACTIVE", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + } + + actual, err := CreateNode(fake.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", "d95ae0c4-6ab8-4873-b82f-f8433840cff2").Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, expected, actual) +} + +func TestListNodesDetails(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2/nodes/details", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, ` + [ + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + 
"cloud_network": { + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "private_ip_v4": "192.168.100.5", + "updated": "2014-05-25T02:28:44Z" + }, + "created": "2014-05-30T02:18:42Z", + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + "name": "RCv3TestServer1", + "updated": "2014-05-30T02:19:18Z" + }, + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + "name": "RCv3Test", + "node_counts": { + "cloud_servers": 3, + "external": 4, + "total": 7 + }, + "port": 80, + "status": "ACTIVE", + "status_detail": null, + "virtual_ip": "203.0.113.5" + }, + "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + } + ] + `) + }) + + expected := []NodeDetails{ + NodeDetails{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + CloudNetwork struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + CloudNetwork: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + Name: "RC-CLOUD", + PrivateIPv4: "192.168.100.5", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + }, + CreatedAt: time.Date(2014, 5, 30, 2, 18, 
42, 0, time.UTC), + Name: "RCv3TestServer1", + UpdatedAt: time.Date(2014, 5, 30, 2, 19, 18, 0, time.UTC), + }, + ID: "1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: Pool{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + Name: "RCv3Test", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total int `mapstructure:"total"` + }{ + CloudServers: 3, + External: 4, + Total: 7, + }, + Port: 80, + Status: "ACTIVE", + VirtualIP: "203.0.113.5", + }, + Status: "ACTIVE", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + }, + } + count := 0 + err := ListNodesDetails(fake.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2").EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractNodesDetails(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestGetNode(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2/nodes/1860451d-fb89-45b8-b54e-151afceb50e5", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + }, + "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + } + `) + }) + + expected := &Node{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + }, + ID: 
"1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + Status: "ACTIVE", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + } + + actual, err := GetNode(fake.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", "1860451d-fb89-45b8-b54e-151afceb50e5").Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, expected, actual) +} + +func TestDeleteNode(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2/nodes/1860451d-fb89-45b8-b54e-151afceb50e5", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + }) + + err := DeleteNode(client.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", "1860451d-fb89-45b8-b54e-151afceb50e5").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGetNodeDetails(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2/nodes/d95ae0c4-6ab8-4873-b82f-f8433840cff2/details", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, ` + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "cloud_network": { + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "private_ip_v4": "192.168.100.5", + "updated": "2014-05-25T02:28:44Z" + }, + "created": "2014-05-30T02:18:42Z", + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + "name": "RCv3TestServer1", + 
"updated": "2014-05-30T02:19:18Z" + }, + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + "name": "RCv3Test", + "node_counts": { + "cloud_servers": 3, + "external": 4, + "total": 7 + }, + "port": 80, + "status": "ACTIVE", + "status_detail": null, + "virtual_ip": "203.0.113.5" + }, + "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + } + `) + }) + + expected := &NodeDetails{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + CloudNetwork struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + CloudNetwork: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + Name: "RC-CLOUD", + PrivateIPv4: "192.168.100.5", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + }, + CreatedAt: time.Date(2014, 5, 30, 2, 18, 42, 0, time.UTC), + Name: "RCv3TestServer1", + UpdatedAt: time.Date(2014, 5, 30, 2, 19, 18, 0, time.UTC), + }, + ID: "1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: Pool{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + Name: "RCv3Test", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total 
int `mapstructure:"total"` + }{ + CloudServers: 3, + External: 4, + Total: 7, + }, + Port: 80, + Status: "ACTIVE", + VirtualIP: "203.0.113.5", + }, + Status: "ACTIVE", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + } + + actual, err := GetNodeDetails(fake.ServiceClient(), "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", "d95ae0c4-6ab8-4873-b82f-f8433840cff2").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) +} + +func TestCreateNodes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/nodes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + [ + { + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + } + }, + { + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "load_balancer_pool": { + "id": "33021100-4abf-4836-9080-465a6d87ab68" + } + } + ] + `) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, ` + [ + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + }, + "status": "ADDING", + "status_detail": null, + "updated": null + }, + { + "created": "2014-05-31T08:23:12Z", + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "id": "b70481dd-7edf-4dbb-a44b-41cc7679d4fb", + "load_balancer_pool": { + "id": "33021100-4abf-4836-9080-465a6d87ab68" + }, + "status": "ADDING", + "status_detail": null, + "updated": null + } + ] + `) + }) + + expected := []Node{ + Node{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string 
`mapstructure:"id"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + }, + ID: "1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + Status: "ADDING", + }, + Node{ + CreatedAt: time.Date(2014, 5, 31, 8, 23, 12, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + }, + ID: "b70481dd-7edf-4dbb-a44b-41cc7679d4fb", + LoadBalancerPool: struct { + ID string `mapstructure:"id"` + }{ + ID: "33021100-4abf-4836-9080-465a6d87ab68", + }, + Status: "ADDING", + }, + } + + opts := NodesOpts{ + NodeOpts{ + ServerID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + PoolID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + NodeOpts{ + ServerID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + PoolID: "33021100-4abf-4836-9080-465a6d87ab68", + }, + } + actual, err := CreateNodes(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestDeleteNodes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/nodes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + [ + { + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2" + } + }, + { + "cloud_server": { + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + }, + "load_balancer_pool": { + "id": "33021100-4abf-4836-9080-465a6d87ab68" + } + } + ] + `) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + }) + + opts := NodesOpts{ + NodeOpts{ + ServerID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + PoolID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + }, + NodeOpts{ + ServerID: 
"d95ae0c4-6ab8-4873-b82f-f8433840cff2", + PoolID: "33021100-4abf-4836-9080-465a6d87ab68", + }, + } + err := DeleteNodes(client.ServiceClient(), opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestListNodesForServerDetails(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/load_balancer_pools/nodes/details", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, ` + [ + { + "created": "2014-05-30T03:23:42Z", + "id": "1860451d-fb89-45b8-b54e-151afceb50e5", + "load_balancer_pool": { + "id": "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + "name": "RCv3Test", + "node_counts": { + "cloud_servers": 3, + "external": 4, + "total": 7 + }, + "port": 80, + "status": "ACTIVE", + "status_detail": null, + "virtual_ip": "203.0.113.5" + }, + "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + } + ] + `) + }) + + expected := []NodeDetailsForServer{ + NodeDetailsForServer{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + ID: "1860451d-fb89-45b8-b54e-151afceb50e5", + LoadBalancerPool: Pool{ + ID: "d6d3aa7c-dfa5-4e61-96ee-1d54ac1075d2", + Name: "RCv3Test", + NodeCounts: struct { + CloudServers int `mapstructure:"cloud_servers"` + External int `mapstructure:"external"` + Total int `mapstructure:"total"` + }{ + CloudServers: 3, + External: 4, + Total: 7, + }, + Port: 80, + Status: "ACTIVE", + VirtualIP: "203.0.113.5", + }, + Status: "ACTIVE", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + }, + } + count := 0 + err := ListNodesDetailsForServer(fake.ServiceClient(), "07426958-1ebf-4c38-b032-d456820ca21a").EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractNodesDetailsForServer(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, actual) + + return true, nil 
+ }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/results.go new file mode 100644 index 00000000000..e5e914b1e2b --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/results.go @@ -0,0 +1,505 @@ +package lbpools + +import ( + "fmt" + "reflect" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// Pool represents a load balancer pool associated with a RackConnect configuration. +type Pool struct { + // The unique ID of the load balancer pool. + ID string `mapstructure:"id"` + // The name of the load balancer pool. + Name string `mapstructure:"name"` + // The node counts associated witht the load balancer pool. + NodeCounts struct { + // The number of nodes associated with this LB pool for this account. + CloudServers int `mapstructure:"cloud_servers"` + // The number of nodes associated with this LB pool from other accounts. + External int `mapstructure:"external"` + // The total number of nodes associated with this LB pool. + Total int `mapstructure:"total"` + } `mapstructure:"node_counts"` + // The port of the LB pool + Port int `mapstructure:"port"` + // The status of the LB pool + Status string `mapstructure:"status"` + // The details of the status of the LB pool + StatusDetail string `mapstructure:"status_detail"` + // The virtual IP of the LB pool + VirtualIP string `mapstructure:"virtual_ip"` +} + +// PoolPage is the page returned by a pager when traversing over a +// collection of Pools. +type PoolPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a PoolPage contains no Pools. 
+func (r PoolPage) IsEmpty() (bool, error) { + cns, err := ExtractPools(r) + if err != nil { + return true, err + } + return len(cns) == 0, nil +} + +// ExtractPools extracts and returns Pools. It is used while iterating over +// an lbpools.List call. +func ExtractPools(page pagination.Page) ([]Pool, error) { + var res []Pool + err := mapstructure.Decode(page.(PoolPage).Body, &res) + return res, err +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract is a function that extracts an LBPool from a GetResult. +func (r GetResult) Extract() (*Pool, error) { + if r.Err != nil { + return nil, r.Err + } + var res Pool + err := mapstructure.Decode(r.Body, &res) + return &res, err +} + +// Node represents a load balancer pool node associated with a RackConnect configuration. +type Node struct { + // The unique ID of the LB node. + ID string `mapstructure:"id"` + // The cloud server (node) of the load balancer pool. + CloudServer struct { + // The cloud server ID. + ID string `mapstructure:"id"` + } `mapstructure:"cloud_server"` + // The load balancer pool. + LoadBalancerPool struct { + // The LB pool ID. + ID string `mapstructure:"id"` + } `mapstructure:"load_balancer_pool"` + // The status of the LB pool. + Status string `mapstructure:"status"` + // The details of the status of the LB pool. + StatusDetail string `mapstructure:"status_detail"` + // The time the LB node was created. + CreatedAt time.Time `mapstructure:"-"` + // The time the LB node was last updated. + UpdatedAt time.Time `mapstructure:"-"` +} + +// NodePage is the page returned by a pager when traversing over a +// collection of Nodes. +type NodePage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a NodePage contains no Nodes. 
+func (r NodePage) IsEmpty() (bool, error) { + n, err := ExtractNodes(r) + if err != nil { + return true, err + } + return len(n) == 0, nil +} + +// ExtractNodes extracts and returns a slice of Nodes. It is used while iterating over +// an lbpools.ListNodes call. +func ExtractNodes(page pagination.Page) ([]Node, error) { + var res []Node + casted := page.(NodePage).Body + err := mapstructure.Decode(casted, &res) + + var rawNodes []interface{} + switch casted.(type) { + case interface{}: + rawNodes = casted.([]interface{}) + default: + return res, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i := range rawNodes { + thisNode := (rawNodes[i]).(map[string]interface{}) + + if t, ok := thisNode["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CreatedAt = creationTime + } + + if t, ok := thisNode["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].UpdatedAt = updatedTime + } + } + + return res, err +} + +// NodeResult represents a result that can be extracted as a Node. +type NodeResult struct { + gophercloud.Result +} + +// CreateNodeResult represents the result of an CreateNode operation. +type CreateNodeResult struct { + NodeResult +} + +// GetNodeResult represents the result of an GetNode operation. +type GetNodeResult struct { + NodeResult +} + +// Extract is a function that extracts a Node from a NodeResult. 
+func (r NodeResult) Extract() (*Node, error) { + if r.Err != nil { + return nil, r.Err + } + var res Node + err := mapstructure.Decode(r.Body, &res) + + b := r.Body.(map[string]interface{}) + + if date, ok := b["created"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.CreatedAt = t + } + + if date, ok := b["updated"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.UpdatedAt = t + } + + return &res, err +} + +// NodeDetails represents a load balancer pool node associated with a RackConnect configuration +// with all its details. +type NodeDetails struct { + // The unique ID of the LB node. + ID string `mapstructure:"id"` + // The cloud server (node) of the load balancer pool. + CloudServer struct { + // The cloud server ID. + ID string `mapstructure:"id"` + // The name of the server. + Name string `mapstructure:"name"` + // The cloud network for the cloud server. + CloudNetwork struct { + // The network ID. + ID string `mapstructure:"id"` + // The network name. + Name string `mapstructure:"name"` + // The network's private IPv4 address. + PrivateIPv4 string `mapstructure:"private_ip_v4"` + // The IP range for the network. + CIDR string `mapstructure:"cidr"` + // The datetime the network was created. + CreatedAt time.Time `mapstructure:"-"` + // The last datetime the network was updated. + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + // The datetime the server was created. + CreatedAt time.Time `mapstructure:"-"` + // The datetime the server was last updated. + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_server"` + // The load balancer pool. + LoadBalancerPool Pool `mapstructure:"load_balancer_pool"` + // The status of the LB pool. + Status string `mapstructure:"status"` + // The details of the status of the LB pool. 
+ StatusDetail string `mapstructure:"status_detail"` + // The time the LB node was created. + CreatedAt time.Time `mapstructure:"-"` + // The time the LB node was last updated. + UpdatedAt time.Time `mapstructure:"-"` +} + +// NodeDetailsPage is the page returned by a pager when traversing over a +// collection of NodeDetails. +type NodeDetailsPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a NodeDetailsPage contains no NodeDetails. +func (r NodeDetailsPage) IsEmpty() (bool, error) { + n, err := ExtractNodesDetails(r) + if err != nil { + return true, err + } + return len(n) == 0, nil +} + +// ExtractNodesDetails extracts and returns a slice of NodeDetails. It is used while iterating over +// an lbpools.ListNodesDetails call. +func ExtractNodesDetails(page pagination.Page) ([]NodeDetails, error) { + var res []NodeDetails + casted := page.(NodeDetailsPage).Body + err := mapstructure.Decode(casted, &res) + + var rawNodesDetails []interface{} + switch casted.(type) { + case interface{}: + rawNodesDetails = casted.([]interface{}) + default: + return res, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i := range rawNodesDetails { + thisNodeDetails := (rawNodesDetails[i]).(map[string]interface{}) + + if t, ok := thisNodeDetails["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CreatedAt = creationTime + } + + if t, ok := thisNodeDetails["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].UpdatedAt = updatedTime + } + + if cs, ok := thisNodeDetails["cloud_server"].(map[string]interface{}); ok { + if t, ok := cs["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CloudServer.CreatedAt = creationTime + } + if t, ok := cs["updated"].(string); ok && t != "" { + updatedTime, err 
:= time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CloudServer.UpdatedAt = updatedTime + } + if cn, ok := cs["cloud_network"].(map[string]interface{}); ok { + if t, ok := cn["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CloudServer.CloudNetwork.CreatedAt = creationTime + } + if t, ok := cn["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CloudServer.CloudNetwork.UpdatedAt = updatedTime + } + } + } + } + + return res, err +} + +// GetNodeDetailsResult represents the result of an NodeDetails operation. +type GetNodeDetailsResult struct { + gophercloud.Result +} + +// Extract is a function that extracts a NodeDetails from a NodeDetailsResult. +func (r GetNodeDetailsResult) Extract() (*NodeDetails, error) { + if r.Err != nil { + return nil, r.Err + } + var res NodeDetails + err := mapstructure.Decode(r.Body, &res) + + b := r.Body.(map[string]interface{}) + + if date, ok := b["created"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.CreatedAt = t + } + + if date, ok := b["updated"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.UpdatedAt = t + } + + if cs, ok := b["cloud_server"].(map[string]interface{}); ok { + if t, ok := cs["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.CreatedAt = creationTime + } + if t, ok := cs["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.UpdatedAt = updatedTime + } + if cn, ok := cs["cloud_network"].(map[string]interface{}); ok { + if t, ok := cn["created"].(string); ok && t != "" { + creationTime, err 
:= time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.CloudNetwork.CreatedAt = creationTime + } + if t, ok := cn["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.CloudNetwork.UpdatedAt = updatedTime + } + } + } + + return &res, err +} + +// DeleteNodeResult represents the result of a DeleteNode operation. +type DeleteNodeResult struct { + gophercloud.ErrResult +} + +// CreateNodesResult represents the result of a CreateNodes operation. +type CreateNodesResult struct { + gophercloud.Result +} + +// Extract is a function that extracts a slice of Nodes from a CreateNodesResult. +func (r CreateNodesResult) Extract() ([]Node, error) { + if r.Err != nil { + return nil, r.Err + } + var res []Node + err := mapstructure.Decode(r.Body, &res) + + b := r.Body.([]interface{}) + for i := range b { + if date, ok := b[i].(map[string]interface{})["created"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res[i].CreatedAt = t + } + if date, ok := b[i].(map[string]interface{})["updated"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res[i].UpdatedAt = t + } + } + + return res, err +} + +// DeleteNodesResult represents the result of a DeleteNodes operation. +type DeleteNodesResult struct { + gophercloud.ErrResult +} + +// NodeDetailsForServer represents a load balancer pool node associated with a RackConnect configuration +// with all its details for a particular server. +type NodeDetailsForServer struct { + // The unique ID of the LB node. + ID string `mapstructure:"id"` + // The load balancer pool. + LoadBalancerPool Pool `mapstructure:"load_balancer_pool"` + // The status of the LB pool. + Status string `mapstructure:"status"` + // The details of the status of the LB pool. 
+ StatusDetail string `mapstructure:"status_detail"` + // The time the LB node was created. + CreatedAt time.Time `mapstructure:"-"` + // The time the LB node was last updated. + UpdatedAt time.Time `mapstructure:"-"` +} + +// NodeDetailsForServerPage is the page returned by a pager when traversing over a +// collection of NodeDetailsForServer. +type NodeDetailsForServerPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a NodeDetailsForServerPage contains no NodeDetailsForServer. +func (r NodeDetailsForServerPage) IsEmpty() (bool, error) { + n, err := ExtractNodesDetailsForServer(r) + if err != nil { + return true, err + } + return len(n) == 0, nil +} + +// ExtractNodesDetailsForServer extracts and returns a slice of NodeDetailsForServer. It is used while iterating over +// an lbpools.ListNodesDetailsForServer call. +func ExtractNodesDetailsForServer(page pagination.Page) ([]NodeDetailsForServer, error) { + var res []NodeDetailsForServer + casted := page.(NodeDetailsForServerPage).Body + err := mapstructure.Decode(casted, &res) + + var rawNodesDetails []interface{} + switch casted.(type) { + case interface{}: + rawNodesDetails = casted.([]interface{}) + default: + return res, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i := range rawNodesDetails { + thisNodeDetails := (rawNodesDetails[i]).(map[string]interface{}) + + if t, ok := thisNodeDetails["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CreatedAt = creationTime + } + + if t, ok := thisNodeDetails["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].UpdatedAt = updatedTime + } + } + + return res, err +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/urls.go 
b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/urls.go new file mode 100644 index 00000000000..c238239f61e --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/lbpools/urls.go @@ -0,0 +1,49 @@ +package lbpools + +import "github.com/rackspace/gophercloud" + +var root = "load_balancer_pools" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(root) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(root, id) +} + +func listNodesURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(root, id, "nodes") +} + +func createNodeURL(c *gophercloud.ServiceClient, id string) string { + return listNodesURL(c, id) +} + +func listNodesDetailsURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(root, id, "nodes", "details") +} + +func nodeURL(c *gophercloud.ServiceClient, poolID, nodeID string) string { + return c.ServiceURL(root, poolID, "nodes", nodeID) +} + +func deleteNodeURL(c *gophercloud.ServiceClient, poolID, nodeID string) string { + return nodeURL(c, poolID, nodeID) +} + +func nodeDetailsURL(c *gophercloud.ServiceClient, poolID, nodeID string) string { + return c.ServiceURL(root, poolID, "nodes", nodeID, "details") +} + +func createNodesURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(root, "nodes") +} + +func deleteNodesURL(c *gophercloud.ServiceClient) string { + return createNodesURL(c) +} + +func listNodesForServerURL(c *gophercloud.ServiceClient, serverID string) string { + return c.ServiceURL(root, "nodes", "details?cloud_server_id="+serverID) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests.go new file mode 100644 index 00000000000..1164260109e --- /dev/null +++ 
b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests.go @@ -0,0 +1,50 @@ +package publicips + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// List returns all public IPs. +func List(c *gophercloud.ServiceClient) pagination.Pager { + url := listURL(c) + createPage := func(r pagination.PageResult) pagination.Page { + return PublicIPPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// Create adds a public IP to the server with the given serverID. +func Create(c *gophercloud.ServiceClient, serverID string) CreateResult { + var res CreateResult + reqBody := map[string]interface{}{ + "cloud_server": map[string]string{ + "id": serverID, + }, + } + _, res.Err = c.Post(createURL(c), reqBody, &res.Body, nil) + return res +} + +// ListForServer returns all public IPs for the server with the given serverID. +func ListForServer(c *gophercloud.ServiceClient, serverID string) pagination.Pager { + url := listForServerURL(c, serverID) + createPage := func(r pagination.PageResult) pagination.Page { + return PublicIPPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// Get retrieves the public IP with the given id. +func Get(c *gophercloud.ServiceClient, id string) GetResult { + var res GetResult + _, res.Err = c.Get(getURL(c, id), &res.Body, nil) + return res +} + +// Delete removes the public IP with the given id. 
+func Delete(c *gophercloud.ServiceClient, id string) DeleteResult { + var res DeleteResult + _, res.Err = c.Delete(deleteURL(c, id), nil) + return res +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests_test.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests_test.go new file mode 100644 index 00000000000..61da2b03d9b --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/requests_test.go @@ -0,0 +1,378 @@ +package publicips + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/rackspace/gophercloud/pagination" + th "github.com/rackspace/gophercloud/testhelper" + "github.com/rackspace/gophercloud/testhelper/client" + fake "github.com/rackspace/gophercloud/testhelper/client" +) + +func TestListIPs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/public_ips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, `[ + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "cloud_network": { + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "private_ip_v4": "192.168.100.5", + "updated": "2014-05-25T02:28:44Z" + }, + "created": "2014-05-30T02:18:42Z", + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + "name": "RCv3TestServer1", + "updated": "2014-05-30T02:19:18Z" + }, + "id": "2d0f586b-37a7-4ae0-adac-2743d5feb450", + "public_ip_v4": "203.0.113.110", + "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + } + ]`) + }) + + expected := []PublicIP{ + PublicIP{ + ID: "2d0f586b-37a7-4ae0-adac-2743d5feb450", + PublicIPv4: "203.0.113.110", + 
CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + CloudNetwork struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + CloudNetwork: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + Name: "RC-CLOUD", + PrivateIPv4: "192.168.100.5", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + }, + CreatedAt: time.Date(2014, 5, 30, 2, 18, 42, 0, time.UTC), + Name: "RCv3TestServer1", + UpdatedAt: time.Date(2014, 5, 30, 2, 19, 18, 0, time.UTC), + }, + Status: "ACTIVE", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + }, + } + + count := 0 + err := List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractPublicIPs(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestCreateIP(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/public_ips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "cloud_server": 
{ + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2" + } + } + `) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, ` + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "cloud_network": { + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "private_ip_v4": "192.168.100.5", + "updated": "2014-05-25T02:28:44Z" + }, + "created": "2014-05-30T02:18:42Z", + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + "name": "RCv3TestServer1", + "updated": "2014-05-30T02:19:18Z" + }, + "id": "2d0f586b-37a7-4ae0-adac-2743d5feb450", + "status": "ADDING" + }`) + }) + + expected := &PublicIP{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + CloudNetwork struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + CloudNetwork: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + Name: "RC-CLOUD", + PrivateIPv4: "192.168.100.5", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + }, + CreatedAt: time.Date(2014, 5, 30, 2, 18, 42, 0, time.UTC), + Name: "RCv3TestServer1", + UpdatedAt: time.Date(2014, 5, 30, 2, 19, 18, 0, time.UTC), + }, + ID: 
"2d0f586b-37a7-4ae0-adac-2743d5feb450", + Status: "ADDING", + } + + actual, err := Create(fake.ServiceClient(), "d95ae0c4-6ab8-4873-b82f-f8433840cff2").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestGetIP(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/public_ips/2d0f586b-37a7-4ae0-adac-2743d5feb450", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "cloud_network": { + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": "07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "private_ip_v4": "192.168.100.5", + "updated": "2014-05-25T02:28:44Z" + }, + "created": "2014-05-30T02:18:42Z", + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + "name": "RCv3TestServer1", + "updated": "2014-05-30T02:19:18Z" + }, + "id": "2d0f586b-37a7-4ae0-adac-2743d5feb450", + "public_ip_v4": "203.0.113.110", + "status": "ACTIVE", + "status_detail": null, + "updated": "2014-05-30T03:24:18Z" + }`) + }) + + expected := &PublicIP{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + CloudNetwork struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + CloudNetwork: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + 
PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + Name: "RC-CLOUD", + PrivateIPv4: "192.168.100.5", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + }, + CreatedAt: time.Date(2014, 5, 30, 2, 18, 42, 0, time.UTC), + Name: "RCv3TestServer1", + UpdatedAt: time.Date(2014, 5, 30, 2, 19, 18, 0, time.UTC), + }, + ID: "2d0f586b-37a7-4ae0-adac-2743d5feb450", + Status: "ACTIVE", + PublicIPv4: "203.0.113.110", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + } + + actual, err := Get(fake.ServiceClient(), "2d0f586b-37a7-4ae0-adac-2743d5feb450").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestDeleteIP(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/public_ips/2d0f586b-37a7-4ae0-adac-2743d5feb450", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + }) + + err := Delete(client.ServiceClient(), "2d0f586b-37a7-4ae0-adac-2743d5feb450").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestListForServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/public_ips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + fmt.Fprintf(w, ` + [ + { + "created": "2014-05-30T03:23:42Z", + "cloud_server": { + "cloud_network": { + "cidr": "192.168.100.0/24", + "created": "2014-05-25T01:23:42Z", + "id": 
"07426958-1ebf-4c38-b032-d456820ca21a", + "name": "RC-CLOUD", + "private_ip_v4": "192.168.100.5", + "updated": "2014-05-25T02:28:44Z" + }, + "created": "2014-05-30T02:18:42Z", + "id": "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + "name": "RCv3TestServer1", + "updated": "2014-05-30T02:19:18Z" + }, + "id": "2d0f586b-37a7-4ae0-adac-2743d5feb450", + "public_ip_v4": "203.0.113.110", + "status": "ACTIVE", + "updated": "2014-05-30T03:24:18Z" + } + ]`) + }) + + expected := []PublicIP{ + PublicIP{ + CreatedAt: time.Date(2014, 5, 30, 3, 23, 42, 0, time.UTC), + CloudServer: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + CloudNetwork struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "d95ae0c4-6ab8-4873-b82f-f8433840cff2", + CloudNetwork: struct { + ID string `mapstructure:"id"` + Name string `mapstructure:"name"` + PrivateIPv4 string `mapstructure:"private_ip_v4"` + CIDR string `mapstructure:"cidr"` + CreatedAt time.Time `mapstructure:"-"` + UpdatedAt time.Time `mapstructure:"-"` + }{ + ID: "07426958-1ebf-4c38-b032-d456820ca21a", + CIDR: "192.168.100.0/24", + CreatedAt: time.Date(2014, 5, 25, 1, 23, 42, 0, time.UTC), + Name: "RC-CLOUD", + PrivateIPv4: "192.168.100.5", + UpdatedAt: time.Date(2014, 5, 25, 2, 28, 44, 0, time.UTC), + }, + CreatedAt: time.Date(2014, 5, 30, 2, 18, 42, 0, time.UTC), + Name: "RCv3TestServer1", + UpdatedAt: time.Date(2014, 5, 30, 2, 19, 18, 0, time.UTC), + }, + ID: "2d0f586b-37a7-4ae0-adac-2743d5feb450", + Status: "ACTIVE", + PublicIPv4: "203.0.113.110", + UpdatedAt: time.Date(2014, 5, 30, 3, 24, 18, 0, time.UTC), + }, + } + count := 0 + err := ListForServer(fake.ServiceClient(), 
"d95ae0c4-6ab8-4873-b82f-f8433840cff2").EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ExtractPublicIPs(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/results.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/results.go new file mode 100644 index 00000000000..132cf770a0f --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/results.go @@ -0,0 +1,221 @@ +package publicips + +import ( + "fmt" + "reflect" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/pagination" +) + +// PublicIP represents a public IP address. +type PublicIP struct { + // The unique ID of the public IP. + ID string `mapstructure:"id"` + // The IPv4 address of the public IP. + PublicIPv4 string `mapstructure:"public_ip_v4"` + // The cloud server (node) of the public IP. + CloudServer struct { + // The cloud server ID. + ID string `mapstructure:"id"` + // The name of the server. + Name string `mapstructure:"name"` + // The cloud network for the cloud server. + CloudNetwork struct { + // The network ID. + ID string `mapstructure:"id"` + // The network name. + Name string `mapstructure:"name"` + // The network's private IPv4 address. + PrivateIPv4 string `mapstructure:"private_ip_v4"` + // The IP range for the network. + CIDR string `mapstructure:"cidr"` + // The datetime the network was created. + CreatedAt time.Time `mapstructure:"-"` + // The last datetime the network was updated. + UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_network"` + // The datetime the server was created. + CreatedAt time.Time `mapstructure:"-"` + // The datetime the server was last updated. 
+ UpdatedAt time.Time `mapstructure:"-"` + } `mapstructure:"cloud_server"` + // The status of the public IP. + Status string `mapstructure:"status"` + // The details of the status of the public IP. + StatusDetail string `mapstructure:"status_detail"` + // The time the public IP was created. + CreatedAt time.Time `mapstructure:"-"` + // The time the public IP was last updated. + UpdatedAt time.Time `mapstructure:"-"` +} + +// PublicIPPage is the page returned by a pager when traversing over a +// collection of PublicIPs. +type PublicIPPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a PublicIPPage contains no PublicIPs. +func (r PublicIPPage) IsEmpty() (bool, error) { + n, err := ExtractPublicIPs(r) + if err != nil { + return true, err + } + return len(n) == 0, nil +} + +// ExtractPublicIPs extracts and returns a slice of PublicIPs. It is used while iterating over +// a publicips.List call. +func ExtractPublicIPs(page pagination.Page) ([]PublicIP, error) { + var res []PublicIP + casted := page.(PublicIPPage).Body + err := mapstructure.Decode(casted, &res) + + var rawNodesDetails []interface{} + switch casted.(type) { + case interface{}: + rawNodesDetails = casted.([]interface{}) + default: + return res, fmt.Errorf("Unknown type: %v", reflect.TypeOf(casted)) + } + + for i := range rawNodesDetails { + thisNodeDetails := (rawNodesDetails[i]).(map[string]interface{}) + + if t, ok := thisNodeDetails["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CreatedAt = creationTime + } + + if t, ok := thisNodeDetails["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].UpdatedAt = updatedTime + } + + if cs, ok := thisNodeDetails["cloud_server"].(map[string]interface{}); ok { + if t, ok := cs["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err 
!= nil { + return res, err + } + res[i].CloudServer.CreatedAt = creationTime + } + if t, ok := cs["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CloudServer.UpdatedAt = updatedTime + } + if cn, ok := cs["cloud_network"].(map[string]interface{}); ok { + if t, ok := cn["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CloudServer.CloudNetwork.CreatedAt = creationTime + } + if t, ok := cn["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return res, err + } + res[i].CloudServer.CloudNetwork.UpdatedAt = updatedTime + } + } + } + } + + return res, err +} + +// PublicIPResult represents a result that can be extracted into a PublicIP. +type PublicIPResult struct { + gophercloud.Result +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + PublicIPResult +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + PublicIPResult +} + +// Extract is a function that extracts a PublicIP from a PublicIPResult. 
+func (r PublicIPResult) Extract() (*PublicIP, error) { + if r.Err != nil { + return nil, r.Err + } + var res PublicIP + err := mapstructure.Decode(r.Body, &res) + + b := r.Body.(map[string]interface{}) + + if date, ok := b["created"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.CreatedAt = t + } + + if date, ok := b["updated"]; ok && date != nil { + t, err := time.Parse(time.RFC3339, date.(string)) + if err != nil { + return nil, err + } + res.UpdatedAt = t + } + + if cs, ok := b["cloud_server"].(map[string]interface{}); ok { + if t, ok := cs["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.CreatedAt = creationTime + } + if t, ok := cs["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.UpdatedAt = updatedTime + } + if cn, ok := cs["cloud_network"].(map[string]interface{}); ok { + if t, ok := cn["created"].(string); ok && t != "" { + creationTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.CloudNetwork.CreatedAt = creationTime + } + if t, ok := cn["updated"].(string); ok && t != "" { + updatedTime, err := time.Parse(time.RFC3339, t) + if err != nil { + return &res, err + } + res.CloudServer.CloudNetwork.UpdatedAt = updatedTime + } + } + } + + return &res, err +} + +// DeleteResult represents the result of a Delete operation. 
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/urls.go b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/urls.go new file mode 100644 index 00000000000..6f310be4e81 --- /dev/null +++ b/Godeps/_workspace/src/github.com/rackspace/gophercloud/rackspace/rackconnect/v3/publicips/urls.go @@ -0,0 +1,25 @@ +package publicips + +import "github.com/rackspace/gophercloud" + +var root = "public_ips" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(root) +} + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(root) +} + +func listForServerURL(c *gophercloud.ServiceClient, serverID string) string { + return c.ServiceURL(root + "?cloud_server_id=" + serverID) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(root, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return getURL(c, id) +} diff --git a/Vagrantfile b/Vagrantfile index 86b78bd94fe..660a7bf8dc4 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -27,14 +27,46 @@ $minion_ips = $num_minion.times.collect { |n| $minion_ip_base + "#{n+3}" } # Determine the OS platform to use $kube_os = ENV['KUBERNETES_OS'] || "fedora" -# Check if we already have kube box -$kube_box_url = ENV['KUBERNETES_BOX_URL'] || "http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-20_chef-provisionerless.box" +# To override the vagrant provider, use (e.g.): +# KUBERNETES_PROVIDER=vagrant VAGRANT_DEFAULT_PROVIDER=... .../cluster/kube-up.sh +# To override the box, use (e.g.): +# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... .../cluster/kube-up.sh +# You can specify a box version: +# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... KUBERNETES_BOX_VERSION=... 
.../cluster/kube-up.sh +# You can specify a box location: +# KUBERNETES_PROVIDER=vagrant KUBERNETES_BOX_NAME=... KUBERNETES_BOX_URL=... .../cluster/kube-up.sh +# KUBERNETES_BOX_URL and KUBERNETES_BOX_VERSION will be ignored unless +# KUBERNETES_BOX_NAME is set -# OS platform to box information -$kube_box = { - "fedora" => { - "name" => "fedora20", - "box_url" => $kube_box_url +# Default OS platform to provider/box information +$kube_provider_boxes = { + :parallels => { + 'fedora' => { + # :box_url and :box_version are optional (and mutually exclusive); + # if :box_url is omitted the box will be retrieved by :box_name (and + # :box_version if provided) from + # http://atlas.hashicorp.com/boxes/search (formerly + # http://vagrantcloud.com/); this allows you override :box_name with + # your own value so long as you provide :box_url; for example, the + # "official" name of this box is "rickard-von-essen/ + # opscode_fedora-20", but by providing the URL and our own name, we + # make it appear as yet another provider under the "kube-fedora20" + # box + :box_name => 'kube-fedora20', + :box_url => 'https://atlas.hashicorp.com/rickard-von-essen/boxes/opscode_fedora-20/versions/0.4.0/providers/parallels.box' + } + }, + :virtualbox => { + 'fedora' => { + :box_name => 'kube-fedora20', + :box_url => 'http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-20_chef-provisionerless.box' + } + }, + :vmware_desktop => { + 'fedora' => { + :box_name => 'kube-fedora20', + :box_url => 'http://opscode-vm-bento.s3.amazonaws.com/vagrant/vmware/opscode_fedora-20-i386_chef-provisionerless.box' + } } } @@ -54,16 +86,88 @@ end # In Fedora VM, tmpfs device is mapped to /tmp. tmpfs is given 50% of RAM allocation. # When doing Salt provisioning, we copy approximately 200MB of content in /tmp before anything else happens. # This causes problems if anything else was in /tmp or the other directories that are bound to tmpfs device (i.e /run, etc.) 
-$vm_mem = (ENV['KUBERNETES_MEMORY'] || 1024).to_i +$vm_master_mem = (ENV['KUBERNETES_MASTER_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i +$vm_minion_mem = (ENV['KUBERNETES_MINION_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| - def customize_vm(config) - config.vm.box = $kube_box[$kube_os]["name"] - config.vm.box_url = $kube_box[$kube_os]["box_url"] + def setvmboxandurl(config, provider) + if ENV['KUBERNETES_BOX_NAME'] then + config.vm.box = ENV['KUBERNETES_BOX_NAME'] - config.vm.provider :virtualbox do |v| - v.customize ["modifyvm", :id, "--memory", $vm_mem] - v.customize ["modifyvm", :id, "--cpus", $vm_cpus] + if ENV['KUBERNETES_BOX_URL'] then + config.vm.box_url = ENV['KUBERNETES_BOX_URL'] + end + + if ENV['KUBERNETES_BOX_VERSION'] then + config.vm.box_version = ENV['KUBERNETES_BOX_VERSION'] + end + else + config.vm.box = $kube_provider_boxes[provider][$kube_os][:box_name] + + if $kube_provider_boxes[provider][$kube_os][:box_url] then + config.vm.box_url = $kube_provider_boxes[provider][$kube_os][:box_url] + end + + if $kube_provider_boxes[provider][$kube_os][:box_version] then + config.vm.box_version = $kube_provider_boxes[provider][$kube_os][:box_version] + end + end + end + + def customize_vm(config, vm_mem) + # Try VMWare Fusion first (see + # https://docs.vagrantup.com/v2/providers/basic_usage.html) + config.vm.provider :vmware_fusion do |v, override| + setvmboxandurl(override, :vmware_desktop) + v.vmx['memsize'] = vm_mem + v.vmx['numvcpus'] = $vm_cpus + end + + # Then try VMWare Workstation + config.vm.provider :vmware_workstation do |v, override| + setvmboxandurl(override, :vmware_desktop) + v.vmx['memsize'] = vm_mem + v.vmx['numvcpus'] = $vm_cpus + end + + # Then try Parallels + config.vm.provider :parallels do |v, override| + setvmboxandurl(override, :parallels) + v.memory = vm_mem # v.customize ['set', :id, '--memsize', vm_mem] + v.cpus = $vm_cpus # v.customize ['set', :id, '--cpus', 
$vm_cpus] + + # Don't attempt to update the Parallels tools on the image (this can + # be done manually if necessary) + v.update_guest_tools = false # v.customize ['set', :id, '--tools-autoupdate', 'off'] + + # Set up Parallels folder sharing to behave like VirtualBox (i.e., + # mount the current directory as /vagrant and that's it) + v.customize ['set', :id, '--shf-guest', 'off'] + v.customize ['set', :id, '--shf-guest-automount', 'off'] + v.customize ['set', :id, '--shf-host', 'on'] + + # Remove all auto-mounted "shared folders"; the result seems to + # persist between runs (i.e., vagrant halt && vagrant up) + override.vm.provision :shell, :inline => (%q{ + set -ex + if [ -d /media/psf ]; then + for i in /media/psf/*; do + if [ -d "${i}" ]; then + umount "${i}" || true + rmdir -v "${i}" + fi + done + rmdir -v /media/psf + fi + exit + }).strip + end + + # Finally, fall back to VirtualBox + config.vm.provider :virtualbox do |v, override| + setvmboxandurl(override, :virtualbox) + v.memory = vm_mem # v.customize ["modifyvm", :id, "--memory", vm_mem] + v.cpus = $vm_cpus # v.customize ["modifyvm", :id, "--cpus", $vm_cpus] # Use faster paravirtualized networking v.customize ["modifyvm", :id, "--nictype1", "virtio"] @@ -73,7 +177,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| # Kubernetes master config.vm.define "master" do |c| - customize_vm c + customize_vm c, $vm_master_mem if ENV['KUBE_TEMP'] then script = "#{ENV['KUBE_TEMP']}/master-start.sh" c.vm.provision "shell", run: "always", path: script @@ -84,17 +188,20 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| # Kubernetes minion $num_minion.times do |n| - config.vm.define "minion-#{n+1}" do |minion| - customize_vm minion + minion_vm_name = "minion-#{n+1}" + minion_prefix = ENV['INSTANCE_PREFIX'] || 'kubernetes' # must mirror default in cluster/vagrant/config-default.sh + minion_hostname = "#{minion_prefix}-#{minion_vm_name}" + + config.vm.define minion_vm_name do |minion| + customize_vm minion, 
$vm_minion_mem - minion_index = n+1 minion_ip = $minion_ips[n] if ENV['KUBE_TEMP'] then script = "#{ENV['KUBE_TEMP']}/minion-start-#{n}.sh" minion.vm.provision "shell", run: "always", path: script end minion.vm.network "private_network", ip: "#{minion_ip}" - minion.vm.hostname = "#{ENV['INSTANCE_PREFIX']}-minion-#{minion_index}" + minion.vm.hostname = minion_hostname end end end diff --git a/build/common.sh b/build/common.sh index 5fd8a712086..a0c549cb331 100644 --- a/build/common.sh +++ b/build/common.sh @@ -614,7 +614,7 @@ function kube::release::create_docker_images_for_server() { rm -rf ${docker_build_path} mkdir -p ${docker_build_path} ln $1/${binary_name} ${docker_build_path}/${binary_name} - printf " FROM scratch \n ADD ${binary_name} /${binary_name} \n ENTRYPOINT [ \"/${binary_name}\" ]\n" > ${docker_file_path} + printf " FROM busybox \n ADD ${binary_name} /${binary_name} \n ENTRYPOINT [ \"/${binary_name}\" ]\n" > ${docker_file_path} local docker_image_tag=gcr.io/google_containers/$binary_name:$md5_sum docker build -q -t "${docker_image_tag}" ${docker_build_path} >/dev/null diff --git a/contrib/logging/fluentd-gcp-image/Dockerfile b/cluster/addons/fluentd-gcp/fluentd-gcp-image/Dockerfile similarity index 100% rename from contrib/logging/fluentd-gcp-image/Dockerfile rename to cluster/addons/fluentd-gcp/fluentd-gcp-image/Dockerfile diff --git a/contrib/logging/fluentd-gcp-image/Makefile b/cluster/addons/fluentd-gcp/fluentd-gcp-image/Makefile similarity index 100% rename from contrib/logging/fluentd-gcp-image/Makefile rename to cluster/addons/fluentd-gcp/fluentd-gcp-image/Makefile diff --git a/contrib/logging/fluentd-gcp-image/README.md b/cluster/addons/fluentd-gcp/fluentd-gcp-image/README.md similarity index 100% rename from contrib/logging/fluentd-gcp-image/README.md rename to cluster/addons/fluentd-gcp/fluentd-gcp-image/README.md diff --git a/contrib/logging/fluentd-gcp-image/google-fluentd.conf 
b/cluster/addons/fluentd-gcp/fluentd-gcp-image/google-fluentd.conf similarity index 100% rename from contrib/logging/fluentd-gcp-image/google-fluentd.conf rename to cluster/addons/fluentd-gcp/fluentd-gcp-image/google-fluentd.conf diff --git a/cluster/aws/templates/format-disks.sh b/cluster/aws/templates/format-disks.sh index 75d60873cba..193adcfb732 100644 --- a/cluster/aws/templates/format-disks.sh +++ b/cluster/aws/templates/format-disks.sh @@ -67,5 +67,13 @@ else ln -s /mnt/docker /var/lib/docker DOCKER_ROOT="/mnt/docker" DOCKER_OPTS="${DOCKER_OPTS} -g /mnt/docker" + + # Move /var/lib/kubelet to /mnt if we have it + # (the backing for empty-dir volumes can use a lot of space!) + if [[ -d /var/lib/kubelet ]]; then + mv /var/lib/kubelet /mnt/ + fi + mkdir -p /mnt/kubelet + ln -s /mnt/kubelet /var/lib/kubelet fi diff --git a/cluster/aws/templates/salt-master.sh b/cluster/aws/templates/salt-master.sh index 5e70a1f70a3..e6a10f5aa3b 100755 --- a/cluster/aws/templates/salt-master.sh +++ b/cluster/aws/templates/salt-master.sh @@ -58,5 +58,5 @@ EOF # # -M installs the master set +x -curl -L --connect-timeout 20 --retry 6 --retry-delay 10 http://bootstrap.saltstack.com | sh -s -- -M -X +curl -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s -- -M -X set -x diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index 579be1474e4..eebd62cf667 100644 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -20,6 +20,7 @@ # config-default.sh. KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source "${KUBE_ROOT}/cluster/aws/${KUBE_CONFIG_FILE-"config-default.sh"}" +source "${KUBE_ROOT}/cluster/common.sh" # This removes the final character in bash (somehow) AWS_REGION=${ZONE%?} @@ -265,7 +266,7 @@ function upload-server-tars() { # Ensure that we have a password created for validating to the master. Will -# read from the kubernetes auth-file for the current context if available. +# read from kubeconfig for the current context if available. 
# # Assumed vars # KUBE_ROOT @@ -274,17 +275,11 @@ function upload-server-tars() { # KUBE_USER # KUBE_PASSWORD function get-password { - # go template to extract the auth-path of the current-context user - # Note: we save dot ('.') to $dot because the 'with' action overrides dot - local template='{{$dot := .}}{{with $ctx := index $dot "current-context"}}{{range $element := (index $dot "contexts")}}{{ if eq .name $ctx }}{{ with $user := .context.user }}{{range $element := (index $dot "users")}}{{ if eq .name $user }}{{ index . "user" "auth-path" }}{{end}}{{end}}{{end}}{{end}}{{end}}{{end}}' - local file=$("${KUBE_ROOT}/cluster/kubectl.sh" config view -o template --template="${template}") - if [[ ! -z "$file" && -r "$file" ]]; then - KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]') - KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]') - return + get-kubeconfig-basicauth + if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then + KUBE_USER=admin + KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') fi - KUBE_USER=admin - KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') } # Adds a tag to an AWS resource @@ -609,44 +604,23 @@ function kube-up { echo "Kubernetes cluster created." 
- local kube_cert="kubecfg.crt" - local kube_key="kubecfg.key" - local ca_cert="kubernetes.ca.crt" # TODO use token instead of kube_auth - local kube_auth="kubernetes_auth" + export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt" + export KUBE_KEY="/tmp/$RANDOM-kubecfg.key" + export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt" + export CONTEXT="aws_${INSTANCE_PREFIX}" local kubectl="${KUBE_ROOT}/cluster/kubectl.sh" - local context="${INSTANCE_PREFIX}" - local user="${INSTANCE_PREFIX}-admin" - local config_dir="${HOME}/.kube/${context}" # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's # config file. Distribute the same way the htpasswd is done. ( - mkdir -p "${config_dir}" umask 077 - ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo cat /srv/kubernetes/kubecfg.crt >"${config_dir}/${kube_cert}" 2>$LOG - ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo cat /srv/kubernetes/kubecfg.key >"${config_dir}/${kube_key}" 2>$LOG - ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" ubuntu@${KUBE_MASTER_IP} sudo cat /srv/kubernetes/ca.crt >"${config_dir}/${ca_cert}" 2>$LOG + ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>"$LOG" + ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>"$LOG" + ssh -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "ubuntu@${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>"$LOG" - "${kubectl}" config set-cluster "${context}" --server="https://${KUBE_MASTER_IP}" --certificate-authority="${config_dir}/${ca_cert}" --global - "${kubectl}" config set-credentials "${user}" --auth-path="${config_dir}/${kube_auth}" --global - "${kubectl}" config set-context "${context}" --cluster="${context}" --user="${user}" --global - "${kubectl}" config use-context "${context}" --global - - cat << EOF > 
"${config_dir}/${kube_auth}" -{ - "User": "$KUBE_USER", - "Password": "$KUBE_PASSWORD", - "CAFile": "${config_dir}/${ca_cert}", - "CertFile": "${config_dir}/${kube_cert}", - "KeyFile": "${config_dir}/${kube_key}" -} -EOF - - chmod 0600 "${config_dir}/${kube_auth}" "${config_dir}/$kube_cert" \ - "${config_dir}/${kube_key}" "${config_dir}/${ca_cert}" - echo "Wrote ${config_dir}/${kube_auth}" + create-kubeconfig ) echo "Sanity checking cluster..." @@ -700,7 +674,7 @@ EOF echo echo -e "${color_yellow} https://${KUBE_MASTER_IP}" echo - echo -e "${color_green}The user name and password to use is located in ${config_dir}/${kube_auth}${color_norm}" + echo -e "${color_green}The user name and password to use is located in ${KUBECONFIG}.${color_norm}" echo } diff --git a/cluster/azure/templates/salt-minion.sh b/cluster/azure/templates/salt-minion.sh index e22509cefec..42f0b633764 100644 --- a/cluster/azure/templates/salt-minion.sh +++ b/cluster/azure/templates/salt-minion.sh @@ -31,7 +31,7 @@ log_level: debug log_level_logfile: debug EOF -hostnamef=$(hostname -f) +hostnamef=$(uname -n) apt-get install -y ipcalc netmask=$(ipcalc $MINION_IP_RANGE | grep Netmask | awk '{ print $2 }') network=$(ipcalc $MINION_IP_RANGE | grep Address | awk '{ print $2 }') diff --git a/cluster/azure/util.sh b/cluster/azure/util.sh index b9e95f5b576..515917bea95 100644 --- a/cluster/azure/util.sh +++ b/cluster/azure/util.sh @@ -21,6 +21,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source "${KUBE_ROOT}/cluster/azure/${KUBE_CONFIG_FILE-"config-default.sh"}" +source "${KUBE_ROOT}/cluster/common.sh" function azure_call { local -a params=() @@ -242,30 +243,17 @@ function detect-master () { } # Ensure that we have a password created for validating to the master. Will -# read from $HOME/.kubernetres_auth if available. +# read from kubeconfig current-context if available. 
# # Vars set: # KUBE_USER # KUBE_PASSWORD function get-password { - local file="$HOME/.kubernetes_auth" - if [[ -r "$file" ]]; then - KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]') - KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]') - return - fi + get-kubeconfig-basicauth + if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then KUBE_USER=admin KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') - - # Remove this code, since in all use cases I can see, we are overwriting this - # at cluster creation time. - cat << EOF > "$file" -{ - "User": "$KUBE_USER", - "Password": "$KUBE_PASSWORD" -} -EOF - chmod 0600 "$file" + fi } # Generate authentication token for admin user. Will @@ -432,32 +420,22 @@ function kube-up { printf "\n" echo "Kubernetes cluster created." - local kube_cert=".kubecfg.crt" - local kube_key=".kubecfg.key" - local ca_cert=".kubernetes.ca.crt" + export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt" + export KUBE_KEY="/tmp/$RANDOM-kubecfg.key" + export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt" + export CONTEXT="azure_${INSTANCE_PREFIX}" # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's # config file. Distribute the same way the htpasswd is done. 
(umask 077 ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \ - sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null + sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \ - sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null + sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null ssh -oStrictHostKeyChecking=no -i $AZ_SSH_KEY -p 22000 $AZ_CS.cloudapp.net \ - sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null + sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null - cat << EOF > ~/.kubernetes_auth -{ - "User": "$KUBE_USER", - "Password": "$KUBE_PASSWORD", - "CAFile": "$HOME/$ca_cert", - "CertFile": "$HOME/$kube_cert", - "KeyFile": "$HOME/$kube_key" -} -EOF - - chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \ - "${HOME}/${kube_key}" "${HOME}/${ca_cert}" + create-kubeconfig ) # Wait for salt on the minions @@ -482,7 +460,7 @@ EOF echo echo " https://${KUBE_MASTER_IP}" echo - echo "The user name and password to use is located in ~/.kubernetes_auth." + echo "The user name and password to use is located in ${KUBECONFIG}." echo } diff --git a/cluster/common.sh b/cluster/common.sh index 9943619bda3..0f25b2bbb3c 100644 --- a/cluster/common.sh +++ b/cluster/common.sh @@ -22,33 +22,54 @@ set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +DEFAULT_KUBECONFIG="${HOME}/.kube/config" + # Generate kubeconfig data for the created cluster. # Assumed vars: # KUBE_USER # KUBE_PASSWORD # KUBE_MASTER_IP # KUBECONFIG +# CONTEXT # +# The following can be omitted for --insecure-skip-tls-verify # KUBE_CERT # KUBE_KEY # CA_CERT -# CONTEXT function create-kubeconfig() { local kubectl="${KUBE_ROOT}/cluster/kubectl.sh" + export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG} # KUBECONFIG determines the file we write to, but it may not exist yet if [[ ! 
-e "${KUBECONFIG}" ]]; then mkdir -p $(dirname "${KUBECONFIG}") touch "${KUBECONFIG}" fi - "${kubectl}" config set-cluster "${CONTEXT}" --server="https://${KUBE_MASTER_IP}" \ - --certificate-authority="${CA_CERT}" \ - --embed-certs=true - "${kubectl}" config set-credentials "${CONTEXT}" --username="${KUBE_USER}" \ - --password="${KUBE_PASSWORD}" \ - --client-certificate="${KUBE_CERT}" \ - --client-key="${KUBE_KEY}" \ - --embed-certs=true + local cluster_args=( + "--server=${KUBE_SERVER:-https://${KUBE_MASTER_IP}}" + ) + if [[ -z "${CA_CERT:-}" ]]; then + cluster_args+=("--insecure-skip-tls-verify=true") + else + cluster_args+=( + "--certificate-authority=${CA_CERT}" + "--embed-certs=true" + ) + fi + local user_args=( + "--username=${KUBE_USER}" + "--password=${KUBE_PASSWORD}" + ) + if [[ ! -z "${KUBE_CERT:-}" && ! -z "${KUBE_KEY:-}" ]]; then + user_args+=( + "--client-certificate=${KUBE_CERT}" + "--client-key=${KUBE_KEY}" + "--embed-certs=true" + ) + fi + + "${kubectl}" config set-cluster "${CONTEXT}" "${cluster_args[@]}" + "${kubectl}" config set-credentials "${CONTEXT}" "${user_args[@]}" "${kubectl}" config set-context "${CONTEXT}" --cluster="${CONTEXT}" --user="${CONTEXT}" "${kubectl}" config use-context "${CONTEXT}" --cluster="${CONTEXT}" @@ -60,6 +81,7 @@ function create-kubeconfig() { # KUBECONFIG # CONTEXT function clear-kubeconfig() { + export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG} local kubectl="${KUBE_ROOT}/cluster/kubectl.sh" "${kubectl}" config unset "clusters.${CONTEXT}" "${kubectl}" config unset "users.${CONTEXT}" @@ -85,6 +107,7 @@ function clear-kubeconfig() { # KUBE_USER,KUBE_PASSWORD will be empty if no current-context is set, or # the current-context user does not exist or contain basicauth entries. function get-kubeconfig-basicauth() { + export KUBECONFIG=${KUBECONFIG:-$DEFAULT_KUBECONFIG} # Templates to safely extract the username,password for the current-context # user. 
The long chain of 'with' commands avoids indexing nil if any of the # entries ("current-context", "contexts"."current-context", "users", etc) diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index df5c917178f..ee8ac8fe04c 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -63,7 +63,7 @@ function increment_ipv4 { } node_count="${NUM_MINIONS}" -next_node="10.244.0.0" +next_node="${KUBE_GCE_CLUSTER_CLASS_B:-10.244}.0.0" node_subnet_size=24 node_subnet_count=$((2 ** (32-$node_subnet_size))) subnets=() @@ -73,7 +73,7 @@ for ((node_num=0; node_num /etc/init/salt-minion.override + update-rc.d salt-minion disable if service salt-minion status >/dev/null; then echo "salt-minion started in defiance of runlevel policy, aborting startup." >&2 @@ -205,18 +208,21 @@ mount-master-pd() { mkdir -p /mnt/master-pd/srv/kubernetes # Contains the cluster's initial config parameters and auth tokens mkdir -p /mnt/master-pd/srv/salt-overlay - ln -s /mnt/master-pd/var/etcd /var/etcd - ln -s /mnt/master-pd/srv/kubernetes /srv/kubernetes - ln -s /mnt/master-pd/srv/salt-overlay /srv/salt-overlay + + ln -s -f /mnt/master-pd/var/etcd /var/etcd + ln -s -f /mnt/master-pd/srv/kubernetes /srv/kubernetes + ln -s -f /mnt/master-pd/srv/salt-overlay /srv/salt-overlay # This is a bit of a hack to get around the fact that salt has to run after the # PD and mounted directory are already set up. We can't give ownership of the # directory to etcd until the etcd user and group exist, but they don't exist # until salt runs if we don't create them here. We could alternatively make the # permissions on the directory more permissive, but this seems less bad. - useradd -s /sbin/nologin -d /var/etcd etcd - chown etcd /mnt/master-pd/var/etcd - chgrp etcd /mnt/master-pd/var/etcd + if ! 
id etcd &>/dev/null; then + useradd -s /sbin/nologin -d /var/etcd etcd + fi + chown -R etcd /mnt/master-pd/var/etcd + chgrp -R etcd /mnt/master-pd/var/etcd } # Create the overlay files for the salt tree. We create these in a separate @@ -282,6 +288,14 @@ function create-salt-auth() { } function download-release() { + # TODO(zmerlynn): We should optimize for the reboot case here, but + # unlike the .debs, we don't have version information in the + # filenames here, nor do the URLs even provide useful information in + # the dev environment case (because they're just a project + # bucket). We should probably push a hash into the kube-env, and + # store it when we download, and then when it's different infer that + # a push occurred (otherwise it's a simple reboot). + echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)" download-or-bust "$SERVER_BINARY_TAR_URL" diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index 52bbb69156f..6a3811dbd0a 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -390,20 +390,23 @@ function create-node-template { # Robustly try to add metadata on an instance. # $1: The name of the instace. -# $2: The metadata key=value pair to add. +# $2...$n: The metadata key=value pairs to add. function add-instance-metadata { + local -r instance=$1 + shift 1 + local -r kvs=( "$@" ) detect-project local attempt=0 while true; do - if ! gcloud compute instances add-metadata "$1" \ + if ! gcloud compute instances add-metadata "${instance}" \ --project "${PROJECT}" \ --zone "${ZONE}" \ - --metadata "$2"; then + --metadata "${kvs[@]}"; then if (( attempt > 5 )); then - echo -e "${color_red}Failed to add instance metadata in $1 ${color_norm}" + echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}" exit 2 fi - echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in $1. Retrying.${color_norm}" + echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. 
Retrying.${color_norm}" attempt=$(($attempt+1)) else break @@ -412,21 +415,25 @@ function add-instance-metadata { } # Robustly try to add metadata on an instance, from a file. -# $1: The name of the instace. -# $2: The metadata key=file pair to add. +# $1: The name of the instance. +# $2...$n: The metadata key=file pairs to add. function add-instance-metadata-from-file { + local -r instance=$1 + shift 1 + local -r kvs=( "$@" ) detect-project local attempt=0 while true; do - if ! gcloud compute instances add-metadata "$1" \ + echo "${kvs[@]}" + if ! gcloud compute instances add-metadata "${instance}" \ --project "${PROJECT}" \ --zone "${ZONE}" \ - --metadata-from-file "$2"; then + --metadata-from-file "${kvs[@]}"; then if (( attempt > 5 )); then - echo -e "${color_red}Failed to add instance metadata in $1 ${color_norm}" + echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}" exit 2 fi - echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in $1. Retrying.${color_norm}" + echo -e "${color_yellow}Attempt $(($attempt+1)) failed to add metadata in ${instance}. Retrying.${color_norm}" attempt=$(($attempt+1)) else break @@ -584,7 +591,16 @@ function kube-up { # https://github.com/GoogleCloudPlatform/kubernetes/issues/3168 KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null) - create-master-instance & + # Reserve the master's IP so that it can later be transferred to another VM + # without disrupting the kubelets. IPs are associated with regions, not zones, + # so extract the region name, which is the same as the zone but with the final + # dash and characters trailing the dash removed. 
+ local REGION=${ZONE%-*} + MASTER_RESERVED_IP=$(gcloud compute addresses create "${MASTER_NAME}-ip" \ + --project "${PROJECT}" \ + --region "${REGION}" -q --format yaml | awk '/^address:/ { print $2 }') + + create-master-instance $MASTER_RESERVED_IP & # Create a single firewall rule for all minions. create-firewall-rule "${MINION_TAG}-all" "${CLUSTER_IP_RANGE}" "${MINION_TAG}" & @@ -647,16 +663,6 @@ function kube-up { detect-master - # Reserve the master's IP so that it can later be transferred to another VM - # without disrupting the kubelets. IPs are associated with regions, not zones, - # so extract the region name, which is the same as the zone but with the final - # dash and characters trailing the dash removed. - local REGION=${ZONE%-*} - gcloud compute addresses create "${MASTER_NAME}-ip" \ - --project "${PROJECT}" \ - --addresses "${KUBE_MASTER_IP}" \ - --region "${REGION}" - echo "Waiting for cluster initialization." echo echo " This will continually check to see if the API for kubernetes is reachable." @@ -673,10 +679,9 @@ function kube-up { echo "Kubernetes cluster created." # TODO use token instead of basic auth - export KUBECONFIG="${HOME}/.kube/.kubeconfig" - export KUBE_CERT="/tmp/kubecfg.crt" - export KUBE_KEY="/tmp/kubecfg.key" - export CA_CERT="/tmp/kubernetes.ca.crt" + export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt" + export KUBE_KEY="/tmp/$RANDOM-kubecfg.key" + export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt" export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}" # TODO: generate ADMIN (and KUBELET) tokens and put those in the master's @@ -832,7 +837,6 @@ function kube-down { --quiet \ "${MASTER_NAME}-ip" || true - export KUBECONFIG="${HOME}/.kube/.kubeconfig" export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}" clear-kubeconfig } @@ -853,8 +857,10 @@ function kube-push { find-release-tars upload-server-tars + echo "Updating master metadata ..." 
write-master-env - add-instance-metadata-from-file "${KUBE_MASTER}" "kube-env=${KUBE_TEMP}/master-kube-env.yaml" + add-instance-metadata-from-file "${KUBE_MASTER}" "kube-env=${KUBE_TEMP}/master-kube-env.yaml" "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" + echo "Pushing to master (log at ${OUTPUT}/kube-push-${KUBE_MASTER}.log) ..." cat ${KUBE_ROOT}/cluster/gce/configure-vm.sh | gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone "${ZONE}" "${KUBE_MASTER}" --command "sudo bash -s -- --push" &> ${OUTPUT}/kube-push-"${KUBE_MASTER}".log @@ -899,7 +905,7 @@ function kube-update-nodes() { echo "Updating node metadata... " write-node-env for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - add-instance-metadata-from-file "${MINION_NAMES[$i]}" "kube-env=${KUBE_TEMP}/node-kube-env.yaml" & + add-instance-metadata-from-file "${MINION_NAMES[$i]}" "kube-env=${KUBE_TEMP}/node-kube-env.yaml" "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" & done wait-for-jobs echo "Done" @@ -975,7 +981,7 @@ function restart-kube-proxy { # Restart the kube-apiserver on a node ($1) function restart-apiserver { - ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart" + ssh-to-node "$1" "sudo docker kill `sudo docker ps | grep /kube-apiserver | awk '{print $1}'`" } # Perform preparations required to run e2e tests diff --git a/cluster/gke/util.sh b/cluster/gke/util.sh index 771e9403816..f8422386ca8 100755 --- a/cluster/gke/util.sh +++ b/cluster/gke/util.sh @@ -251,7 +251,7 @@ function restart-kube-proxy() { # Restart the kube-proxy on master ($1) function restart-apiserver() { echo "... in restart-kube-apiserver()" >&2 - ssh-to-node "$1" "sudo /etc/init.d/kube-apiserver restart" + ssh-to-node "$1" "sudo docker kill `sudo docker ps | grep /kube-apiserver | awk '{print $1}'`" } # Execute after running tests to perform any required clean-up. 
This is called diff --git a/cluster/images/hyperkube/Makefile b/cluster/images/hyperkube/Makefile index 3bd67948b91..ef5504bb1b8 100644 --- a/cluster/images/hyperkube/Makefile +++ b/cluster/images/hyperkube/Makefile @@ -1,6 +1,6 @@ # build the hyperkube image. -VERSION=v0.14.2 +VERSION=v0.15.0 all: curl -O http://storage.googleapis.com/kubernetes-release/release/${VERSION}/bin/linux/amd64/hyperkube diff --git a/cluster/kubectl.sh b/cluster/kubectl.sh index 2c146bc9d67..2945c88afa9 100755 --- a/cluster/kubectl.sh +++ b/cluster/kubectl.sh @@ -100,30 +100,12 @@ elif [[ ! -x "${KUBECTL_PATH}" ]]; then fi kubectl="${KUBECTL_PATH:-${kubectl}}" -# While GKE requires the kubectl binary, it's actually called through -# gcloud. But we need to adjust the PATH so gcloud gets the right one. +# GKE stores it's kubeconfig in a separate location. if [[ "$KUBERNETES_PROVIDER" == "gke" ]]; then detect-project &> /dev/null - export PATH=$(get_absolute_dirname $kubectl):$PATH - kubectl="${GCLOUD}" - # GKE runs kubectl through gcloud. config=( - "alpha" - "container" - "kubectl" - "--project=${PROJECT}" - "--zone=${ZONE}" - "--cluster=${CLUSTER_NAME}" - ) -elif [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then - # When we are using vagrant it has hard coded kubeconfig, and do not clobber public endpoints - config=( - "--kubeconfig=$HOME/.kubernetes_vagrant_kubeconfig" - ) -elif [[ "$KUBERNETES_PROVIDER" == "libvirt-coreos" ]]; then - detect-master > /dev/null - config=( - "--server=http://${KUBE_MASTER_IP}:8080" + "--kubeconfig=${HOME}/.config/gcloud/kubernetes/kubeconfig" + "--context=gke_${PROJECT}_${ZONE}_${CLUSTER_NAME}" ) fi diff --git a/cluster/libvirt-coreos/util.sh b/cluster/libvirt-coreos/util.sh index d532232ef0c..af353edd1c8 100644 --- a/cluster/libvirt-coreos/util.sh +++ b/cluster/libvirt-coreos/util.sh @@ -18,7 +18,8 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. 
readonly ROOT=$(dirname "${BASH_SOURCE}") -source $ROOT/${KUBE_CONFIG_FILE:-"config-default.sh"} +source "$ROOT/${KUBE_CONFIG_FILE:-"config-default.sh"}" +source "$KUBE_ROOT/cluster/common.sh" export LIBVIRT_DEFAULT_URI=qemu:///system @@ -199,6 +200,7 @@ function wait-cluster-readiness { function kube-up { detect-master detect-minions + get-password initialize-pool keep_base_image initialize-network @@ -235,12 +237,9 @@ function kube-up { rm $domain_xml done - export KUBECONFIG="${HOME}/.kube/.kubeconfig" - local kubectl="${KUBE_ROOT}/cluster/kubectl.sh" - - "${kubectl}" config set-cluster libvirt-coreos --server=http://${KUBE_MASTER_IP-}:8080 - "${kubectl}" config set-context libvirt-coreos --cluster=libvirt-coreos - "${kubectl}" config use-context libvirt-coreos --cluster=libvirt-coreos + export KUBE_SERVER="http://192.168.10.1:8080" + export CONTEXT="libvirt-coreos" + create-kubeconfig wait-cluster-readiness @@ -331,8 +330,8 @@ function test-teardown { # Set the {KUBE_USER} and {KUBE_PASSWORD} environment values required to interact with provider function get-password { - export KUBE_USER=core - echo "TODO get-password" + export KUBE_USER='' + export KUBE_PASSWORD='' } # SSH to a node by name or IP ($1) and run a command ($2). diff --git a/cluster/rackspace/util.sh b/cluster/rackspace/util.sh index 72a9aab0e12..771bae4d245 100644 --- a/cluster/rackspace/util.sh +++ b/cluster/rackspace/util.sh @@ -20,6 +20,7 @@ # config-default.sh. KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"} +source "${KUBE_ROOT}/cluster/common.sh" verify-prereqs() { # Make sure that prerequisites are installed. @@ -50,29 +51,17 @@ verify-prereqs() { } # Ensure that we have a password created for validating to the master. Will -# read from $HOME/.kubernetres_auth if available. +# read from kubeconfig current-context if available. 
# # Vars set: # KUBE_USER # KUBE_PASSWORD get-password() { - local file="$HOME/.kubernetes_auth" - if [[ -r "$file" ]]; then - KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]') - KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]') - return + get-kubeconfig-basicauth + if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then + KUBE_USER=admin + KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') fi - KUBE_USER=admin - KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') - - # Store password for reuse. - cat << EOF > "$file" -{ - "User": "$KUBE_USER", - "Password": "$KUBE_PASSWORD" -} -EOF - chmod 0600 "$file" } rax-ssh-key() { @@ -329,6 +318,13 @@ kube-up() { echo "Kubernetes cluster created." + export KUBE_CERT="" + export KUBE_KEY="" + export CA_CERT="" + export CONTEXT="rackspace_${INSTANCE_PREFIX}" + + create-kubeconfig + # Don't bail on errors, we want to be able to print some info. set +e diff --git a/cluster/saltbase/salt/generate-cert/make-ca-cert.sh b/cluster/saltbase/salt/generate-cert/make-ca-cert.sh index 08af6f503d2..26f6e3d6c7d 100755 --- a/cluster/saltbase/salt/generate-cert/make-ca-cert.sh +++ b/cluster/saltbase/salt/generate-cert/make-ca-cert.sh @@ -36,7 +36,7 @@ if [ "$cert_ip" == "_use_aws_external_ip_" ]; then fi if [ "$cert_ip" == "_use_azure_dns_name_" ]; then - cert_ip=$(hostname -f | awk -F. '{ print $2 }').cloudapp.net + cert_ip=$(uname -n | awk -F. 
'{ print $2 }').cloudapp.net use_cn=true fi diff --git a/cluster/saltbase/salt/kube-addons/default b/cluster/saltbase/salt/kube-addons/default new file mode 100644 index 00000000000..65bc90a4af8 --- /dev/null +++ b/cluster/saltbase/salt/kube-addons/default @@ -0,0 +1,14 @@ +#TODO(erictune): once we make DNS a hard requirement for clusters, then this can be removed, +# and APISERVER_URL="https://kubernetes:443" +{% if grains.api_servers is defined -%} + {% set api_server = "https://" + grains.api_servers + ":6443" -%} +{% elif grains.apiservers is defined -%} # TODO(remove after 0.16.0): Deprecated form + {% set api_server = "https://" + grains.apiservers + ":6443" -%} +{% elif grains['roles'][0] == 'kubernetes-master' -%} + {% set master_ipv4 = salt['grains.get']('fqdn_ip4')[0] -%} + {% set api_server = "https://" + master_ipv4 + ":6443" -%} +{% else -%} + {% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%} + {% set api_server = "https://" + ips[0][0] + ":6443" -%} +{% endif -%} +export APISERVER_URL={{ api_server }} diff --git a/cluster/saltbase/salt/kube-addons/init.sls b/cluster/saltbase/salt/kube-addons/init.sls index a38adb62556..069574c592e 100644 --- a/cluster/saltbase/salt/kube-addons/init.sls +++ b/cluster/saltbase/salt/kube-addons/init.sls @@ -48,6 +48,20 @@ - makedirs: True {% endif %} +{% if grains['os_family'] == 'RedHat' %} +{% set environment_file = '/etc/sysconfig/kube-addons' %} +{% else %} +{% set environment_file = '/etc/default/kube-addons' %} +{% endif %} + +{{ environment_file }}: + file.managed: + - source: salt://kube-addons/default + - template: jinja + - user: root + - group: root + - mode: 644 + /etc/kubernetes/kube-addons.sh: file.managed: - source: salt://kube-addons/kube-addons.sh diff --git a/cluster/saltbase/salt/kube-addons/initd b/cluster/saltbase/salt/kube-addons/initd index 6b06e8c7cd1..2c60e5bd291 100644 --- a/cluster/saltbase/salt/kube-addons/initd +++ 
b/cluster/saltbase/salt/kube-addons/initd @@ -21,6 +21,9 @@ PIDFILE=/var/run/$NAME.pid SCRIPTNAME=/etc/init.d/$NAME KUBE_ADDONS_SH=/etc/kubernetes/kube-addons.sh +# Read configuration variable file if it is present +[ -r /etc/default/$NAME ] && . /etc/default/$NAME + # Define LSB log_* functions. # Depend on lsb-base (>= 3.2-14) to ensure that this file is present # and status_of_proc is working. diff --git a/cluster/saltbase/salt/kube-addons/kube-addons.service b/cluster/saltbase/salt/kube-addons/kube-addons.service index 086394e857a..f9be6db3168 100644 --- a/cluster/saltbase/salt/kube-addons/kube-addons.service +++ b/cluster/saltbase/salt/kube-addons/kube-addons.service @@ -3,6 +3,7 @@ Description=Kubernetes Addon Object Manager Documentation=https://github.com/GoogleCloudPlatform/kubernetes [Service] +EnvironmentFile=/etc/sysconfig/kube-addons ExecStart=/etc/kubernetes/kube-addons.sh [Install] diff --git a/cluster/saltbase/salt/kube-addons/kube-addons.sh b/cluster/saltbase/salt/kube-addons/kube-addons.sh index 016f8af44b6..12cd629f64a 100644 --- a/cluster/saltbase/salt/kube-addons/kube-addons.sh +++ b/cluster/saltbase/salt/kube-addons/kube-addons.sh @@ -19,23 +19,47 @@ # managed result is of that. Start everything below that directory. KUBECTL=/usr/local/bin/kubectl -function create-kubernetesauth-secret() { +if [ -z "$APISERVER_URL" ] ; then + echo "Must set APISERVER_URL" + exit 1 +fi + +function create-kubeconfig-secret() { local -r token=$1 local -r username=$2 local -r safe_username=$(tr -s ':_' '--' <<< "${username}") - # Make secret with a kubernetes_auth file with a token. + # Make a kubeconfig file with the token. # TODO(etune): put apiserver certs into secret too, and reference from authfile, # so that "Insecure" is not needed. - kafile=$(echo "{\"BearerToken\": \"${token}\", \"Insecure\": true }" | base64 -w0) - read -r -d '' secretjson <= 3.2-14) to ensure that this file is present -# and status_of_proc is working. -. 
/lib/lsb/init-functions - -# -# Function that starts the daemon/service -# -do_start() -{ - # Return - # 0 if daemon has been started - # 1 if daemon was already running - # 2 if daemon could not be started - start-stop-daemon --start --quiet --background --no-close \ - --make-pidfile --pidfile $PIDFILE \ - --exec $DAEMON -c $DAEMON_USER --test > /dev/null \ - || return 1 - start-stop-daemon --start --quiet --background --no-close \ - --make-pidfile --pidfile $PIDFILE \ - --exec $DAEMON -c $DAEMON_USER -- \ - $DAEMON_ARGS >> $DAEMON_LOG_FILE 2>&1 \ - || return 2 -} - -# -# Function that stops the daemon/service -# -do_stop() -{ - # Return - # 0 if daemon has been stopped - # 1 if daemon was already stopped - # 2 if daemon could not be stopped - # other if a failure occurred - start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --exec $DAEMON - RETVAL="$?" - [ "$RETVAL" = 2 ] && return 2 - # Many daemons don't delete their pidfiles when they exit. - rm -f $PIDFILE - return "$RETVAL" -} - - -case "$1" in - start) - log_daemon_msg "Starting $DESC" "$NAME" - do_start - case "$?" in - 0|1) log_end_msg 0 || exit 0 ;; - 2) log_end_msg 1 || exit 1 ;; - esac - ;; - stop) - log_daemon_msg "Stopping $DESC" "$NAME" - do_stop - case "$?" in - 0|1) log_end_msg 0 ;; - 2) exit 1 ;; - esac - ;; - status) - status_of_proc -p $PIDFILE "$DAEMON" "$NAME" && exit 0 || exit $? - ;; - - restart|force-reload) - log_daemon_msg "Restarting $DESC" "$NAME" - do_stop - case "$?" in - 0|1) - do_start - case "$?" 
in - 0) log_end_msg 0 ;; - 1) log_end_msg 1 ;; # Old process is still running - *) log_end_msg 1 ;; # Failed to start - esac - ;; - *) - # Failed to stop - log_end_msg 1 - ;; - esac - ;; - *) - echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2 - exit 3 - ;; -esac diff --git a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest new file mode 100644 index 00000000000..ff22da0b156 --- /dev/null +++ b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest @@ -0,0 +1,140 @@ +{% set machines = ""-%} +{% set cluster_name = "" -%} +{% set minion_regexp = "--minion_regexp=.*" -%} +{% set sync_nodes = "--sync_nodes=true" -%} + +{% if pillar['node_instance_prefix'] is defined -%} + {% set minion_regexp = "--minion_regexp='" + pillar['node_instance_prefix'] + ".*'" -%} +{% endif -%} +{% if pillar['instance_prefix'] is defined -%} + {% set cluster_name = "--cluster_name=" + pillar['instance_prefix'] -%} +{% endif -%} + +{% set cloud_provider = "" -%} +{% set cloud_config = "" -%} + +{% if grains.cloud is defined -%} +{% set cloud_provider = "--cloud_provider=" + grains.cloud -%} + +{% if grains.cloud == 'gce' -%} + {% if grains.cloud_config is defined -%} + {% set cloud_config = "--cloud_config=" + grains.cloud_config -%} + {% endif -%} + +{% elif grains.cloud == 'aws' -%} + {% if grains.cloud_config is defined -%} + {% set cloud_config = "--cloud_config=" + grains.cloud_config -%} + {% endif -%} + {% set machines = "--machines=" + ','.join(salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').keys()) -%} + +{% elif grains.cloud == 'azure' -%} + {% set machines = "--machines=" + salt['mine.get']('roles:kubernetes-pool', 'grains.items', expr_form='grain').values()|join(',', attribute='hostnamef') -%} + +{% elif grains.cloud == 'vsphere' -%} + # Collect IPs of minions as machines list. 
+ {% set machines= "" -%} + {% for addrs in salt['mine.get']('roles:kubernetes-pool', 'network.ip_addrs', expr_form='grain').values() -%} + {% if loop.first -%} + machines="--machines="; + {% endif -%} + {% set machines = machines + addrs[0] %} + {% if not loop.last -%} + {% set machines = machines + "," %} + {% endif -%} + {% endfor -%} + {% set minion_regexp = "" -%} + +{% endif -%} +{% endif -%} + +{ +"apiVersion": "v1beta3", +"kind": "Pod", +"metadata": {"name":"kube-controller-manager"}, +"spec":{ +"hostNetwork": true, +"containers":[ + { + "name": "kube-controller-manager", + "image": "gcr.io/google_containers/kube-controller-manager:{{pillar['kube-controller-manager_docker_tag']}}", + "command": [ + "/kube-controller-manager", + "--master=127.0.0.1:8080", + "{{machines}}", + "{{cluster_name}}", + "{{minion_regexp}}", + "{{cloud_provider}}", + "{{sync_nodes}}", + "{{cloud_config}}", + "{{pillar['log_level']}}" + ], + "volumeMounts": [ + { "name": "srvkube", + "mountPath": "/srv/kubernetes", + "readOnly": true}, + { "name": "etcssl", + "mountPath": "/etc/ssl", + "readOnly": true}, + { "name": "usrsharessl", + "mountPath": "/usr/share/ssl", + "readOnly": true}, + { "name": "varssl", + "mountPath": "/var/ssl", + "readOnly": true}, + { "name": "usrssl", + "mountPath": "/usr/ssl", + "readOnly": true}, + { "name": "usrlibssl", + "mountPath": "/usr/lib/ssl", + "readOnly": true}, + { "name": "usrlocalopenssl", + "mountPath": "/usr/local/openssl", + "readOnly": true}, + { "name": "etcopenssl", + "mountPath": "/etc/openssl", + "readOnly": true}, + { "name": "etcpkitls", + "mountPath": "/etc/pki/tls", + "readOnly": true} + ] + } +], +"volumes":[ + { "name": "srvkube", + "hostPath": { + "path": "/srv/kubernetes"} + }, + { "name": "etcssl", + "hostPath": { + "path": "/etc/ssl"} + }, + { "name": "usrsharessl", + "hostPath": { + "path": "/usr/share/ssl"} + }, + { "name": "varssl", + "hostPath": { + "path": "/var/ssl"} + }, + { "name": "usrssl", + "hostPath": { + "path": 
"/usr/ssl"} + }, + { "name": "usrlibssl", + "hostPath": { + "path": "/usr/lib/ssl"} + }, + { "name": "usrlocalopenssl", + "hostPath": { + "path": "/usr/local/openssl"} + }, + { "name": "etcopenssl", + "hostPath": { + "path": "/etc/openssl"} + }, + { "name": "etcpkitls", + "hostPath": { + "path": "/etc/pki/tls"} + } +] +}} diff --git a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.service b/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.service deleted file mode 100644 index fb8ab9a8291..00000000000 --- a/cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.service +++ /dev/null @@ -1,11 +0,0 @@ -[Unit] -Description=Kubernetes Controller Manager -Documentation=https://github.com/GoogleCloudPlatform/kubernetes - -[Service] -EnvironmentFile=-/etc/sysconfig/kube-controller-manager -ExecStart=/usr/local/bin/kube-controller-manager "$DAEMON_ARGS" -Restart=on-failure - -[Install] -WantedBy=multi-user.target diff --git a/cluster/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default index 2b3f868494f..dc22a04bfe0 100644 --- a/cluster/saltbase/salt/kubelet/default +++ b/cluster/saltbase/salt/kubelet/default @@ -17,8 +17,8 @@ {% set config = "--config=/etc/kubernetes/manifests" -%} {% set hostname_override = "" -%} -{% if grains.minion_ip is defined -%} - {% set hostname_override = " --hostname_override=" + grains.minion_ip -%} +{% if grains.hostname_override is defined -%} + {% set hostname_override = " --hostname_override=" + grains.hostname_override -%} {% endif -%} {% set cluster_dns = "" %} diff --git a/cluster/saltbase/salt/monit/kube-apiserver b/cluster/saltbase/salt/monit/kube-apiserver deleted file mode 100644 index 2256a38c6b7..00000000000 --- a/cluster/saltbase/salt/monit/kube-apiserver +++ /dev/null @@ -1,10 +0,0 @@ -check process kube-apiserver with pidfile /var/run/kube-apiserver.pid -group kube-apiserver -start program = "/etc/init.d/kube-apiserver start" -stop program = 
"/etc/init.d/kube-apiserver stop" -if failed - host 127.0.0.1 - port 8080 - protocol http - request "/index.html" -then restart diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh index bb5b893c18f..d77c059ef2c 100755 --- a/cluster/vagrant/config-default.sh +++ b/cluster/vagrant/config-default.sh @@ -22,6 +22,7 @@ export NUM_MINIONS # The IP of the master export MASTER_IP="10.245.1.2" +export KUBE_MASTER_IP="10.245.1.2" export INSTANCE_PREFIX="kubernetes" export MASTER_NAME="${INSTANCE_PREFIX}-master" diff --git a/cluster/vagrant/provision-minion.sh b/cluster/vagrant/provision-minion.sh index 618c79d5ce6..41d3679f320 100755 --- a/cluster/vagrant/provision-minion.sh +++ b/cluster/vagrant/provision-minion.sh @@ -72,7 +72,7 @@ grains: roles: - kubernetes-pool cbr-cidr: '$(echo "$CONTAINER_SUBNET" | sed -e "s/'/''/g")' - minion_ip: '$(echo "$MINION_IP" | sed -e "s/'/''/g")' + hostname_override: '$(echo "$MINION_IP" | sed -e "s/'/''/g")' EOF # we will run provision to update code each time we test, so we do not want to do salt install each time diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh index db91a13d88f..33340c93d07 100644 --- a/cluster/vagrant/util.sh +++ b/cluster/vagrant/util.sh @@ -18,6 +18,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source "${KUBE_ROOT}/cluster/vagrant/${KUBE_CONFIG_FILE-"config-default.sh"}" +source "${KUBE_ROOT}/cluster/common.sh" function detect-master () { KUBE_MASTER_IP=$MASTER_IP @@ -33,13 +34,62 @@ function detect-minions { # Verify prereqs on host machine Also sets exports USING_KUBE_SCRIPTS=true so # that our Vagrantfile doesn't error out. function verify-prereqs { - for x in vagrant VBoxManage; do + for x in vagrant; do if ! which "$x" >/dev/null; then echo "Can't find $x in PATH, please fix and retry." 
exit 1 fi done + local vagrant_plugins=$(vagrant plugin list | sed '-es% .*$%%' '-es% *% %g' | tr ' ' $'\n') + local providers=( + # Format is: + # provider_ctl_executable vagrant_provider_name vagrant_provider_plugin_re + # either provider_ctl_executable or vagrant_provider_plugin_re can + # be blank (i.e., '') if none is needed by Vagrant (see, e.g., + # virtualbox entry) + vmrun vmware_fusion vagrant-vmware-fusion + vmrun vmware_workstation vagrant-vmware-workstation + prlctl parallels vagrant-parallels + VBoxManage virtualbox '' + ) + local provider_found='' + local provider_bin + local provider_name + local provider_plugin_re + + while [ "${#providers[@]}" -gt 0 ]; do + provider_bin=${providers[0]} + provider_name=${providers[1]} + provider_plugin_re=${providers[2]} + providers=("${providers[@]:3}") + + # If the provider is explicitly set, look only for that provider + if [ -n "${VAGRANT_DEFAULT_PROVIDER:-}" ] \ + && [ "${VAGRANT_DEFAULT_PROVIDER}" != "${provider_name}" ]; then + continue + fi + + if ([ -z "${provider_bin}" ] \ + || which "${provider_bin}" >/dev/null 2>&1) \ + && ([ -z "${provider_plugin_re}" ] \ + || [ -n "$(echo "${vagrant_plugins}" | grep -E "^${provider_plugin_re}$")" ]); then + provider_found="${provider_name}" + # Stop after finding the first viable provider + break + fi + done + + if [ -z "${provider_found}" ]; then + if [ -n "${VAGRANT_DEFAULT_PROVIDER}" ]; then + echo "Can't find the necessary components for the ${VAGRANT_DEFAULT_PROVIDER} vagrant provider, please fix and retry." + else + echo "Can't find the necessary components for any viable vagrant providers (e.g., virtualbox), please fix and retry." + fi + + exit 1 + fi + # Set VAGRANT_CWD to KUBE_ROOT so that we find the right Vagrantfile no # matter what directory the tools are called from. 
export VAGRANT_CWD="${KUBE_ROOT}" @@ -89,6 +139,7 @@ function create-provision-scripts { echo "DNS_REPLICAS='${DNS_REPLICAS:-}'" echo "RUNTIME_CONFIG='${RUNTIME_CONFIG:-}'" echo "ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'" + echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'" grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-master.sh" grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-network.sh" ) > "${KUBE_TEMP}/master-start.sh" @@ -109,6 +160,7 @@ function create-provision-scripts { echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})" echo "CONTAINER_SUBNET='${CONTAINER_SUBNET}'" echo "DOCKER_OPTS='${EXTRA_DOCKER_OPTS-}'" + echo "VAGRANT_DEFAULT_PROVIDER='${VAGRANT_DEFAULT_PROVIDER:-}'" grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh" grep -v "^#" "${KUBE_ROOT}/cluster/vagrant/provision-network.sh" ) > "${KUBE_TEMP}/minion-start-${i}.sh" @@ -116,6 +168,9 @@ function create-provision-scripts { } function verify-cluster { + # TODO: How does the user know the difference between "tak[ing] some + # time" and "loop[ing] forever"? Can we give more specific feedback on + # whether "an error" has occurred? echo "Each machine instance has been created/updated." echo " Now waiting for the Salt provisioning process to complete on each machine." echo " This can take some time based on your network, disk, and cpu speed." 
@@ -124,7 +179,7 @@ function verify-cluster { # verify master has all required daemons echo "Validating master" local machine="master" - local -a required_daemon=("salt-master" "salt-minion" "nginx" "kube-controller-manager" "kubelet") + local -a required_daemon=("salt-master" "salt-minion" "nginx" "kubelet") local validated="1" until [[ "$validated" == "0" ]]; do validated="0" @@ -198,49 +253,18 @@ function kube-up { vagrant up - local kube_cert=".kubecfg.vagrant.crt" - local kube_key=".kubecfg.vagrant.key" - local ca_cert=".kubernetes.vagrant.ca.crt" + export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt" + export KUBE_KEY="/tmp/$RANDOM-kubecfg.key" + export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt" + export CONTEXT="vagrant" - (umask 077 - vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null - vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null - vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null + ( + umask 077 + vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null + vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null + vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null - cat <"${HOME}/.kubernetes_vagrant_auth" -{ - "User": "$KUBE_USER", - "Password": "$KUBE_PASSWORD", - "CAFile": "$HOME/$ca_cert", - "CertFile": "$HOME/$kube_cert", - "KeyFile": "$HOME/$kube_key" -} -EOF - - cat <"${HOME}/.kubernetes_vagrant_kubeconfig" -apiVersion: v1 -clusters: -- cluster: - certificate-authority: ${HOME}/$ca_cert - server: https://${MASTER_IP}:443 - name: vagrant -contexts: -- context: - cluster: vagrant - namespace: default - user: vagrant - name: vagrant -current-context: "vagrant" -kind: Config -preferences: {} -users: -- name: vagrant - user: - auth-path: ${HOME}/.kubernetes_vagrant_auth -EOF - - chmod 0600 ~/.kubernetes_vagrant_auth "${HOME}/${kube_cert}" \ - 
"${HOME}/${kube_key}" "${HOME}/${ca_cert}" + create-kubeconfig ) verify-cluster diff --git a/cluster/validate-cluster.sh b/cluster/validate-cluster.sh index ab29e168282..c4b37da848c 100755 --- a/cluster/validate-cluster.sh +++ b/cluster/validate-cluster.sh @@ -14,11 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Bring up a Kubernetes cluster. -# -# If the full release name (gs:///) is passed in then we take -# that directly. If not then we assume we are doing development stuff and take -# the defaults in the release config. +# Validates that the cluster is healthy. set -o errexit set -o nounset @@ -28,12 +24,9 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. source "${KUBE_ROOT}/cluster/kube-env.sh" source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh" -get-password -detect-master > /dev/null -detect-minions > /dev/null - MINIONS_FILE=/tmp/minions-$$ trap 'rm -rf "${MINIONS_FILE}"' EXIT + # Make several attempts to deal with slow cluster birth. attempt=0 while true; do @@ -54,62 +47,39 @@ done echo "Found ${found} nodes." cat -n "${MINIONS_FILE}" -# On vSphere, use minion IPs as their names -if [[ "${KUBERNETES_PROVIDER}" == "vsphere" || "${KUBERNETES_PROVIDER}" == "vagrant" || "${KUBERNETES_PROVIDER}" == "libvirt-coreos" || "${KUBERNETES_PROVIDER}" == "juju" ]] ; then - MINION_NAMES=("${KUBE_MINION_IP_ADDRESSES[@]}") -fi +attempt=0 +while true; do + kubectl_output=$("${KUBE_ROOT}/cluster/kubectl.sh" get cs) -# On AWS we can't really name the minions, so just trust that if the number is right, the right names are there. -if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then - MINION_NAMES=("$(cat ${MINIONS_FILE})") - # /healthz validation isn't working for some reason on AWS. So just hope for the best. - # TODO: figure out why and fix, it must be working in some form, or else clusters wouldn't work. - echo "Kubelet health checking on AWS isn't currently supported, assuming everything is good..." 
- echo -e "${color_green}Cluster validation succeeded${color_norm}" - exit 0 -fi + # The "kubectl componentstatuses" output is four columns like this: + # + # COMPONENT HEALTH MSG ERR + # controller-manager Healthy ok nil + # + # Parse the output to capture the value of the second column("HEALTH"), then use grep to + # count the number of times it doesn't match "success". + # Because of the header, the actual unsuccessful count is 1 minus the count. -for (( i=0; i<${#MINION_NAMES[@]}; i++)); do - # Grep returns an exit status of 1 when line is not found, so we need the : to always return a 0 exit status - count=$(grep -c "${MINION_NAMES[$i]}" "${MINIONS_FILE}") || : - if [[ "${count}" == "0" ]]; then - echo -e "${color_red}Failed to find ${MINION_NAMES[$i]}, cluster is probably broken.${color_norm}" - cat -n "${MINIONS_FILE}" - exit 1 - fi + non_success_count=$(echo "${kubectl_output}" | \ + sed -n 's/^\([[:alnum:][:punct:]]\+\)\s\+\([[:alnum:][:punct:]]\+\)\s\+.*/\2/p' | \ + grep 'Healthy' --invert-match -c) - name="${MINION_NAMES[$i]}" - if [[ "$KUBERNETES_PROVIDER" != "vsphere" && "$KUBERNETES_PROVIDER" != "vagrant" && "$KUBERNETES_PROVIDER" != "libvirt-coreos" && "$KUBERNETES_PROVIDER" != "juju" ]]; then - # Grab fully qualified name - name=$(grep "${MINION_NAMES[$i]}\." "${MINIONS_FILE}") - fi - - # Make sure the kubelet is healthy. - # Make several attempts to deal with slow cluster birth. - attempt=0 - while true; do - echo -n "Attempt $((attempt+1)) at checking Kubelet installation on node ${MINION_NAMES[$i]} ..." 
- if [[ "$KUBERNETES_PROVIDER" != "libvirt-coreos" && "$KUBERNETES_PROVIDER" != "juju" ]]; then - curl_output=$(curl -s --insecure --user "${KUBE_USER}:${KUBE_PASSWORD}" \ - "https://${KUBE_MASTER_IP}/api/v1beta1/proxy/minions/${name}/healthz") - else - curl_output=$(curl -s \ - "http://${KUBE_MASTER_IP}:8080/api/v1beta1/proxy/minions/${name}/healthz") - fi - if [[ "${curl_output}" != "ok" ]]; then - if (( attempt > 5 )); then - echo - echo -e "${color_red}Kubelet failed to install on node ${MINION_NAMES[$i]}. Your cluster is unlikely to work correctly." - echo -e "Please run ./cluster/kube-down.sh and re-create the cluster. (sorry!)${color_norm}" - exit 1 - fi - else - echo -e " ${color_green}[working]${color_norm}" - break - fi - echo -e " ${color_yellow}[not working yet]${color_norm}" + if ((non_success_count > 1)); then + if ((attempt < 5)); then + echo -e "${color_yellow}Cluster not working yet.${color_norm}" attempt=$((attempt+1)) sleep 30 - done + else + echo -e " ${color_yellow}Validate output:${color_norm}" + echo "${kubectl_output}" + echo -e "${color_red}Validation returned one or more failed components. 
Cluster is probably broken.${color_norm}" + exit 1 + fi + else + break + fi done + +echo "Validate output:" +echo "${kubectl_output}" echo -e "${color_green}Cluster validation succeeded${color_norm}" diff --git a/cluster/vsphere/templates/salt-minion.sh b/cluster/vsphere/templates/salt-minion.sh index 7741ea5158a..5324362c917 100755 --- a/cluster/vsphere/templates/salt-minion.sh +++ b/cluster/vsphere/templates/salt-minion.sh @@ -37,7 +37,7 @@ echo "master: $KUBE_MASTER" > /etc/salt/minion.d/master.conf # cat </etc/salt/minion.d/grains.conf grains: - minion_ip: $(ip route get 1.1.1.1 | awk '{print $7}') + hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}') roles: - kubernetes-pool - kubernetes-pool-vsphere diff --git a/cluster/vsphere/util.sh b/cluster/vsphere/util.sh index c866963cd8a..90fbf928d52 100755 --- a/cluster/vsphere/util.sh +++ b/cluster/vsphere/util.sh @@ -21,6 +21,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source "${KUBE_ROOT}/cluster/vsphere/config-common.sh" source "${KUBE_ROOT}/cluster/vsphere/${KUBE_CONFIG_FILE-"config-default.sh"}" +source "${KUBE_ROOT}/cluster/common.sh" # Detect the IP for the master # @@ -169,29 +170,17 @@ function upload-server-tars { } # Ensure that we have a password created for validating to the master. Will -# read from $HOME/.kubernetes_auth if available. +# read from kubeconfig if available. 
# # Vars set: # KUBE_USER # KUBE_PASSWORD function get-password { - local file="$HOME/.kubernetes_auth" - if [[ -r "$file" ]]; then - KUBE_USER=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["User"]') - KUBE_PASSWORD=$(cat "$file" | python -c 'import json,sys;print json.load(sys.stdin)["Password"]') - return + get-kubeconfig-basicauth + if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then + KUBE_USER=admin + KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') fi - KUBE_USER=admin - KUBE_PASSWORD=$(python -c 'import string,random; print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') - - # Store password for reuse. - cat << EOF > "$file" -{ - "User": "$KUBE_USER", - "Password": "$KUBE_PASSWORD" -} -EOF - chmod 0600 "$file" } # Run command over ssh @@ -372,6 +361,24 @@ function kube-up { printf " OK\n" done + echo "Kubernetes cluster created." + + # TODO use token instead of basic auth + export KUBE_CERT="/tmp/$RANDOM-kubecfg.crt" + export KUBE_KEY="/tmp/$RANDOM-kubecfg.key" + export CA_CERT="/tmp/$RANDOM-kubernetes.ca.crt" + export CONTEXT="vsphere_${INSTANCE_PREFIX}" + + ( + umask 077 + + kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${KUBE_CERT}" 2>/dev/null + kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${KUBE_KEY}" 2>/dev/null + kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${CA_CERT}" 2>/dev/null + + create-kubeconfig + ) + echo echo "Sanity checking cluster..." @@ -394,33 +401,8 @@ function kube-up { echo echo " https://${KUBE_MASTER_IP}" echo - echo "The user name and password to use is located in ~/.kubernetes_auth." 
+ echo "The user name and password to use is located in ${KUBECONFIG}" echo - - local kube_cert=".kubecfg.crt" - local kube_key=".kubecfg.key" - local ca_cert=".kubernetes.ca.crt" - - ( - umask 077 - - kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null - kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null - kube-ssh "${KUBE_MASTER_IP}" sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null - - cat << EOF > ~/.kubernetes_auth - { - "User": "$KUBE_USER", - "Password": "$KUBE_PASSWORD", - "CAFile": "$HOME/$ca_cert", - "CertFile": "$HOME/$kube_cert", - "KeyFile": "$HOME/$kube_key" - } -EOF - - chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \ - "${HOME}/${kube_key}" "${HOME}/${ca_cert}" - ) } # Delete a kubernetes cluster diff --git a/cmd/integration/integration.go b/cmd/integration/integration.go index c5976cfde9e..676deee188f 100644 --- a/cmd/integration/integration.go +++ b/cmd/integration/integration.go @@ -208,7 +208,7 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st endpoints := service.NewEndpointController(cl) // ensure the service endpoints are sync'd several times within the window that the integration tests wait - go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*4) + go endpoints.Run(3, util.NeverStop) controllerManager := replicationControllerPkg.NewReplicationManager(cl) @@ -285,7 +285,7 @@ func endpointsSet(c *client.Client, serviceNamespace, serviceID string, endpoint return func() (bool, error) { endpoints, err := c.Endpoints(serviceNamespace).Get(serviceID) if err != nil { - glog.Infof("Error on creating endpoints: %v", err) + glog.Infof("Error getting endpoints: %v", err) return false, nil } count := 0 diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 9f18878a6ba..afdd53fe1cf 100644 --- a/cmd/kube-apiserver/app/server.go +++ 
b/cmd/kube-apiserver/app/server.go @@ -24,6 +24,7 @@ import ( "net" "net/http" "os" + "path" "regexp" "strconv" "strings" @@ -56,6 +57,7 @@ type APIServer struct { APIBurst int TLSCertFile string TLSPrivateKeyFile string + CertDirectory string APIPrefix string StorageVersion string CloudProvider string @@ -99,6 +101,7 @@ func NewAPIServer() *APIServer { EnableLogsSupport: true, MasterServiceNamespace: api.NamespaceDefault, ClusterName: "kubernetes", + CertDirectory: "/var/run/kubernetes", RuntimeConfig: make(util.ConfigurationMap), KubeletConfig: client.KubeletConfig{ @@ -143,6 +146,8 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) { "If HTTPS serving is enabled, and --tls_cert_file and --tls_private_key_file are not provided, "+ "a self-signed certificate and key are generated for the public address and saved to /var/run/kubernetes.") fs.StringVar(&s.TLSPrivateKeyFile, "tls_private_key_file", s.TLSPrivateKeyFile, "File containing x509 private key matching --tls_cert_file.") + fs.StringVar(&s.CertDirectory, "cert_dir", s.CertDirectory, "The directory where the TLS certs are located (by default /var/run/kubernetes). "+ + "If --tls_cert_file and --tls_private_key_file are provided, this flag will be ignored.") fs.StringVar(&s.APIPrefix, "api_prefix", s.APIPrefix, "The prefix for API requests on the server. Default '/api'.") fs.StringVar(&s.StorageVersion, "storage_version", s.StorageVersion, "The version to store resources with. Defaults to server preferred") fs.StringVar(&s.CloudProvider, "cloud_provider", s.CloudProvider, "The provider for cloud services. 
Empty string for no provider.") @@ -368,8 +373,8 @@ func (s *APIServer) Run(_ []string) error { defer util.HandleCrash() for { if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" { - s.TLSCertFile = "/var/run/kubernetes/apiserver.crt" - s.TLSPrivateKeyFile = "/var/run/kubernetes/apiserver.key" + s.TLSCertFile = path.Join(s.CertDirectory, "apiserver.crt") + s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "apiserver.key") if err := util.GenerateSelfSignedCert(config.PublicAddress.String(), s.TLSCertFile, s.TLSPrivateKeyFile); err != nil { glog.Errorf("Unable to generate self signed cert: %v", err) } else { diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 060c26239c0..92e2ba8b281 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -29,6 +29,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd" + clientcmdapi "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/nodecontroller" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/servicecontroller" @@ -47,9 +49,9 @@ import ( type CMServer struct { Port int Address util.IP - ClientConfig client.Config CloudProvider string CloudConfigFile string + ConcurrentEndpointSyncs int MinionRegexp string NodeSyncPeriod time.Duration ResourceQuotaSyncPeriod time.Duration @@ -72,6 +74,9 @@ type CMServer struct { ClusterName string EnableProfiling bool + + Master string + Kubeconfig string } // NewCMServer creates a new CMServer with a default config. 
@@ -79,6 +84,7 @@ func NewCMServer() *CMServer { s := CMServer{ Port: ports.ControllerManagerPort, Address: util.IP(net.ParseIP("127.0.0.1")), + ConcurrentEndpointSyncs: 5, NodeSyncPeriod: 10 * time.Second, ResourceQuotaSyncPeriod: 10 * time.Second, NamespaceSyncPeriod: 5 * time.Minute, @@ -96,11 +102,9 @@ func NewCMServer() *CMServer { func (s *CMServer) AddFlags(fs *pflag.FlagSet) { fs.IntVar(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on") fs.Var(&s.Address, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces)") - s.ClientConfig.QPS = 20.0 - s.ClientConfig.Burst = 30 - client.BindClientConfigFlags(fs, &s.ClientConfig) fs.StringVar(&s.CloudProvider, "cloud_provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.") fs.StringVar(&s.CloudConfigFile, "cloud_config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.") + fs.IntVar(&s.ConcurrentEndpointSyncs, "concurrent_endpoint_syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load") fs.StringVar(&s.MinionRegexp, "minion_regexp", s.MinionRegexp, "If non empty, and --cloud_provider is specified, a regular expression for matching minion VMs.") fs.DurationVar(&s.NodeSyncPeriod, "node_sync_period", s.NodeSyncPeriod, ""+ "The period for syncing nodes from cloudprovider. 
Longer periods will result in "+ @@ -130,6 +134,8 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) { fs.Var(resource.NewQuantityFlagValue(&s.NodeMemory), "node_memory", "The amount of memory (in bytes) provisioned on each node") fs.StringVar(&s.ClusterName, "cluster_name", s.ClusterName, "The instance prefix for the cluster") fs.BoolVar(&s.EnableProfiling, "profiling", false, "Enable profiling via web interface host:port/debug/pprof/") + fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") + fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.") } func (s *CMServer) verifyMinionFlags() { @@ -151,11 +157,23 @@ func (s *CMServer) verifyMinionFlags() { func (s *CMServer) Run(_ []string) error { s.verifyMinionFlags() - if len(s.ClientConfig.Host) == 0 { - glog.Fatal("usage: controller-manager --master ") + if s.Kubeconfig == "" && s.Master == "" { + glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.") } - kubeClient, err := client.New(&s.ClientConfig) + // This creates a client, first loading any specified kubeconfig + // file, and then overriding the Master flag, if non-empty. 
+ kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig}, + &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig() + if err != nil { + return err + } + + kubeconfig.QPS = 20.0 + kubeconfig.Burst = 30 + + kubeClient, err := client.New(kubeconfig) if err != nil { glog.Fatalf("Invalid API configuration: %v", err) } @@ -171,7 +189,7 @@ func (s *CMServer) Run(_ []string) error { }() endpoints := service.NewEndpointController(kubeClient) - go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10) + go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop) controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient) controllerManager.Run(replicationControllerPkg.DefaultSyncPeriod) diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 8610486b09b..e497251d375 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -27,6 +27,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd" + clientcmdapi "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/proxy" "github.com/GoogleCloudPlatform/kubernetes/pkg/proxy/config" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" @@ -40,11 +42,12 @@ import ( // ProxyServer contains configures and runs a Kubernetes proxy server type ProxyServer struct { BindAddress util.IP - ClientConfig client.Config HealthzPort int HealthzBindAddress util.IP OOMScoreAdj int ResourceContainer string + Master string + Kubeconfig string } // NewProxyServer creates a new ProxyServer object with default parameters @@ -61,11 +64,12 @@ func NewProxyServer() *ProxyServer { // AddFlags adds flags for a specific ProxyServer to the specified FlagSet func (s 
*ProxyServer) AddFlags(fs *pflag.FlagSet) { fs.Var(&s.BindAddress, "bind_address", "The IP address for the proxy server to serve on (set to 0.0.0.0 for all interfaces)") - client.BindClientConfigFlags(fs, &s.ClientConfig) + fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)") fs.IntVar(&s.HealthzPort, "healthz_port", s.HealthzPort, "The port to bind the health check server. Use 0 to disable.") fs.Var(&s.HealthzBindAddress, "healthz_bind_address", "The IP address for the health check server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)") fs.IntVar(&s.OOMScoreAdj, "oom_score_adj", s.OOMScoreAdj, "The oom_score_adj value for kube-proxy process. Values must be within the range [-1000, 1000]") fs.StringVar(&s.ResourceContainer, "resource_container", s.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).") + fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.") } // Run runs the specified ProxyServer. This should never exit. @@ -105,21 +109,32 @@ func (s *ProxyServer) Run(_ []string) error { // are registered yet. // define api config source - if s.ClientConfig.Host != "" { - glog.Infof("Using API calls to get config %v", s.ClientConfig.Host) - client, err := client.New(&s.ClientConfig) - if err != nil { - glog.Fatalf("Invalid API configuration: %v", err) - } - config.NewSourceAPI( - client.Services(api.NamespaceAll), - client.Endpoints(api.NamespaceAll), - 30*time.Second, - serviceConfig.Channel("api"), - endpointsConfig.Channel("api"), - ) + if s.Kubeconfig == "" && s.Master == "" { + glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. 
This might not work.") } + // This creates a client, first loading any specified kubeconfig + // file, and then overriding the Master flag, if non-empty. + kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig}, + &clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig() + if err != nil { + return err + } + + client, err := client.New(kubeconfig) + if err != nil { + glog.Fatalf("Invalid API configuration: %v", err) + } + + config.NewSourceAPI( + client.Services(api.NamespaceAll), + client.Endpoints(api.NamespaceAll), + 30*time.Second, + serviceConfig.Channel("api"), + endpointsConfig.Channel("api"), + ) + if s.HealthzPort > 0 { go util.Forever(func() { err := http.ListenAndServe(s.HealthzBindAddress.String()+":"+strconv.Itoa(s.HealthzPort), nil) diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index b7891ec6449..a9c8d15bfbe 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -23,6 +23,7 @@ import ( "math/rand" "net" "net/http" + _ "net/http/pprof" "path" "strconv" "strings" @@ -168,7 +169,8 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) { "If --tls_cert_file and --tls_private_key_file are not provided, a self-signed certificate and key "+ "are generated for the public address and saved to the directory passed to --cert_dir.") fs.StringVar(&s.TLSPrivateKeyFile, "tls_private_key_file", s.TLSPrivateKeyFile, "File containing x509 private key matching --tls_cert_file.") - fs.StringVar(&s.CertDirectory, "cert_dir", s.CertDirectory, "The directory where the TLS certs are located (by default /var/run/kubernetes)") + fs.StringVar(&s.CertDirectory, "cert_dir", s.CertDirectory, "The directory where the TLS certs are located (by default /var/run/kubernetes). 
"+ + "If --tls_cert_file and --tls_private_key_file are provided, this flag will be ignored.") fs.StringVar(&s.HostnameOverride, "hostname_override", s.HostnameOverride, "If non-empty, will use this string as identification instead of the actual hostname.") fs.StringVar(&s.PodInfraContainerImage, "pod_infra_container_image", s.PodInfraContainerImage, "The image whose network/ipc namespaces containers in each pod will use.") fs.StringVar(&s.DockerEndpoint, "docker_endpoint", s.DockerEndpoint, "If non-empty, use this for the docker endpoint to communicate with") diff --git a/cmd/kubernetes/kubernetes.go b/cmd/kubernetes/kubernetes.go index 8ec247d9b30..65cd939b8c2 100644 --- a/cmd/kubernetes/kubernetes.go +++ b/cmd/kubernetes/kubernetes.go @@ -139,7 +139,7 @@ func runControllerManager(machineList []string, cl *client.Client, nodeMilliCPU, } endpoints := service.NewEndpointController(cl) - go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10) + go endpoints.Run(5, util.NeverStop) controllerManager := controller.NewReplicationManager(cl) controllerManager.Run(controller.DefaultSyncPeriod) diff --git a/contrib/completions/bash/kubectl b/contrib/completions/bash/kubectl index 238b289c036..5bf6c2c46e6 100644 --- a/contrib/completions/bash/kubectl +++ b/contrib/completions/bash/kubectl @@ -242,6 +242,7 @@ _kubectl_get() must_have_one_flag=() must_have_one_noun=() + must_have_one_noun+=("componentstatus") must_have_one_noun+=("endpoints") must_have_one_noun+=("event") must_have_one_noun+=("limitrange") @@ -272,6 +273,15 @@ _kubectl_describe() must_have_one_flag=() must_have_one_noun=() + must_have_one_noun+=("limitrange") + must_have_one_noun+=("minion") + must_have_one_noun+=("node") + must_have_one_noun+=("persistentvolume") + must_have_one_noun+=("persistentvolumeclaim") + must_have_one_noun+=("pod") + must_have_one_noun+=("replicationcontroller") + must_have_one_noun+=("resourcequota") + must_have_one_noun+=("service") } _kubectl_create() diff 
--git a/contrib/logging/fluentd-gcp/Makefile b/contrib/logging/fluentd-gcp/Makefile deleted file mode 100644 index 843817cacd7..00000000000 --- a/contrib/logging/fluentd-gcp/Makefile +++ /dev/null @@ -1,22 +0,0 @@ -# Makefile for a synthetic logger to be logged -# by GCP. The cluster must have been created with -# the variable LOGGING_DESTINATION=GCP. - -.PHONY: up down logger-up logger-down get - -KUBECTL=kubectl.sh - -up: logger-up - -down: logger-down - - -logger-up: - -${KUBECTL} create -f synthetic_0_25lps.yml - -logger-down: - -${KUBECTL} delete pods synthetic-logger-0.25lps-pod - -get: - ${KUBECTL} get pods - diff --git a/contrib/logging/fluentd-gcp/synthetic_0_25lps.yml b/contrib/logging/fluentd-gcp/synthetic_0_25lps.yml deleted file mode 100644 index bbd935141b5..00000000000 --- a/contrib/logging/fluentd-gcp/synthetic_0_25lps.yml +++ /dev/null @@ -1,29 +0,0 @@ -# This pod specification creates an instance of a synthetic logger. The logger -# is simply a program that writes out the hostname of the pod, a count which increments -# by one on each iteration (to help notice missing log enteries) and the date using -# a long format (RFC-3339) to nano-second precision. This program logs at a frequency -# of 0.25 lines per second. 
The shellscript program is given directly to bash as -c argument -# and could have been written out as: -# i="0" -# while true -# do -# echo -n "`hostname`: $i: " -# date --rfc-3339 ns -# sleep 4 -# i=$[$i+1] -# done - -apiVersion: v1beta1 -kind: Pod -id: synthetic-logger-0.25lps-pod -desiredState: - manifest: - version: v1beta1 - id: synth-logger-0.25lps - containers: - - name: synth-lgr - image: ubuntu:14.04 - command: ["bash", "-c", "i=\"0\"; while true; do echo -n \"`hostname`: $i: \"; date --rfc-3339 ns; sleep 4; i=$[$i+1]; done"] -labels: - name: synth-logging-source - \ No newline at end of file diff --git a/contrib/recipes/docs/rollingupdates_from_jenkins.md b/contrib/recipes/docs/rollingupdates_from_jenkins.md deleted file mode 100644 index 310aebb6682..00000000000 --- a/contrib/recipes/docs/rollingupdates_from_jenkins.md +++ /dev/null @@ -1,57 +0,0 @@ -###How To -For our example, Jenkins is set up to have one build step in bash: - -`Jenkins "Bash" build step` -``` - #!/bin/bash - cd $WORKSPACE - source bin/jenkins.sh - source bin/kube-rolling.sh -``` - -Our project's build script (`bin/jenkins.sh`), is followed by our new kube-rolling script. Jenkins already has `$BUILD_NUMBER` set, but we need a few other variables that are set in `jenkins.sh` that we reference in `kube-rolling.sh`: - -``` - DOCKER_IMAGE="path_webteam/public" - REGISTRY_LOCATION="dockerreg.web.local/" -``` - -Jenkins builds our container, tags it with the build number, and runs a couple rudimentary tests on it. On success, it pushes it to our private docker registry. Once the container is pushed, it then executes our rolling update script. 
- -`kube-rolling.sh` -``` - #!/bin/bash - # KUBERNETES_MASTER: Your Kubernetes API Server endpoint - # BINARY_LOCATION: Location of pre-compiled Binaries (We build our own, there are others available) - # CONTROLLER_NAME: Name of the replicationController you're looking to update - # RESET_INTERVAL: Interval between pod updates - - export KUBERNETES_MASTER="http://10.1.10.1:8080" - BINARY_LOCATION="https://build.web.local/kubernetes/" - CONTROLLER_NAME="public-frontend-controller" - RESET_INTERVAL="10s" - - echo "*** Time to push to Kubernetes!"; - - #Delete then graba kubecfg binary from a static location - rm kubecfg - wget $BINARY_LOCATION/kubecfg - - echo "*** Downloaded binary from $BINARY_LOCATION/kubecfg" - - chmod +x kubecfg - - # Update the controller with your new image! - echo "*** ./kubecfg -image \"$REGISTRY_LOCATION$DOCKER_IMAGE:$BUILD_NUMBER\" -u $RESET_INTERVAL rolling-update $CONTROLLER_NAME" - ./kubecfg -image "$REGISTRY_LOCATION$DOCKER_IMAGE:$BUILD_NUMBER" -u $RESET_INTERVAL rolling-update $CONTROLLER_NAME -``` - -Though basic, this implementation allows our Jenkins instance to push container updates to our Kubernetes cluster without much trouble. - -### Notes -When using a private docker registry as we are, the Jenkins slaves as well as the Kubernetes minions require the [.dockercfg](https://coreos.com/docs/launching-containers/building/customizing-docker/#using-a-dockercfg-file-for-authentication) file in order to function properly. - -### Questions -twitter @jeefy - -irc.freenode.net #kubernetes jeefy diff --git a/docs/availability.md b/docs/availability.md index 1295dccca90..6185a0bddf1 100644 --- a/docs/availability.md +++ b/docs/availability.md @@ -73,7 +73,7 @@ Mitigations: - Action: Multiple independent clusters (and avoid making risky changes to all clusters at once) - Mitigates: Everything listed above. 
-## Chosing Multiple Kubernetes Clusters +## Choosing Multiple Kubernetes Clusters You may want to set up multiple kubernetes clusters, both to have clusters in different regions to be nearer to your users; and to tolerate failures and/or invasive maintenance. @@ -120,8 +120,7 @@ then you need `R + U` clusters. If it is not (e.g you want to ensure low latenc cluster failure), then you need to have `R * U` clusters (`U` in each of `R` regions). In any case, try to put each cluster in a different zone. Finally, if any of your clusters would need more than the maximum recommended number of nodes for a Kubernetes cluster, then -you may need even more clusters. Our roadmap ( -https://github.com/GoogleCloudPlatform/kubernetes/blob/24e59de06e4da61f5dafd4cd84c9340a2c0d112f/docs/roadmap.md) +you may need even more clusters. Our [roadmap](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/roadmap.md) calls for maximum 100 node clusters at v1.0 and maximum 1000 node clusters in the middle of 2015. ## Working with multiple clusters @@ -129,4 +128,3 @@ calls for maximum 100 node clusters at v1.0 and maximum 1000 node clusters in th When you have multiple clusters, you would typically create services with the same config in each cluster and put each of those service instances behind a load balancer (AWS Elastic Load Balancer, GCE Forwarding Rule or HTTP Load Balancer), so that failures of a single cluster are not visible to end users. - diff --git a/docs/cluster_management.md b/docs/cluster_management.md index b9076877fc1..265f00f362f 100644 --- a/docs/cluster_management.md +++ b/docs/cluster_management.md @@ -46,11 +46,12 @@ If you want more control over the upgrading process, you may use the following w This keeps new pods from landing on the node while you are trying to get them off. 1. Get the pods off the machine, via any of the following strategies: 1. wait for finite-duration pods to complete - 1. 
for pods with a replication controller, delete the pod with `kubectl delete pods $PODNAME` - 1. for pods which are not replicated, bring up a new copy of the pod, and redirect clients to it. + 1. delete pods with `kubectl delete pods $PODNAME` + 1. for pods with a replication controller, the pod will eventually be replaced by a new pod which will be scheduled to a new node. additionally, if the pod is part of a service, then clients will automatically be redirected to the new pod. + 1. for pods with no replication controller, you need to bring up a new copy of the pod, and assuming it is not part of a service, redirect clients to it. 1. Work on the node 1. Make the node schedulable again: `kubectl update nodes $NODENAME --patch='{"apiVersion": "v1beta1", "unschedulable": false}'`. - Or, if you deleted the VM instance and created a new one, and are using `--sync_nodes=true` on the apiserver - (the default), then a new schedulable node resource will be created automatically when you create a new - VM instance. See [Node](node.md). + If you deleted the node's VM instance and created a new one, then a new schedulable node resource will + be created automatically when you create a new VM instance (if you're using a cloud provider that supports + node discovery; currently this is only GCE, not including CoreOS on GCE using kube-register). See [Node](node.md). diff --git a/docs/design/isolation_between_nodes_and_master.md b/docs/design/isolation_between_nodes_and_master.md deleted file mode 100644 index a91927d8f5b..00000000000 --- a/docs/design/isolation_between_nodes_and_master.md +++ /dev/null @@ -1,48 +0,0 @@ -# Design: Limit direct access to etcd from within Kubernetes - -All nodes have effective access of "root" on the entire Kubernetes cluster today because they have access to etcd, the central data store. The kubelet, the service proxy, and the nodes themselves have a connection to etcd that can be used to read or write any data in the system. 
In a cluster with many hosts, any container or user that gains the ability to write to the network device that can reach etcd, on any host, also gains that access. - -* The Kubelet and Kube Proxy currently rely on an efficient "wait for changes over HTTP" interface get their current state and avoid missing changes - * This interface is implemented by etcd as the "watch" operation on a given key containing useful data - - -## Options: - -1. Do nothing -2. Introduce an HTTP proxy that limits the ability of nodes to access etcd - 1. Prevent writes of data from the kubelet - 2. Prevent reading data not associated with the client responsibilities - 3. Introduce a security token granting access -3. Introduce an API on the apiserver that returns the data a node Kubelet and Kube Proxy needs - 1. Remove the ability of nodes to access etcd via network configuration - 2. Provide an alternate implementation for the event writing code Kubelet - 3. Implement efficient "watch for changes over HTTP" to offer comparable function with etcd - 4. Ensure that the apiserver can scale at or above the capacity of the etcd system. - 5. Implement authorization scoping for the nodes that limits the data they can view -4. Implement granular access control in etcd - 1. Authenticate HTTP clients with client certificates, tokens, or BASIC auth and authorize them for read only access - 2. Allow read access of certain subpaths based on what the requestor's tokens are - - -## Evaluation: - -Option 1 would be considered unacceptable for deployment in a multi-tenant or security conscious environment. It would be acceptable in a low security deployment where all software is trusted. It would be acceptable in proof of concept environments on a single machine. - -Option 2 would require implementing an http proxy that for 2-1 could block POST/PUT/DELETE requests (and potentially HTTP method tunneling parameters accepted by etcd). 
2-2 would be more complicated and would require filtering operations based on deep understanding of the etcd API *and* the underlying schema. It would be possible, but involve extra software. - -Option 3 would involve extending the existing apiserver to return pods associated with a given node over an HTTP "watch for changes" mechanism, which is already implemented. Proper security would involve checking that the caller is authorized to access that data - one imagines a per node token, key, or SSL certificate that could be used to authenticate and then authorize access to only the data belonging to that node. The current event publishing mechanism from the kubelet would also need to be replaced with a secure API endpoint or a change to a polling model. The apiserver would also need to be able to function in a horizontally scalable mode by changing or fixing the "operations" queue to work in a stateless, scalable model. In practice, the amount of traffic even a large Kubernetes deployment would drive towards an apiserver would be tens of requests per second (500 hosts, 1 request per host every minute) which is negligible if well implemented. Implementing this would also decouple the data store schema from the nodes, allowing a different data store technology to be added in the future without affecting existing nodes. This would also expose that data to other consumers for their own purposes (monitoring, implementing service discovery). - -Option 4 would involve extending etcd to [support access control](https://github.com/coreos/etcd/issues/91). Administrators would need to authorize nodes to connect to etcd, and expose network routability directly to etcd. The mechanism for handling this authentication and authorization would be different than the authorization used by Kubernetes controllers and API clients. It would not be possible to completely replace etcd as a data store without also implementing a new Kubelet config endpoint. 
- - -## Preferred solution: - -Implement the first parts of option 3 - an efficient watch API for the pod, service, and endpoints data for the Kubelet and Kube Proxy. Authorization and authentication are planned in the future - when a solution is available, implement a custom authorization scope that allows API access to be restricted to only the data about a single node or the service endpoint data. - -In general, option 4 is desirable in addition to option 3 as a mechanism to further secure the store to infrastructure components that must access it. - - -## Caveats - -In all four options, compromise of a host will allow an attacker to imitate that host. For attack vectors that are reproducible from inside containers (privilege escalation), an attacker can distribute himself to other hosts by requesting new containers be spun up. In scenario 1, the cluster is totally compromised immediately. In 2-1, the attacker can view all information about the cluster including keys or authorization data defined with pods. In 2-2 and 3, the attacker must still distribute himself in order to get access to a large subset of information, and cannot see other data that is potentially located in etcd like side storage or system configuration. For attack vectors that are not exploits, but instead allow network access to etcd, an attacker in 2ii has no ability to spread his influence, and is instead restricted to the subset of information on the host. For 3-5, they can do nothing they could not do already (request access to the nodes / services endpoint) because the token is not visible to them on the host. - diff --git a/docs/devel/developer-guides/vagrant.md b/docs/devel/developer-guides/vagrant.md index ab0ef274223..baf40b97022 100644 --- a/docs/devel/developer-guides/vagrant.md +++ b/docs/devel/developer-guides/vagrant.md @@ -4,42 +4,54 @@ Running kubernetes with Vagrant (and VirtualBox) is an easy way to run/test/deve ### Prerequisites 1. 
Install latest version >= 1.6.2 of vagrant from http://www.vagrantup.com/downloads.html -2. Install latest version of Virtual Box from https://www.virtualbox.org/wiki/Downloads +2. Install one of: + 1. The latest version of Virtual Box from https://www.virtualbox.org/wiki/Downloads + 2. [VMWare Fusion](https://www.vmware.com/products/fusion/) version 5 or greater as well as the appropriate [Vagrant VMWare Fusion provider](https://www.vagrantup.com/vmware) + 3. [VMWare Workstation](https://www.vmware.com/products/workstation/) version 9 or greater as well as the [Vagrant VMWare Workstation provider](https://www.vagrantup.com/vmware) + 4. [Parallels Desktop](https://www.parallels.com/products/desktop/) version 9 or greater as well as the [Vagrant Parallels provider](https://parallels.github.io/vagrant-parallels/) 3. Get or build a [binary release](../../getting-started-guides/binary_release.md) ### Setup By default, the Vagrant setup will create a single kubernetes-master and 1 kubernetes-minion. Each VM will take 1 GB, so make sure you have at least 2GB to 4GB of free memory (plus appropriate free disk space). To start your local cluster, open a shell and run: -``` +```sh cd kubernetes export KUBERNETES_PROVIDER=vagrant -cluster/kube-up.sh +./cluster/kube-up.sh ``` The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster management scripts which variant to use. If you forget to set this, the assumption is you are running on Google Compute Engine. +If you installed more than one Vagrant provider, Kubernetes will usually pick the appropriate one. However, you can override which one Kubernetes will use by setting the [`VAGRANT_DEFAULT_PROVIDER`](https://docs.vagrantup.com/v2/providers/default.html) environment variable: + +```sh +export VAGRANT_DEFAULT_PROVIDER=parallels +export KUBERNETES_PROVIDER=vagrant +./cluster/kube-up.sh +``` + Vagrant will provision each machine in the cluster with all the necessary components to run Kubernetes. 
The initial setup can take a few minutes to complete on each machine. By default, each VM in the cluster is running Fedora, and all of the Kubernetes services are installed into systemd. To access the master or any minion: -``` +```sh vagrant ssh master vagrant ssh minion-1 ``` If you are running more than one minion, you can access the others by: -``` +```sh vagrant ssh minion-2 vagrant ssh minion-3 ``` To view the service status and/or logs on the kubernetes-master: -``` +```sh vagrant ssh master [vagrant@kubernetes-master ~] $ sudo systemctl status kube-apiserver [vagrant@kubernetes-master ~] $ sudo journalctl -r -u kube-apiserver @@ -52,7 +64,7 @@ vagrant ssh master ``` To view the services on any of the kubernetes-minion(s): -``` +```sh vagrant ssh minion-1 [vagrant@kubernetes-minion-1] $ sudo systemctl status docker [vagrant@kubernetes-minion-1] $ sudo journalctl -r -u docker @@ -65,18 +77,18 @@ vagrant ssh minion-1 With your Kubernetes cluster up, you can manage the nodes in your cluster with the regular Vagrant commands. To push updates to new Kubernetes code after making source changes: -``` -cluster/kube-push.sh +```sh +./cluster/kube-push.sh ``` To stop and then restart the cluster: -``` +```sh vagrant halt -cluster/kube-up.sh +./cluster/kube-up.sh ``` To destroy the cluster: -``` +```sh vagrant destroy ``` @@ -84,14 +96,13 @@ Once your Vagrant machines are up and provisioned, the first thing to do is to c You may need to build the binaries first, you can do this with ```make``` -``` +```sh $ ./cluster/kubectl.sh get minions NAME LABELS 10.245.1.4 10.245.1.5 10.245.1.3 - ``` ### Interacting with your Kubernetes cluster with the `kube-*` scripts. 
@@ -100,39 +111,39 @@ Alternatively to using the vagrant commands, you can also use the `cluster/kube- All of these commands assume you have set `KUBERNETES_PROVIDER` appropriately: -``` +```sh export KUBERNETES_PROVIDER=vagrant ``` Bring up a vagrant cluster -``` -cluster/kube-up.sh +```sh +./cluster/kube-up.sh ``` Destroy the vagrant cluster -``` -cluster/kube-down.sh +```sh +./cluster/kube-down.sh ``` Update the vagrant cluster after you make changes (only works when building your own releases locally): -``` -cluster/kube-push.sh +```sh +./cluster/kube-push.sh ``` Interact with the cluster -``` -cluster/kubectl.sh +```sh +./cluster/kubectl.sh ``` ### Authenticating with your master When using the vagrant provider in Kubernetes, the `cluster/kubectl.sh` script will cache your credentials in a `~/.kubernetes_vagrant_auth` file so you will not be prompted for them in the future. -``` +```sh cat ~/.kubernetes_vagrant_auth { "User": "vagrant", "Password": "vagrant" @@ -144,22 +155,21 @@ cat ~/.kubernetes_vagrant_auth You should now be set to use the `cluster/kubectl.sh` script. For example try to list the minions that you have started with: -``` -cluster/kubectl.sh get minions +```sh +./cluster/kubectl.sh get minions ``` ### Running containers Your cluster is running, you can list the minions in your cluster: -``` -$ cluster/kubectl.sh get minions +```sh +$ ./cluster/kubectl.sh get minions NAME LABELS 10.245.2.4 10.245.2.3 10.245.2.2 - ``` Now start running some containers! 
@@ -196,7 +206,7 @@ NAME IMAGE(S) HOST You need to wait for the provisioning to complete, you can monitor the minions by doing: -``` +```sh $ sudo salt '*minion-1' cmd.run 'docker images' kubernetes-minion-1: REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE @@ -206,7 +216,7 @@ kubernetes-minion-1: Once the docker image for nginx has been downloaded, the container will start and you can list it: -``` +```sh $ sudo salt '*minion-1' cmd.run 'docker ps' kubernetes-minion-1: CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES @@ -235,9 +245,9 @@ We did not start any services, hence there are none listed. But we see three rep Check the [guestbook](../../examples/guestbook/README.md) application to learn how to create a service. You can already play with resizing the replicas with: -``` -$ cluster/kubectl.sh resize rc my-nginx --replicas=2 -$ cluster/kubectl.sh get pods +```sh +$ ./cluster/kubectl.sh resize rc my-nginx --replicas=2 +$ ./cluster/kubectl.sh get pods NAME IMAGE(S) HOST LABELS STATUS 7813c8bd-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.2/10.245.2.2 name=myNginx Running 78140853-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.3/10.245.2.3 name=myNginx Running @@ -247,9 +257,9 @@ Congratulations! ### Testing -The following will run all of the end-to-end testing scenarios assuming you set your environment in cluster/kube-env.sh +The following will run all of the end-to-end testing scenarios assuming you set your environment in `cluster/kube-env.sh`: -``` +```sh NUM_MINIONS=3 hack/e2e-test.sh ``` @@ -257,26 +267,26 @@ NUM_MINIONS=3 hack/e2e-test.sh #### I keep downloading the same (large) box all the time! -By default the Vagrantfile will download the box from S3. You can change this (and cache the box locally) by providing an alternate URL when calling `kube-up.sh` +By default the Vagrantfile will download the box from S3. 
You can change this (and cache the box locally) by providing a name and an alternate URL when calling `kube-up.sh` -```bash +```sh +export KUBERNETES_BOX_NAME=choose_your_own_name_for_your_kuber_box export KUBERNETES_BOX_URL=path_of_your_kuber_box export KUBERNETES_PROVIDER=vagrant -cluster/kube-up.sh +./cluster/kube-up.sh ``` - #### I just created the cluster, but I am getting authorization errors! You probably have an incorrect ~/.kubernetes_vagrant_auth file for the cluster you are attempting to contact. -``` +```sh rm ~/.kubernetes_vagrant_auth ``` After using kubectl.sh make sure that the correct credentials are set: -``` +```sh cat ~/.kubernetes_vagrant_auth { "User": "vagrant", @@ -284,35 +294,42 @@ cat ~/.kubernetes_vagrant_auth } ``` -#### I just created the cluster, but I do not see my container running ! +#### I just created the cluster, but I do not see my container running! If this is your first time creating the cluster, the kubelet on each minion schedules a number of docker pull requests to fetch prerequisite images. This can take some time and as a result may delay your initial pod getting provisioned. -#### I changed Kubernetes code, but it's not running ! +#### I changed Kubernetes code, but it's not running! Are you sure there was no build error? After running `$ vagrant provision`, scroll up and ensure that each Salt state was completed successfully on each box in the cluster. It's very likely you see a build error due to an error in your source files! -#### I have brought Vagrant up but the minions won't validate ! +#### I have brought Vagrant up but the minions won't validate! Are you sure you built a release first? Did you install `net-tools`? For more clues, login to one of the minions (`vagrant ssh minion-1`) and inspect the salt minion log (`sudo cat /var/log/salt/minion`). -#### I want to change the number of minions ! +#### I want to change the number of minions! 
You can control the number of minions that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough minions to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single minion. You do this, by setting `NUM_MINIONS` to 1 like so: -``` +```sh export NUM_MINIONS=1 ``` -#### I want my VMs to have more memory ! +#### I want my VMs to have more memory! You can control the memory allotted to virtual machines with the `KUBERNETES_MEMORY` environment variable. Just set it to the number of megabytes you would like the machines to have. For example: -``` +```sh export KUBERNETES_MEMORY=2048 ``` +If you need more granular control, you can set the amount of memory for the master and minions independently. For example: + +```sh +export KUBERNETES_MASTER_MEMORY=1536 +export KUBERNETES_MINION_MEMORY=2048 +``` + #### I ran vagrant suspend and nothing works! ```vagrant suspend``` seems to mess up the network. It's not supported at this time. diff --git a/docs/getting-started-guides/README.md b/docs/getting-started-guides/README.md index b9fc19cdaa9..fc1da9df561 100644 --- a/docs/getting-started-guides/README.md +++ b/docs/getting-started-guides/README.md @@ -9,8 +9,8 @@ If you are considering contributing a new guide, please read the IaaS Provider | Config. 
Mgmt | OS | Networking | Docs | Support Level | Notes -------------- | ------------ | ------ | ---------- | ---------------------------------------------------- | ---------------------------- | ----- -GKE | | | GCE | [docs](https://cloud.google.com/container-engine) | Commercial | Uses K8s version 0.14.1 -GCE | Saltstack | Debian | GCE | [docs](../../docs/getting-started-guides/gce.md) | Project | Tested with 0.14.1 by @brendandburns +GKE | | | GCE | [docs](https://cloud.google.com/container-engine) | Commercial | Uses K8s version 0.15.0 +GCE | Saltstack | Debian | GCE | [docs](../../docs/getting-started-guides/gce.md) | Project | Tested with 0.15.0 by @robertbailey Mesos/GCE | | | | [docs](../../docs/getting-started-guides/mesos.md) | [Community](https://github.com/mesosphere/kubernetes-mesos) ([@jdef](https://github.com/jdef)) | Uses K8s v0.11.0 Vagrant | Saltstack | Fedora | OVS | [docs](../../docs/getting-started-guides/vagrant.md) | Project | Bare-metal | custom | Fedora | _none_ | [docs](../../docs/getting-started-guides/fedora/fedora_manual_config.md) | Project | Uses K8s v0.13.2 @@ -29,13 +29,12 @@ Docker Single Node | custom | N/A | local | [docs](docker. 
Docker Multi Node | Flannel| N/A | local | [docs](docker-multinode.md) | Project (@brendandburns) | Tested @ 0.14.1 | Local | | | _none_ | [docs](../../docs/getting-started-guides/locally.md) | Community (@preillyme) | Ovirt | | | | [docs](../../docs/getting-started-guides/ovirt.md) | Inactive (@simon3z) | -Rackspace | CoreOS | CoreOS | Rackspace | [docs](../../docs/getting-started-guides/rackspace.md) | Inactive (@doubleerr) | Bare-metal | custom | CentOS | _none_ | [docs](../../docs/getting-started-guides/centos/centos_manual_config.md) | Community(@coolsvap) | Uses K8s v0.9.1 libvirt/KVM | CoreOS | CoreOS | libvirt/KVM | [docs](../../docs/getting-started-guides/libvirt-coreos.md) | Community (@lhuard1A) | AWS | Juju | Ubuntu | flannel | [docs](../../docs/getting-started-guides/juju.md) | [Community](https://github.com/whitmo/bundle-kubernetes) ( [@whit](https://github.com/whitmo), [@matt](https://github.com/mbruzek), [@chuck](https://github.com/chuckbutler) ) | [Tested](http://reports.vapour.ws/charm-tests-by-charm/kubernetes) K8s v0.8.1 OpenStack/HPCloud | Juju | Ubuntu | flannel | [docs](../../docs/getting-started-guides/juju.md) | [Community](https://github.com/whitmo/bundle-kubernetes) ( [@whit](https://github.com/whitmo), [@matt](https://github.com/mbruzek), [@chuck](https://github.com/chuckbutler) ) | [Tested](http://reports.vapour.ws/charm-tests-by-charm/kubernetes) K8s v0.8.1 Joyent | Juju | Ubuntu | flannel | [docs](../../docs/getting-started-guides/juju.md) | [Community](https://github.com/whitmo/bundle-kubernetes) ( [@whit](https://github.com/whitmo), [@matt](https://github.com/mbruzek), [@chuck](https://github.com/chuckbutler) ) | [Tested](http://reports.vapour.ws/charm-tests-by-charm/kubernetes) K8s v0.8.1 -Azure | CoreOS | CoreOS | Weave | [docs](../../docs/getting-started-guides/coreos/azure/README.md) | Community ([@errordeveloper](https://github.com/errordeveloper), [@squillace](https://github.com/squillace), 
[@chanezon](https://github.com/chanezon)) | Uses K8s version 0.11.0 +Azure | CoreOS | CoreOS | Weave | [docs](../../docs/getting-started-guides/coreos/azure/README.md) | Community ([@errordeveloper](https://github.com/errordeveloper), [@squillace](https://github.com/squillace), [@chanezon](https://github.com/chanezon), [@crossorigin](https://github.com/crossorigin)) | Uses K8s version 0.15.0 Bare-metal (Offline) | CoreOS | CoreOS | flannel | [docs](../../docs/getting-started-guides/coreos/bare_metal_offline.md) | Community([@jeffbean](https://github.com/jeffbean)) | K8s v0.10.1 Definition of columns: diff --git a/docs/getting-started-guides/aws.md b/docs/getting-started-guides/aws.md index dd165219a9f..2e1cb10655b 100644 --- a/docs/getting-started-guides/aws.md +++ b/docs/getting-started-guides/aws.md @@ -19,10 +19,12 @@ or if you prefer ```curl``` export KUBERNETES_PROVIDER=aws; curl -sS https://get.k8s.io | bash ``` - -NOTE: The script will provision a new VPC and a 4 node k8s cluster in us-west-2 (Oregon). It'll also try to create or -reuse a keypair called "kubernetes", and IAM profiles called "kubernetes-master" and "kubernetes-minion". If these -already exist, make sure you want them to be used here. +NOTE: This script calls [cluster/kube-up.sh](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/cluster/kube-up.sh) +which in turn calls [cluster/aws/util.sh](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/cluster/aws/util.sh) +using [cluster/aws/config-default.sh](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/cluster/aws/config-default.sh). +By default, the script will provision a new VPC and a 4 node k8s cluster in us-west-2 (Oregon). It'll also try to create or reuse +a keypair called "kubernetes", and IAM profiles called "kubernetes-master" and "kubernetes-minion". If these already exist, make +sure you want them to be used here. You can override the variables defined in config-default.sh to change this behavior. 
Once the cluster is up, it will print the ip address of your cluster, this process takes about 5 to 10 minutes. @@ -134,3 +136,6 @@ Take a look at [next steps](https://github.com/GoogleCloudPlatform/kubernetes/tr ### Cloud Formation [optional] There is a contributed [example](aws-coreos.md) from [CoreOS](http://www.coreos.com) using Cloud Formation. + +### Further reading +Please see the [Kubernetes docs](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs) for more details on administering and using a Kubernetes cluster. diff --git a/docs/getting-started-guides/aws/cloud-configs/node.yaml b/docs/getting-started-guides/aws/cloud-configs/node.yaml index e21959fda06..fa46b9ab038 100644 --- a/docs/getting-started-guides/aws/cloud-configs/node.yaml +++ b/docs/getting-started-guides/aws/cloud-configs/node.yaml @@ -19,7 +19,7 @@ coreos: content: | [Service] ExecStartPre=/bin/bash -c "until curl http://:4001/v2/machines; do sleep 2; done" - ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{"Network":"10.244.0.0/16", "Backend": {"Type": "vxlan"}}' + ExecStartPre=/usr/bin/etcdctl -C :4001 set /coreos.com/network/config '{"Network":"10.244.0.0/16", "Backend": {"Type": "vxlan"}}' - name: docker.service command: start drop-ins: diff --git a/docs/getting-started-guides/coreos/azure/README.md b/docs/getting-started-guides/coreos/azure/README.md index e955535a919..01ce4195ea7 100644 --- a/docs/getting-started-guides/coreos/azure/README.md +++ b/docs/getting-started-guides/coreos/azure/README.md @@ -68,7 +68,7 @@ kubectl create -f frontend-controller.json kubectl create -f frontend-service.json ``` -You need to wait for the pods to get deployed, run the following and wait for `STATUS` to change from `Unknown`, through `Pending` to `Runnig`. +You need to wait for the pods to get deployed, run the following and wait for `STATUS` to change from `Unknown`, through `Pending` to `Running`. 
``` kubectl get pods --watch ``` diff --git a/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml b/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml index 85a7f70a901..f6e9fcda92a 100644 --- a/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml +++ b/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-etcd-node-template.yml @@ -2,16 +2,48 @@ ## More specifically, we need to add peer hosts for each but the elected peer. coreos: - etcd: - name: etcd - addr: $private_ipv4:4001 - bind-addr: 0.0.0.0 - peer-addr: $private_ipv4:7001 - snapshot: true - max-retry-attempts: 50 units: - - name: etcd.service + - name: download-etcd2.service + enable: true command: start + content: | + [Unit] + After=network-online.target + Before=etcd2.service + Description=Download etcd2 Binaries + Documentation=https://github.com/coreos/etcd/ + Requires=network-online.target + [Service] + Environment=ETCD2_RELEASE_TARBALL=https://github.com/coreos/etcd/releases/download/v2.0.9/etcd-v2.0.9-linux-amd64.tar.gz + ExecStartPre=/bin/mkdir -p /opt/bin + ExecStart=/bin/bash -c "curl --silent --location $ETCD2_RELEASE_TARBALL | tar xzv -C /opt" + ExecStartPost=/bin/ln -s /opt/etcd-v2.0.9-linux-amd64/etcd /opt/bin/etcd2 + ExecStartPost=/bin/ln -s /opt/etcd-v2.0.9-linux-amd64/etcdctl /opt/bin/etcdctl2 + RemainAfterExit=yes + Type=oneshot + [Install] + WantedBy=multi-user.target + - name: etcd2.service + enable: true + command: start + content: | + [Unit] + After=download-etcd2.service + Description=etcd 2 + Documentation=https://github.com/coreos/etcd/ + [Service] + Environment=ETCD_NAME=%host% + Environment=ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster + Environment=ETCD_INITIAL_ADVERTISE_PEER_URLS=http://%host%:2380 + Environment=ETCD_LISTEN_PEER_URLS=http://%host%:2380 + 
Environment=ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001 + Environment=ETCD_INITIAL_CLUSTER=%cluster% + Environment=ETCD_INITIAL_CLUSTER_STATE=new + ExecStart=/opt/bin/etcd2 + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target update: group: stable reboot-strategy: off diff --git a/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml b/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml index f0c8f335e7d..7c9a4d3cba7 100644 --- a/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml +++ b/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml @@ -18,9 +18,37 @@ write_files: printf '{ "id": "%s", "kind": "Minion", "apiVersion": "v1beta1", "labels": { "environment": "production" } }' "${minion_id}" \ | /opt/bin/kubectl create -s "${master_url}" -f - + - path: /etc/kubernetes/manifests/fluentd.manifest + permissions: '0755' + owner: root + content: | + version: v1beta2 + id: fluentd-to-elasticsearch + containers: + - name: fluentd-es + image: gcr.io/google_containers/fluentd-elasticsearch:1.3 + env: + - name: FLUENTD_ARGS + value: -qq + volumeMounts: + - name: containers + mountPath: /var/lib/docker/containers + - name: varlog + mountPath: /varlog + volumes: + - name: containers + source: + hostDir: + path: /var/lib/docker/containers + - name: varlog + source: + hostDir: + path: /var/log + coreos: update: group: stable + reboot-strategy: off units: - name: docker.service drop-ins: @@ -187,7 +215,7 @@ coreos: Documentation=https://github.com/GoogleCloudPlatform/kubernetes Requires=network-online.target [Service] - Environment=KUBE_RELEASE_TARBALL=https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v0.11.0/kubernetes.tar.gz + 
Environment=KUBE_RELEASE_TARBALL=https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v0.15.0/kubernetes.tar.gz ExecStartPre=/bin/mkdir -p /opt/ ExecStart=/bin/bash -c "curl --silent --location $KUBE_RELEASE_TARBALL | tar xzv -C /tmp/" ExecStart=/bin/tar xzvf /tmp/kubernetes/server/kubernetes-server-linux-amd64.tar.gz -C /opt @@ -278,12 +306,16 @@ coreos: Wants=download-kubernetes.service ConditionHost=!kube-00 [Service] + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests/ ExecStart=/opt/kubernetes/server/bin/kubelet \ --address=0.0.0.0 \ --port=10250 \ --hostname_override=%H \ --api_servers=http://kube-00:8080 \ - --logtostderr=true + --logtostderr=true \ + --cluster_dns=10.1.0.3 \ + --cluster_domain=kube.local \ + --config=/etc/kubernetes/manifests/ Restart=always RestartSec=10 [Install] diff --git a/docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js b/docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js index 9468add14a1..e4d6642fc13 100644 --- a/docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js +++ b/docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js @@ -13,9 +13,9 @@ var inspect = require('util').inspect; var util = require('./util.js'); var coreos_image_ids = { - 'stable': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Stable-607.0.0', - 'beta': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Beta-612.1.0', // untested - 'alpha': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Alpha-626.0.0', // untested + 'stable': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Stable-633.1.0', + 'beta': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Beta-647.0.0', // untested + 'alpha': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Alpha-647.0.0' // untested }; var conf = {}; @@ -140,7 +140,9 @@ var create_ssh_conf = function () { }; var get_location = function () { - if (process.env['AZ_LOCATION']) { + if (process.env['AZ_AFFINITY']) { + return '--affinity-group=' + process.env['AZ_AFFINITY']; + } else if (process.env['AZ_LOCATION']) { return 
'--location=' + process.env['AZ_LOCATION']; } else { return '--location=West Europe'; diff --git a/docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js b/docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js index 6fc3eb3fac3..112d30570e9 100644 --- a/docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js +++ b/docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js @@ -1,22 +1,29 @@ var _ = require('underscore'); +_.mixin(require('underscore.string').exports()); var util = require('../util.js'); var cloud_config = require('../cloud_config.js'); exports.create_etcd_cloud_config = function (node_count, conf) { - var elected_node = 0; - var input_file = './cloud_config_templates/kubernetes-cluster-etcd-node-template.yml'; + var peers = [ ]; + for (var i = 0; i < node_count; i++) { + peers.push(util.hostname(i, 'etcd') + '=http://' + util.hostname(i, 'etcd') + ':2380'); + } + var cluster = peers.join(','); + return _(node_count).times(function (n) { var output_file = util.join_output_file_path('kubernetes-cluster-etcd-node-' + n, 'generated.yml'); return cloud_config.process_template(input_file, output_file, function(data) { - if (n !== elected_node) { - data.coreos.etcd.peers = [ - util.hostname(elected_node, 'etcd'), 7001 - ].join(':'); + for (var i = 0; i < data.coreos.units.length; i++) { + var unit = data.coreos.units[i]; + if (unit.name === 'etcd2.service') { + unit.content = _.replaceAll(_.replaceAll(unit.content, '%host%', util.hostname(n, 'etcd')), '%cluster%', cluster); + break; + } } return data; }); diff --git a/docs/getting-started-guides/docker-multinode/master.md b/docs/getting-started-guides/docker-multinode/master.md index 8f8510f399c..8c084fcd85b 100644 --- a/docs/getting-started-guides/docker-multinode/master.md +++ b/docs/getting-started-guides/docker-multinode/master.md @@ -108,20 +108,20 @@ systemctl start docker Ok, now that your networking is set up, you can 
startup Kubernetes, this is the same as the single-node case, we will use the "main" instance of the Docker daemon for the Kubernetes components. ```sh -sudo docker run --net=host -d -v /var/run/docker.sock:/var/run/docker.sock gcr.io/google_containers/hyperkube:v0.14.2 /hyperkube kubelet --api_servers=http://localhost:8080 --v=2 --address=0.0.0.0 --enable_server --hostname_override=127.0.0.1 --config=/etc/kubernetes/manifests-multi +sudo docker run --net=host -d -v /var/run/docker.sock:/var/run/docker.sock gcr.io/google_containers/hyperkube:v0.15.0 /hyperkube kubelet --api_servers=http://localhost:8080 --v=2 --address=0.0.0.0 --enable_server --hostname_override=127.0.0.1 --config=/etc/kubernetes/manifests-multi ``` ### Also run the service proxy ```sh -sudo docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v0.14.2 /hyperkube proxy --master=http://127.0.0.1:8080 --v=2 +sudo docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v0.15.0 /hyperkube proxy --master=http://127.0.0.1:8080 --v=2 ``` ### Test it out At this point, you should have a functioning 1-node cluster. Let's test it out! 
Download the kubectl binary -([OS X](http://storage.googleapis.com/kubernetes-release/release/v0.14.2/bin/darwin/amd64/kubectl)) -([linux](http://storage.googleapis.com/kubernetes-release/release/v0.14.2/bin/linux/amd64/kubectl)) +([OS X](http://storage.googleapis.com/kubernetes-release/release/v0.15.0/bin/darwin/amd64/kubectl)) +([linux](http://storage.googleapis.com/kubernetes-release/release/v0.15.0/bin/linux/amd64/kubectl)) List the nodes diff --git a/docs/getting-started-guides/docker-multinode/worker.md b/docs/getting-started-guides/docker-multinode/worker.md index 88e608f2a3c..3ec57d241b5 100644 --- a/docs/getting-started-guides/docker-multinode/worker.md +++ b/docs/getting-started-guides/docker-multinode/worker.md @@ -93,14 +93,14 @@ systemctl start docker Again this is similar to the above, but the ```--api_servers``` now points to the master we set up in the beginning. ```sh -sudo docker run --net=host -d -v /var/run/docker.sock:/var/run/docker.sock gcr.io/google_containers/hyperkube:v0.14.2 /hyperkube kubelet --api_servers=http://${MASTER_IP}:8080 --v=2 --address=0.0.0.0 --enable_server --hostname_override=$(hostname -i) +sudo docker run --net=host -d -v /var/run/docker.sock:/var/run/docker.sock gcr.io/google_containers/hyperkube:v0.15.0 /hyperkube kubelet --api_servers=http://${MASTER_IP}:8080 --v=2 --address=0.0.0.0 --enable_server --hostname_override=$(hostname -i) ``` #### Run the service proxy The service proxy provides load-balancing between groups of containers defined by Kubernetes ```Services``` ```sh -sudo docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v0.14.2 /hyperkube proxy --master=http://${MASTER_IP}:8080 --v=2 +sudo docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v0.15.0 /hyperkube proxy --master=http://${MASTER_IP}:8080 --v=2 ``` diff --git a/docs/getting-started-guides/docker.md b/docs/getting-started-guides/docker.md index 46824c8bfeb..9a091a23b62 100644 --- 
a/docs/getting-started-guides/docker.md +++ b/docs/getting-started-guides/docker.md @@ -12,7 +12,7 @@ docker run --net=host -d kubernetes/etcd:2.0.5.1 /usr/local/bin/etcd --addr=127. ### Step Two: Run the master ```sh -docker run --net=host -d -v /var/run/docker.sock:/var/run/docker.sock gcr.io/google_containers/hyperkube:v0.14.2 /hyperkube kubelet --api_servers=http://localhost:8080 --v=2 --address=0.0.0.0 --enable_server --hostname_override=127.0.0.1 --config=/etc/kubernetes/manifests +docker run --net=host -d -v /var/run/docker.sock:/var/run/docker.sock gcr.io/google_containers/hyperkube:v0.15.0 /hyperkube kubelet --api_servers=http://localhost:8080 --v=2 --address=0.0.0.0 --enable_server --hostname_override=127.0.0.1 --config=/etc/kubernetes/manifests ``` This actually runs the kubelet, which in turn runs a [pod](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/pods.md) that contains the other master components. @@ -20,14 +20,14 @@ This actually runs the kubelet, which in turn runs a [pod](https://github.com/Go ### Step Three: Run the service proxy *Note, this could be combined with master above, but it requires --privileged for iptables manipulation* ```sh -docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v0.14.2 /hyperkube proxy --master=http://127.0.0.1:8080 --v=2 +docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v0.15.0 /hyperkube proxy --master=http://127.0.0.1:8080 --v=2 ``` ### Test it out At this point you should have a running kubernetes cluster. 
You can test this by downloading the kubectl binary -([OS X](http://storage.googleapis.com/kubernetes-release/release/v0.14.2/bin/darwin/amd64/kubectl)) -([linux](http://storage.googleapis.com/kubernetes-release/release/v0.14.2/bin/linux/amd64/kubectl)) +([OS X](http://storage.googleapis.com/kubernetes-release/release/v0.15.0/bin/darwin/amd64/kubectl)) +([linux](http://storage.googleapis.com/kubernetes-release/release/v0.15.0/bin/linux/amd64/kubectl)) *Note:* On OS/X you will need to set up port forwarding via ssh: diff --git a/docs/getting-started-guides/mesos.md b/docs/getting-started-guides/mesos.md index 2f212ce8553..21aa80eb80a 100644 --- a/docs/getting-started-guides/mesos.md +++ b/docs/getting-started-guides/mesos.md @@ -47,7 +47,7 @@ $ export KUBERNETES_MASTER=http://${servicehost}:8888 Start etcd and verify that it is running: ```bash -$ sudo docker run -d --hostname $(hostname -f) --name etcd -p 4001:4001 -p 7001:7001 coreos/etcd +$ sudo docker run -d --hostname $(uname -n) --name etcd -p 4001:4001 -p 7001:7001 coreos/etcd ``` ```bash diff --git a/docs/getting-started-guides/rackspace.md b/docs/getting-started-guides/rackspace.md index 352b46ebaf5..aaefb8b2c73 100644 --- a/docs/getting-started-guides/rackspace.md +++ b/docs/getting-started-guides/rackspace.md @@ -1,3 +1,7 @@ +# Status: Out Of Date + +** Rackspace support is out of date. Please check back later ** + # Rackspace In general, the dev-build-and-up.sh workflow for Rackspace is the similar to GCE. The specific implementation is different due to the use of CoreOS, Rackspace Cloud Files and network design. diff --git a/docs/getting-started-guides/vagrant.md b/docs/getting-started-guides/vagrant.md index a9d345a714f..3f3c6f341f0 100644 --- a/docs/getting-started-guides/vagrant.md +++ b/docs/getting-started-guides/vagrant.md @@ -4,13 +4,17 @@ Running kubernetes with Vagrant (and VirtualBox) is an easy way to run/test/deve ### Prerequisites 1. 
Install latest version >= 1.6.2 of vagrant from http://www.vagrantup.com/downloads.html -2. Install latest version of Virtual Box from https://www.virtualbox.org/wiki/Downloads +2. Install one of: + 1. The latest version of Virtual Box from https://www.virtualbox.org/wiki/Downloads + 2. [VMWare Fusion](https://www.vmware.com/products/fusion/) version 5 or greater as well as the appropriate [Vagrant VMWare Fusion provider](https://www.vagrantup.com/vmware) + 3. [VMWare Workstation](https://www.vmware.com/products/workstation/) version 9 or greater as well as the [Vagrant VMWare Workstation provider](https://www.vagrantup.com/vmware) + 4. [Parallels Desktop](https://www.parallels.com/products/desktop/) version 9 or greater as well as the [Vagrant Parallels provider](https://parallels.github.io/vagrant-parallels/) ### Setup Setting up a cluster is as simple as running: -``` +```sh export KUBERNETES_PROVIDER=vagrant curl -sS https://get.k8s.io | bash ``` @@ -19,33 +23,41 @@ The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster By default, the Vagrant setup will create a single kubernetes-master and 1 kubernetes-minion. Each VM will take 1 GB, so make sure you have at least 2GB to 4GB of free memory (plus appropriate free disk space). To start your local cluster, open a shell and run: -``` +```sh cd kubernetes export KUBERNETES_PROVIDER=vagrant -cluster/kube-up.sh +./cluster/kube-up.sh ``` Vagrant will provision each machine in the cluster with all the necessary components to run Kubernetes. The initial setup can take a few minutes to complete on each machine. +If you installed more than one Vagrant provider, Kubernetes will usually pick the appropriate one. 
However, you can override which one Kubernetes will use by setting the [`VAGRANT_DEFAULT_PROVIDER`](https://docs.vagrantup.com/v2/providers/default.html) environment variable: + +```sh +export VAGRANT_DEFAULT_PROVIDER=parallels +export KUBERNETES_PROVIDER=vagrant +./cluster/kube-up.sh +``` + By default, each VM in the cluster is running Fedora, and all of the Kubernetes services are installed into systemd. To access the master or any minion: -``` +```sh vagrant ssh master vagrant ssh minion-1 ``` If you are running more than one minion, you can access the others by: -``` +```sh vagrant ssh minion-2 vagrant ssh minion-3 ``` To view the service status and/or logs on the kubernetes-master: -``` +```sh vagrant ssh master [vagrant@kubernetes-master ~] $ sudo systemctl status kube-apiserver [vagrant@kubernetes-master ~] $ sudo journalctl -r -u kube-apiserver @@ -58,7 +70,7 @@ vagrant ssh master ``` To view the services on any of the kubernetes-minion(s): -``` +```sh vagrant ssh minion-1 [vagrant@kubernetes-minion-1] $ sudo systemctl status docker [vagrant@kubernetes-minion-1] $ sudo journalctl -r -u docker @@ -71,18 +83,18 @@ vagrant ssh minion-1 With your Kubernetes cluster up, you can manage the nodes in your cluster with the regular Vagrant commands. To push updates to new Kubernetes code after making source changes: -``` -cluster/kube-push.sh +```sh +./cluster/kube-push.sh ``` To stop and then restart the cluster: -``` +```sh vagrant halt -cluster/kube-up.sh +./cluster/kube-up.sh ``` To destroy the cluster: -``` +```sh vagrant destroy ``` @@ -90,14 +102,13 @@ Once your Vagrant machines are up and provisioned, the first thing to do is to c You may need to build the binaries first, you can do this with ```make``` -``` +```sh $ ./cluster/kubectl.sh get minions NAME LABELS 10.245.1.4 10.245.1.5 10.245.1.3 - ``` ### Interacting with your Kubernetes cluster with the `kube-*` scripts. 
@@ -106,39 +117,39 @@ Alternatively to using the vagrant commands, you can also use the `cluster/kube- All of these commands assume you have set `KUBERNETES_PROVIDER` appropriately: -``` +```sh export KUBERNETES_PROVIDER=vagrant ``` Bring up a vagrant cluster -``` -cluster/kube-up.sh +```sh +./cluster/kube-up.sh ``` Destroy the vagrant cluster -``` -cluster/kube-down.sh +```sh +./cluster/kube-down.sh ``` Update the vagrant cluster after you make changes (only works when building your own releases locally): -``` -cluster/kube-push.sh +```sh +./cluster/kube-push.sh ``` Interact with the cluster -``` -cluster/kubectl.sh +```sh +./cluster/kubectl.sh ``` ### Authenticating with your master When using the vagrant provider in Kubernetes, the `cluster/kubectl.sh` script will cache your credentials in a `~/.kubernetes_vagrant_auth` file so you will not be prompted for them in the future. -``` +```sh cat ~/.kubernetes_vagrant_auth { "User": "vagrant", "Password": "vagrant" @@ -150,50 +161,49 @@ cat ~/.kubernetes_vagrant_auth You should now be set to use the `cluster/kubectl.sh` script. For example try to list the minions that you have started with: -``` -cluster/kubectl.sh get minions +```sh +./cluster/kubectl.sh get minions ``` ### Running containers Your cluster is running, you can list the minions in your cluster: -``` -$ cluster/kubectl.sh get minions +```sh +$ ./cluster/kubectl.sh get minions NAME LABELS 10.245.2.4 10.245.2.3 10.245.2.2 - ``` Now start running some containers! -You can now use any of the cluster/kube-*.sh commands to interact with your VM machines. +You can now use any of the `cluster/kube-*.sh` commands to interact with your VM machines. Before starting a container there will be no pods, services and replication controllers. 
-``` -$ cluster/kubectl.sh get pods +```sh +$ ./cluster/kubectl.sh get pods NAME IMAGE(S) HOST LABELS STATUS -$ cluster/kubectl.sh get services +$ ./cluster/kubectl.sh get services NAME LABELS SELECTOR IP PORT -$ cluster/kubectl.sh get replicationControllers +$ ./cluster/kubectl.sh get replicationControllers NAME IMAGE(S SELECTOR REPLICAS ``` Start a container running nginx with a replication controller and three replicas -``` -$ cluster/kubectl.sh run-container my-nginx --image=nginx --replicas=3 --port=80 +```sh +$ ./cluster/kubectl.sh run-container my-nginx --image=nginx --replicas=3 --port=80 ``` When listing the pods, you will see that three containers have been started and are in Waiting state: -``` -$ cluster/kubectl.sh get pods +```sh +$ ./cluster/kubectl.sh get pods NAME IMAGE(S) HOST LABELS STATUS 781191ff-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.4/10.245.2.4 name=myNginx Waiting 7813c8bd-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.2/10.245.2.2 name=myNginx Waiting @@ -202,7 +212,7 @@ NAME IMAGE(S) HOST You need to wait for the provisioning to complete, you can monitor the minions by doing: -``` +```sh $ sudo salt '*minion-1' cmd.run 'docker images' kubernetes-minion-1: REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE @@ -213,7 +223,7 @@ kubernetes-minion-1: Once the docker image for nginx has been downloaded, the container will start and you can list it: -``` +```sh $ sudo salt '*minion-1' cmd.run 'docker ps' kubernetes-minion-1: CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES @@ -225,17 +235,17 @@ kubernetes-minion-1: Going back to listing the pods, services and replicationControllers, you now have: -``` -$ cluster/kubectl.sh get pods +```sh +$ ./cluster/kubectl.sh get pods NAME IMAGE(S) HOST LABELS STATUS 781191ff-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.4/10.245.2.4 name=myNginx Running 7813c8bd-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.2/10.245.2.2 name=myNginx Running 78140853-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.3/10.245.2.3 
name=myNginx Running -$ cluster/kubectl.sh get services +$ ./cluster/kubectl.sh get services NAME LABELS SELECTOR IP PORT -$ cluster/kubectl.sh get replicationControllers +$ ./cluster/kubectl.sh get replicationControllers NAME IMAGE(S SELECTOR REPLICAS myNginx nginx name=my-nginx 3 ``` @@ -244,9 +254,9 @@ We did not start any services, hence there are none listed. But we see three rep Check the [guestbook](../../examples/guestbook/README.md) application to learn how to create a service. You can already play with resizing the replicas with: -``` -$ cluster/kubectl.sh resize rc my-nginx --replicas=2 -$ cluster/kubectl.sh get pods +```sh +$ ./cluster/kubectl.sh resize rc my-nginx --replicas=2 +$ ./cluster/kubectl.sh get pods NAME IMAGE(S) HOST LABELS STATUS 7813c8bd-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.2/10.245.2.2 name=myNginx Running 78140853-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.3/10.245.2.3 name=myNginx Running @@ -258,26 +268,26 @@ Congratulations! #### I keep downloading the same (large) box all the time! -By default the Vagrantfile will download the box from S3. You can change this (and cache the box locally) by providing an alternate URL when calling `kube-up.sh` +By default the Vagrantfile will download the box from S3. You can change this (and cache the box locally) by providing a name and an alternate URL when calling `kube-up.sh` -```bash +```sh +export KUBERNETES_BOX_NAME=choose_your_own_name_for_your_kuber_box export KUBERNETES_BOX_URL=path_of_your_kuber_box export KUBERNETES_PROVIDER=vagrant -cluster/kube-up.sh +./cluster/kube-up.sh ``` - #### I just created the cluster, but I am getting authorization errors! You probably have an incorrect ~/.kubernetes_vagrant_auth file for the cluster you are attempting to contact. 
-``` +```sh rm ~/.kubernetes_vagrant_auth ``` After using kubectl.sh make sure that the correct credentials are set: -``` +```sh cat ~/.kubernetes_vagrant_auth { "User": "vagrant", @@ -285,34 +295,41 @@ cat ~/.kubernetes_vagrant_auth } ``` -#### I just created the cluster, but I do not see my container running ! +#### I just created the cluster, but I do not see my container running! If this is your first time creating the cluster, the kubelet on each minion schedules a number of docker pull requests to fetch prerequisite images. This can take some time and as a result may delay your initial pod getting provisioned. -#### I want to make changes to Kubernetes code ! +#### I want to make changes to Kubernetes code! To set up a vagrant cluster for hacking, follow the [vagrant developer guide](../devel/developer-guides/vagrant.md). -#### I have brought Vagrant up but the minions won't validate ! +#### I have brought Vagrant up but the minions won't validate! Log on to one of the minions (`vagrant ssh minion-1`) and inspect the salt minion log (`sudo cat /var/log/salt/minion`). -#### I want to change the number of minions ! +#### I want to change the number of minions! You can control the number of minions that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough minions to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single minion. You do this, by setting `NUM_MINIONS` to 1 like so: -``` +```sh export NUM_MINIONS=1 ``` -#### I want my VMs to have more memory ! +#### I want my VMs to have more memory! You can control the memory allotted to virtual machines with the `KUBERNETES_MEMORY` environment variable. Just set it to the number of megabytes you would like the machines to have. 
For example: -``` +```sh export KUBERNETES_MEMORY=2048 ``` +If you need more granular control, you can set the amount of memory for the master and minions independently. For example: + +```sh +export KUBERNETES_MASTER_MEMORY=1536 +export KUBERNETES_MINION_MEMORY=2048 +``` + #### I ran vagrant suspend and nothing works! ```vagrant suspend``` seems to mess up the network. It's not supported at this time. diff --git a/docs/kubectl.md b/docs/kubectl.md index cc5d7e8a992..b238e727c8b 100644 --- a/docs/kubectl.md +++ b/docs/kubectl.md @@ -66,4 +66,4 @@ kubectl * [kubectl update](kubectl_update.md) - Update a resource by filename or stdin. * [kubectl version](kubectl_version.md) - Print the client and server version information. -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.392549632 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.488963312 +0000 UTC diff --git a/docs/kubectl_api-versions.md b/docs/kubectl_api-versions.md index d7bbe180f37..9cf25963db3 100644 --- a/docs/kubectl_api-versions.md +++ b/docs/kubectl_api-versions.md @@ -50,4 +50,4 @@ kubectl api-versions ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.39227534 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.488505223 +0000 UTC diff --git a/docs/kubectl_cluster-info.md b/docs/kubectl_cluster-info.md index 61d3559bb6e..28742b6a767 100644 --- a/docs/kubectl_cluster-info.md +++ b/docs/kubectl_cluster-info.md @@ -50,4 +50,4 @@ kubectl cluster-info ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.392162759 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.48831375 +0000 UTC diff --git a/docs/kubectl_config.md b/docs/kubectl_config.md index 19649d52d6f..2a7f1bf20f1 100644 --- a/docs/kubectl_config.md +++ b/docs/kubectl_config.md @@ -63,4 
+63,4 @@ kubectl config SUBCOMMAND * [kubectl config use-context](kubectl_config_use-context.md) - Sets the current-context in a kubeconfig file * [kubectl config view](kubectl_config_view.md) - displays Merged kubeconfig settings or a specified kubeconfig file. -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.392043616 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.488116168 +0000 UTC diff --git a/docs/kubectl_config_set-cluster.md b/docs/kubectl_config_set-cluster.md index 309b22920f7..7816ada83d8 100644 --- a/docs/kubectl_config_set-cluster.md +++ b/docs/kubectl_config_set-cluster.md @@ -65,4 +65,4 @@ $ kubectl config set-cluster e2e --insecure-skip-tls-verify=true ### SEE ALSO * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.39119629 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.486460859 +0000 UTC diff --git a/docs/kubectl_config_set-context.md b/docs/kubectl_config_set-context.md index 15cb76d9f25..fbd2f1e3697 100644 --- a/docs/kubectl_config_set-context.md +++ b/docs/kubectl_config_set-context.md @@ -58,4 +58,4 @@ $ kubectl config set-context gce --user=cluster-admin ### SEE ALSO * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.391488399 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.486736724 +0000 UTC diff --git a/docs/kubectl_config_set-credentials.md b/docs/kubectl_config_set-credentials.md index 0a9969ae0ca..e3538fbf34c 100644 --- a/docs/kubectl_config_set-credentials.md +++ b/docs/kubectl_config_set-credentials.md @@ -78,4 +78,4 @@ $ kubectl set-credentials cluster-admin --client-certificate=~/.kube/admin.crt - ### SEE ALSO * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.391323192 +0000 UTC +###### Auto generated by 
spf13/cobra at 2015-04-17 18:59:11.486604006 +0000 UTC diff --git a/docs/kubectl_config_set.md b/docs/kubectl_config_set.md index 63a5230f649..871ea936657 100644 --- a/docs/kubectl_config_set.md +++ b/docs/kubectl_config_set.md @@ -52,4 +52,4 @@ kubectl config set PROPERTY_NAME PROPERTY_VALUE ### SEE ALSO * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.391618859 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.486861123 +0000 UTC diff --git a/docs/kubectl_config_unset.md b/docs/kubectl_config_unset.md index 4559d34440f..163b2e30e4f 100644 --- a/docs/kubectl_config_unset.md +++ b/docs/kubectl_config_unset.md @@ -51,4 +51,4 @@ kubectl config unset PROPERTY_NAME ### SEE ALSO * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.391735806 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.487685494 +0000 UTC diff --git a/docs/kubectl_config_use-context.md b/docs/kubectl_config_use-context.md index 9ef37418dd8..0e691726476 100644 --- a/docs/kubectl_config_use-context.md +++ b/docs/kubectl_config_use-context.md @@ -50,4 +50,4 @@ kubectl config use-context CONTEXT_NAME ### SEE ALSO * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.391848246 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.487888021 +0000 UTC diff --git a/docs/kubectl_config_view.md b/docs/kubectl_config_view.md index 0bd157be1b2..3013d4b1142 100644 --- a/docs/kubectl_config_view.md +++ b/docs/kubectl_config_view.md @@ -72,4 +72,4 @@ $ kubectl config view -o template --template='{{range .users}}{{ if eq .name "e2 ### SEE ALSO * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.391073075 +0000 UTC +###### Auto 
generated by spf13/cobra at 2015-04-17 18:59:11.486319118 +0000 UTC diff --git a/docs/kubectl_create.md b/docs/kubectl_create.md index 82f121037f3..8b01c7ce45e 100644 --- a/docs/kubectl_create.md +++ b/docs/kubectl_create.md @@ -63,4 +63,4 @@ $ cat pod.json | kubectl create -f - ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.388588064 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.48343431 +0000 UTC diff --git a/docs/kubectl_delete.md b/docs/kubectl_delete.md index 2f2f11249f9..f046bafb948 100644 --- a/docs/kubectl_delete.md +++ b/docs/kubectl_delete.md @@ -81,4 +81,4 @@ $ kubectl delete pods --all ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.389412973 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.483731878 +0000 UTC diff --git a/docs/kubectl_describe.md b/docs/kubectl_describe.md index 527e57977ab..d68bac797c5 100644 --- a/docs/kubectl_describe.md +++ b/docs/kubectl_describe.md @@ -53,4 +53,4 @@ kubectl describe RESOURCE ID ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.388410556 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.483293174 +0000 UTC diff --git a/docs/kubectl_exec.md b/docs/kubectl_exec.md index 8339368116d..d473e662136 100644 --- a/docs/kubectl_exec.md +++ b/docs/kubectl_exec.md @@ -64,4 +64,4 @@ $ kubectl exec -p 123456-7890 -c ruby-container -i -t -- bash -il ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.390127525 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.484697863 +0000 UTC diff --git a/docs/kubectl_expose.md b/docs/kubectl_expose.md index 
de10086a0d9..76b67987813 100644 --- a/docs/kubectl_expose.md +++ b/docs/kubectl_expose.md @@ -82,4 +82,4 @@ $ kubectl expose streamer --port=4100 --protocol=udp --service-name=video-stream ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.390792874 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.485803902 +0000 UTC diff --git a/docs/kubectl_get.md b/docs/kubectl_get.md index 86e4b6aefb7..bd7d5e16a14 100644 --- a/docs/kubectl_get.md +++ b/docs/kubectl_get.md @@ -8,7 +8,7 @@ Display one or many resources Display one or many resources. Possible resources include pods (po), replication controllers (rc), services -(svc), minions (mi), or events (ev). +(svc), minions (mi), events (ev), or component statuses (cs). By specifying the output as 'template' and providing a Go template as the value of the --template flag, you can filter the attributes of the fetched resource(s). @@ -85,4 +85,4 @@ $ kubectl get rc/web service/frontend pods/web-pod-13je7 ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.387483074 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.482589064 +0000 UTC diff --git a/docs/kubectl_label.md b/docs/kubectl_label.md index 83d688ab8ab..982f40ffefa 100644 --- a/docs/kubectl_label.md +++ b/docs/kubectl_label.md @@ -81,4 +81,4 @@ $ kubectl label pods foo bar- ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.390937166 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.486060232 +0000 UTC diff --git a/docs/kubectl_log.md b/docs/kubectl_log.md index 5f7bb2f0644..418abfb3e57 100644 --- a/docs/kubectl_log.md +++ b/docs/kubectl_log.md @@ -62,4 +62,4 @@ $ kubectl log -f 123456-7890 ruby-container ### SEE ALSO * 
[kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.389728881 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.484139739 +0000 UTC diff --git a/docs/kubectl_namespace.md b/docs/kubectl_namespace.md index 3fbb10002d9..c28b40709f3 100644 --- a/docs/kubectl_namespace.md +++ b/docs/kubectl_namespace.md @@ -53,4 +53,4 @@ kubectl namespace [namespace] ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.389609191 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.483937463 +0000 UTC diff --git a/docs/kubectl_port-forward.md b/docs/kubectl_port-forward.md index da2f9fd11d3..b47b5d0cbd3 100644 --- a/docs/kubectl_port-forward.md +++ b/docs/kubectl_port-forward.md @@ -68,4 +68,4 @@ $ kubectl port-forward -p mypod 0:5000 ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.390241417 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.484899751 +0000 UTC diff --git a/docs/kubectl_proxy.md b/docs/kubectl_proxy.md index 745cff9d3c7..67906f6a4cf 100644 --- a/docs/kubectl_proxy.md +++ b/docs/kubectl_proxy.md @@ -65,4 +65,4 @@ $ kubectl proxy --api-prefix=k8s-api ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.390360738 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.485099157 +0000 UTC diff --git a/docs/kubectl_resize.md b/docs/kubectl_resize.md index bc24c391b34..d4b9754392d 100644 --- a/docs/kubectl_resize.md +++ b/docs/kubectl_resize.md @@ -68,4 +68,4 @@ $ kubectl resize --current-replicas=2 --replicas=3 replicationcontrollers foo ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto 
generated by spf13/cobra at 2015-04-16 17:04:37.389989377 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.484493463 +0000 UTC diff --git a/docs/kubectl_rolling-update.md b/docs/kubectl_rolling-update.md index 3d7dcf28d8e..47669766ffe 100644 --- a/docs/kubectl_rolling-update.md +++ b/docs/kubectl_rolling-update.md @@ -68,4 +68,4 @@ $ cat frontend-v2.json | kubectl rolling-update frontend-v1 -f - ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.38985117 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.484316119 +0000 UTC diff --git a/docs/kubectl_run-container.md b/docs/kubectl_run-container.md index 91cf6e0dba1..09735e0142c 100644 --- a/docs/kubectl_run-container.md +++ b/docs/kubectl_run-container.md @@ -78,4 +78,4 @@ $ kubectl run-container nginx --image=nginx --overrides='{ "apiVersion": "v1beta ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.390501802 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.485362986 +0000 UTC diff --git a/docs/kubectl_stop.md b/docs/kubectl_stop.md index 17849c8f27e..83690f783df 100644 --- a/docs/kubectl_stop.md +++ b/docs/kubectl_stop.md @@ -72,4 +72,4 @@ $ kubectl stop -f path/to/resources ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.390631789 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.48555328 +0000 UTC diff --git a/docs/kubectl_update.md b/docs/kubectl_update.md index 352ce0f68e0..b7b48ce7049 100644 --- a/docs/kubectl_update.md +++ b/docs/kubectl_update.md @@ -67,4 +67,4 @@ $ kubectl update pods my-pod --patch='{ "apiVersion": "v1beta1", "desiredState": ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager 
-###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.388743178 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.483572524 +0000 UTC diff --git a/docs/kubectl_version.md b/docs/kubectl_version.md index 7b79e8a885c..0692efd1a40 100644 --- a/docs/kubectl_version.md +++ b/docs/kubectl_version.md @@ -51,4 +51,4 @@ kubectl version ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-04-16 17:04:37.392395408 +0000 UTC +###### Auto generated by spf13/cobra at 2015-04-17 18:59:11.488692518 +0000 UTC diff --git a/docs/man/kubelet.1.md b/docs/man/kubelet.1.md index 9144e119d74..8a7f27a70c4 100644 --- a/docs/man/kubelet.1.md +++ b/docs/man/kubelet.1.md @@ -15,7 +15,7 @@ There are 4 ways that a container manifest can be provided to the Kubelet: File Path passed as a flag on the command line. This file is rechecked every 20 seconds (configurable with a flag). HTTP endpoint HTTP endpoint passed as a parameter on the command line. This endpoint is checked every 20 seconds (also configurable with a flag). - etcd server The Kubelet will reach out and do a watch on an etcd server. The etcd path that is watched is /registry/hosts/$(hostname -f). As this is a watch, changes are noticed and acted upon very quickly. + etcd server The Kubelet will reach out and do a watch on an etcd server. The etcd path that is watched is /registry/hosts/$(uname -n). As this is a watch, changes are noticed and acted upon very quickly. HTTP server The kubelet can also listen for HTTP and respond to a simple API (underspec'd currently) to submit a new manifest. diff --git a/docs/man/man1/kubectl-get.1 b/docs/man/man1/kubectl-get.1 index e7378419b95..5b87150369c 100644 --- a/docs/man/man1/kubectl-get.1 +++ b/docs/man/man1/kubectl-get.1 @@ -17,7 +17,7 @@ Display one or many resources. 
.PP Possible resources include pods (po), replication controllers (rc), services -(svc), minions (mi), or events (ev). +(svc), minions (mi), events (ev), or component statuses (cs). .PP By specifying the output as 'template' and providing a Go template as the value diff --git a/docs/man/man1/kubelet.1 b/docs/man/man1/kubelet.1 index 60cadee7815..4abd9ec368f 100644 --- a/docs/man/man1/kubelet.1 +++ b/docs/man/man1/kubelet.1 @@ -21,7 +21,7 @@ There are 4 ways that a container manifest can be provided to the Kubelet: .nf File Path passed as a flag on the command line. This file is rechecked every 20 seconds (configurable with a flag). HTTP endpoint HTTP endpoint passed as a parameter on the command line. This endpoint is checked every 20 seconds (also configurable with a flag). -etcd server The Kubelet will reach out and do a watch on an etcd server. The etcd path that is watched is /registry/hosts/\$(hostname \-f). As this is a watch, changes are noticed and acted upon very quickly. +etcd server The Kubelet will reach out and do a watch on an etcd server. The etcd path that is watched is /registry/hosts/\$(uname \-n). As this is a watch, changes are noticed and acted upon very quickly. HTTP server The kubelet can also listen for HTTP and respond to a simple API (underspec'd currently) to submit a new manifest. .fi diff --git a/docs/resource_quota_admin.md b/docs/resource_quota_admin.md index 7e87b152486..def166c5c83 100644 --- a/docs/resource_quota_admin.md +++ b/docs/resource_quota_admin.md @@ -25,6 +25,8 @@ are supported: | services | Total number of services | | replicationcontrollers | Total number of replication controllers | | resourcequotas | Total number of resource quotas | +| secrets | Total number of secrets | +| persistentvolumeclaims | Total number of persistent volume claims | For example, `pods` quota counts and enforces a maximum on the number of `pods` created in a single namespace. 
diff --git a/docs/roadmap.md b/docs/roadmap.md index 92e080e1326..af4aac6a14e 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -1,6 +1,6 @@ # Kubernetes Roadmap -Updated Feb 9, 2015 +Updated April 20, 2015 This document is intended to capture the set of supported use cases, features, docs, and patterns that we feel are required to call Kubernetes “feature @@ -18,30 +18,30 @@ clustered database or key-value store. We will target such workloads for our ## APIs and core features 1. Consistent v1 API - - Status: v1beta3 (#1519) is being developed as the release candidate for the v1 API. -2. Multi-port services for apps which need more than one port on the same portal IP (#1802) - - Status: #2585 covers the design. -3. Nominal services for applications which need one stable IP per pod instance (#260) + - Status: DONE. [v1beta3](http://kubernetesio.blogspot.com/2015/04/introducing-kubernetes-v1beta3.html) was developed as the release candidate for the v1 API. +2. Multi-port services for apps which need more than one port on the same portal IP ([#1802](https://github.com/GoogleCloudPlatform/kubernetes/issues/1802)) + - Status: DONE. Released in 0.15.0 +3. Nominal services for applications which need one stable IP per pod instance ([#260](https://github.com/GoogleCloudPlatform/kubernetes/issues/260)) - Status: #2585 covers some design options. -4. API input is scrubbed of status fields in favor of a new API to set status (#4248) - - Status: in progress -5. Input validation reporting versioned field names (#2518) +4. API input is scrubbed of status fields in favor of a new API to set status ([#4248](https://github.com/GoogleCloudPlatform/kubernetes/issues/4248)) + - Status: DONE +5. Input validation reporting versioned field names ([#3084](https://github.com/GoogleCloudPlatform/kubernetes/issues/3084)) - Status: in progress 6. Error reporting: Report common problems in ways that users can discover - Status: 7. 
Event management: Make events usable and useful - Status: -8. Persistent storage support (#4055) +8. Persistent storage support ([#5105](https://github.com/GoogleCloudPlatform/kubernetes/issues/5105)) - Status: in progress -9. Allow nodes to join/leave a cluster (#2303,#2435) - - Status: high level [design doc](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/design/clustering.md). +9. Allow nodes to join/leave a cluster ([#6087](https://github.com/GoogleCloudPlatform/kubernetes/issues/6087),[#3168](https://github.com/GoogleCloudPlatform/kubernetes/issues/3168)) + - Status: in progress ([#6949](https://github.com/GoogleCloudPlatform/kubernetes/pull/6949)) 10. Handle node death - Status: mostly covered by nodes joining/leaving a cluster -11. Allow live cluster upgrades (#2524) +11. Allow live cluster upgrades ([#6075](https://github.com/GoogleCloudPlatform/kubernetes/issues/6075),[#6079](https://github.com/GoogleCloudPlatform/kubernetes/issues/6079)) - Status: design in progress 12. Allow kernel upgrades - Status: mostly covered by nodes joining/leaving a cluster, need demonstration -13. Allow rolling-updates to fail gracefully (#1353) +13. Allow rolling-updates to fail gracefully ([#1353](https://github.com/GoogleCloudPlatform/kubernetes/issues/1353)) - Status: 14. Easy .dockercfg - Status: diff --git a/docs/salt.md b/docs/salt.md index 2e3699ab4f1..5425d6e95d3 100644 --- a/docs/salt.md +++ b/docs/salt.md @@ -63,9 +63,9 @@ Key | Value `cbr-cidr` | (Optional) The minion IP address range used for the docker container bridge. `cloud` | (Optional) Which IaaS platform is used to host kubernetes, *gce*, *azure*, *aws*, *vagrant* `etcd_servers` | (Optional) Comma-delimited list of IP addresses the kube-apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role, or 127.0.0.1 on GCE. -`hostnamef` | (Optional) The full host name of the machine, i.e. 
hostname -f (only used on Azure) +`hostnamef` | (Optional) The full host name of the machine, i.e. uname -n `node_ip` | (Optional) The IP address to use to address this node -`minion_ip` | (Optional) Mapped to the kubelet hostname_override, K8S TODO - change this name +`hostname_override` | (Optional) Mapped to the kubelet hostname_override `network_mode` | (Optional) Networking model to use among nodes: *openvswitch* `networkInterfaceName` | (Optional) Networking interface to use to bind addresses, default value *eth0* `publicAddressOverride` | (Optional) The IP address the kube-apiserver should use to bind against for external read-only access diff --git a/examples/cassandra/cassandra-service.yaml b/examples/cassandra/cassandra-service.yaml index 2efde01ba2d..ad17bce72c5 100644 --- a/examples/cassandra/cassandra-service.yaml +++ b/examples/cassandra/cassandra-service.yaml @@ -5,3 +5,5 @@ port: 9042 containerPort: 9042 selector: name: cassandra +labels: + name: cassandra diff --git a/examples/examples_test.go b/examples/examples_test.go index 994ba7ea7d2..d21c1eea150 100644 --- a/examples/examples_test.go +++ b/examples/examples_test.go @@ -157,11 +157,7 @@ func TestExampleObjectSchemas(t *testing.T) { "service": &api.Service{}, "replication-controller": &api.ReplicationController{}, }, - "../examples/update-demo/v1beta1": { - "kitten-rc": &api.ReplicationController{}, - "nautilus-rc": &api.ReplicationController{}, - }, - "../examples/update-demo/v1beta3": { + "../examples/update-demo": { "kitten-rc": &api.ReplicationController{}, "nautilus-rc": &api.ReplicationController{}, }, diff --git a/cluster/addons/fluentd-elasticsearch/logging-demo/Makefile b/examples/logging-demo/Makefile similarity index 95% rename from cluster/addons/fluentd-elasticsearch/logging-demo/Makefile rename to examples/logging-demo/Makefile index a1acec98806..c847f9d6b35 100644 --- a/cluster/addons/fluentd-elasticsearch/logging-demo/Makefile +++ b/examples/logging-demo/Makefile @@ -5,7 +5,7 @@ 
.PHONY: up down logger-up logger-down logger10-up logger10-downget net -KUBECTL=../../../kubectl.sh +KUBECTL=../../cluster/kubectl.sh up: logger-up logger10-up diff --git a/cluster/addons/fluentd-elasticsearch/logging-demo/README.md b/examples/logging-demo/README.md similarity index 100% rename from cluster/addons/fluentd-elasticsearch/logging-demo/README.md rename to examples/logging-demo/README.md diff --git a/cluster/addons/fluentd-elasticsearch/logging-demo/synth-logger.png b/examples/logging-demo/synth-logger.png similarity index 100% rename from cluster/addons/fluentd-elasticsearch/logging-demo/synth-logger.png rename to examples/logging-demo/synth-logger.png diff --git a/cluster/addons/fluentd-elasticsearch/logging-demo/synthetic_0_25lps.yaml b/examples/logging-demo/synthetic_0_25lps.yaml similarity index 100% rename from cluster/addons/fluentd-elasticsearch/logging-demo/synthetic_0_25lps.yaml rename to examples/logging-demo/synthetic_0_25lps.yaml diff --git a/cluster/addons/fluentd-elasticsearch/logging-demo/synthetic_10lps.yaml b/examples/logging-demo/synthetic_10lps.yaml similarity index 100% rename from cluster/addons/fluentd-elasticsearch/logging-demo/synthetic_10lps.yaml rename to examples/logging-demo/synthetic_10lps.yaml diff --git a/examples/resourcequota/resource-quota.json b/examples/resourcequota/resource-quota.json index 7d0c40aefb9..ea7394ce748 100644 --- a/examples/resourcequota/resource-quota.json +++ b/examples/resourcequota/resource-quota.json @@ -10,6 +10,8 @@ "services": "5", "replicationcontrollers":"20", "resourcequotas":"1", - }, + "secrets":"10", + "persistentvolumeclaims":"10" + } } } diff --git a/examples/resourcequota/v1beta3/resource-quota.json b/examples/resourcequota/v1beta3/resource-quota.json index d4b0cb1b76c..c062eb82e6f 100644 --- a/examples/resourcequota/v1beta3/resource-quota.json +++ b/examples/resourcequota/v1beta3/resource-quota.json @@ -11,7 +11,9 @@ "pods": "10", "services": "5", "replicationcontrollers":"20", - 
"resourcequotas":"1" + "resourcequotas":"1", + "secrets":"10", + "persistentvolumeclaims":"10" } } } diff --git a/examples/update-demo/README.md b/examples/update-demo/README.md index a07c341c78a..1b2e2ac8d07 100644 --- a/examples/update-demo/README.md +++ b/examples/update-demo/README.md @@ -42,7 +42,7 @@ Now visit the the [demo website](http://localhost:8001/static). You won't see a Now we will turn up two replicas of an image. They all serve on internal port 80. ```bash -$ ./cluster/kubectl.sh create -f examples/update-demo/v1beta1/nautilus-rc.yaml +$ ./cluster/kubectl.sh create -f examples/update-demo/nautilus-rc.yaml ``` After pulling the image from the Docker Hub to your worker nodes (which may take a minute or so) you'll see a couple of squares in the UI detailing the pods that are running along with the image that they are serving up. A cute little nautilus. @@ -61,7 +61,7 @@ If you go back to the [demo website](http://localhost:8001/static/index.html) yo We will now update the docker image to serve a different image by doing a rolling update to a new Docker image. ```bash -$ ./cluster/kubectl.sh rolling-update update-demo-nautilus --update-period=10s -f examples/update-demo/v1beta1/kitten-rc.yaml +$ ./cluster/kubectl.sh rolling-update update-demo-nautilus --update-period=10s -f examples/update-demo/kitten-rc.yaml ``` The rolling-update command in kubectl will do 2 things: @@ -106,7 +106,7 @@ $ export DOCKER_HUB_USER=my-docker-id $ ./examples/update-demo/build-images.sh ``` -To use your custom docker image in the above examples, you will need to change the image name in `examples/update-demo/v1beta1/nautilus-rc.yaml` and `examples/update-demo/v1beta1/kitten-rc.yaml`. +To use your custom docker image in the above examples, you will need to change the image name in `examples/update-demo/nautilus-rc.yaml` and `examples/update-demo/kitten-rc.yaml`. 
### Image Copyright diff --git a/examples/update-demo/v1beta3/kitten-rc.yaml b/examples/update-demo/kitten-rc.yaml similarity index 100% rename from examples/update-demo/v1beta3/kitten-rc.yaml rename to examples/update-demo/kitten-rc.yaml diff --git a/examples/update-demo/local/index.html b/examples/update-demo/local/index.html index f3a69e79c30..22a4859126a 100644 --- a/examples/update-demo/local/index.html +++ b/examples/update-demo/local/index.html @@ -22,9 +22,9 @@ limitations under the License.
- - ID: {{server.podId}}
- Host: {{server.host}}
+ + ID: {{server.podName}}
+ Host: {{server.host}}
Status: {{server.status}}
Image: {{server.dockerImage}}
Labels: diff --git a/examples/update-demo/local/script.js b/examples/update-demo/local/script.js index 23b7a06f528..cf0fb3dd6b6 100644 --- a/examples/update-demo/local/script.js +++ b/examples/update-demo/local/script.js @@ -14,10 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -var base = "http://localhost:8001/api/v1beta1/"; +var base = "http://localhost:8001/api/v1beta3/"; var updateImage = function($http, server) { - $http.get(base + "proxy/pods/" + server.podId + "/data.json") + $http.get(base + "proxy/namespaces/default/pods/" + server.podName + "/data.json") .success(function(data) { console.log(data); server.image = data.image; @@ -29,13 +29,13 @@ var updateImage = function($http, server) { }; var updateServer = function($http, server) { - $http.get(base + "pods/" + server.podId) + $http.get(base + "namespaces/default/pods/" + server.podName) .success(function(data) { console.log(data); - server.labels = data.labels; - server.host = data.currentState.host.split('.')[0]; - server.status = data.currentState.status; - server.dockerImage = data.currentState.info["update-demo"].image; + server.labels = data.metadata.labels; + server.host = data.spec.host.split('.')[0]; + server.status = data.status.phase; + server.dockerImage = data.status.containerStatuses[0].image; updateImage($http, server); }) .error(function(data) { @@ -57,10 +57,10 @@ var ButtonsCtrl = function ($scope, $http, $interval) { $interval(angular.bind({}, update, $scope, $http), 2000); }; -var getServer = function($scope, id) { +var getServer = function($scope, name) { var servers = $scope.servers; for (var i = 0; i < servers.length; ++i) { - if (servers[i].podId == id) { + if (servers[i].podName == name) { return servers[i]; } } @@ -68,7 +68,7 @@ var getServer = function($scope, id) { }; var isUpdateDemoPod = function(pod) { - return pod.labels && pod.labels.name == "update-demo"; + return pod.metadata && pod.metadata.labels && 
pod.metadata.labels.name == "update-demo"; }; var update = function($scope, $http) { @@ -76,7 +76,7 @@ var update = function($scope, $http) { console.log("No HTTP!"); return; } - $http.get(base + "pods") + $http.get(base + "namespaces/default/pods") .success(function(data) { console.log(data); var newServers = []; @@ -85,9 +85,9 @@ var update = function($scope, $http) { if (!isUpdateDemoPod(pod)) { continue; } - var server = getServer($scope, pod.id); + var server = getServer($scope, pod.metadata.name); if (server == null) { - server = { "podId": pod.id }; + server = { "podName": pod.metadata.name }; } newServers.push(server); } diff --git a/examples/update-demo/v1beta3/nautilus-rc.yaml b/examples/update-demo/nautilus-rc.yaml similarity index 100% rename from examples/update-demo/v1beta3/nautilus-rc.yaml rename to examples/update-demo/nautilus-rc.yaml diff --git a/examples/update-demo/v1beta1/kitten-rc.yaml b/examples/update-demo/v1beta1/kitten-rc.yaml deleted file mode 100644 index fe6926524e4..00000000000 --- a/examples/update-demo/v1beta1/kitten-rc.yaml +++ /dev/null @@ -1,19 +0,0 @@ -kind: ReplicationController -id: update-demo-kitten -apiVersion: v1beta1 -desiredState: - replicaSelector: - name: update-demo - version: kitten - podTemplate: - desiredState: - manifest: - containers: - - name: update-demo - image: gcr.io/google_containers/update-demo:kitten - ports: - - containerPort: 80 - protocol: TCP - labels: - name: update-demo - version: kitten diff --git a/examples/update-demo/v1beta1/nautilus-rc.yaml b/examples/update-demo/v1beta1/nautilus-rc.yaml deleted file mode 100644 index 5218f62bf91..00000000000 --- a/examples/update-demo/v1beta1/nautilus-rc.yaml +++ /dev/null @@ -1,20 +0,0 @@ -kind: ReplicationController -id: update-demo-nautilus -apiVersion: v1beta1 -desiredState: - replicas: 2 - replicaSelector: - name: update-demo - version: nautilus - podTemplate: - desiredState: - manifest: - containers: - - name: update-demo - image: 
gcr.io/google_containers/update-demo:nautilus - ports: - - containerPort: 80 - protocol: TCP - labels: - name: update-demo - version: nautilus diff --git a/hack/build-ui.sh b/hack/build-ui.sh index d38715b60b1..647bd5fff5c 100755 --- a/hack/build-ui.sh +++ b/hack/build-ui.sh @@ -33,7 +33,7 @@ fi DATAFILE=pkg/ui/datafile.go TMP_DATAFILE=/tmp/datafile.go -go-bindata -nocompress -o $DATAFILE -prefix ${PWD} -pkg ui www/... third_party/swagger-ui/... +go-bindata -nocompress -o $DATAFILE -prefix ${PWD} -pkg ui www/app/... third_party/swagger-ui/... cat hooks/boilerplate.go.txt > $TMP_DATAFILE echo "// generated by hack/build-ui.sh; DO NOT EDIT diff --git a/hack/ginkgo-e2e.sh b/hack/ginkgo-e2e.sh index 93d7364e1dd..6044b54f546 100755 --- a/hack/ginkgo-e2e.sh +++ b/hack/ginkgo-e2e.sh @@ -19,6 +19,7 @@ set -o nounset set -o pipefail KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +source "${KUBE_ROOT}/cluster/common.sh" # --- Find local test binaries. @@ -86,15 +87,7 @@ if [[ -z "${AUTH_CONFIG:-}" ]]; then detect-master >/dev/null - - if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then - # When we are using vagrant it has hard coded auth. We repeat that here so that - # we don't clobber auth that might be used for a publicly facing cluster. - auth_config=( - "--auth_config=${HOME}/.kubernetes_vagrant_auth" - "--kubeconfig=${HOME}/.kubernetes_vagrant_kubeconfig" - ) - elif [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then + if [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then # GKE stores its own kubeconfig in gcloud's config directory. 
detect-project &> /dev/null auth_config=( @@ -102,25 +95,15 @@ if [[ -z "${AUTH_CONFIG:-}" ]]; then # gcloud doesn't set the current-context, so we have to set it "--context=gke_${PROJECT}_${ZONE}_${CLUSTER_NAME}" ) - elif [[ "${KUBERNETES_PROVIDER}" == "gce" ]]; then - auth_config=( - "--kubeconfig=${HOME}/.kube/.kubeconfig" - ) - elif [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then - auth_config=( - "--auth_config=${HOME}/.kube/${INSTANCE_PREFIX}/kubernetes_auth" - ) - elif [[ "${KUBERNETES_PROVIDER}" == "libvirt-coreos" ]]; then - auth_config=( - "--kubeconfig=${HOME}/.kube/.kubeconfig" - ) elif [[ "${KUBERNETES_PROVIDER}" == "conformance_test" ]]; then auth_config=( "--auth_config=${KUBERNETES_CONFORMANCE_TEST_AUTH_CONFIG:-}" "--cert_dir=${KUBERNETES_CONFORMANCE_TEST_CERT_DIR:-}" ) else - auth_config=() + auth_config=( + "--kubeconfig=${KUBECONFIG:-$DEFAULT_KUBECONFIG}" + ) fi else echo "Conformance Test. No cloud-provider-specific preparation." diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index a4635f79b40..2e07193a4e6 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -102,7 +102,7 @@ else exit 1 fi - sudo gcloud components update -q || true + sudo flock -x -n /var/run/lock/gcloud-components.lock -c "gcloud components update -q" || true GITHASH=$(gsutil cat gs://kubernetes-release/ci/latest.txt) gsutil -m cp gs://kubernetes-release/ci/${GITHASH}/kubernetes.tar.gz gs://kubernetes-release/ci/${GITHASH}/kubernetes-test.tar.gz . 
diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index 5dc923f80e2..44ae2bdb193 100644 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -247,11 +247,12 @@ kube::golang::place_bins() { done } -kube::golang::exit_if_stdlib_not_installed() { +kube::golang::fallback_if_stdlib_not_installable() { local go_root_dir=$(go env GOROOT); local go_host_os=$(go env GOHOSTOS); local go_host_arch=$(go env GOHOSTARCH); local cgo_pkg_dir=${go_root_dir}/pkg/${go_host_os}_${go_host_arch}_cgo; + if [ -e ${cgo_pkg_dir} ]; then return 0; fi @@ -260,12 +261,13 @@ kube::golang::exit_if_stdlib_not_installed() { return 0; fi - kube::log::status "+++ Error. stdlib pkg with cgo flag not found."; - kube::log::status "+++ Error. stdlib pkg cannot be rebuilt since ${go_root_dir}/pkg is not writable by `whoami`"; - kube::log::status "+++ Error. Make ${go_root_dir}/pkg writable for `whoami` for a one-time stdlib install, Or" - kube::log::status "+++ Error. Rebuild stdlib using the command 'CGO_ENABLED=0 go install -a -installsuffix cgo std'"; - - exit 0; + kube::log::status "+++ Warning: stdlib pkg with cgo flag not found."; + kube::log::status "+++ Warning: stdlib pkg cannot be rebuilt since ${go_root_dir}/pkg is not writable by `whoami`"; + kube::log::status "+++ Warning: Make ${go_root_dir}/pkg writable for `whoami` for a one-time stdlib install, Or" + kube::log::status "+++ Warning: Rebuild stdlib using the command 'CGO_ENABLED=0 go install -a -installsuffix cgo std'"; + kube::log::status "+++ Falling back to go build, which is slower"; + + use_go_build=true } kube::golang::build_binaries_for_platform() { @@ -276,12 +278,14 @@ kube::golang::build_binaries_for_platform() { local -a nonstatics=() for binary in "${binaries[@]}"; do if kube::golang::is_statically_linked_library "${binary}"; then - kube::golang::exit_if_stdlib_not_installed; statics+=($binary) else nonstatics+=($binary) fi done + if [[ "${#statics[@]}" != 0 ]]; then + kube::golang::fallback_if_stdlib_not_installable; + fi 
if [[ -n ${use_go_build:-} ]]; then # Try and replicate the native binary placement of go install without @@ -291,6 +295,7 @@ kube::golang::build_binaries_for_platform() { output_path="${output_path}/${platform//\//_}" fi + kube::log::progress " " for binary in "${binaries[@]}"; do local bin=$(basename "${binary}") if [[ ${GOOS} == "windows" ]]; then @@ -298,8 +303,7 @@ kube::golang::build_binaries_for_platform() { fi if kube::golang::is_statically_linked_library "${binary}"; then - kube::golang::exit_if_stdlib_not_installed; - CGO_ENABLED=0 go build -installsuffix cgo -o "${output_path}/${bin}" \ + CGO_ENABLED=0 go build -o "${output_path}/${bin}" \ "${goflags[@]:+${goflags[@]}}" \ -ldflags "${version_ldflags}" \ "${binary}" @@ -309,7 +313,9 @@ kube::golang::build_binaries_for_platform() { -ldflags "${version_ldflags}" \ "${binary}" fi + kube::log::progress "*" done + kube::log::progress "\n" else # Use go install. if [[ "${#nonstatics[@]}" != 0 ]]; then diff --git a/hack/lib/logging.sh b/hack/lib/logging.sh index 55394cb4349..00dc5a626a3 100644 --- a/hack/lib/logging.sh +++ b/hack/lib/logging.sh @@ -119,6 +119,13 @@ kube::log::info() { done } +# Just like kube::log::info, but no \n, so you can make a progress bar +kube::log::progress() { + for message; do + echo -e -n "$message" + done +} + kube::log::info_from_stdin() { local messages=() while read -r line; do diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index 85e22c2e39d..a947cea1468 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -176,8 +176,8 @@ Logs: To start using your cluster, open up another terminal/tab and run: - cluster/kubectl.sh config set-cluster local --server=http://${API_HOST}:${API_PORT} --insecure-skip-tls-verify=true --global - cluster/kubectl.sh config set-context local --cluster=local --global + cluster/kubectl.sh config set-cluster local --server=http://${API_HOST}:${API_PORT} --insecure-skip-tls-verify=true + cluster/kubectl.sh config set-context 
local --cluster=local cluster/kubectl.sh config use-context local cluster/kubectl.sh EOF diff --git a/hack/parallel-e2e.sh b/hack/parallel-e2e.sh new file mode 100755 index 00000000000..a26ba0ba9a0 --- /dev/null +++ b/hack/parallel-e2e.sh @@ -0,0 +1,115 @@ +#!/bin/bash + +# Copyright 2015 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. + +function down-clusters { + for count in $(seq 1 ${clusters}); do + export KUBE_GCE_INSTANCE_PREFIX=e2e-test-${USER}-${count} + local cluster_dir=${KUBE_ROOT}/_output/e2e/${KUBE_GCE_INSTANCE_PREFIX} + export KUBECONFIG=${cluster_dir}/.kubeconfig + go run ${KUBE_ROOT}/hack/e2e.go -down -v & + done + + wait +} + +function up-clusters { + for count in $(seq 1 ${clusters}); do + export KUBE_GCE_INSTANCE_PREFIX=e2e-test-${USER}-${count} + export KUBE_GCE_CLUSTER_CLASS_B="10.$((${count}*2-1))" + export MASTER_IP_RANGE="10.$((${count}*2)).0.0/24" + + local cluster_dir=${KUBE_ROOT}/_output/e2e/${KUBE_GCE_INSTANCE_PREFIX} + mkdir -p ${cluster_dir} + export KUBECONFIG=${cluster_dir}/.kubeconfig + go run hack/e2e.go -up -v |& tee ${cluster_dir}/up.log & + done + + fail=0 + for job in $(jobs -p); do + wait "${job}" || fail=$((fail + 1)) + done + + if (( fail != 0 )); then + echo "${fail} cluster creation failures. Not continuing with tests." 
+ exit 1 + fi +} + +function run-tests { + for count in $(seq 1 ${clusters}); do + export KUBE_GCE_INSTANCE_PREFIX=e2e-test-${USER}-${count} + + local cluster_dir=${KUBE_ROOT}/_output/e2e/${KUBE_GCE_INSTANCE_PREFIX} + export KUBECONFIG=${cluster_dir}/.kubeconfig + export E2E_REPORT_DIR=${cluster_dir} + go run hack/e2e.go -test --test_args="--ginkgo.noColor" "${@:-}" -down |& tee ${cluster_dir}/e2e.log & + done + + wait +} + +# Outputs something like: +# _output/e2e/e2e-test-zml-5/junit.xml +# FAIL: Shell tests that services.sh passes +function post-process { + echo $1 + cat $1 | python -c ' +import sys +from xml.dom.minidom import parse + +failed = False +for testcase in parse(sys.stdin).getElementsByTagName("testcase"): + if len(testcase.getElementsByTagName("failure")) != 0: + failed = True + print " FAIL: {test}".format(test = testcase.getAttribute("name")) +if not failed: + print " SUCCESS!" +' +} + +function print-results { + for count in $(seq 1 ${clusters}); do + for junit in ${KUBE_ROOT}/_output/e2e/e2e-test-${USER}-${count}/junit*.xml; do + post-process ${junit} + done + done +} + +if [[ ${KUBERNETES_PROVIDER:-gce} != "gce" ]]; then + echo "$0 not supported on ${KUBERNETES_PROVIDER} yet" >&2 + exit 1 +fi + +readonly clusters=${1:-} + +if ! [[ "${clusters}" =~ ^[0-9]+$ ]]; then + echo "Usage: ${0} [options to hack/e2e.go]" >&2 + exit 1 +fi + +shift 1 + +rm -rf _output/e2e +down-clusters +up-clusters +run-tests "${@:-}" +print-results diff --git a/hack/test-cmd.sh b/hack/test-cmd.sh index f58fbcd1de7..b86756a9a02 100755 --- a/hack/test-cmd.sh +++ b/hack/test-cmd.sh @@ -95,6 +95,7 @@ kube::log::status "Starting kube-apiserver" --public_address_override="127.0.0.1" \ --kubelet_port=${KUBELET_PORT} \ --runtime_config=api/v1beta3 \ + --cert_dir="${TMPDIR:-/tmp/}" \ --portal_net="10.0.0.0/24" 1>&2 & APISERVER_PID=$! 
@@ -162,6 +163,7 @@ for version in "${kube_api_versions[@]}"; do # Command kubectl create "${kube_flags[@]}" -f examples/limitrange/valid-pod.json # Post-condition: valid-pod POD is running + kubectl get "${kube_flags[@]}" pods -o json kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' kube::test::get_object_assert 'pod valid-pod' "{{$id_field}}" 'valid-pod' kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod' @@ -615,6 +617,18 @@ __EOF__ kube::test::get_object_assert 'nodes/127.0.0.1 service/kubernetes' "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:kubernetes:' + ##################### + # Resource aliasing # + ##################### + + kube::log::status "Testing resource aliasing" + kubectl create -f examples/cassandra/cassandra.yaml "${kube_flags[@]}" + kubectl create -f examples/cassandra/cassandra-controller.yaml "${kube_flags[@]}" + kubectl create -f examples/cassandra/cassandra-service.yaml "${kube_flags[@]}" + kube::test::get_object_assert "all -l'name=cassandra'" "{{range.items}}{{$id_field}}:{{end}}" 'cassandra:cassandra:cassandra:' + kubectl delete all -l name=cassandra "${kube_flags[@]}" + + ########### # Swagger # ########### diff --git a/hack/test-go.sh b/hack/test-go.sh index 70b834f4d72..f2d787950ab 100755 --- a/hack/test-go.sh +++ b/hack/test-go.sh @@ -151,7 +151,7 @@ runTests() { if [[ ! 
${KUBE_COVER} =~ ^[yY]$ ]]; then kube::log::status "Running unit tests without code coverage" go test "${goflags[@]:+${goflags[@]}}" \ - ${KUBE_RACE} ${KUBE_TIMEOUT} "${@+${@/#/${KUBE_GO_PACKAGE}/}}" || true + ${KUBE_RACE} ${KUBE_TIMEOUT} "${@+${@/#/${KUBE_GO_PACKAGE}/}}" return 0 fi @@ -173,7 +173,7 @@ runTests() { -cover -covermode="${KUBE_COVERMODE}" \ -coverprofile="${cover_report_dir}/{}/${cover_profile}" \ "${cover_params[@]+${cover_params[@]}}" \ - "${KUBE_GO_PACKAGE}/{}" || true + "${KUBE_GO_PACKAGE}/{}" COMBINED_COVER_PROFILE="${cover_report_dir}/combined-coverage.out" { diff --git a/pkg/api/helpers.go b/pkg/api/helpers.go index 06e3e785f6e..1c64fb02a1c 100644 --- a/pkg/api/helpers.go +++ b/pkg/api/helpers.go @@ -80,6 +80,8 @@ var standardResources = util.NewStringSet( string(ResourceQuotas), string(ResourceServices), string(ResourceReplicationControllers), + string(ResourceSecrets), + string(ResourcePersistentVolumeClaims), string(ResourceStorage)) func IsStandardResourceName(str string) bool { diff --git a/pkg/api/latest/latest.go b/pkg/api/latest/latest.go index 887bba1a5b9..a58d44d437f 100644 --- a/pkg/api/latest/latest.go +++ b/pkg/api/latest/latest.go @@ -61,6 +61,9 @@ var SelfLinker = runtime.SelfLinker(accessor) // Kubernetes versions. var RESTMapper meta.RESTMapper +// userResources is a group of resources mostly used by a kubectl user +var userResources = []string{"rc", "svc", "pods", "pvc"} + // InterfacesFor returns the default Codec and ResourceVersioner for a given version // string, or an error if the version is not known. func InterfacesFor(version string) (*meta.VersionInterfaces, error) { @@ -124,6 +127,9 @@ func init() { "PersistentVolume": true, } + // setup aliases for groups of resources + mapper.AddResourceAlias("all", userResources...) 
+ // these kinds should be excluded from the list of resources ignoredKinds := util.NewStringSet( "ListOptions", diff --git a/pkg/api/meta/interfaces.go b/pkg/api/meta/interfaces.go index 8d471a9a69a..fca198458b0 100644 --- a/pkg/api/meta/interfaces.go +++ b/pkg/api/meta/interfaces.go @@ -147,4 +147,5 @@ type RESTMapping struct { type RESTMapper interface { VersionAndKindForResource(resource string) (defaultVersion, kind string, err error) RESTMapping(kind string, versions ...string) (*RESTMapping, error) + AliasesForResource(resource string) ([]string, bool) } diff --git a/pkg/api/meta/restmapper.go b/pkg/api/meta/restmapper.go index fb0480298fa..117a942c328 100644 --- a/pkg/api/meta/restmapper.go +++ b/pkg/api/meta/restmapper.go @@ -138,13 +138,17 @@ func kindToResource(kind string, mixedCase bool) (plural, singular string) { } else { singular = strings.ToLower(kind) } - switch string(singular[len(singular)-1]) { - case "s": - plural = singular - case "y": - plural = strings.TrimSuffix(singular, "y") + "ies" - default: - plural = singular + "s" + if strings.HasSuffix(singular, "status") { + plural = strings.TrimSuffix(singular, "status") + "statuses" + } else { + switch string(singular[len(singular)-1]) { + case "s": + plural = singular + case "y": + plural = strings.TrimSuffix(singular, "y") + "ies" + default: + plural = singular + "s" + } } return } @@ -215,7 +219,7 @@ func (m *DefaultRESTMapper) RESTMapping(kind string, versions ...string) (*RESTM return nil, fmt.Errorf("the provided version %q has no relevant versions", version) } - return &RESTMapping{ + retVal := &RESTMapping{ Resource: resource, APIVersion: version, Kind: kind, @@ -224,5 +228,65 @@ func (m *DefaultRESTMapper) RESTMapping(kind string, versions ...string) (*RESTM Codec: interfaces.Codec, ObjectConvertor: interfaces.ObjectConvertor, MetadataAccessor: interfaces.MetadataAccessor, - }, nil + } + + return retVal, nil +} + +// aliasToResource is used for mapping aliases to resources +var 
aliasToResource = map[string][]string{} + +// AddResourceAlias maps aliases to resources +func (m *DefaultRESTMapper) AddResourceAlias(alias string, resources ...string) { + if len(resources) == 0 { + return + } + aliasToResource[alias] = resources +} + +// AliasesForResource returns whether a resource has an alias or not +func (m *DefaultRESTMapper) AliasesForResource(alias string) ([]string, bool) { + if res, ok := aliasToResource[alias]; ok { + return res, true + } + return nil, false +} + +// MultiRESTMapper is a wrapper for multiple RESTMappers. +type MultiRESTMapper []RESTMapper + +// VersionAndKindForResource provides the Version and Kind mappings for the +// REST resources. This implementation supports multiple REST schemas and return +// the first match. +func (m MultiRESTMapper) VersionAndKindForResource(resource string) (defaultVersion, kind string, err error) { + for _, t := range m { + defaultVersion, kind, err = t.VersionAndKindForResource(resource) + if err == nil { + return + } + } + return +} + +// RESTMapping provides the REST mapping for the resource based on the resource +// kind and version. This implementation supports multiple REST schemas and +// return the first match. +func (m MultiRESTMapper) RESTMapping(kind string, versions ...string) (mapping *RESTMapping, err error) { + for _, t := range m { + mapping, err = t.RESTMapping(kind, versions...) + if err == nil { + return + } + } + return +} + +// AliasesForResource finds the first alias response for the provided mappers. 
+func (m MultiRESTMapper) AliasesForResource(alias string) (aliases []string, ok bool) { + for _, t := range m { + if aliases, ok = t.AliasesForResource(alias); ok { + return + } + } + return nil, false } diff --git a/pkg/api/register.go b/pkg/api/register.go index 4a352961990..83fc40e23af 100644 --- a/pkg/api/register.go +++ b/pkg/api/register.go @@ -60,6 +60,8 @@ func init() { &PodLogOptions{}, &PodExecOptions{}, &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, ) // Legacy names are supported Scheme.AddKnownTypeWithName("", "Minion", &Node{}) @@ -101,3 +103,5 @@ func (*ListOptions) IsAnAPIObject() {} func (*PodLogOptions) IsAnAPIObject() {} func (*PodExecOptions) IsAnAPIObject() {} func (*PodProxyOptions) IsAnAPIObject() {} +func (*ComponentStatus) IsAnAPIObject() {} +func (*ComponentStatusList) IsAnAPIObject() {} diff --git a/pkg/api/resource_helpers.go b/pkg/api/resource_helpers.go index 7dff8b94121..e4f8f7edeb1 100644 --- a/pkg/api/resource_helpers.go +++ b/pkg/api/resource_helpers.go @@ -58,3 +58,13 @@ func GetExistingContainerStatus(statuses []ContainerStatus, name string) Contain } return ContainerStatus{} } + +// IsPodReady returns true if a pod is ready; false otherwise. +func IsPodReady(pod *Pod) bool { + for _, c := range pod.Status.Conditions { + if c.Type == PodReady && c.Status == ConditionTrue { + return true + } + } + return false +} diff --git a/pkg/api/testing/fuzzer.go b/pkg/api/testing/fuzzer.go index 3fa1d38a2a3..774bda50e8e 100644 --- a/pkg/api/testing/fuzzer.go +++ b/pkg/api/testing/fuzzer.go @@ -18,6 +18,7 @@ package testing import ( "math/rand" + "reflect" "strconv" "testing" @@ -34,13 +35,6 @@ import ( "speter.net/go/exp/math/dec/inf" ) -func fuzzOneOf(c fuzz.Continue, objs ...interface{}) { - // Use a new fuzzer which cannot populate nil to ensure one obj will be set.
- f := fuzz.New().NilChance(0).NumElements(1, 1) - i := c.RandUint64() % uint64(len(objs)) - f.Fuzz(objs[i]) -} - // FuzzerFor can randomly populate api objects that are destined for version. func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer { f := fuzz.New().NilChance(.5).NumElements(1, 1) @@ -171,9 +165,12 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer { *rp = policies[c.Rand.Intn(len(policies))] }, func(vs *api.VolumeSource, c fuzz.Continue) { - // Exactly one of the fields should be set. - //FIXME: the fuzz can still end up nil. What if fuzz allowed me to say that? - fuzzOneOf(c, &vs.HostPath, &vs.EmptyDir, &vs.GCEPersistentDisk, &vs.AWSElasticBlockStore, &vs.GitRepo, &vs.Secret, &vs.NFS, &vs.ISCSI, &vs.Glusterfs) + // Exactly one of the fields must be set. + v := reflect.ValueOf(vs).Elem() + i := int(c.RandUint64() % uint64(v.NumField())) + v = v.Field(i).Addr() + // Use a new fuzzer which cannot populate nil to ensure one field will be set. + fuzz.New().NilChance(0).NumElements(1, 1).Fuzz(v.Interface()) }, func(d *api.DNSPolicy, c fuzz.Continue) { policies := []api.DNSPolicy{api.DNSClusterFirst, api.DNSDefault} diff --git a/pkg/api/types.go b/pkg/api/types.go index 5868517c84c..6dc455f7bb3 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -1705,6 +1705,10 @@ const ( ResourceReplicationControllers ResourceName = "replicationcontrollers" // ResourceQuotas, number ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" ) // ResourceQuotaSpec defines the desired hard limits to enforce for Quota @@ -1827,3 +1831,33 @@ func AddToNodeAddresses(addresses *[]NodeAddress, addAddresses ...NodeAddress) { } } } + +// Type and constants for component health validation. 
+type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +type ComponentCondition struct { + Type ComponentConditionType `json:"type"` + Status ConditionStatus `json:"status"` + Message string `json:"message,omitempty"` + Error string `json:"error,omitempty"` +} + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + TypeMeta `json:",inline"` + ObjectMeta `json:"metadata,omitempty"` + + Conditions []ComponentCondition `json:"conditions,omitempty"` +} + +type ComponentStatusList struct { + TypeMeta `json:",inline"` + ListMeta `json:"metadata,omitempty"` + + Items []ComponentStatus `json:"items"` +} diff --git a/pkg/api/v1beta1/register.go b/pkg/api/v1beta1/register.go index f2a137292e7..3947ac26f9a 100644 --- a/pkg/api/v1beta1/register.go +++ b/pkg/api/v1beta1/register.go @@ -68,6 +68,8 @@ func init() { &PodLogOptions{}, &PodExecOptions{}, &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, ) // Future names are supported api.Scheme.AddKnownTypeWithName("v1beta1", "Node", &Minion{}) @@ -110,3 +112,5 @@ func (*ListOptions) IsAnAPIObject() {} func (*PodLogOptions) IsAnAPIObject() {} func (*PodExecOptions) IsAnAPIObject() {} func (*PodProxyOptions) IsAnAPIObject() {} +func (*ComponentStatus) IsAnAPIObject() {} +func (*ComponentStatusList) IsAnAPIObject() {} diff --git a/pkg/api/v1beta1/types.go b/pkg/api/v1beta1/types.go index 57d41de70ef..493d59eedeb 100644 --- a/pkg/api/v1beta1/types.go +++ b/pkg/api/v1beta1/types.go @@ -1503,6 +1503,10 @@ const ( ResourceReplicationControllers ResourceName = "replicationcontrollers" // ResourceQuotas, number ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" ) // 
ResourceQuotaSpec defines the desired hard limits to enforce for Quota @@ -1592,3 +1596,32 @@ type GlusterfsVolumeSource struct { // the Glusterfs volume to be mounted with read-only permissions ReadOnly bool `json:"readOnly,omitempty" description:"Glusterfs volume to be mounted with read-only permissions"` } + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +type ComponentCondition struct { + Type ComponentConditionType `json:"type" description:"type of component condition, currently only Healthy"` + Status ConditionStatus `json:"status" description:"current status of this component condition, one of True, False, Unknown"` + Message string `json:"message,omitempty" description:"health check message received from the component"` + Error string `json:"error,omitempty" description:"error code from health check attempt (if any)"` +} + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. 
+type ComponentStatus struct { + TypeMeta `json:",inline"` + + Name string `json:"name,omitempty" description:"name of the component"` + Conditions []ComponentCondition `json:"conditions,omitempty" description:"list of component conditions observed"` +} + +type ComponentStatusList struct { + TypeMeta `json:",inline"` + + Items []ComponentStatus `json:"items" description:"list of component status objects"` +} diff --git a/pkg/api/v1beta2/register.go b/pkg/api/v1beta2/register.go index 3cc2bc7d54a..75393fd89c4 100644 --- a/pkg/api/v1beta2/register.go +++ b/pkg/api/v1beta2/register.go @@ -68,6 +68,8 @@ func init() { &PodLogOptions{}, &PodExecOptions{}, &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, ) // Future names are supported api.Scheme.AddKnownTypeWithName("v1beta2", "Node", &Minion{}) @@ -110,3 +112,5 @@ func (*ListOptions) IsAnAPIObject() {} func (*PodLogOptions) IsAnAPIObject() {} func (*PodExecOptions) IsAnAPIObject() {} func (*PodProxyOptions) IsAnAPIObject() {} +func (*ComponentStatus) IsAnAPIObject() {} +func (*ComponentStatusList) IsAnAPIObject() {} diff --git a/pkg/api/v1beta2/types.go b/pkg/api/v1beta2/types.go index 52e1fc720c8..14fab56566e 100644 --- a/pkg/api/v1beta2/types.go +++ b/pkg/api/v1beta2/types.go @@ -1578,6 +1578,10 @@ const ( ResourceReplicationControllers ResourceName = "replicationcontrollers" // ResourceQuotas, number ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" ) // ResourceQuotaSpec defines the desired hard limits to enforce for Quota @@ -1655,3 +1659,32 @@ type SecretList struct { Items []Secret `json:"items" description:"items is a list of secret objects"` } + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. 
+const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +type ComponentCondition struct { + Type ComponentConditionType `json:"type" description:"type of component condition, currently only Healthy"` + Status ConditionStatus `json:"status" description:"current status of this component condition, one of True, False, Unknown"` + Message string `json:"message,omitempty" description:"health check message received from the component"` + Error string `json:"error,omitempty" description:"error code from health check attempt (if any)"` +} + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + TypeMeta `json:",inline"` + + Name string `json:"name,omitempty" description:"name of the component"` + Conditions []ComponentCondition `json:"conditions,omitempty" description:"list of component conditions observed"` +} + +type ComponentStatusList struct { + TypeMeta `json:",inline"` + + Items []ComponentStatus `json:"items" description:"list of component status objects"` +} diff --git a/pkg/api/v1beta3/register.go b/pkg/api/v1beta3/register.go index 538ea5a9ac9..ea168aaf83a 100644 --- a/pkg/api/v1beta3/register.go +++ b/pkg/api/v1beta3/register.go @@ -61,6 +61,8 @@ func init() { &PodLogOptions{}, &PodExecOptions{}, &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, ) // Legacy names are supported api.Scheme.AddKnownTypeWithName("v1beta3", "Minion", &Node{}) @@ -102,3 +104,5 @@ func (*ListOptions) IsAnAPIObject() {} func (*PodLogOptions) IsAnAPIObject() {} func (*PodExecOptions) IsAnAPIObject() {} func (*PodProxyOptions) IsAnAPIObject() {} +func (*ComponentStatus) IsAnAPIObject() {} +func (*ComponentStatusList) IsAnAPIObject() {} diff --git a/pkg/api/v1beta3/types.go b/pkg/api/v1beta3/types.go index a70316c06d9..f9ecda57586 100644 --- a/pkg/api/v1beta3/types.go +++ b/pkg/api/v1beta3/types.go @@ -1606,6 +1606,10 @@ const ( ResourceReplicationControllers ResourceName = "replicationcontrollers" // 
ResourceQuotas, number ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" ) // ResourceQuotaSpec defines the desired hard limits to enforce for Quota @@ -1672,3 +1676,33 @@ type SecretList struct { Items []Secret `json:"items" description:"items is a list of secret objects"` } + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +type ComponentCondition struct { + Type ComponentConditionType `json:"type" description:"type of component condition, currently only Healthy"` + Status ConditionStatus `json:"status" description:"current status of this component condition, one of True, False, Unknown"` + Message string `json:"message,omitempty" description:"health check message received from the component"` + Error string `json:"error,omitempty" description:"error code from health check attempt (if any)"` +} + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. 
+type ComponentStatus struct { + TypeMeta `json:",inline"` + ObjectMeta `json:"metadata,omitempty" description:"standard object metadata; see https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api-conventions.md#metadata"` + + Conditions []ComponentCondition `json:"conditions,omitempty" description:"list of component conditions observed" patchStrategy:"merge" patchMergeKey:"type"` +} + +type ComponentStatusList struct { + TypeMeta `json:",inline"` + ListMeta `json:"metadata,omitempty" description:"standard list metadata; see https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/api-conventions.md#metadata"` + + Items []ComponentStatus `json:"items" description:"list of component status objects"` +} diff --git a/pkg/api/validation/validation.go b/pkg/api/validation/validation.go index b49e883796b..78c0a9f4737 100644 --- a/pkg/api/validation/validation.go +++ b/pkg/api/validation/validation.go @@ -469,7 +469,7 @@ func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *api.PersistentVolume) er allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, ValidateObjectMetaUpdate(&oldPv.ObjectMeta, &newPv.ObjectMeta).Prefix("metadata")...) if newPv.ResourceVersion == "" { - allErrs = append(allErrs, fmt.Errorf("ResourceVersion must be specified")) + allErrs = append(allErrs, errs.NewFieldRequired("resourceVersion")) } newPv.Spec = oldPv.Spec return allErrs @@ -507,7 +507,7 @@ func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *api.PersistentVol allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, ValidateObjectMetaUpdate(&oldPvc.ObjectMeta, &newPvc.ObjectMeta).Prefix("metadata")...) 
if newPvc.ResourceVersion == "" { - allErrs = append(allErrs, fmt.Errorf("ResourceVersion must be specified")) + allErrs = append(allErrs, errs.NewFieldRequired("resourceVersion")) } newPvc.Spec = oldPvc.Spec return allErrs @@ -1161,7 +1161,7 @@ func ValidateSecret(secret *api.Secret) errs.ValidationErrorList { func validateBasicResource(quantity resource.Quantity) errs.ValidationErrorList { if quantity.Value() < 0 { - return errs.ValidationErrorList{fmt.Errorf("%v is not a valid resource quantity", quantity.Value())} + return errs.ValidationErrorList{errs.NewFieldInvalid("", quantity.Value(), "must be a valid resource quantity")} } return errs.ValidationErrorList{} } @@ -1216,7 +1216,7 @@ func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *api.R allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, ValidateObjectMetaUpdate(&oldResourceQuota.ObjectMeta, &newResourceQuota.ObjectMeta).Prefix("metadata")...) if newResourceQuota.ResourceVersion == "" { - allErrs = append(allErrs, fmt.Errorf("ResourceVersion must be specified")) + allErrs = append(allErrs, errs.NewFieldRequired("resourceVersion")) } for k := range newResourceQuota.Status.Hard { allErrs = append(allErrs, validateResourceName(string(k), string(newResourceQuota.TypeMeta.Kind))...) 
@@ -1242,12 +1242,12 @@ func ValidateNamespace(namespace *api.Namespace) errs.ValidationErrorList { func validateFinalizerName(stringValue string) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} if !util.IsQualifiedName(stringValue) { - return append(allErrs, fmt.Errorf("finalizer name: %v, %v", stringValue, qualifiedNameErrorMsg)) + return append(allErrs, errs.NewFieldInvalid("spec.finalizers", stringValue, qualifiedNameErrorMsg)) } if len(strings.Split(stringValue, "/")) == 1 { if !api.IsStandardFinalizerName(stringValue) { - return append(allErrs, fmt.Errorf("finalizer name: %v is neither a standard finalizer name nor is it fully qualified", stringValue)) + return append(allErrs, errs.NewFieldInvalid("spec.finalizers", stringValue, fmt.Sprintf("finalizer name is neither a standard finalizer name nor is it fully qualified"))) } } diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 693ac3013cf..924165eafa8 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -140,7 +140,8 @@ func (g *APIGroupVersion) InstallREST(container *restful.Container) error { return errors.NewAggregate(registrationErrors) } -// TODO: Convert to go-restful +// TODO: This endpoint is deprecated and should be removed at some point. +// Use "componentstatus" API instead. 
func InstallValidator(mux Mux, servers func() map[string]Server) { mux.Handle("/validate", NewValidator(servers)) } diff --git a/pkg/apiserver/validator.go b/pkg/apiserver/validator.go index 368ea66d67f..1db5e64b5c2 100644 --- a/pkg/apiserver/validator.go +++ b/pkg/apiserver/validator.go @@ -48,13 +48,39 @@ type validator struct { rt http.RoundTripper } +type ServerStatus struct { + Component string `json:"component,omitempty"` + Health string `json:"health,omitempty"` + HealthCode probe.Result `json:"healthCode,omitempty"` + Msg string `json:"msg,omitempty"` + Err string `json:"err,omitempty"` +} + // TODO: can this use pkg/probe/http -func (s *Server) check(client httpGet) (probe.Result, string, error) { +func (server *Server) DoServerCheck(rt http.RoundTripper) (probe.Result, string, error) { + var client *http.Client scheme := "http://" - if s.EnableHTTPS { + if server.EnableHTTPS { + // TODO(roberthbailey): The servers that use HTTPS are currently the + // kubelets, and we should be using a standard kubelet client library + // to talk to them rather than a separate http client. 
+ transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + + client = &http.Client{Transport: transport} scheme = "https://" + } else { + client = &http.Client{Transport: rt} } - resp, err := client.Get(scheme + net.JoinHostPort(s.Addr, strconv.Itoa(s.Port)) + s.Path) + + resp, err := client.Get(scheme + net.JoinHostPort(server.Addr, strconv.Itoa(server.Port)) + server.Path) if err != nil { return probe.Unknown, "", err } @@ -70,14 +96,6 @@ func (s *Server) check(client httpGet) (probe.Result, string, error) { return probe.Success, string(data), nil } -type ServerStatus struct { - Component string `json:"component,omitempty"` - Health string `json:"health,omitempty"` - HealthCode probe.Result `json:"healthCode,omitempty"` - Msg string `json:"msg,omitempty"` - Err string `json:"err,omitempty"` -} - func (v *validator) ServeHTTP(w http.ResponseWriter, r *http.Request) { verb := "get" apiResource := "" @@ -88,21 +106,7 @@ func (v *validator) ServeHTTP(w http.ResponseWriter, r *http.Request) { reply := []ServerStatus{} for name, server := range v.servers() { transport := v.rt - if server.EnableHTTPS { - // TODO(roberthbailey): The servers that use HTTPS are currently the - // kubelets, and we should be using a standard kubelet client library - // to talk to them rather than a separate http client. 
- transport = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } - } - status, msg, err := server.check(&http.Client{Transport: transport}) + status, msg, err := server.DoServerCheck(transport) var errorMsg string if err != nil { errorMsg = err.Error() diff --git a/pkg/apiserver/validator_test.go b/pkg/apiserver/validator_test.go index 6d0c3a2a0e0..5b01a88f883 100644 --- a/pkg/apiserver/validator_test.go +++ b/pkg/apiserver/validator_test.go @@ -65,8 +65,7 @@ func TestValidate(t *testing.T) { StatusCode: test.code, }, } - fake := &http.Client{Transport: fakeRT} - status, data, err := s.check(fake) + status, data, err := s.DoServerCheck(fakeRT) expect := fmt.Sprintf("http://%s:%d/healthz", s.Addr, s.Port) if fakeRT.url != expect { t.Errorf("expected %s, got %s", expect, fakeRT.url) diff --git a/pkg/client/cache/listers.go b/pkg/client/cache/listers.go index f989116fad8..0afd64b8661 100644 --- a/pkg/client/cache/listers.go +++ b/pkg/client/cache/listers.go @@ -39,19 +39,51 @@ type StoreToPodLister struct { Store } -// TODO Get rid of the selector because that is confusing because the user might not realize that there has already been -// some selection at the caching stage. Also, consistency will facilitate code generation. However, the pkg/client -// is inconsistent too. -func (s *StoreToPodLister) List(selector labels.Selector) (pods []api.Pod, err error) { +// Please note that selector is filtering among the pods that have gotten into +// the store; there may have been some filtering that already happened before +// that. +// +// TODO: converge on the interface in pkg/client. 
+func (s *StoreToPodLister) List(selector labels.Selector) (pods []*api.Pod, err error) { + // TODO: it'd be great to just call + // s.Pods(api.NamespaceAll).List(selector), however then we'd have to + // remake the list.Items as a []*api.Pod. So leave this separate for + // now. for _, m := range s.Store.List() { pod := m.(*api.Pod) if selector.Matches(labels.Set(pod.Labels)) { - pods = append(pods, *pod) + pods = append(pods, pod) } } return pods, nil } +// Pods is taking baby steps to be more like the api in pkg/client +func (s *StoreToPodLister) Pods(namespace string) storePodsNamespacer { + return storePodsNamespacer{s.Store, namespace} +} + +type storePodsNamespacer struct { + store Store + namespace string +} + +// Please note that selector is filtering among the pods that have gotten into +// the store; there may have been some filtering that already happened before +// that. +func (s storePodsNamespacer) List(selector labels.Selector) (pods api.PodList, err error) { + list := api.PodList{} + for _, m := range s.store.List() { + pod := m.(*api.Pod) + if s.namespace == api.NamespaceAll || s.namespace == pod.Namespace { + if selector.Matches(labels.Set(pod.Labels)) { + list.Items = append(list.Items, *pod) + } + } + } + return list, nil +} + // Exists returns true if a pod matching the namespace/name of the given pod exists in the store. func (s *StoreToPodLister) Exists(pod *api.Pod) (bool, error) { _, exists, err := s.Store.Get(pod) @@ -106,7 +138,7 @@ func (s *StoreToServiceLister) List() (services api.ServiceList, err error) { // TODO: Move this back to scheduler as a helper function that takes a Store, // rather than a method of StoreToServiceLister. 
-func (s *StoreToServiceLister) GetPodServices(pod api.Pod) (services []api.Service, err error) { +func (s *StoreToServiceLister) GetPodServices(pod *api.Pod) (services []api.Service, err error) { var selector labels.Selector var service api.Service @@ -116,6 +148,10 @@ func (s *StoreToServiceLister) GetPodServices(pod api.Pod) (services []api.Servi if service.Namespace != pod.Namespace { continue } + if service.Spec.Selector == nil { + // services with nil selectors match nothing, not everything. + continue + } selector = labels.Set(service.Spec.Selector).AsSelector() if selector.Matches(labels.Set(pod.Labels)) { services = append(services, service) diff --git a/pkg/client/cache/listers_test.go b/pkg/client/cache/listers_test.go index 3f759c3c715..5e57208559a 100644 --- a/pkg/client/cache/listers_test.go +++ b/pkg/client/cache/listers_test.go @@ -90,3 +90,34 @@ func TestStoreToPodLister(t *testing.T) { t.Errorf("Unexpected pod exists") } } + +func TestStoreToServiceLister(t *testing.T) { + store := NewStore(MetaNamespaceKeyFunc) + store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{}, + }, + }) + store.Add(&api.Service{ObjectMeta: api.ObjectMeta{Name: "bar"}}) + ssl := StoreToServiceLister{store} + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "foopod", + Labels: map[string]string{"role": "foo"}, + }, + } + + services, err := ssl.GetPodServices(pod) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + + if len(services) != 1 { + t.Fatalf("Expected 1 service, got %v", len(services)) + } + if e, a := "foo", services[0].Name; e != a { + t.Errorf("Expected service %q, got %q", e, a) + } +} diff --git a/pkg/client/cache/store.go b/pkg/client/cache/store.go index b6031d43ca3..40aceec467d 100644 --- a/pkg/client/cache/store.go +++ b/pkg/client/cache/store.go @@ -18,6 +18,8 @@ package cache import ( "fmt" + "strings" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta" ) @@ 
-67,6 +69,9 @@ type ExplicitKey string // keys for API objects which implement meta.Interface. // The key uses the format / unless is empty, then // it's just . +// +// TODO: replace key-as-string with a key-as-struct so that this +// packing/unpacking won't be necessary. func MetaNamespaceKeyFunc(obj interface{}) (string, error) { if key, ok := obj.(ExplicitKey); ok { return string(key), nil @@ -81,6 +86,25 @@ func MetaNamespaceKeyFunc(obj interface{}) (string, error) { return meta.Name(), nil } +// SplitMetaNamespaceKey returns the namespace and name that +// MetaNamespaceKeyFunc encoded into key. +// +// TODO: replace key-as-string with a key-as-struct so that this +// packing/unpacking won't be necessary. +func SplitMetaNamespaceKey(key string) (namespace, name string, err error) { + parts := strings.Split(key, "/") + switch len(parts) { + case 1: + // name only, no namespace + return "", parts[0], nil + case 2: + // name and namespace + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected key format: %q", key) +} + // cache responsibilities are limited to: // 1. Computing keys for objects via keyFunc // 2. Invoking methods of a ThreadSafeStorage interface diff --git a/pkg/client/client.go b/pkg/client/client.go index 3903e4324cf..cf99707cb5c 100644 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -43,6 +43,7 @@ type Interface interface { NamespacesInterface PersistentVolumesInterface PersistentVolumeClaimsNamespacer + ComponentStatusesInterface } func (c *Client) ReplicationControllers(namespace string) ReplicationControllerInterface { @@ -92,6 +93,10 @@ func (c *Client) PersistentVolumeClaims(namespace string) PersistentVolumeClaimI return newPersistentVolumeClaims(c, namespace) } +func (c *Client) ComponentStatuses() ComponentStatusInterface { + return newComponentStatuses(c) +} + // VersionInterface has a method to retrieve the server version. 
type VersionInterface interface { ServerVersion() (*version.Info, error) @@ -137,6 +142,25 @@ func (c *Client) ServerAPIVersions() (*api.APIVersions, error) { return &v, nil } +type ComponentValidatorInterface interface { + ValidateComponents() (*api.ComponentStatusList, error) +} + +// ValidateComponents retrieves and parses the master's self-monitored cluster state. +// TODO: This should hit the versioned endpoint when that is implemented. +func (c *Client) ValidateComponents() (*api.ComponentStatusList, error) { + body, err := c.Get().AbsPath("/validate").DoRaw() + if err != nil { + return nil, err + } + + statuses := []api.ComponentStatus{} + if err := json.Unmarshal(body, &statuses); err != nil { + return nil, fmt.Errorf("got '%s': %v", string(body), err) + } + return &api.ComponentStatusList{Items: statuses}, nil +} + // IsTimeout tests if this is a timeout error in the underlying transport. // This is unbelievably ugly. // See: http://stackoverflow.com/questions/23494950/specifically-check-for-timeout-error for details diff --git a/pkg/client/componentstatuses.go b/pkg/client/componentstatuses.go new file mode 100644 index 00000000000..133aae05c3f --- /dev/null +++ b/pkg/client/componentstatuses.go @@ -0,0 +1,63 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" + "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" +) + +type ComponentStatusesInterface interface { + ComponentStatuses() ComponentStatusInterface +} + +// ComponentStatusInterface contains methods to retrieve ComponentStatus +type ComponentStatusInterface interface { + List(label labels.Selector, field fields.Selector) (*api.ComponentStatusList, error) + Get(name string) (*api.ComponentStatus, error) + + // TODO: It'd be nice to have watch support at some point + //Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) +} + +// componentStatuses implements ComponentStatusesInterface +type componentStatuses struct { + client *Client +} + +func newComponentStatuses(c *Client) *componentStatuses { + return &componentStatuses{c} +} + +func (c *componentStatuses) List(label labels.Selector, field fields.Selector) (result *api.ComponentStatusList, err error) { + result = &api.ComponentStatusList{} + err = c.client.Get(). + Resource("componentStatuses"). + LabelsSelectorParam(label). + FieldsSelectorParam(field). + Do(). 
+ Into(result) + + return result, err +} + +func (c *componentStatuses) Get(name string) (result *api.ComponentStatus, err error) { + result = &api.ComponentStatus{} + err = c.client.Get().Resource("componentStatuses").Name(name).Do().Into(result) + return +} diff --git a/pkg/client/endpoints.go b/pkg/client/endpoints.go index a3684468fb8..ce634a3cb22 100644 --- a/pkg/client/endpoints.go +++ b/pkg/client/endpoints.go @@ -35,6 +35,7 @@ type EndpointsInterface interface { Create(endpoints *api.Endpoints) (*api.Endpoints, error) List(selector labels.Selector) (*api.EndpointsList, error) Get(name string) (*api.Endpoints, error) + Delete(name string) error Update(endpoints *api.Endpoints) (*api.Endpoints, error) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) } @@ -76,6 +77,11 @@ func (c *endpoints) Get(name string) (result *api.Endpoints, err error) { return } +// Delete takes the name of the endpoint, and returns an error if one occurs +func (c *endpoints) Delete(name string) error { + return c.r.Delete().Namespace(c.ns).Resource("endpoints").Name(name).Do().Error() +} + // Watch returns a watch.Interface that watches the requested endpoints for a service. func (c *endpoints) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { return c.r.Get(). diff --git a/pkg/client/testclient/fake_componentstatuses.go b/pkg/client/testclient/fake_componentstatuses.go new file mode 100644 index 00000000000..b14150f63af --- /dev/null +++ b/pkg/client/testclient/fake_componentstatuses.go @@ -0,0 +1,47 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testclient + +import ( + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" + "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" +) + +// Fake implements ComponentStatusInterface. +type FakeComponentStatuses struct { + Fake *Fake +} + +func (c *FakeComponentStatuses) List(label labels.Selector, field fields.Selector) (result *api.ComponentStatusList, err error) { + obj, err := c.Fake.Invokes(FakeAction{Action: "list-componentstatuses"}, &api.ComponentStatusList{}) + return obj.(*api.ComponentStatusList), err +} + +func (c *FakeComponentStatuses) Get(name string) (*api.ComponentStatus, error) { + obj, err := c.Fake.Invokes(FakeAction{Action: "get-componentstatus", Value: name}, &api.ComponentStatus{}) + // c.Actions = append(c.Actions, FakeAction{Action: "get-componentstatuses", Value: nil}) + // testStatus := &api.ComponentStatus{ + // Name: "test", + // Health: "ok", + // HealthCode: int(probe.Success), + // Message: "ok", + // Error: "", + // } + // return &api.ComponentStatusList{Items: []api.ComponentStatus{*testStatus}}, nil + return obj.(*api.ComponentStatus), err +} diff --git a/pkg/client/testclient/fake_endpoints.go b/pkg/client/testclient/fake_endpoints.go index 617b9f757bd..891dc9664ad 100644 --- a/pkg/client/testclient/fake_endpoints.go +++ b/pkg/client/testclient/fake_endpoints.go @@ -45,6 +45,11 @@ func (c *FakeEndpoints) Get(name string) (*api.Endpoints, error) { return obj.(*api.Endpoints), err } +func (c *FakeEndpoints) Delete(name string) error { + _, err := 
c.Fake.Invokes(FakeAction{Action: "delete-endpoints", Value: name}, &api.Endpoints{}) + return err +} + func (c *FakeEndpoints) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-endpoints", Value: resourceVersion}) return c.Fake.Watch, c.Fake.Err diff --git a/pkg/client/testclient/testclient.go b/pkg/client/testclient/testclient.go index 02799596629..41ec014b153 100644 --- a/pkg/client/testclient/testclient.go +++ b/pkg/client/testclient/testclient.go @@ -125,3 +125,7 @@ func (c *Fake) ServerAPIVersions() (*api.APIVersions, error) { c.Actions = append(c.Actions, FakeAction{Action: "get-apiversions", Value: nil}) return &api.APIVersions{Versions: []string{"v1beta1", "v1beta2"}}, nil } + +func (c *Fake) ComponentStatuses() client.ComponentStatusInterface { + return &FakeComponentStatuses{Fake: c} +} diff --git a/pkg/cloudprovider/aws/aws_test.go b/pkg/cloudprovider/aws/aws_test.go index 79c89ea5cf0..382f1dab85d 100644 --- a/pkg/cloudprovider/aws/aws_test.go +++ b/pkg/cloudprovider/aws/aws_test.go @@ -164,12 +164,25 @@ type FakeEC2 struct { instances []ec2.Instance } +func contains(haystack []string, needle string) bool { + for _, s := range haystack { + if needle == s { + return true + } + } + return false +} + func (self *FakeEC2) Instances(instanceIds []string, filter *ec2InstanceFilter) (resp *ec2.InstancesResp, err error) { matches := []ec2.Instance{} for _, instance := range self.instances { - if filter == nil || filter.Matches(instance) { - matches = append(matches, instance) + if filter != nil && !filter.Matches(instance) { + continue } + if instanceIds != nil && !contains(instanceIds, instance.InstanceId) { + continue + } + matches = append(matches, instance) } return &ec2.InstancesResp{"", []ec2.Reservation{ diff --git a/pkg/cloudprovider/gce/gce.go b/pkg/cloudprovider/gce/gce.go index f5f577bce38..fcbd35e7ac9 100644 --- 
a/pkg/cloudprovider/gce/gce.go +++ b/pkg/cloudprovider/gce/gce.go @@ -22,7 +22,6 @@ import ( "io/ioutil" "net" "net/http" - "os/exec" "path" "strconv" "strings" @@ -446,32 +445,8 @@ func (gce *GCECloud) ExternalID(instance string) (string, error) { return strconv.FormatUint(inst.Id, 10), nil } -// fqdnSuffix is hacky function to compute the delta between hostame and hostname -f. -func fqdnSuffix() (string, error) { - fullHostname, err := exec.Command("hostname", "-f").Output() - if err != nil { - return "", err - } - hostname, err := exec.Command("hostname").Output() - if err != nil { - return "", err - } - return strings.TrimSpace(string(fullHostname)[len(string(hostname)):]), nil -} - // List is an implementation of Instances.List. func (gce *GCECloud) List(filter string) ([]string, error) { - // GCE gives names without their fqdn suffix, so get that here for appending. - // This is needed because the kubelet looks for its jobs in /registry/hosts//pods - // We should really just replace this convention, with a negotiated naming protocol for kubelet's - // to register with the master. - suffix, err := fqdnSuffix() - if err != nil { - return []string{}, err - } - if len(suffix) > 0 { - suffix = "." + suffix - } listCall := gce.service.Instances.List(gce.projectID, gce.zone) if len(filter) > 0 { listCall = listCall.Filter("name eq " + filter) @@ -482,7 +457,7 @@ func (gce *GCECloud) List(filter string) ([]string, error) { } var instances []string for _, instance := range res.Items { - instances = append(instances, instance.Name+suffix) + instances = append(instances, instance.Name) } return instances, nil } diff --git a/pkg/cloudprovider/openstack/openstack.go b/pkg/cloudprovider/openstack/openstack.go index 9ab41c4dd4d..97c9d2ce22f 100644 --- a/pkg/cloudprovider/openstack/openstack.go +++ b/pkg/cloudprovider/openstack/openstack.go @@ -76,7 +76,6 @@ type LoadBalancerOpts struct { // OpenStack is an implementation of cloud provider Interface for OpenStack. 
type OpenStack struct { provider *gophercloud.ProviderClient - authOpts gophercloud.AuthOptions region string lbOpts LoadBalancerOpts } @@ -117,11 +116,7 @@ func (cfg Config) toAuthOptions() gophercloud.AuthOptions { TenantID: cfg.Global.TenantId, TenantName: cfg.Global.TenantName, - // Persistent service, so we need to be able to renew - // tokens. - // (gophercloud doesn't appear to actually reauth yet, - // hence the explicit openstack.Authenticate() calls - // below) + // Persistent service, so we need to be able to renew tokens. AllowReauth: true, } } @@ -138,15 +133,13 @@ func readConfig(config io.Reader) (Config, error) { } func newOpenStack(cfg Config) (*OpenStack, error) { - authOpts := cfg.toAuthOptions() - provider, err := openstack.AuthenticatedClient(authOpts) + provider, err := openstack.AuthenticatedClient(cfg.toAuthOptions()) if err != nil { return nil, err } os := OpenStack{ provider: provider, - authOpts: authOpts, region: cfg.Global.Region, lbOpts: cfg.LoadBalancer, } @@ -162,11 +155,6 @@ type Instances struct { func (os *OpenStack) Instances() (cloudprovider.Instances, bool) { glog.V(4).Info("openstack.Instances() called") - if err := openstack.Authenticate(os.provider, os.authOpts); err != nil { - glog.Warningf("Failed to reauthenticate: %v", err) - return nil, false - } - compute, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{ Region: os.region, }) @@ -413,11 +401,6 @@ type LoadBalancer struct { func (os *OpenStack) TCPLoadBalancer() (cloudprovider.TCPLoadBalancer, bool) { glog.V(4).Info("openstack.TCPLoadBalancer() called") - if err := openstack.Authenticate(os.provider, os.authOpts); err != nil { - glog.Warningf("Failed to reauthenticate: %v", err) - return nil, false - } - // TODO: Search for and support Rackspace loadbalancer API, and others. 
network, err := openstack.NewNetworkV2(os.provider, gophercloud.EndpointOpts{ Region: os.region, diff --git a/pkg/controller/replication_controller.go b/pkg/controller/replication_controller.go index bdf77eedacf..0e43b909911 100644 --- a/pkg/controller/replication_controller.go +++ b/pkg/controller/replication_controller.go @@ -18,6 +18,7 @@ package controller import ( "fmt" + "sort" "sync" "time" @@ -163,7 +164,7 @@ func (rm *ReplicationManager) watchControllers(resourceVersion *string) { if !ok { if status, ok := event.Object.(*api.Status); ok { if status.Status == api.StatusFailure { - glog.Errorf("failed to watch: %v", status) + glog.Errorf("Failed to watch: %v", status) // Clear resource version here, as above, this won't hurt consistency, but we // should consider introspecting more carefully here. (or make the apiserver smarter) // "why not both?" @@ -178,7 +179,7 @@ func (rm *ReplicationManager) watchControllers(resourceVersion *string) { *resourceVersion = rc.ResourceVersion // Sync even if this is a deletion event, to ensure that we leave // it in the desired state. - glog.V(4).Infof("About to sync from watch: %v", rc.Name) + glog.V(4).Infof("About to sync from watch: %q", rc.Name) if err := rm.syncHandler(*rc); err != nil { util.HandleError(fmt.Errorf("unexpected sync error: %v", err)) } @@ -186,32 +187,54 @@ func (rm *ReplicationManager) watchControllers(resourceVersion *string) { } } -// Helper function. Also used in pkg/registry/controller, for now. -func FilterActivePods(pods []api.Pod) []api.Pod { - var result []api.Pod +// filterActivePods returns pods that have not terminated. 
+func filterActivePods(pods []api.Pod) []*api.Pod { + var result []*api.Pod for _, value := range pods { if api.PodSucceeded != value.Status.Phase && api.PodFailed != value.Status.Phase { - result = append(result, value) + result = append(result, &value) } } return result } +type activePods []*api.Pod + +func (s activePods) Len() int { return len(s) } +func (s activePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s activePods) Less(i, j int) bool { + // Unassigned < assigned + if s[i].Spec.Host == "" && s[j].Spec.Host != "" { + return true + } + // PodPending < PodUnknown < PodRunning + m := map[api.PodPhase]int{api.PodPending: 0, api.PodUnknown: 1, api.PodRunning: 2} + if m[s[i].Status.Phase] != m[s[j].Status.Phase] { + return m[s[i].Status.Phase] < m[s[j].Status.Phase] + } + // Not ready < ready + if !api.IsPodReady(s[i]) && api.IsPodReady(s[j]) { + return true + } + return false +} + func (rm *ReplicationManager) syncReplicationController(controller api.ReplicationController) error { s := labels.Set(controller.Spec.Selector).AsSelector() podList, err := rm.kubeClient.Pods(controller.Namespace).List(s) if err != nil { return err } - filteredList := FilterActivePods(podList.Items) - activePods := len(filteredList) - diff := activePods - controller.Spec.Replicas + filteredList := filterActivePods(podList.Items) + numActivePods := len(filteredList) + diff := numActivePods - controller.Spec.Replicas if diff < 0 { diff *= -1 wait := sync.WaitGroup{} wait.Add(diff) - glog.V(2).Infof("Too few \"%s\" replicas, creating %d\n", controller.Name, diff) + glog.V(2).Infof("Too few %q replicas, creating %d", controller.Name, diff) for i := 0; i < diff; i++ { go func() { defer wait.Done() @@ -220,7 +243,12 @@ func (rm *ReplicationManager) syncReplicationController(controller api.Replicati } wait.Wait() } else if diff > 0 { - glog.V(2).Infof("Too many \"%s\" replicas, deleting %d\n", controller.Name, diff) + glog.V(2).Infof("Too many %q replicas, deleting %d", 
controller.Name, diff) + // Sort the pods in the order such that not-ready < ready, unscheduled + // < scheduled, and pending < running. This ensures that we delete pods + // in the earlier stages whenever possible. + sort.Sort(activePods(filteredList)) + wait := sync.WaitGroup{} wait.Add(diff) for i := 0; i < diff; i++ { @@ -231,8 +259,8 @@ func (rm *ReplicationManager) syncReplicationController(controller api.Replicati } wait.Wait() } - if controller.Status.Replicas != activePods { - controller.Status.Replicas = activePods + if controller.Status.Replicas != numActivePods { + controller.Status.Replicas = numActivePods _, err = rm.kubeClient.ReplicationControllers(controller.Namespace).Update(&controller) if err != nil { return err diff --git a/pkg/controller/replication_controller_test.go b/pkg/controller/replication_controller_test.go index 8adb7094ba5..aac5fc7bcb3 100644 --- a/pkg/controller/replication_controller_test.go +++ b/pkg/controller/replication_controller_test.go @@ -18,8 +18,11 @@ package controller import ( "fmt" + "math/rand" "net/http" "net/http/httptest" + "reflect" + "sort" "sync" "testing" "time" @@ -375,3 +378,52 @@ func TestWatchControllers(t *testing.T) { t.Errorf("Expected 1 call but got 0") } } + +func TestSortingActivePods(t *testing.T) { + numPods := 5 + podList := newPodList(numPods) + pods := make([]*api.Pod, len(podList.Items)) + for i := range podList.Items { + pods[i] = &podList.Items[i] + } + // pods[0] is not scheduled yet. + pods[0].Spec.Host = "" + pods[0].Status.Phase = api.PodPending + // pods[1] is scheduled but pending. + pods[1].Spec.Host = "bar" + pods[1].Status.Phase = api.PodPending + // pods[2] is unknown. + pods[2].Spec.Host = "foo" + pods[2].Status.Phase = api.PodUnknown + // pods[3] is running but not ready. + pods[3].Spec.Host = "foo" + pods[3].Status.Phase = api.PodRunning + // pods[4] is running and ready. 
+ pods[4].Spec.Host = "foo" + pods[4].Status.Phase = api.PodRunning + pods[4].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}} + + getOrder := func(pods []*api.Pod) []string { + names := make([]string, len(pods)) + for i := range pods { + names[i] = pods[i].Name + } + return names + } + + expected := getOrder(pods) + + for i := 0; i < 20; i++ { + idx := rand.Perm(numPods) + randomizedPods := make([]*api.Pod, numPods) + for j := 0; j < numPods; j++ { + randomizedPods[j] = pods[idx[j]] + } + sort.Sort(activePods(randomizedPods)) + actual := getOrder(randomizedPods) + + if !reflect.DeepEqual(actual, expected) { + t.Errorf("expected %v, got %v", expected, actual) + } + } +} diff --git a/pkg/conversion/encode.go b/pkg/conversion/encode.go index fa753e0d3a2..e83e26cf878 100644 --- a/pkg/conversion/encode.go +++ b/pkg/conversion/encode.go @@ -53,7 +53,7 @@ func (s *Scheme) EncodeToVersion(obj interface{}, destVersion string) (data []by obj = maybeCopy(obj) v, _ := EnforcePtr(obj) // maybeCopy guarantees a pointer if _, registered := s.typeToVersion[v.Type()]; !registered { - return nil, fmt.Errorf("type %v is not registered and it will be impossible to Decode it, therefore Encode will refuse to encode it.", v.Type()) + return nil, fmt.Errorf("type %v is not registered for %q and it will be impossible to Decode it, therefore Encode will refuse to encode it.", v.Type(), destVersion) } objVersion, objKind, err := s.ObjectVersionAndKind(obj) diff --git a/pkg/kubectl/cmd/cmd_test.go b/pkg/kubectl/cmd/cmd_test.go index 3a767b0d684..b4f4dadf3e1 100644 --- a/pkg/kubectl/cmd/cmd_test.go +++ b/pkg/kubectl/cmd/cmd_test.go @@ -49,23 +49,32 @@ type externalType struct { Name string `json:"name"` } -func (*internalType) IsAnAPIObject() {} -func (*externalType) IsAnAPIObject() {} +type ExternalType2 struct { + Kind string `json:"kind"` + APIVersion string `json:"apiVersion"` + + Name string `json:"name"` +} + +func (*internalType) IsAnAPIObject() 
{} +func (*externalType) IsAnAPIObject() {} +func (*ExternalType2) IsAnAPIObject() {} func newExternalScheme() (*runtime.Scheme, meta.RESTMapper, runtime.Codec) { scheme := runtime.NewScheme() scheme.AddKnownTypeWithName("", "Type", &internalType{}) scheme.AddKnownTypeWithName("unlikelyversion", "Type", &externalType{}) + scheme.AddKnownTypeWithName("v1beta1", "Type", &ExternalType2{}) codec := runtime.CodecFor(scheme, "unlikelyversion") - mapper := meta.NewDefaultRESTMapper([]string{"unlikelyversion"}, func(version string) (*meta.VersionInterfaces, bool) { + mapper := meta.NewDefaultRESTMapper([]string{"unlikelyversion", "v1beta1"}, func(version string) (*meta.VersionInterfaces, bool) { return &meta.VersionInterfaces{ - Codec: codec, + Codec: runtime.CodecFor(scheme, version), ObjectConvertor: scheme, MetadataAccessor: meta.NewAccessor(), - }, (version == "unlikelyversion") + }, (version == "v1beta1" || version == "unlikelyversion") }) - for _, version := range []string{"unlikelyversion"} { + for _, version := range []string{"unlikelyversion", "v1beta1"} { for kind := range scheme.KnownTypes(version) { mixedCase := false scope := meta.RESTScopeNamespace @@ -142,6 +151,20 @@ func NewTestFactory() (*cmdutil.Factory, *testFactory, runtime.Codec) { }, t, codec } +func NewMixedFactory(apiClient resource.RESTClient) (*cmdutil.Factory, *testFactory, runtime.Codec) { + f, t, c := NewTestFactory() + f.Object = func() (meta.RESTMapper, runtime.ObjectTyper) { + return meta.MultiRESTMapper{t.Mapper, latest.RESTMapper}, runtime.MultiObjectTyper{t.Typer, api.Scheme} + } + f.RESTClient = func(m *meta.RESTMapping) (resource.RESTClient, error) { + if m.ObjectConvertor == api.Scheme { + return apiClient, t.Err + } + return t.Client, t.Err + } + return f, t, c +} + func NewAPIFactory() (*cmdutil.Factory, *testFactory, runtime.Codec) { t := &testFactory{ Validator: validation.NullSchema{}, @@ -150,6 +173,13 @@ func NewAPIFactory() (*cmdutil.Factory, *testFactory, runtime.Codec) { 
Object: func() (meta.RESTMapper, runtime.ObjectTyper) { return latest.RESTMapper, api.Scheme }, + Client: func() (*client.Client, error) { + // Swap out the HTTP client out of the client with the fake's version. + fakeClient := t.Client.(*client.FakeRESTClient) + c := client.NewOrDie(t.ClientConfig) + c.Client = fakeClient.Client + return c, t.Err + }, RESTClient: func(*meta.RESTMapping) (resource.RESTClient, error) { return t.Client, t.Err }, diff --git a/pkg/kubectl/cmd/config/config.go b/pkg/kubectl/cmd/config/config.go index b7576ad19ca..6d717b73f2c 100644 --- a/pkg/kubectl/cmd/config/config.go +++ b/pkg/kubectl/cmd/config/config.go @@ -186,7 +186,7 @@ func (o *PathOptions) GetExplicitFile() string { // ModifyConfig takes a Config object, iterates through Clusters, AuthInfos, and Contexts, uses the LocationOfOrigin if specified or // uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow. // Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values -// (no nil strings), we're forced have separate handling for them. In all the currently known cases, newConfig should have, at most, one difference, +// (no nil strings), we're forced have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference, // that means that this code will only write into a single file. func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config) error { startingConfig, err := configAccess.GetStartingConfig() @@ -194,124 +194,123 @@ func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config) erro return err } - // at this point, config and startingConfig should have, at most, one difference. We need to chase the difference until we find it - // then we'll build a partial config object to call write upon. 
Special case the test for current context and preferences since those - // always write to the default file. - switch { - case reflect.DeepEqual(*startingConfig, newConfig): + // We need to find all differences, locate their original files, read a partial config to modify only that stanza and write out the file. + // Special case the test for current context and preferences since those always write to the default file. + if reflect.DeepEqual(*startingConfig, newConfig) { // nothing to do + return nil + } - case startingConfig.CurrentContext != newConfig.CurrentContext: + if startingConfig.CurrentContext != newConfig.CurrentContext { if err := writeCurrentContext(configAccess, newConfig.CurrentContext); err != nil { return err } + } - case !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences): + if !reflect.DeepEqual(startingConfig.Preferences, newConfig.Preferences) { if err := writePreferences(configAccess, newConfig.Preferences); err != nil { return err } + } - default: - // something is different. Search every cluster, authInfo, and context. First from new to old for differences, then from old to new for deletions - for key, cluster := range newConfig.Clusters { - startingCluster, exists := startingConfig.Clusters[key] - if !reflect.DeepEqual(cluster, startingCluster) || !exists { - destinationFile := cluster.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } + // Search every cluster, authInfo, and context. 
First from new to old for differences, then from old to new for deletions + for key, cluster := range newConfig.Clusters { + startingCluster, exists := startingConfig.Clusters[key] + if !reflect.DeepEqual(cluster, startingCluster) || !exists { + destinationFile := cluster.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } - configToWrite := getConfigFromFileOrDie(destinationFile) - configToWrite.Clusters[key] = cluster + configToWrite := getConfigFromFileOrDie(destinationFile) + configToWrite.Clusters[key] = cluster - if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } + if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { + return err } } + } - for key, context := range newConfig.Contexts { - startingContext, exists := startingConfig.Contexts[key] - if !reflect.DeepEqual(context, startingContext) || !exists { - destinationFile := context.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } + for key, context := range newConfig.Contexts { + startingContext, exists := startingConfig.Contexts[key] + if !reflect.DeepEqual(context, startingContext) || !exists { + destinationFile := context.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } - configToWrite := getConfigFromFileOrDie(destinationFile) - configToWrite.Contexts[key] = context + configToWrite := getConfigFromFileOrDie(destinationFile) + configToWrite.Contexts[key] = context - if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } + if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { + return err } } + } - for key, authInfo := range newConfig.AuthInfos { - startingAuthInfo, exists := startingConfig.AuthInfos[key] - if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists { - destinationFile := 
authInfo.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } + for key, authInfo := range newConfig.AuthInfos { + startingAuthInfo, exists := startingConfig.AuthInfos[key] + if !reflect.DeepEqual(authInfo, startingAuthInfo) || !exists { + destinationFile := authInfo.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } - configToWrite := getConfigFromFileOrDie(destinationFile) - configToWrite.AuthInfos[key] = authInfo + configToWrite := getConfigFromFileOrDie(destinationFile) + configToWrite.AuthInfos[key] = authInfo - if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } + if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { + return err } } + } - for key, cluster := range startingConfig.Clusters { - if _, exists := newConfig.Clusters[key]; !exists { - destinationFile := cluster.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } + for key, cluster := range startingConfig.Clusters { + if _, exists := newConfig.Clusters[key]; !exists { + destinationFile := cluster.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } - configToWrite := getConfigFromFileOrDie(destinationFile) - delete(configToWrite.Clusters, key) + configToWrite := getConfigFromFileOrDie(destinationFile) + delete(configToWrite.Clusters, key) - if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } + if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { + return err } } + } - for key, context := range startingConfig.Contexts { - if _, exists := newConfig.Contexts[key]; !exists { - destinationFile := context.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } + for key, context := range 
startingConfig.Contexts { + if _, exists := newConfig.Contexts[key]; !exists { + destinationFile := context.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } - configToWrite := getConfigFromFileOrDie(destinationFile) - delete(configToWrite.Contexts, key) + configToWrite := getConfigFromFileOrDie(destinationFile) + delete(configToWrite.Contexts, key) - if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } + if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { + return err } } + } - for key, authInfo := range startingConfig.AuthInfos { - if _, exists := newConfig.AuthInfos[key]; !exists { - destinationFile := authInfo.LocationOfOrigin - if len(destinationFile) == 0 { - destinationFile = configAccess.GetDefaultFilename() - } + for key, authInfo := range startingConfig.AuthInfos { + if _, exists := newConfig.AuthInfos[key]; !exists { + destinationFile := authInfo.LocationOfOrigin + if len(destinationFile) == 0 { + destinationFile = configAccess.GetDefaultFilename() + } - configToWrite := getConfigFromFileOrDie(destinationFile) - delete(configToWrite.AuthInfos, key) + configToWrite := getConfigFromFileOrDie(destinationFile) + delete(configToWrite.AuthInfos, key) - if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { - return err - } + if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { + return err } } - } return nil diff --git a/pkg/kubectl/cmd/delete.go b/pkg/kubectl/cmd/delete.go index f5a2ddf3601..545f6c8f584 100644 --- a/pkg/kubectl/cmd/delete.go +++ b/pkg/kubectl/cmd/delete.go @@ -87,7 +87,7 @@ func RunDelete(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str FilenameParam(filenames...). SelectorParam(cmdutil.GetFlagString(cmd, "selector")). SelectAllParam(cmdutil.GetFlagBool(cmd, "all")). - ResourceTypeOrNameArgs(false, args...). 
+ ResourceTypeOrNameArgs(false, args...).RequireObject(false). Flatten(). Do() err = r.Err() diff --git a/pkg/kubectl/cmd/delete_test.go b/pkg/kubectl/cmd/delete_test.go index 44a6b45aa0d..98fa13b904a 100644 --- a/pkg/kubectl/cmd/delete_test.go +++ b/pkg/kubectl/cmd/delete_test.go @@ -27,6 +27,66 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/client" ) +func TestDeleteObjectByTuple(t *testing.T) { + _, _, rc := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &client.FakeRESTClient{ + Codec: codec, + Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/redis-master-controller" && m == "DELETE": + return &http.Response{StatusCode: 200, Body: objBody(codec, &rc.Items[0])}, nil + default: + // Ensures no GET is performed when deleting by name + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + }), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDelete(f, buf) + cmd.Flags().Set("namespace", "test") + cmd.Run(cmd, []string{"replicationcontrollers/redis-master-controller"}) + + if buf.String() != "replicationControllers/redis-master-controller\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + +func TestDeleteNamedObject(t *testing.T) { + _, _, rc := testData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &client.FakeRESTClient{ + Codec: codec, + Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == "/namespaces/test/replicationcontrollers/redis-master-controller" && m == "DELETE": + return &http.Response{StatusCode: 200, Body: objBody(codec, &rc.Items[0])}, nil + default: + // Ensures no GET is performed when deleting by name + t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) + return nil, nil + } + 
}), + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdDelete(f, buf) + cmd.Flags().Set("namespace", "test") + cmd.Run(cmd, []string{"replicationcontrollers", "redis-master-controller"}) + + if buf.String() != "replicationControllers/redis-master-controller\n" { + t.Errorf("unexpected output: %s", buf.String()) + } +} + func TestDeleteObject(t *testing.T) { _, _, rc := testData() diff --git a/pkg/kubectl/cmd/describe.go b/pkg/kubectl/cmd/describe.go index fcb2f24714e..0b19931332c 100644 --- a/pkg/kubectl/cmd/describe.go +++ b/pkg/kubectl/cmd/describe.go @@ -22,6 +22,7 @@ import ( "github.com/spf13/cobra" + "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl" cmdutil "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource" ) @@ -38,6 +39,7 @@ given resource.`, err := RunDescribe(f, out, cmd, args) cmdutil.CheckErr(err) }, + ValidArgs: kubectl.DescribableResources(), } return cmd } diff --git a/pkg/kubectl/cmd/get.go b/pkg/kubectl/cmd/get.go index 72519966eb4..4438c66dc1d 100644 --- a/pkg/kubectl/cmd/get.go +++ b/pkg/kubectl/cmd/get.go @@ -20,8 +20,6 @@ import ( "fmt" "io" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl" cmdutil "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource" @@ -34,7 +32,7 @@ const ( get_long = `Display one or many resources. Possible resources include pods (po), replication controllers (rc), services -(svc), minions (mi), or events (ev). +(svc), minions (mi), events (ev), or component statuses (cs). 
By specifying the output as 'template' and providing a Go template as the value of the --template flag, you can filter the attributes of the fetched resource(s).` @@ -162,29 +160,21 @@ func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string } defaultVersion := clientConfig.Version - // the outermost object will be converted to the output-version - version := cmdutil.OutputVersion(cmd, defaultVersion) - + singular := false r := b.Flatten().Do() - obj, err := r.Object() + infos, err := r.IntoSingular(&singular).Infos() if err != nil { return err } - // try conversion to all the possible versions - // TODO: simplify by adding a ResourceBuilder mode - versions := []string{version, latest.Version} - infos, _ := r.Infos() - for _, info := range infos { - versions = append(versions, info.Mapping.APIVersion) + // the outermost object will be converted to the output-version, but inner + // objects can use their mappings + version := cmdutil.OutputVersion(cmd, defaultVersion) + obj, err := resource.AsVersionedObject(infos, !singular, version) + if err != nil { + return err } - // TODO: add a new ResourceBuilder mode for Object() that attempts to ensure the objects - // are in the appropriate version if one exists (and if not, use the best effort). - // TODO: ensure api-version is set with the default preferred api version by the client - // builder on initialization - printer := kubectl.NewVersionedPrinter(printer, api.Scheme, versions...) 
- return printer.PrintObj(obj, out) } diff --git a/pkg/kubectl/cmd/get_test.go b/pkg/kubectl/cmd/get_test.go index a432872bd8c..cba2498ca06 100644 --- a/pkg/kubectl/cmd/get_test.go +++ b/pkg/kubectl/cmd/get_test.go @@ -18,6 +18,7 @@ package cmd import ( "bytes" + encjson "encoding/json" "fmt" "io" "io/ioutil" @@ -87,6 +88,33 @@ func testData() (*api.PodList, *api.ServiceList, *api.ReplicationControllerList) return pods, svc, rc } +func testComponentStatusData() *api.ComponentStatusList { + good := &api.ComponentStatus{ + Conditions: []api.ComponentCondition{ + {Type: api.ComponentHealthy, Status: api.ConditionTrue, Message: "ok", Error: "nil"}, + }, + } + good.Name = "servergood" + + bad := &api.ComponentStatus{ + Conditions: []api.ComponentCondition{ + {Type: api.ComponentHealthy, Status: api.ConditionFalse, Message: "", Error: "bad status: 500"}, + }, + } + bad.Name = "serverbad" + + unknown := &api.ComponentStatus{ + Conditions: []api.ComponentCondition{ + {Type: api.ComponentHealthy, Status: api.ConditionUnknown, Message: "", Error: "fizzbuzz error"}, + }, + } + unknown.Name = "serverunknown" + + return &api.ComponentStatusList{ + Items: []api.ComponentStatus{*good, *bad, *unknown}, + } +} + // Verifies that schemas that are not in the master tree of Kubernetes can be retrieved via Get. func TestGetUnknownSchemaObject(t *testing.T) { f, tf, codec := NewTestFactory() @@ -113,6 +141,84 @@ func TestGetUnknownSchemaObject(t *testing.T) { } } +// Verifies that schemas that are not in the master tree of Kubernetes can be retrieved via Get. +// Because api.List is part of the Kube API, resource.Builder has to perform a conversion on +// api.Scheme, which may not have access to all objects, and not all objects are at the same +// internal versioning scheme. This test verifies that two isolated schemes (Test, and api.Scheme) +// can be conjoined into a single output object. 
+func TestGetUnknownSchemaObjectListGeneric(t *testing.T) { + testCases := map[string]struct { + output string + list string + obj1 string + obj2 string + }{ + "handles specific version": { + output: "v1beta3", + list: "v1beta3", + obj1: "unlikelyversion", + obj2: "v1beta3", + }, + "handles second specific version": { + output: "unlikelyversion", + list: "v1beta3", + obj1: "unlikelyversion", // doesn't have v1beta3 + obj2: "v1beta1", // version of the API response + }, + "handles common version": { + output: "v1beta1", + list: "v1beta1", + obj1: "unlikelyversion", // because test scheme defaults to unlikelyversion + obj2: "v1beta1", + }, + } + for k, test := range testCases { + apiCodec := runtime.CodecFor(api.Scheme, "v1beta1") + regularClient := &client.FakeRESTClient{ + Codec: apiCodec, + Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{StatusCode: 200, Body: objBody(apiCodec, &api.ReplicationController{ObjectMeta: api.ObjectMeta{Name: "foo"}})}, nil + }), + } + + f, tf, codec := NewMixedFactory(regularClient) + tf.Printer = &testPrinter{} + tf.Client = &client.FakeRESTClient{ + Codec: codec, + Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { + return &http.Response{StatusCode: 200, Body: objBody(codec, &internalType{Name: "foo"})}, nil + }), + } + tf.Namespace = "test" + tf.ClientConfig = &client.Config{Version: latest.Version} + buf := bytes.NewBuffer([]byte{}) + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + cmd.Flags().Set("output", "json") + cmd.Flags().Set("output-version", test.output) + err := RunGet(f, buf, cmd, []string{"type/foo", "replicationcontrollers/foo"}) + if err != nil { + t.Errorf("%s: unexpected error: %v", k, err) + continue + } + out := make(map[string]interface{}) + if err := encjson.Unmarshal(buf.Bytes(), &out); err != nil { + t.Errorf("%s: unexpected error: %v\n%s", k, err, buf.String()) + continue + } + if out["apiVersion"] != test.list { + 
t.Errorf("%s: unexpected list: %#v", k, out) + } + arr := out["items"].([]interface{}) + if arr[0].(map[string]interface{})["apiVersion"] != test.obj1 { + t.Errorf("%s: unexpected list: %#v", k, out) + } + if arr[1].(map[string]interface{})["apiVersion"] != test.obj2 { + t.Errorf("%s: unexpected list: %#v", k, out) + } + } +} + // Verifies that schemas that are not in the master tree of Kubernetes can be retrieved via Get. func TestGetSchemaObject(t *testing.T) { f, tf, _ := NewTestFactory() @@ -188,6 +294,32 @@ func TestGetListObjects(t *testing.T) { } } +func TestGetListComponentStatus(t *testing.T) { + statuses := testComponentStatusData() + + f, tf, codec := NewAPIFactory() + tf.Printer = &testPrinter{} + tf.Client = &client.FakeRESTClient{ + Codec: codec, + Resp: &http.Response{StatusCode: 200, Body: objBody(codec, statuses)}, + } + tf.Namespace = "test" + buf := bytes.NewBuffer([]byte{}) + + cmd := NewCmdGet(f, buf) + cmd.SetOutput(buf) + cmd.Run(cmd, []string{"componentstatuses"}) + + expected := []runtime.Object{statuses} + actual := tf.Printer.(*testPrinter).Objects + if !reflect.DeepEqual(expected, actual) { + t.Errorf("unexpected object: %#v %#v", expected, actual) + } + if len(buf.String()) == 0 { + t.Errorf("unexpected empty output") + } +} + func TestGetMultipleTypeObjects(t *testing.T) { pods, svc, _ := testData() diff --git a/pkg/kubectl/cmd/rollingupdate.go b/pkg/kubectl/cmd/rollingupdate.go index 0b0293e506a..250339bf18f 100644 --- a/pkg/kubectl/cmd/rollingupdate.go +++ b/pkg/kubectl/cmd/rollingupdate.go @@ -134,7 +134,15 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg if newRc.Spec.Replicas == 0 { newRc.Spec.Replicas = oldRc.Spec.Replicas } - err = updater.Update(out, oldRc, newRc, period, interval, timeout) + err = updater.Update(&kubectl.RollingUpdaterConfig{ + Out: out, + OldRc: oldRc, + NewRc: newRc, + UpdatePeriod: period, + Interval: interval, + Timeout: timeout, + CleanupPolicy: 
kubectl.DeleteRollingUpdateCleanupPolicy, + }) if err != nil { return err } diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index b613b7776ab..14cb35bdc9b 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -58,26 +58,38 @@ func (e ErrNoDescriber) Error() string { return fmt.Sprintf("no describer has been defined for %v", e.Types) } +func describerMap(c *client.Client) map[string]Describer { + m := map[string]Describer{ + "Pod": &PodDescriber{c}, + "ReplicationController": &ReplicationControllerDescriber{c}, + "Service": &ServiceDescriber{c}, + "Minion": &NodeDescriber{c}, + "Node": &NodeDescriber{c}, + "LimitRange": &LimitRangeDescriber{c}, + "ResourceQuota": &ResourceQuotaDescriber{c}, + "PersistentVolume": &PersistentVolumeDescriber{c}, + "PersistentVolumeClaim": &PersistentVolumeClaimDescriber{c}, + } + return m +} + +// List of all resource types we can describe +func DescribableResources() []string { + keys := make([]string, 0) + + for k := range describerMap(nil) { + resource := strings.ToLower(k) + keys = append(keys, resource) + } + return keys +} + // Describer returns the default describe functions for each of the standard // Kubernetes types. 
func DescriberFor(kind string, c *client.Client) (Describer, bool) { - switch kind { - case "Pod": - return &PodDescriber{c}, true - case "ReplicationController": - return &ReplicationControllerDescriber{c}, true - case "Service": - return &ServiceDescriber{c}, true - case "PersistentVolume": - return &PersistentVolumeDescriber{c}, true - case "PersistentVolumeClaim": - return &PersistentVolumeClaimDescriber{c}, true - case "Minion", "Node": - return &NodeDescriber{c}, true - case "LimitRange": - return &LimitRangeDescriber{c}, true - case "ResourceQuota": - return &ResourceQuotaDescriber{c}, true + f, ok := describerMap(c)[kind] + if ok { + return f, true } return nil, false } @@ -225,7 +237,7 @@ func (d *PodDescriber) Describe(namespace, name string) (string, error) { if err2 == nil && len(events.Items) > 0 { return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Pod '%v': error '%v', but found events.\n", name, err) - describeEvents(events, out) + DescribeEvents(events, out) return nil }) } @@ -267,7 +279,7 @@ func describePod(pod *api.Pod, rcs []api.ReplicationController, events *api.Even } } if events != nil { - describeEvents(events, out) + DescribeEvents(events, out) } return nil }) @@ -398,7 +410,7 @@ func describeReplicationController(controller *api.ReplicationController, events fmt.Fprintf(out, "Replicas:\t%d current / %d desired\n", controller.Status.Replicas, controller.Spec.Replicas) fmt.Fprintf(out, "Pods Status:\t%d Running / %d Waiting / %d Succeeded / %d Failed\n", running, waiting, succeeded, failed) if events != nil { - describeEvents(events, out) + DescribeEvents(events, out) } return nil }) @@ -448,7 +460,7 @@ func describeService(service *api.Service, endpoints *api.Endpoints, events *api fmt.Fprintf(out, "Endpoints:\t%s\n", formatEndpoints(endpoints)) fmt.Fprintf(out, "Session Affinity:\t%s\n", service.Spec.SessionAffinity) if events != nil { - describeEvents(events, out) + DescribeEvents(events, out) } return nil }) @@ -466,12 
+478,13 @@ func (d *NodeDescriber) Describe(namespace, name string) (string, error) { return "", err } - var pods []api.Pod + var pods []*api.Pod allPods, err := d.Pods(namespace).List(labels.Everything()) if err != nil { return "", err } - for _, pod := range allPods.Items { + for i := range allPods.Items { + pod := &allPods.Items[i] if pod.Spec.Host != name { continue } @@ -490,7 +503,7 @@ func (d *NodeDescriber) Describe(namespace, name string) (string, error) { return describeNode(node, pods, events) } -func describeNode(node *api.Node, pods []api.Pod, events *api.EventList) (string, error) { +func describeNode(node *api.Node, pods []*api.Pod, events *api.EventList) (string, error) { return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", node.Name) fmt.Fprintf(out, "Labels:\t%s\n", formatLabels(node.Labels)) @@ -537,13 +550,13 @@ func describeNode(node *api.Node, pods []api.Pod, events *api.EventList) (string fmt.Fprintf(out, " %s\n", pod.Name) } if events != nil { - describeEvents(events, out) + DescribeEvents(events, out) } return nil }) } -func describeEvents(el *api.EventList, w io.Writer) { +func DescribeEvents(el *api.EventList, w io.Writer) { if len(el.Items) == 0 { fmt.Fprint(w, "No events.") return diff --git a/pkg/kubectl/kubectl.go b/pkg/kubectl/kubectl.go index 51c24ea95a0..074678925cb 100644 --- a/pkg/kubectl/kubectl.go +++ b/pkg/kubectl/kubectl.go @@ -96,17 +96,19 @@ func (e ShortcutExpander) VersionAndKindForResource(resource string) (defaultVer // indeed a shortcut. Otherwise, will return resource unmodified. 
func expandResourceShortcut(resource string) string { shortForms := map[string]string{ - "po": "pods", - "rc": "replicationcontrollers", - // DEPRECATED: will be removed before 1.0 - "se": "services", - "svc": "services", - "mi": "minions", + // Please keep this alphabetized + "cs": "componentstatuses", "ev": "events", "limits": "limitRanges", - "quota": "resourceQuotas", + "mi": "minions", + "po": "pods", "pv": "persistentVolumes", "pvc": "persistentVolumeClaims", + "quota": "resourceQuotas", + "rc": "replicationcontrollers", + // DEPRECATED: will be removed before 1.0 + "se": "services", + "svc": "services", } if expanded, ok := shortForms[resource]; ok { return expanded diff --git a/pkg/kubectl/resource/builder.go b/pkg/kubectl/resource/builder.go index 7e7aceaa23f..43dc3883a7c 100644 --- a/pkg/kubectl/resource/builder.go +++ b/pkg/kubectl/resource/builder.go @@ -58,6 +58,8 @@ type Builder struct { flatten bool latest bool + requireObject bool + singleResourceType bool continueOnError bool } @@ -70,7 +72,8 @@ type resourceTuple struct { // NewBuilder creates a builder that operates on generic objects. func NewBuilder(mapper meta.RESTMapper, typer runtime.ObjectTyper, clientMapper ClientMapper) *Builder { return &Builder{ - mapper: &Mapper{typer, mapper, clientMapper}, + mapper: &Mapper{typer, mapper, clientMapper}, + requireObject: true, } } @@ -230,6 +233,7 @@ func (b *Builder) SelectAllParam(selectAll bool) *Builder { // When two or more arguments are received, they must be a single type and resource name(s). // The allowEmptySelector permits to select all the resources (via Everything func). 
func (b *Builder) ResourceTypeOrNameArgs(allowEmptySelector bool, args ...string) *Builder { + args = b.replaceAliases(args) if ok, err := hasCombinedTypeArgs(args); ok { if err != nil { b.errs = append(b.errs, err) @@ -269,6 +273,18 @@ func (b *Builder) ResourceTypeOrNameArgs(allowEmptySelector bool, args ...string return b } +func (b *Builder) replaceAliases(args []string) []string { + replaced := []string{} + for _, arg := range args { + if aliases, ok := b.mapper.AliasesForResource(arg); ok { + arg = strings.Join(aliases, ",") + } + replaced = append(replaced, arg) + } + + return replaced +} + func hasCombinedTypeArgs(args []string) (bool, error) { hasSlash := 0 for _, s := range args { @@ -314,6 +330,12 @@ func (b *Builder) Latest() *Builder { return b } +// RequireObject ensures that resulting infos have an object set. If false, resulting info may not have an object set. +func (b *Builder) RequireObject(require bool) *Builder { + b.requireObject = require + return b +} + // ContinueOnError will attempt to load and visit as many objects as possible, even if some visits // return errors or some objects cannot be loaded. The default behavior is to terminate after // the first error is returned from a VisitorFunc. @@ -524,9 +546,6 @@ func (b *Builder) visitorResult() *Result { visitors := []Visitor{} for _, name := range b.names { info := NewInfo(client, mapping, selectorNamespace, name) - if err := info.Get(); err != nil { - return &Result{singular: isSingular, err: err} - } visitors = append(visitors, info) } return &Result{singular: isSingular, visitor: VisitorList(visitors), sources: visitors} @@ -580,15 +599,24 @@ func (b *Builder) Do() *Result { helpers = append(helpers, RequireNamespace(b.namespace)) } helpers = append(helpers, FilterNamespace) - if b.latest { + if b.requireObject { helpers = append(helpers, RetrieveLazy) } r.visitor = NewDecoratedVisitor(r.visitor, helpers...) 
return r } +// SplitResourceArgument splits the argument with commas and returns unique +// strings in the original order. func SplitResourceArgument(arg string) []string { + out := []string{} set := util.NewStringSet() - set.Insert(strings.Split(arg, ",")...) - return set.List() + for _, s := range strings.Split(arg, ",") { + if set.Has(s) { + continue + } + set.Insert(s) + out = append(out, s) + } + return out } diff --git a/pkg/kubectl/resource/builder_test.go b/pkg/kubectl/resource/builder_test.go index b6d61eca5ac..39d185520bd 100644 --- a/pkg/kubectl/resource/builder_test.go +++ b/pkg/kubectl/resource/builder_test.go @@ -58,7 +58,7 @@ func fakeClient() ClientMapper { }) } -func fakeClientWith(t *testing.T, data map[string]string) ClientMapper { +func fakeClientWith(testName string, t *testing.T, data map[string]string) ClientMapper { return ClientMapperFunc(func(*meta.RESTMapping) (RESTClient, error) { return &client.FakeRESTClient{ Codec: latest.Codec, @@ -70,7 +70,7 @@ func fakeClientWith(t *testing.T, data map[string]string) ClientMapper { } body, ok := data[p] if !ok { - t.Fatalf("unexpected request: %s (%s)\n%#v", p, req.URL, req) + t.Fatalf("%s: unexpected request: %s (%s)\n%#v", testName, p, req.URL, req) } return &http.Response{ StatusCode: http.StatusOK, @@ -305,7 +305,7 @@ func TestURLBuilderRequireNamespace(t *testing.T) { func TestResourceByName(t *testing.T) { pods, _ := testData() - b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith(t, map[string]string{ + b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith("", t, map[string]string{ "/namespaces/test/pods/foo": runtime.EncodeOrDie(latest.Codec, &pods.Items[0]), })). NamespaceParam("test") @@ -336,9 +336,42 @@ func TestResourceByName(t *testing.T) { } } +func TestResourceByNameWithoutRequireObject(t *testing.T) { + b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith("", t, map[string]string{})). 
+ NamespaceParam("test") + + test := &testVisitor{} + singular := false + + if b.Do().Err() == nil { + t.Errorf("unexpected non-error") + } + + b.ResourceTypeOrNameArgs(true, "pods", "foo").RequireObject(false) + + err := b.Do().IntoSingular(&singular).Visit(test.Handle) + if err != nil || !singular || len(test.Infos) != 1 { + t.Fatalf("unexpected response: %v %t %#v", err, singular, test.Infos) + } + if test.Infos[0].Name != "foo" { + t.Errorf("unexpected name: %#v", test.Infos[0].Name) + } + if test.Infos[0].Object != nil { + t.Errorf("unexpected object: %#v", test.Infos[0].Object) + } + + mapping, err := b.Do().ResourceMapping() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if mapping.Kind != "Pod" || mapping.Resource != "pods" { + t.Errorf("unexpected resource mapping: %#v", mapping) + } +} + func TestResourceByNameAndEmptySelector(t *testing.T) { pods, _ := testData() - b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith(t, map[string]string{ + b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith("", t, map[string]string{ "/namespaces/test/pods/foo": runtime.EncodeOrDie(latest.Codec, &pods.Items[0]), })). NamespaceParam("test"). @@ -366,7 +399,7 @@ func TestResourceByNameAndEmptySelector(t *testing.T) { func TestSelector(t *testing.T) { pods, svc := testData() labelKey := api.LabelSelectorQueryParam(testapi.Version()) - b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith(t, map[string]string{ + b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith("", t, map[string]string{ "/namespaces/test/pods?" + labelKey + "=a%3Db": runtime.EncodeOrDie(latest.Codec, pods), "/namespaces/test/services?" + labelKey + "=a%3Db": runtime.EncodeOrDie(latest.Codec, svc), })). 
@@ -467,35 +500,43 @@ func TestResourceTuple(t *testing.T) { }, } for k, testCase := range testCases { - pods, _ := testData() - b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith(t, map[string]string{ - "/namespaces/test/pods/foo": runtime.EncodeOrDie(latest.Codec, &pods.Items[0]), - "/namespaces/test/pods/bar": runtime.EncodeOrDie(latest.Codec, &pods.Items[0]), - "/nodes/foo": runtime.EncodeOrDie(latest.Codec, &api.Node{ObjectMeta: api.ObjectMeta{Name: "foo"}}), - })). - NamespaceParam("test").DefaultNamespace(). - ResourceTypeOrNameArgs(true, testCase.args...) + for _, requireObject := range []bool{true, false} { + expectedRequests := map[string]string{} + if requireObject { + pods, _ := testData() + expectedRequests = map[string]string{ + "/namespaces/test/pods/foo": runtime.EncodeOrDie(latest.Codec, &pods.Items[0]), + "/namespaces/test/pods/bar": runtime.EncodeOrDie(latest.Codec, &pods.Items[0]), + "/nodes/foo": runtime.EncodeOrDie(latest.Codec, &api.Node{ObjectMeta: api.ObjectMeta{Name: "foo"}}), + "/minions/foo": runtime.EncodeOrDie(latest.Codec, &api.Node{ObjectMeta: api.ObjectMeta{Name: "foo"}}), + } + } - r := b.Do() + b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith(k, t, expectedRequests)). + NamespaceParam("test").DefaultNamespace(). 
+ ResourceTypeOrNameArgs(true, testCase.args...).RequireObject(requireObject) - if !testCase.errFn(r.Err()) { - t.Errorf("%s: unexpected error: %v", k, r.Err()) - } - if r.Err() != nil { - continue - } - switch { - case (r.singular && len(testCase.args) != 1), - (!r.singular && len(testCase.args) == 1): - t.Errorf("%s: result had unexpected singular value", k) - } - info, err := r.Infos() - if err != nil { - // test error - continue - } - if len(info) != len(testCase.args) { - t.Errorf("%s: unexpected number of infos returned: %#v", k, info) + r := b.Do() + + if !testCase.errFn(r.Err()) { + t.Errorf("%s: unexpected error: %v", k, r.Err()) + } + if r.Err() != nil { + continue + } + switch { + case (r.singular && len(testCase.args) != 1), + (!r.singular && len(testCase.args) == 1): + t.Errorf("%s: result had unexpected singular value", k) + } + info, err := r.Infos() + if err != nil { + // test error + continue + } + if len(info) != len(testCase.args) { + t.Errorf("%s: unexpected number of infos returned: %#v", k, info) + } } } } @@ -579,7 +620,7 @@ func TestSingularObject(t *testing.T) { func TestListObject(t *testing.T) { pods, _ := testData() labelKey := api.LabelSelectorQueryParam(testapi.Version()) - b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith(t, map[string]string{ + b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith("", t, map[string]string{ "/namespaces/test/pods?" + labelKey + "=a%3Db": runtime.EncodeOrDie(latest.Codec, pods), })). SelectorParam("a=b"). @@ -612,7 +653,7 @@ func TestListObject(t *testing.T) { func TestListObjectWithDifferentVersions(t *testing.T) { pods, svc := testData() labelKey := api.LabelSelectorQueryParam(testapi.Version()) - obj, err := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith(t, map[string]string{ + obj, err := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith("", t, map[string]string{ "/namespaces/test/pods?" 
+ labelKey + "=a%3Db": runtime.EncodeOrDie(latest.Codec, pods), "/namespaces/test/services?" + labelKey + "=a%3Db": runtime.EncodeOrDie(latest.Codec, svc), })). @@ -638,7 +679,7 @@ func TestListObjectWithDifferentVersions(t *testing.T) { func TestWatch(t *testing.T) { _, svc := testData() - w, err := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith(t, map[string]string{ + w, err := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith("", t, map[string]string{ "/watch/namespaces/test/services/redis-master?resourceVersion=12": watchBody(watch.Event{ Type: watch.Added, Object: &svc.Items[0], @@ -693,7 +734,7 @@ func TestLatest(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "15"}, } - b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith(t, map[string]string{ + b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClientWith("", t, map[string]string{ "/namespaces/test/pods/foo": runtime.EncodeOrDie(latest.Codec, newPod), "/namespaces/test/pods/bar": runtime.EncodeOrDie(latest.Codec, newPod2), "/namespaces/test/services/baz": runtime.EncodeOrDie(latest.Codec, newSvc), @@ -783,3 +824,36 @@ func TestReceiveMultipleErrors(t *testing.T) { t.Errorf("unexpected errors %v", errs) } } + +func TestReplaceAliases(t *testing.T) { + tests := []struct { + name string + args []string + expected []string + }{ + { + name: "no-replacement", + args: []string{"service", "pods", "rc"}, + expected: []string{"service", "pods", "rc"}, + }, + { + name: "all-replacement", + args: []string{"all"}, + expected: []string{"rc,svc,pods,pvc"}, + }, + } + + b := NewBuilder(latest.RESTMapper, api.Scheme, fakeClient()) + + for _, test := range tests { + replaced := b.replaceAliases(test.args) + if len(replaced) != len(test.expected) { + t.Errorf("%s: unexpected args length: expected %d, got %d", test.name, len(test.expected), len(replaced)) + } + for i, arg := range test.expected { + if arg != replaced[i] { + t.Errorf("%s: unexpected argument: 
expected %s, got %s", test.name, arg, replaced[i]) + } + } + } +} diff --git a/pkg/kubectl/resource/result.go b/pkg/kubectl/resource/result.go index 13e5b89638b..be75dd9099b 100644 --- a/pkg/kubectl/resource/result.go +++ b/pkg/kubectl/resource/result.go @@ -21,6 +21,7 @@ import ( "reflect" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" @@ -200,3 +201,67 @@ func (r *Result) Watch(resourceVersion string) (watch.Interface, error) { } return w.Watch(resourceVersion) } + +// AsVersionedObject converts a list of infos into a single object - either a List containing +// the objects as children, or if only a single Object is present, as that object. The provided +// version will be preferred as the conversion target, but the Object's mapping version will be +// used if that version is not present. +func AsVersionedObject(infos []*Info, forceList bool, version string) (runtime.Object, error) { + objects := []runtime.Object{} + for _, info := range infos { + if info.Object == nil { + continue + } + + // objects that are not part of api.Scheme must be converted to JSON + // TODO: convert to map[string]interface{}, attach to runtime.Unknown? + if len(version) > 0 { + if _, _, err := api.Scheme.ObjectVersionAndKind(info.Object); runtime.IsNotRegisteredError(err) { + // TODO: ideally this would encode to version, but we don't expose multiple codecs here. 
+ data, err := info.Mapping.Codec.Encode(info.Object) + if err != nil { + return nil, err + } + objects = append(objects, &runtime.Unknown{RawJSON: data}) + continue + } + } + + converted, err := tryConvert(info.Mapping.ObjectConvertor, info.Object, version, info.Mapping.APIVersion) + if err != nil { + return nil, err + } + objects = append(objects, converted) + } + + var object runtime.Object + if len(objects) == 1 && !forceList { + object = objects[0] + } else { + object = &api.List{Items: objects} + converted, err := tryConvert(api.Scheme, object, version, latest.Version) + if err != nil { + return nil, err + } + object = converted + } + return object, nil +} + +// tryConvert attempts to convert the given object to the provided versions in order. This function assumes +// the object is in internal version. +func tryConvert(convertor runtime.ObjectConvertor, object runtime.Object, versions ...string) (runtime.Object, error) { + var last error + for _, version := range versions { + if len(version) == 0 { + return object, nil + } + obj, err := convertor.ConvertToVersion(object, version) + if err != nil { + last = err + continue + } + return obj, nil + } + return nil, last +} diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index e3f393c241d..958491f6e9f 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -256,6 +256,7 @@ var namespaceColumns = []string{"NAME", "LABELS", "STATUS"} var secretColumns = []string{"NAME", "DATA"} var persistentVolumeColumns = []string{"NAME", "LABELS", "CAPACITY", "ACCESSMODES", "STATUS", "CLAIM"} var persistentVolumeClaimColumns = []string{"NAME", "LABELS", "STATUS", "VOLUME"} +var componentStatusColumns = []string{"NAME", "STATUS", "MESSAGE", "ERROR"} // addDefaultHandlers adds print handlers for default Kubernetes types. 
func (h *HumanReadablePrinter) addDefaultHandlers() { @@ -284,6 +285,8 @@ func (h *HumanReadablePrinter) addDefaultHandlers() { h.Handler(persistentVolumeClaimColumns, printPersistentVolumeClaimList) h.Handler(persistentVolumeColumns, printPersistentVolume) h.Handler(persistentVolumeColumns, printPersistentVolumeList) + h.Handler(componentStatusColumns, printComponentStatus) + h.Handler(componentStatusColumns, printComponentStatusList) } func (h *HumanReadablePrinter) unknown(data []byte, w io.Writer) error { @@ -503,12 +506,6 @@ func printNode(node *api.Node, w io.Writer) error { cond := node.Status.Conditions[i] conditionMap[cond.Type] = &cond } - var schedulable string - if node.Spec.Unschedulable { - schedulable = "Unschedulable" - } else { - schedulable = "Schedulable" - } var status []string for _, validCondition := range NodeAllConditions { if condition, ok := conditionMap[validCondition]; ok { @@ -522,7 +519,10 @@ func printNode(node *api.Node, w io.Writer) error { if len(status) == 0 { status = append(status, "Unknown") } - _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", node.Name, schedulable, formatLabels(node.Labels), strings.Join(status, ",")) + if node.Spec.Unschedulable { + status = append(status, "SchedulingDisabled") + } + _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", node.Name, formatLabels(node.Labels), strings.Join(status, ",")) return err } @@ -647,6 +647,36 @@ func printResourceQuotaList(list *api.ResourceQuotaList, w io.Writer) error { return nil } +func printComponentStatus(item *api.ComponentStatus, w io.Writer) error { + status := "Unknown" + message := "" + error := "" + for _, condition := range item.Conditions { + if condition.Type == api.ComponentHealthy { + if condition.Status == api.ConditionTrue { + status = "Healthy" + } else { + status = "Unhealthy" + } + message = condition.Message + error = condition.Error + break + } + } + _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\n", item.Name, status, message, error) + return err +} + +func 
printComponentStatusList(list *api.ComponentStatusList, w io.Writer) error { + for _, item := range list.Items { + if err := printComponentStatus(&item, w); err != nil { + return err + } + } + + return nil +} + // PrintObj prints the obj in a human-friendly format according to the type of the obj. func (h *HumanReadablePrinter) PrintObj(obj runtime.Object, output io.Writer) error { w := tabwriter.NewWriter(output, 10, 4, 3, ' ', 0) diff --git a/pkg/kubectl/resource_printer_test.go b/pkg/kubectl/resource_printer_test.go index f34e17cdffb..db7455f65e2 100644 --- a/pkg/kubectl/resource_printer_test.go +++ b/pkg/kubectl/resource_printer_test.go @@ -555,6 +555,14 @@ func TestPrintMinionStatus(t *testing.T) { }, status: "Ready", }, + { + minion: api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo2"}, + Spec: api.NodeSpec{Unschedulable: true}, + Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}}}, + }, + status: "Ready,SchedulingDisabled", + }, { minion: api.Node{ ObjectMeta: api.ObjectMeta{Name: "foo3"}, @@ -574,17 +582,41 @@ func TestPrintMinionStatus(t *testing.T) { { minion: api.Node{ ObjectMeta: api.ObjectMeta{Name: "foo5"}, + Spec: api.NodeSpec{Unschedulable: true}, + Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionFalse}}}, + }, + status: "NotReady,SchedulingDisabled", + }, + { + minion: api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo6"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: "InvalidValue", Status: api.ConditionTrue}}}, }, status: "Unknown", }, { minion: api.Node{ - ObjectMeta: api.ObjectMeta{Name: "foo6"}, + ObjectMeta: api.ObjectMeta{Name: "foo7"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{}}}, }, status: "Unknown", }, + { + minion: api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo8"}, + Spec: api.NodeSpec{Unschedulable: true}, + Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: "InvalidValue", Status: 
api.ConditionTrue}}}, + }, + status: "Unknown,SchedulingDisabled", + }, + { + minion: api.Node{ + ObjectMeta: api.ObjectMeta{Name: "foo9"}, + Spec: api.NodeSpec{Unschedulable: true}, + Status: api.NodeStatus{Conditions: []api.NodeCondition{{}}}, + }, + status: "Unknown,SchedulingDisabled", + }, } for _, test := range table { diff --git a/pkg/kubectl/rolling_updater.go b/pkg/kubectl/rolling_updater.go index 7172b8663b6..26afd6538d7 100644 --- a/pkg/kubectl/rolling_updater.go +++ b/pkg/kubectl/rolling_updater.go @@ -36,6 +36,38 @@ type RollingUpdater struct { ns string } +// RollingUpdaterConfig is the configuration for a rolling deployment process. +type RollingUpdaterConfig struct { + // Out is a writer for progress output. + Out io.Writer + // OldRC is an existing controller to be replaced. + OldRc *api.ReplicationController + // NewRc is a controller that will take ownership of updated pods (will be + // created if needed). + NewRc *api.ReplicationController + // UpdatePeriod is the time to wait between individual pod updates. + UpdatePeriod time.Duration + // Interval is the time to wait between polling controller status after + // update. + Interval time.Duration + // Timeout is the time to wait for controller updates before giving up. + Timeout time.Duration + // CleanupPolicy defines the cleanup action to take after the deployment is + // complete. + CleanupPolicy RollingUpdaterCleanupPolicy +} + +// RollingUpdaterCleanupPolicy is a cleanup action to take after the +// deployment is complete. +type RollingUpdaterCleanupPolicy string + +const ( + // DeleteRollingUpdateCleanupPolicy means delete the old controller. + DeleteRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Delete" + // PreserveRollingUpdateCleanupPolicy means keep the old controller. 
+ PreserveRollingUpdateCleanupPolicy RollingUpdaterCleanupPolicy = "Preserve" +) + // NewRollingUpdater creates a RollingUpdater from a client func NewRollingUpdater(namespace string, c RollingUpdaterClient) *RollingUpdater { return &RollingUpdater{ @@ -49,20 +81,25 @@ const ( desiredReplicasAnnotation = kubectlAnnotationPrefix + "desired-replicas" ) -// Update all pods for a ReplicationController (oldRc) by creating a new controller (newRc) -// with 0 replicas, and synchronously resizing oldRc,newRc by 1 until oldRc has 0 replicas -// and newRc has the original # of desired replicas. oldRc is then deleted. -// If an update from newRc to oldRc is already in progress, we attempt to drive it to completion. -// If an error occurs at any step of the update, the error will be returned. -// 'out' writer for progress output -// 'oldRc' existing controller to be replaced -// 'newRc' controller that will take ownership of updated pods (will be created if needed) -// 'updatePeriod' time to wait between individual pod updates -// 'interval' time to wait between polling controller status after update -// 'timeout' time to wait for controller updates before giving up +// Update all pods for a ReplicationController (oldRc) by creating a new +// controller (newRc) with 0 replicas, and synchronously resizing oldRc,newRc +// by 1 until oldRc has 0 replicas and newRc has the original # of desired +// replicas. Cleanup occurs based on a RollingUpdaterCleanupPolicy. // -// TODO: make this handle performing a rollback of a partially completed rollout. -func (r *RollingUpdater) Update(out io.Writer, oldRc, newRc *api.ReplicationController, updatePeriod, interval, timeout time.Duration) error { +// If an update from newRc to oldRc is already in progress, we attempt to +// drive it to completion. If an error occurs at any step of the update, the +// error will be returned. +// +// TODO: make this handle performing a rollback of a partially completed +// rollout. 
+func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { + out := config.Out + oldRc := config.OldRc + newRc := config.NewRc + updatePeriod := config.UpdatePeriod + interval := config.Interval + timeout := config.Timeout + oldName := oldRc.ObjectMeta.Name newName := newRc.ObjectMeta.Name retry := &RetryParams{interval, timeout} @@ -156,9 +193,17 @@ func (r *RollingUpdater) Update(out io.Writer, oldRc, newRc *api.ReplicationCont if err != nil { return err } - // delete old rc - fmt.Fprintf(out, "Update succeeded. Deleting %s\n", oldName) - return r.c.DeleteReplicationController(r.ns, oldName) + + switch config.CleanupPolicy { + case DeleteRollingUpdateCleanupPolicy: + // delete old rc + fmt.Fprintf(out, "Update succeeded. Deleting %s\n", oldName) + return r.c.DeleteReplicationController(r.ns, oldName) + case PreserveRollingUpdateCleanupPolicy: + return nil + default: + return nil + } } func (r *RollingUpdater) getExistingNewRc(sourceId, name string) (rc *api.ReplicationController, existing bool, err error) { diff --git a/pkg/kubectl/rolling_updater_test.go b/pkg/kubectl/rolling_updater_test.go index 144d7059c90..51bb2ea621a 100644 --- a/pkg/kubectl/rolling_updater_test.go +++ b/pkg/kubectl/rolling_updater_test.go @@ -19,12 +19,14 @@ package kubectl import ( "bytes" "fmt" + "io/ioutil" "testing" "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/wait" ) type updaterFake struct { @@ -257,7 +259,16 @@ Update succeeded. 
Deleting foo-v1 "default", } var buffer bytes.Buffer - if err := updater.Update(&buffer, test.oldRc, test.newRc, 0, time.Millisecond, time.Millisecond); err != nil { + config := &RollingUpdaterConfig{ + Out: &buffer, + OldRc: test.oldRc, + NewRc: test.newRc, + UpdatePeriod: 0, + Interval: time.Millisecond, + Timeout: time.Millisecond, + CleanupPolicy: DeleteRollingUpdateCleanupPolicy, + } + if err := updater.Update(config); err != nil { t.Errorf("Update failed: %v", err) } if buffer.String() != test.output { @@ -299,10 +310,101 @@ Update succeeded. Deleting foo-v1 updater := RollingUpdater{NewRollingUpdaterClient(fakeClientFor("default", responses)), "default"} var buffer bytes.Buffer - if err := updater.Update(&buffer, rc, rcExisting, 0, time.Millisecond, time.Millisecond); err != nil { + config := &RollingUpdaterConfig{ + Out: &buffer, + OldRc: rc, + NewRc: rcExisting, + UpdatePeriod: 0, + Interval: time.Millisecond, + Timeout: time.Millisecond, + CleanupPolicy: DeleteRollingUpdateCleanupPolicy, + } + if err := updater.Update(config); err != nil { t.Errorf("Update failed: %v", err) } if buffer.String() != output { t.Errorf("Output was not as expected. Expected:\n%s\nGot:\n%s", output, buffer.String()) } } + +// TestRollingUpdater_preserveCleanup ensures that the old controller isn't +// deleted following a successful deployment. 
+func TestRollingUpdater_preserveCleanup(t *testing.T) { + rc := oldRc(2) + rcExisting := newRc(1, 3) + + updater := &RollingUpdater{ + ns: "default", + c: &rollingUpdaterClientImpl{ + GetReplicationControllerFn: func(namespace, name string) (*api.ReplicationController, error) { + switch name { + case rc.Name: + return rc, nil + case rcExisting.Name: + return rcExisting, nil + default: + return nil, fmt.Errorf("unexpected get call for %s/%s", namespace, name) + } + }, + UpdateReplicationControllerFn: func(namespace string, rc *api.ReplicationController) (*api.ReplicationController, error) { + return rc, nil + }, + CreateReplicationControllerFn: func(namespace string, rc *api.ReplicationController) (*api.ReplicationController, error) { + t.Fatalf("unexpected call to create %s/rc:%#v", namespace, rc) + return nil, nil + }, + DeleteReplicationControllerFn: func(namespace, name string) error { + t.Fatalf("unexpected call to delete %s/%s", namespace, name) + return nil + }, + ControllerHasDesiredReplicasFn: func(rc *api.ReplicationController) wait.ConditionFunc { + return func() (done bool, err error) { + return true, nil + } + }, + }, + } + + config := &RollingUpdaterConfig{ + Out: ioutil.Discard, + OldRc: rc, + NewRc: rcExisting, + UpdatePeriod: 0, + Interval: time.Millisecond, + Timeout: time.Millisecond, + CleanupPolicy: PreserveRollingUpdateCleanupPolicy, + } + err := updater.Update(config) + if err != nil { + t.Errorf("unexpected error: %v", err) + } +} + +// rollingUpdaterClientImpl is a dynamic RollingUpdaterClient. 
+type rollingUpdaterClientImpl struct { + GetReplicationControllerFn func(namespace, name string) (*api.ReplicationController, error) + UpdateReplicationControllerFn func(namespace string, rc *api.ReplicationController) (*api.ReplicationController, error) + CreateReplicationControllerFn func(namespace string, rc *api.ReplicationController) (*api.ReplicationController, error) + DeleteReplicationControllerFn func(namespace, name string) error + ControllerHasDesiredReplicasFn func(rc *api.ReplicationController) wait.ConditionFunc +} + +func (c *rollingUpdaterClientImpl) GetReplicationController(namespace, name string) (*api.ReplicationController, error) { + return c.GetReplicationControllerFn(namespace, name) +} + +func (c *rollingUpdaterClientImpl) UpdateReplicationController(namespace string, rc *api.ReplicationController) (*api.ReplicationController, error) { + return c.UpdateReplicationControllerFn(namespace, rc) +} + +func (c *rollingUpdaterClientImpl) CreateReplicationController(namespace string, rc *api.ReplicationController) (*api.ReplicationController, error) { + return c.CreateReplicationControllerFn(namespace, rc) +} + +func (c *rollingUpdaterClientImpl) DeleteReplicationController(namespace, name string) error { + return c.DeleteReplicationControllerFn(namespace, name) +} + +func (c *rollingUpdaterClientImpl) ControllerHasDesiredReplicas(rc *api.ReplicationController) wait.ConditionFunc { + return c.ControllerHasDesiredReplicasFn(rc) +} diff --git a/pkg/kubelet/cadvisor/cadvisor_linux.go b/pkg/kubelet/cadvisor/cadvisor_linux.go index 6a75201c794..e0c4d27c625 100644 --- a/pkg/kubelet/cadvisor/cadvisor_linux.go +++ b/pkg/kubelet/cadvisor/cadvisor_linux.go @@ -77,9 +77,9 @@ func New(port uint) (Interface, error) { return cadvisorClient, nil } -func (self *cadvisorClient) exportHTTP(port uint) error { +func (cc *cadvisorClient) exportHTTP(port uint) error { mux := http.NewServeMux() - err := cadvisorHttp.RegisterHandlers(mux, self, "", "", "", "", "/metrics") 
+ err := cadvisorHttp.RegisterHandlers(mux, cc, "", "", "", "", "/metrics") if err != nil { return err } @@ -106,20 +106,20 @@ func (self *cadvisorClient) exportHTTP(port uint) error { return nil } -func (self *cadvisorClient) ContainerInfo(name string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) { - return self.GetContainerInfo(name, req) +func (cc *cadvisorClient) ContainerInfo(name string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) { + return cc.GetContainerInfo(name, req) } -func (self *cadvisorClient) VersionInfo() (*cadvisorApi.VersionInfo, error) { - return self.GetVersionInfo() +func (cc *cadvisorClient) VersionInfo() (*cadvisorApi.VersionInfo, error) { + return cc.GetVersionInfo() } -func (self *cadvisorClient) MachineInfo() (*cadvisorApi.MachineInfo, error) { - return self.GetMachineInfo() +func (cc *cadvisorClient) MachineInfo() (*cadvisorApi.MachineInfo, error) { + return cc.GetMachineInfo() } -func (self *cadvisorClient) DockerImagesFsInfo() (cadvisorApiV2.FsInfo, error) { - res, err := self.GetFsInfo(cadvisorFs.LabelDockerImages) +func (cc *cadvisorClient) DockerImagesFsInfo() (cadvisorApiV2.FsInfo, error) { + res, err := cc.GetFsInfo(cadvisorFs.LabelDockerImages) if err != nil { return cadvisorApiV2.FsInfo{}, err } @@ -134,6 +134,6 @@ func (self *cadvisorClient) DockerImagesFsInfo() (cadvisorApiV2.FsInfo, error) { return res[0], nil } -func (self *cadvisorClient) GetPastEvents(request *events.Request) ([]*cadvisorApi.Event, error) { - return self.GetPastEvents(request) +func (cc *cadvisorClient) GetPastEvents(request *events.Request) ([]*cadvisorApi.Event, error) { + return cc.GetPastEvents(request) } diff --git a/pkg/kubelet/cadvisor/cadvisor_unsupported.go b/pkg/kubelet/cadvisor/cadvisor_unsupported.go index 1c755085aae..21e22a9cc60 100644 --- a/pkg/kubelet/cadvisor/cadvisor_unsupported.go +++ b/pkg/kubelet/cadvisor/cadvisor_unsupported.go @@ -37,26 +37,26 @@ func New(port uint) 
(Interface, error) { var unsupportedErr = errors.New("cAdvisor is unsupported in this build") -func (self *cadvisorUnsupported) DockerContainer(name string, req *cadvisorApi.ContainerInfoRequest) (cadvisorApi.ContainerInfo, error) { +func (cu *cadvisorUnsupported) DockerContainer(name string, req *cadvisorApi.ContainerInfoRequest) (cadvisorApi.ContainerInfo, error) { return cadvisorApi.ContainerInfo{}, unsupportedErr } -func (self *cadvisorUnsupported) ContainerInfo(name string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) { +func (cu *cadvisorUnsupported) ContainerInfo(name string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) { return nil, unsupportedErr } -func (self *cadvisorUnsupported) MachineInfo() (*cadvisorApi.MachineInfo, error) { +func (cu *cadvisorUnsupported) MachineInfo() (*cadvisorApi.MachineInfo, error) { return nil, unsupportedErr } -func (self *cadvisorUnsupported) VersionInfo() (*cadvisorApi.VersionInfo, error) { +func (cu *cadvisorUnsupported) VersionInfo() (*cadvisorApi.VersionInfo, error) { return nil, unsupportedErr } -func (self *cadvisorUnsupported) DockerImagesFsInfo() (cadvisorApiV2.FsInfo, error) { +func (cu *cadvisorUnsupported) DockerImagesFsInfo() (cadvisorApiV2.FsInfo, error) { return cadvisorApiV2.FsInfo{}, unsupportedErr } -func (self *cadvisorUnsupported) GetPastEvents(request *events.Request) ([]*cadvisorApi.Event, error) { +func (cu *cadvisorUnsupported) GetPastEvents(request *events.Request) ([]*cadvisorApi.Event, error) { return []*cadvisorApi.Event{}, unsupportedErr } diff --git a/pkg/kubelet/config/apiserver.go b/pkg/kubelet/config/apiserver.go index b859944fc0a..e55364877ef 100644 --- a/pkg/kubelet/config/apiserver.go +++ b/pkg/kubelet/config/apiserver.go @@ -34,9 +34,9 @@ func NewSourceApiserver(c *client.Client, hostname string, updates chan<- interf // newSourceApiserverFromLW holds creates a config source that watches and pulls from the apiserver. 
func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}) { send := func(objs []interface{}) { - var pods []api.Pod + var pods []*api.Pod for _, o := range objs { - pods = append(pods, *o.(*api.Pod)) + pods = append(pods, o.(*api.Pod)) } updates <- kubelet.PodUpdate{pods, kubelet.SET, kubelet.ApiserverSource} } diff --git a/pkg/kubelet/config/apiserver_test.go b/pkg/kubelet/config/apiserver_test.go index 7b5367578fb..afb9fbb90f5 100644 --- a/pkg/kubelet/config/apiserver_test.go +++ b/pkg/kubelet/config/apiserver_test.go @@ -42,20 +42,20 @@ func (lw fakePodLW) Watch(resourceVersion string) (watch.Interface, error) { var _ cache.ListerWatcher = fakePodLW{} func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) { - pod1v1 := api.Pod{ + pod1v1 := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "p"}, Spec: api.PodSpec{Containers: []api.Container{{Image: "image/one"}}}} - pod1v2 := api.Pod{ + pod1v2 := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "p"}, Spec: api.PodSpec{Containers: []api.Container{{Image: "image/two"}}}} - pod2 := api.Pod{ + pod2 := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "q"}, Spec: api.PodSpec{Containers: []api.Container{{Image: "image/blah"}}}} // Setup fake api client. 
fakeWatch := watch.NewFake() lw := fakePodLW{ - listResp: &api.PodList{Items: []api.Pod{pod1v1}}, + listResp: &api.PodList{Items: []api.Pod{*pod1v1}}, watchResp: fakeWatch, } @@ -74,7 +74,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) { } // Add another pod - fakeWatch.Add(&pod2) + fakeWatch.Add(pod2) got, ok = <-ch if !ok { t.Errorf("Unable to read from channel when expected") @@ -89,7 +89,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) { } // Modify pod1 - fakeWatch.Modify(&pod1v2) + fakeWatch.Modify(pod1v2) got, ok = <-ch if !ok { t.Errorf("Unable to read from channel when expected") @@ -103,7 +103,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) { } // Delete pod1 - fakeWatch.Delete(&pod1v2) + fakeWatch.Delete(pod1v2) got, ok = <-ch if !ok { t.Errorf("Unable to read from channel when expected") @@ -115,7 +115,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) { } // Delete pod2 - fakeWatch.Delete(&pod2) + fakeWatch.Delete(pod2) got, ok = <-ch if !ok { t.Errorf("Unable to read from channel when expected") diff --git a/pkg/kubelet/config/common.go b/pkg/kubelet/config/common.go index 72ec9c7e503..f74084e3fe8 100644 --- a/pkg/kubelet/config/common.go +++ b/pkg/kubelet/config/common.go @@ -85,7 +85,7 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, hostname string) er type defaultFunc func(pod *api.Pod) error -func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod api.Pod, err error) { +func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *api.Pod, err error) { obj, err := api.Scheme.Decode(data) if err != nil { return false, pod, err @@ -104,7 +104,7 @@ func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod ap err = fmt.Errorf("invalid pod: %v", errs) return true, pod, err } - return true, *newPod, nil + return true, newPod, nil } func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed 
bool, pods api.PodList, err error) { @@ -132,7 +132,7 @@ func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods api return true, *newPods, err } -func tryDecodeSingleManifest(data []byte, defaultFn defaultFunc) (parsed bool, manifest v1beta1.ContainerManifest, pod api.Pod, err error) { +func tryDecodeSingleManifest(data []byte, defaultFn defaultFunc) (parsed bool, manifest v1beta1.ContainerManifest, pod *api.Pod, err error) { // TODO: should be api.Scheme.Decode // This is awful. DecodeInto() expects to find an APIObject, which // Manifest is not. We keep reading manifest for now for compat, but @@ -144,6 +144,7 @@ func tryDecodeSingleManifest(data []byte, defaultFn defaultFunc) (parsed bool, m // avoids writing a v1beta1.ContainerManifest -> api.Pod // conversion which would be identical to the api.ContainerManifest -> // api.Pod conversion. + pod = new(api.Pod) if err = yaml.Unmarshal(data, &manifest); err != nil { return false, manifest, pod, err } @@ -155,10 +156,10 @@ func tryDecodeSingleManifest(data []byte, defaultFn defaultFunc) (parsed bool, m err = fmt.Errorf("invalid manifest: %v", errs) return false, manifest, pod, err } - if err = api.Scheme.Convert(&newManifest, &pod); err != nil { + if err = api.Scheme.Convert(&newManifest, pod); err != nil { return true, manifest, pod, err } - if err := defaultFn(&pod); err != nil { + if err := defaultFn(pod); err != nil { return true, manifest, pod, err } // Success. 
diff --git a/pkg/kubelet/config/config.go b/pkg/kubelet/config/config.go index bc4c1685693..5fb063f1f37 100644 --- a/pkg/kubelet/config/config.go +++ b/pkg/kubelet/config/config.go @@ -168,12 +168,12 @@ func (s *podStorage) Merge(source string, change interface{}) error { s.updates <- *updates } if len(deletes.Pods) > 0 || len(adds.Pods) > 0 { - s.updates <- kubelet.PodUpdate{s.MergedState().([]api.Pod), kubelet.SET, source} + s.updates <- kubelet.PodUpdate{s.MergedState().([]*api.Pod), kubelet.SET, source} } case PodConfigNotificationSnapshot: if len(updates.Pods) > 0 || len(deletes.Pods) > 0 || len(adds.Pods) > 0 { - s.updates <- kubelet.PodUpdate{s.MergedState().([]api.Pod), kubelet.SET, source} + s.updates <- kubelet.PodUpdate{s.MergedState().([]*api.Pod), kubelet.SET, source} } default: @@ -212,7 +212,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de if !reflect.DeepEqual(existing.Spec, ref.Spec) { // this is an update existing.Spec = ref.Spec - updates.Pods = append(updates.Pods, *existing) + updates.Pods = append(updates.Pods, existing) continue } // this is a no-op @@ -224,17 +224,17 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de } ref.Annotations[kubelet.ConfigSourceAnnotationKey] = source pods[name] = ref - adds.Pods = append(adds.Pods, *ref) + adds.Pods = append(adds.Pods, ref) } case kubelet.REMOVE: glog.V(4).Infof("Removing a pod %v", update) for _, value := range update.Pods { - name := kubecontainer.GetPodFullName(&value) + name := kubecontainer.GetPodFullName(value) if existing, found := pods[name]; found { // this is a delete delete(pods, name) - deletes.Pods = append(deletes.Pods, *existing) + deletes.Pods = append(deletes.Pods, existing) continue } // this is a no-op @@ -255,7 +255,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de if !reflect.DeepEqual(existing.Spec, ref.Spec) { // this is an update existing.Spec = ref.Spec - updates.Pods 
= append(updates.Pods, *existing) + updates.Pods = append(updates.Pods, existing) continue } // this is a no-op @@ -266,13 +266,13 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de } ref.Annotations[kubelet.ConfigSourceAnnotationKey] = source pods[name] = ref - adds.Pods = append(adds.Pods, *ref) + adds.Pods = append(adds.Pods, ref) } for name, existing := range oldPods { if _, found := pods[name]; !found { // this is a delete - deletes.Pods = append(deletes.Pods, *existing) + deletes.Pods = append(deletes.Pods, existing) } } @@ -297,10 +297,9 @@ func (s *podStorage) seenSources(sources ...string) bool { return s.sourcesSeen.HasAll(sources...) } -func filterInvalidPods(pods []api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) { +func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) { names := util.StringSet{} - for i := range pods { - pod := &pods[i] + for i, pod := range pods { var errlist []error if errs := validation.ValidatePod(pod); len(errs) != 0 { errlist = append(errlist, errs...) 
@@ -330,21 +329,21 @@ func filterInvalidPods(pods []api.Pod, source string, recorder record.EventRecor func (s *podStorage) Sync() { s.updateLock.Lock() defer s.updateLock.Unlock() - s.updates <- kubelet.PodUpdate{s.MergedState().([]api.Pod), kubelet.SET, kubelet.AllSource} + s.updates <- kubelet.PodUpdate{s.MergedState().([]*api.Pod), kubelet.SET, kubelet.AllSource} } // Object implements config.Accessor func (s *podStorage) MergedState() interface{} { s.podLock.RLock() defer s.podLock.RUnlock() - pods := make([]api.Pod, 0) + pods := make([]*api.Pod, 0) for _, sourcePods := range s.pods { for _, podRef := range sourcePods { pod, err := api.Scheme.Copy(podRef) if err != nil { glog.Errorf("unable to copy pod: %v", err) } - pods = append(pods, *pod.(*api.Pod)) + pods = append(pods, pod.(*api.Pod)) } } return pods diff --git a/pkg/kubelet/config/config_test.go b/pkg/kubelet/config/config_test.go index c9288b5d715..6f53db865c5 100644 --- a/pkg/kubelet/config/config_test.go +++ b/pkg/kubelet/config/config_test.go @@ -39,7 +39,7 @@ func expectEmptyChannel(t *testing.T, ch <-chan interface{}) { } } -type sortedPods []api.Pod +type sortedPods []*api.Pod func (s sortedPods) Len() int { return len(s) @@ -51,8 +51,8 @@ func (s sortedPods) Less(i, j int) bool { return s[i].Namespace < s[j].Namespace } -func CreateValidPod(name, namespace, source string) api.Pod { - return api.Pod{ +func CreateValidPod(name, namespace, source string) *api.Pod { + return &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: types.UID(name), // for the purpose of testing, this is unique enough Name: name, @@ -67,12 +67,8 @@ func CreateValidPod(name, namespace, source string) api.Pod { } } -func CreatePodUpdate(op kubelet.PodOperation, source string, pods ...api.Pod) kubelet.PodUpdate { - newPods := make([]api.Pod, len(pods)) - for i := range pods { - newPods[i] = pods[i] - } - return kubelet.PodUpdate{newPods, op, source} +func CreatePodUpdate(op kubelet.PodOperation, source string, pods ...*api.Pod) 
kubelet.PodUpdate { + return kubelet.PodUpdate{Pods: pods, Op: op, Source: source} } func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubelet.PodUpdate, *PodConfig) { @@ -162,7 +158,7 @@ func TestInvalidPodFiltered(t *testing.T) { expectPodUpdate(t, ch, CreatePodUpdate(kubelet.ADD, NoneSource, CreateValidPod("foo", "new", "test"))) // add an invalid update - podUpdate = CreatePodUpdate(kubelet.UPDATE, NoneSource, api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}) + podUpdate = CreatePodUpdate(kubelet.UPDATE, NoneSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}) channel <- podUpdate expectNoPodUpdate(t, ch) } @@ -179,10 +175,10 @@ func TestNewPodAddedSnapshotAndUpdates(t *testing.T) { expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "new", "test"))) // container updates are separated as UPDATE - pod := podUpdate.Pods[0] + pod := *podUpdate.Pods[0] pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}} - channel <- CreatePodUpdate(kubelet.ADD, NoneSource, pod) - expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod)) + channel <- CreatePodUpdate(kubelet.ADD, NoneSource, &pod) + expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, &pod)) } func TestNewPodAddedSnapshot(t *testing.T) { @@ -197,10 +193,10 @@ func TestNewPodAddedSnapshot(t *testing.T) { expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, kubelet.AllSource, CreateValidPod("foo", "new", "test"))) // container updates are separated as UPDATE - pod := podUpdate.Pods[0] + pod := *podUpdate.Pods[0] pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}} - channel <- CreatePodUpdate(kubelet.ADD, NoneSource, pod) - expectPodUpdate(t, ch, CreatePodUpdate(kubelet.SET, TestSource, pod)) + channel <- CreatePodUpdate(kubelet.ADD, NoneSource, &pod) + expectPodUpdate(t, ch, 
CreatePodUpdate(kubelet.SET, TestSource, &pod)) } func TestNewPodAddedUpdatedRemoved(t *testing.T) { @@ -221,7 +217,7 @@ func TestNewPodAddedUpdatedRemoved(t *testing.T) { channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.UPDATE, NoneSource, pod)) - podUpdate = CreatePodUpdate(kubelet.REMOVE, NoneSource, api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}}) + podUpdate = CreatePodUpdate(kubelet.REMOVE, NoneSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}}) channel <- podUpdate expectPodUpdate(t, ch, CreatePodUpdate(kubelet.REMOVE, NoneSource, pod)) } diff --git a/pkg/kubelet/config/file.go b/pkg/kubelet/config/file.go index bb75fa53a42..cfa48470561 100644 --- a/pkg/kubelet/config/file.go +++ b/pkg/kubelet/config/file.go @@ -66,7 +66,7 @@ func (s *sourceFile) extractFromPath() error { return err } // Emit an update with an empty PodList to allow FileSource to be marked as seen - s.updates <- kubelet.PodUpdate{[]api.Pod{}, kubelet.SET, kubelet.FileSource} + s.updates <- kubelet.PodUpdate{[]*api.Pod{}, kubelet.SET, kubelet.FileSource} return fmt.Errorf("path does not exist, ignoring") } @@ -83,7 +83,7 @@ func (s *sourceFile) extractFromPath() error { if err != nil { return err } - s.updates <- kubelet.PodUpdate{[]api.Pod{pod}, kubelet.SET, kubelet.FileSource} + s.updates <- kubelet.PodUpdate{[]*api.Pod{pod}, kubelet.SET, kubelet.FileSource} default: return fmt.Errorf("path is not a directory or file") @@ -95,13 +95,13 @@ func (s *sourceFile) extractFromPath() error { // Get as many pod configs as we can from a directory. Return an error iff something // prevented us from reading anything at all. Do not return an error if only some files // were problematic. 
-func (s *sourceFile) extractFromDir(name string) ([]api.Pod, error) { +func (s *sourceFile) extractFromDir(name string) ([]*api.Pod, error) { dirents, err := filepath.Glob(filepath.Join(name, "[^.]*")) if err != nil { return nil, fmt.Errorf("glob failed: %v", err) } - pods := make([]api.Pod, 0) + pods := make([]*api.Pod, 0) if len(dirents) == 0 { return pods, nil } @@ -131,7 +131,7 @@ func (s *sourceFile) extractFromDir(name string) ([]api.Pod, error) { return pods, nil } -func (s *sourceFile) extractFromFile(filename string) (pod api.Pod, err error) { +func (s *sourceFile) extractFromFile(filename string) (pod *api.Pod, err error) { glog.V(3).Infof("Reading config file %q", filename) file, err := os.Open(filename) if err != nil { diff --git a/pkg/kubelet/config/file_test.go b/pkg/kubelet/config/file_test.go index 9ffa5608f3e..9871e47fb34 100644 --- a/pkg/kubelet/config/file_test.go +++ b/pkg/kubelet/config/file_test.go @@ -83,7 +83,7 @@ func TestReadFromFile(t *testing.T) { "id": "test", "containers": [{ "name": "image", "image": "test/image", "imagePullPolicy": "PullAlways"}] }`, - expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{ + expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "test-" + hostname, UID: "12345", @@ -109,7 +109,7 @@ func TestReadFromFile(t *testing.T) { "uuid": "12345", "containers": [{ "name": "image", "image": "test/image", "imagePullPolicy": "PullAlways"}] }`, - expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{ + expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "12345-" + hostname, UID: "12345", @@ -136,7 +136,7 @@ func TestReadFromFile(t *testing.T) { "id": "test", "containers": [{ "name": "image", "image": "test/image", "imagePullPolicy": "PullAlways"}] }`, - expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{ + expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, &api.Pod{ 
ObjectMeta: api.ObjectMeta{ Name: "test-" + hostname, UID: "12345", @@ -169,7 +169,7 @@ func TestReadFromFile(t *testing.T) { } } }`, - expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{ + expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "test-" + hostname, UID: "12345", @@ -200,7 +200,7 @@ func TestReadFromFile(t *testing.T) { } } }`, - expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{ + expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "12345-" + hostname, UID: "12345", @@ -232,7 +232,7 @@ func TestReadFromFile(t *testing.T) { "containers": [{ "name": "image", "image": "test/image" }] } }`, - expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.Pod{ + expected: CreatePodUpdate(kubelet.SET, kubelet.FileSource, &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "test-" + hostname, UID: "12345", @@ -264,7 +264,7 @@ func TestReadFromFile(t *testing.T) { case got := <-ch: update := got.(kubelet.PodUpdate) for _, pod := range update.Pods { - if errs := validation.ValidatePod(&pod); len(errs) > 0 { + if errs := validation.ValidatePod(pod); len(errs) > 0 { t.Errorf("%s: Invalid pod %#v, %#v", testCase.desc, pod, errs) } } @@ -335,7 +335,7 @@ func TestExtractFromEmptyDir(t *testing.T) { } } -func ExampleManifestAndPod(id string) (v1beta1.ContainerManifest, api.Pod) { +func ExampleManifestAndPod(id string) (v1beta1.ContainerManifest, *api.Pod) { hostname := "an-example-host" manifest := v1beta1.ContainerManifest{ @@ -358,7 +358,7 @@ func ExampleManifestAndPod(id string) (v1beta1.ContainerManifest, api.Pod) { }, }, } - expectedPod := api.Pod{ + expectedPod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: id + "-" + hostname, UID: types.UID(id), @@ -391,7 +391,7 @@ func TestExtractFromDir(t *testing.T) { manifest2, expectedPod2 := ExampleManifestAndPod("2") manifests := []v1beta1.ContainerManifest{manifest, manifest2} - pods := 
[]api.Pod{expectedPod, expectedPod2} + pods := []*api.Pod{expectedPod, expectedPod2} files := make([]*os.File, len(manifests)) dirName, err := ioutil.TempDir("", "foo") @@ -433,9 +433,9 @@ func TestExtractFromDir(t *testing.T) { if !api.Semantic.DeepDerivative(expected, update) { t.Fatalf("Expected %#v, Got %#v", expected, update) } - for i := range update.Pods { - if errs := validation.ValidatePod(&update.Pods[i]); len(errs) != 0 { - t.Errorf("Expected no validation errors on %#v, Got %q", update.Pods[i], errs) + for _, pod := range update.Pods { + if errs := validation.ValidatePod(pod); len(errs) != 0 { + t.Errorf("Expected no validation errors on %#v, Got %q", pod, errs) } } } diff --git a/pkg/kubelet/config/http.go b/pkg/kubelet/config/http.go index 5ae1082efc3..e16efcfccce 100644 --- a/pkg/kubelet/config/http.go +++ b/pkg/kubelet/config/http.go @@ -74,7 +74,7 @@ func (s *sourceURL) extractFromURL() error { } if len(data) == 0 { // Emit an update with an empty PodList to allow HTTPSource to be marked as seen - s.updates <- kubelet.PodUpdate{[]api.Pod{}, kubelet.SET, kubelet.HTTPSource} + s.updates <- kubelet.PodUpdate{[]*api.Pod{}, kubelet.SET, kubelet.HTTPSource} return fmt.Errorf("zero-length data received from %v", s.url) } // Short circuit if the manifest has not changed since the last time it was read. @@ -91,12 +91,12 @@ func (s *sourceURL) extractFromURL() error { return singleErr } // It parsed! - s.updates <- kubelet.PodUpdate{[]api.Pod{pod}, kubelet.SET, kubelet.HTTPSource} + s.updates <- kubelet.PodUpdate{[]*api.Pod{pod}, kubelet.SET, kubelet.HTTPSource} return nil } // That didn't work, so try an array of manifests. - parsed, manifests, pods, multiErr := tryDecodeManifestList(data, s.applyDefaults) + parsed, manifests, podList, multiErr := tryDecodeManifestList(data, s.applyDefaults) if parsed { if multiErr != nil { // It parsed but could not be used. @@ -110,7 +110,11 @@ func (s *sourceURL) extractFromURL() error { return singleErr } // It parsed! 
- s.updates <- kubelet.PodUpdate{pods.Items, kubelet.SET, kubelet.HTTPSource} + pods := make([]*api.Pod, 0) + for i := range podList.Items { + pods = append(pods, &podList.Items[i]) + } + s.updates <- kubelet.PodUpdate{pods, kubelet.SET, kubelet.HTTPSource} return nil } @@ -124,18 +128,22 @@ func (s *sourceURL) extractFromURL() error { // It parsed but could not be used. return singlePodErr } - s.updates <- kubelet.PodUpdate{[]api.Pod{pod}, kubelet.SET, kubelet.HTTPSource} + s.updates <- kubelet.PodUpdate{[]*api.Pod{pod}, kubelet.SET, kubelet.HTTPSource} return nil } // That didn't work, so try a list of pods. - parsed, pods, multiPodErr := tryDecodePodList(data, s.applyDefaults) + parsed, podList, multiPodErr := tryDecodePodList(data, s.applyDefaults) if parsed { if multiPodErr != nil { // It parsed but could not be used. return multiPodErr } - s.updates <- kubelet.PodUpdate{pods.Items, kubelet.SET, kubelet.HTTPSource} + pods := make([]*api.Pod, 0) + for i := range podList.Items { + pods = append(pods, &podList.Items[i]) + } + s.updates <- kubelet.PodUpdate{pods, kubelet.SET, kubelet.HTTPSource} return nil } diff --git a/pkg/kubelet/config/http_test.go b/pkg/kubelet/config/http_test.go index 0032f14625e..0db1c826c7b 100644 --- a/pkg/kubelet/config/http_test.go +++ b/pkg/kubelet/config/http_test.go @@ -130,7 +130,7 @@ func TestExtractManifestFromHTTP(t *testing.T) { Containers: []v1beta1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1beta1.PullAlways}}}, expected: CreatePodUpdate(kubelet.SET, kubelet.HTTPSource, - api.Pod{ + &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "111", Name: "foo" + "-" + hostname, @@ -155,7 +155,7 @@ func TestExtractManifestFromHTTP(t *testing.T) { Containers: []v1beta1.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent"}}}, expected: CreatePodUpdate(kubelet.SET, kubelet.HTTPSource, - api.Pod{ + &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "111", Name: "111" + "-" + hostname, @@ -180,7 +180,7 @@ func 
TestExtractManifestFromHTTP(t *testing.T) { Containers: []v1beta1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1beta1.PullAlways}}}, expected: CreatePodUpdate(kubelet.SET, kubelet.HTTPSource, - api.Pod{ + &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "111", Name: "foo" + "-" + hostname, @@ -209,7 +209,7 @@ func TestExtractManifestFromHTTP(t *testing.T) { }, expected: CreatePodUpdate(kubelet.SET, kubelet.HTTPSource, - api.Pod{ + &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "111", Name: "foo" + "-" + hostname, @@ -227,7 +227,7 @@ func TestExtractManifestFromHTTP(t *testing.T) { ImagePullPolicy: "Always"}}, }, }, - api.Pod{ + &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "222", Name: "bar" + "-" + hostname, @@ -283,9 +283,9 @@ func TestExtractManifestFromHTTP(t *testing.T) { if !api.Semantic.DeepEqual(testCase.expected, update) { t.Errorf("%s: Expected: %#v, Got: %#v", testCase.desc, testCase.expected, update) } - for i := range update.Pods { - if errs := validation.ValidatePod(&update.Pods[i]); len(errs) != 0 { - t.Errorf("%s: Expected no validation errors on %#v, Got %v", testCase.desc, update.Pods[i], errors.NewAggregate(errs)) + for _, pod := range update.Pods { + if errs := validation.ValidatePod(pod); len(errs) != 0 { + t.Errorf("%s: Expected no validation errors on %#v, Got %v", testCase.desc, pod, errors.NewAggregate(errs)) } } } @@ -317,7 +317,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { }, expected: CreatePodUpdate(kubelet.SET, kubelet.HTTPSource, - api.Pod{ + &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "111", Name: "foo" + "-" + hostname, @@ -355,7 +355,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { }, expected: CreatePodUpdate(kubelet.SET, kubelet.HTTPSource, - api.Pod{ + &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "111", Name: "foo" + "-" + hostname, @@ -406,7 +406,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { }, expected: CreatePodUpdate(kubelet.SET, kubelet.HTTPSource, - api.Pod{ + &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "111", Name: "foo" 
+ "-" + hostname, @@ -424,7 +424,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { ImagePullPolicy: "Always"}}, }, }, - api.Pod{ + &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "222", Name: "bar" + "-" + hostname, @@ -472,9 +472,9 @@ func TestExtractPodsFromHTTP(t *testing.T) { if !api.Semantic.DeepEqual(testCase.expected, update) { t.Errorf("%s: Expected: %#v, Got: %#v", testCase.desc, testCase.expected, update) } - for i := range update.Pods { - if errs := validation.ValidatePod(&update.Pods[i]); len(errs) != 0 { - t.Errorf("%s: Expected no validation errors on %#v, Got %v", testCase.desc, update.Pods[i], errors.NewAggregate(errs)) + for _, pod := range update.Pods { + if errs := validation.ValidatePod(pod); len(errs) != 0 { + t.Errorf("%s: Expected no validation errors on %#v, Got %v", testCase.desc, pod, errors.NewAggregate(errs)) } } } diff --git a/pkg/kubelet/container_gc.go b/pkg/kubelet/container_gc.go index 8241a6f2dc6..c3193f20b90 100644 --- a/pkg/kubelet/container_gc.go +++ b/pkg/kubelet/container_gc.go @@ -89,21 +89,22 @@ type evictUnit struct { // Name of the container in the pod. name string } + type containersByEvictUnit map[evictUnit][]containerGCInfo // Returns the number of containers in this map. -func (self containersByEvictUnit) NumContainers() int { +func (cu containersByEvictUnit) NumContainers() int { num := 0 - for key := range self { - num += len(self[key]) + for key := range cu { + num += len(cu[key]) } return num } // Returns the number of pod in this map. -func (self containersByEvictUnit) NumEvictUnits() int { - return len(self) +func (cu containersByEvictUnit) NumEvictUnits() int { + return len(cu) } // Newest first. 
@@ -113,9 +114,9 @@ func (a byCreated) Len() int { return len(a) } func (a byCreated) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byCreated) Less(i, j int) bool { return a[i].createTime.After(a[j].createTime) } -func (self *realContainerGC) GarbageCollect() error { +func (cgc *realContainerGC) GarbageCollect() error { // Separate containers by evict units. - evictUnits, unidentifiedContainers, err := self.evictableContainers() + evictUnits, unidentifiedContainers, err := cgc.evictableContainers() if err != nil { return err } @@ -123,58 +124,58 @@ func (self *realContainerGC) GarbageCollect() error { // Remove unidentified containers. for _, container := range unidentifiedContainers { glog.Infof("Removing unidentified dead container %q with ID %q", container.name, container.id) - err = self.dockerClient.RemoveContainer(docker.RemoveContainerOptions{ID: container.id}) + err = cgc.dockerClient.RemoveContainer(docker.RemoveContainerOptions{ID: container.id}) if err != nil { glog.Warningf("Failed to remove unidentified dead container %q: %v", container.name, err) } } // Enforce max containers per evict unit. - if self.policy.MaxPerPodContainer >= 0 { - self.enforceMaxContainersPerEvictUnit(evictUnits, self.policy.MaxPerPodContainer) + if cgc.policy.MaxPerPodContainer >= 0 { + cgc.enforceMaxContainersPerEvictUnit(evictUnits, cgc.policy.MaxPerPodContainer) } // Enforce max total number of containers. - if self.policy.MaxContainers >= 0 && evictUnits.NumContainers() > self.policy.MaxContainers { + if cgc.policy.MaxContainers >= 0 && evictUnits.NumContainers() > cgc.policy.MaxContainers { // Leave an equal number of containers per evict unit (min: 1). 
- numContainersPerEvictUnit := self.policy.MaxContainers / evictUnits.NumEvictUnits() + numContainersPerEvictUnit := cgc.policy.MaxContainers / evictUnits.NumEvictUnits() if numContainersPerEvictUnit < 1 { numContainersPerEvictUnit = 1 } - self.enforceMaxContainersPerEvictUnit(evictUnits, numContainersPerEvictUnit) + cgc.enforceMaxContainersPerEvictUnit(evictUnits, numContainersPerEvictUnit) // If we still need to evict, evict oldest first. numContainers := evictUnits.NumContainers() - if numContainers > self.policy.MaxContainers { + if numContainers > cgc.policy.MaxContainers { flattened := make([]containerGCInfo, 0, numContainers) for uid := range evictUnits { flattened = append(flattened, evictUnits[uid]...) } sort.Sort(byCreated(flattened)) - self.removeOldestN(flattened, numContainers-self.policy.MaxContainers) + cgc.removeOldestN(flattened, numContainers-cgc.policy.MaxContainers) } } return nil } -func (self *realContainerGC) enforceMaxContainersPerEvictUnit(evictUnits containersByEvictUnit, MaxContainers int) { +func (cgc *realContainerGC) enforceMaxContainersPerEvictUnit(evictUnits containersByEvictUnit, MaxContainers int) { for uid := range evictUnits { toRemove := len(evictUnits[uid]) - MaxContainers if toRemove > 0 { - evictUnits[uid] = self.removeOldestN(evictUnits[uid], toRemove) + evictUnits[uid] = cgc.removeOldestN(evictUnits[uid], toRemove) } } } // Removes the oldest toRemove containers and returns the resulting slice. -func (self *realContainerGC) removeOldestN(containers []containerGCInfo, toRemove int) []containerGCInfo { +func (cgc *realContainerGC) removeOldestN(containers []containerGCInfo, toRemove int) []containerGCInfo { // Remove from oldest to newest (last to first). 
numToKeep := len(containers) - toRemove for i := numToKeep; i < len(containers); i++ { - err := self.dockerClient.RemoveContainer(docker.RemoveContainerOptions{ID: containers[i].id}) + err := cgc.dockerClient.RemoveContainer(docker.RemoveContainerOptions{ID: containers[i].id}) if err != nil { glog.Warningf("Failed to remove dead container %q: %v", containers[i].name, err) } @@ -186,18 +187,18 @@ func (self *realContainerGC) removeOldestN(containers []containerGCInfo, toRemov // Get all containers that are evictable. Evictable containers are: not running // and created more than MinAge ago. -func (self *realContainerGC) evictableContainers() (containersByEvictUnit, []containerGCInfo, error) { - containers, err := dockertools.GetKubeletDockerContainers(self.dockerClient, true) +func (cgc *realContainerGC) evictableContainers() (containersByEvictUnit, []containerGCInfo, error) { + containers, err := dockertools.GetKubeletDockerContainers(cgc.dockerClient, true) if err != nil { return containersByEvictUnit{}, []containerGCInfo{}, err } unidentifiedContainers := make([]containerGCInfo, 0) evictUnits := make(containersByEvictUnit) - newestGCTime := time.Now().Add(-self.policy.MinAge) + newestGCTime := time.Now().Add(-cgc.policy.MinAge) for _, container := range containers { // Prune out running containers. - data, err := self.dockerClient.InspectContainer(container.ID) + data, err := cgc.dockerClient.InspectContainer(container.ID) if err != nil { // Container may have been removed already, skip. 
continue diff --git a/pkg/kubelet/dockertools/docker.go b/pkg/kubelet/dockertools/docker.go index 03ae96d88d9..2f0a7d1c655 100644 --- a/pkg/kubelet/dockertools/docker.go +++ b/pkg/kubelet/dockertools/docker.go @@ -117,29 +117,22 @@ type dockerContainerCommandRunner struct { } // The first version of docker that supports exec natively is 1.3.0 == API 1.15 -var dockerAPIVersionWithExec = []uint{1, 15} +var dockerAPIVersionWithExec, _ = docker.NewAPIVersion("1.15") // Returns the major and minor version numbers of docker server. -func (d *dockerContainerCommandRunner) GetDockerServerVersion() ([]uint, error) { +func (d *dockerContainerCommandRunner) GetDockerServerVersion() (docker.APIVersion, error) { env, err := d.client.Version() if err != nil { return nil, fmt.Errorf("failed to get docker server version - %v", err) } - version := []uint{} - for _, entry := range *env { - if strings.Contains(strings.ToLower(entry), "apiversion") || strings.Contains(strings.ToLower(entry), "api version") { - elems := strings.Split(strings.Split(entry, "=")[1], ".") - for _, elem := range elems { - val, err := strconv.ParseUint(elem, 10, 32) - if err != nil { - return nil, fmt.Errorf("failed to parse docker server version %q: %v", entry, err) - } - version = append(version, uint(val)) - } - return version, nil - } + + apiVersion := env.Get("ApiVersion") + version, err := docker.NewAPIVersion(apiVersion) + if err != nil { + return nil, fmt.Errorf("failed to parse docker server version %q: %v", apiVersion, err) } - return nil, fmt.Errorf("docker server version missing from server version output - %+v", env) + + return version, nil } func (d *dockerContainerCommandRunner) nativeExecSupportExists() (bool, error) { @@ -147,15 +140,7 @@ func (d *dockerContainerCommandRunner) nativeExecSupportExists() (bool, error) { if err != nil { return false, err } - if len(dockerAPIVersionWithExec) != len(version) { - return false, fmt.Errorf("unexpected docker version format. 
Expecting %v format, got %v", dockerAPIVersionWithExec, version) - } - for idx, val := range dockerAPIVersionWithExec { - if version[idx] < val { - return false, nil - } - } - return true, nil + return version.GreaterThanOrEqualTo(dockerAPIVersionWithExec), nil } func (d *dockerContainerCommandRunner) getRunInContainerCommand(containerID string, cmd []string) (*exec.Cmd, error) { @@ -494,7 +479,7 @@ func ConnectToDockerOrDie(dockerEndpoint string) DockerInterface { // TODO(yifan): Move this to container.Runtime. type ContainerCommandRunner interface { RunInContainer(containerID string, cmd []string) ([]byte, error) - GetDockerServerVersion() ([]uint, error) + GetDockerServerVersion() (docker.APIVersion, error) ExecInContainer(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error PortForward(pod *kubecontainer.Pod, port uint16, stream io.ReadWriteCloser) error } diff --git a/pkg/kubelet/dockertools/docker_test.go b/pkg/kubelet/dockertools/docker_test.go index 5771d42b346..2b57a5820a5 100644 --- a/pkg/kubelet/dockertools/docker_test.go +++ b/pkg/kubelet/dockertools/docker_test.go @@ -130,26 +130,20 @@ func TestContainerManifestNaming(t *testing.T) { } func TestGetDockerServerVersion(t *testing.T) { - fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Client version=1.2", "Server version=1.1.3", "Server API version=1.15"}} + fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.3", "ApiVersion=1.15"}} runner := dockerContainerCommandRunner{fakeDocker} version, err := runner.GetDockerServerVersion() if err != nil { t.Errorf("got error while getting docker server version - %s", err) } - expectedVersion := []uint{1, 15} - if len(expectedVersion) != len(version) { - t.Errorf("invalid docker server version. expected: %v, got: %v", expectedVersion, version) - } else { - for idx, val := range expectedVersion { - if version[idx] != val { - t.Errorf("invalid docker server version. 
expected: %v, got: %v", expectedVersion, version) - } - } + expectedVersion, _ := docker.NewAPIVersion("1.15") + if e, a := expectedVersion.String(), version.String(); e != a { + t.Errorf("invalid docker server version. expected: %v, got: %v", e, a) } } func TestExecSupportExists(t *testing.T) { - fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Client version=1.2", "Server version=1.3.0", "Server API version=1.15"}} + fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.3.0", "ApiVersion=1.15"}} runner := dockerContainerCommandRunner{fakeDocker} useNativeExec, err := runner.nativeExecSupportExists() if err != nil { @@ -161,7 +155,7 @@ func TestExecSupportExists(t *testing.T) { } func TestExecSupportNotExists(t *testing.T) { - fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Client version=1.2", "Server version=1.1.2", "Server API version=1.14"}} + fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.2", "ApiVersion=1.14"}} runner := dockerContainerCommandRunner{fakeDocker} useNativeExec, _ := runner.nativeExecSupportExists() if useNativeExec { diff --git a/pkg/kubelet/dockertools/manager.go b/pkg/kubelet/dockertools/manager.go index 3bbd59c0f25..4e8b15e1a28 100644 --- a/pkg/kubelet/dockertools/manager.go +++ b/pkg/kubelet/dockertools/manager.go @@ -87,26 +87,26 @@ type stringCache struct { cache *lru.Cache } -func (self *stringCache) composeKey(uid types.UID, name string) string { +func (sc *stringCache) composeKey(uid types.UID, name string) string { return fmt.Sprintf("%s_%s", uid, name) } -func (self *stringCache) Add(uid types.UID, name string, value string) { - self.lock.Lock() - defer self.lock.Unlock() - self.cache.Add(self.composeKey(uid, name), value) +func (sc *stringCache) Add(uid types.UID, name string, value string) { + sc.lock.Lock() + defer sc.lock.Unlock() + sc.cache.Add(sc.composeKey(uid, name), value) } -func (self *stringCache) Remove(uid types.UID, name string) { - self.lock.Lock() - defer 
self.lock.Unlock() - self.cache.Remove(self.composeKey(uid, name)) +func (sc *stringCache) Remove(uid types.UID, name string) { + sc.lock.Lock() + defer sc.lock.Unlock() + sc.cache.Remove(sc.composeKey(uid, name)) } -func (self *stringCache) Get(uid types.UID, name string) (string, bool) { - self.lock.RLock() - defer self.lock.RUnlock() - value, ok := self.cache.Get(self.composeKey(uid, name)) +func (sc *stringCache) Get(uid types.UID, name string) (string, bool) { + sc.lock.RLock() + defer sc.lock.RUnlock() + value, ok := sc.cache.Get(sc.composeKey(uid, name)) if ok { return value.(string), ok } else { @@ -119,7 +119,7 @@ func (self *stringCache) Get(uid types.UID, name string) (string, bool) { // stream the log. Set |follow| to false and specify the number of lines (e.g. // "100" or "all") to tail the log. // TODO: Make 'RawTerminal' option flagable. -func (self *DockerManager) GetKubeletDockerContainerLogs(containerID, tail string, follow bool, stdout, stderr io.Writer) (err error) { +func (dm *DockerManager) GetKubeletDockerContainerLogs(containerID, tail string, follow bool, stdout, stderr io.Writer) (err error) { opts := docker.LogsOptions{ Container: containerID, Stdout: true, @@ -135,7 +135,7 @@ func (self *DockerManager) GetKubeletDockerContainerLogs(containerID, tail strin opts.Tail = tail } - err = self.client.Logs(opts) + err = dm.client.Logs(opts) return } @@ -157,10 +157,10 @@ type containerStatusResult struct { err error } -func (self *DockerManager) inspectContainer(dockerID, containerName, tPath string) *containerStatusResult { +func (dm *DockerManager) inspectContainer(dockerID, containerName, tPath string) *containerStatusResult { result := containerStatusResult{api.ContainerStatus{}, "", nil} - inspectResult, err := self.client.InspectContainer(dockerID) + inspectResult, err := dm.client.InspectContainer(dockerID) if err != nil { result.err = err @@ -226,7 +226,7 @@ func (self *DockerManager) inspectContainer(dockerID, containerName, tPath strin 
// GetPodStatus returns docker related status for all containers in the pod as // well as the infrastructure container. -func (self *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) { +func (dm *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) { podFullName := kubecontainer.GetPodFullName(pod) uid := pod.UID manifest := pod.Spec @@ -249,7 +249,7 @@ func (self *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) { } expectedContainers[PodInfraContainerName] = api.Container{} - containers, err := self.client.ListContainers(docker.ListContainersOptions{All: true}) + containers, err := dm.client.ListContainers(docker.ListContainersOptions{All: true}) if err != nil { return nil, err } @@ -284,7 +284,7 @@ func (self *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) { var terminationState *api.ContainerState = nil // Inspect the container. - result := self.inspectContainer(value.ID, dockerContainerName, terminationMessagePath) + result := dm.inspectContainer(value.ID, dockerContainerName, terminationMessagePath) if result.err != nil { return nil, result.err } else if result.status.State.Termination != nil { @@ -347,7 +347,7 @@ func (self *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) { // record the pull failure and eliminate the image checking below. image := container.Image // TODO(dchen1107): docker/docker/issues/8365 to figure out if the image exists - _, err := self.client.InspectImage(image) + _, err := dm.client.InspectImage(image) if err == nil { containerStatus.State.Waiting = &api.ContainerStateWaiting{ Reason: fmt.Sprintf("Image: %s is ready, container is creating", image), @@ -364,7 +364,7 @@ func (self *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) { for containerName, status := range statuses { if status.State.Waiting != nil { // For containers in the waiting state, fill in a specific reason if it is recorded. 
- if reason, ok := self.reasonCache.Get(uid, containerName); ok { + if reason, ok := dm.reasonCache.Get(uid, containerName); ok { status.State.Waiting.Reason = reason } } @@ -374,13 +374,13 @@ func (self *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) { return &podStatus, nil } -func (self *DockerManager) GetRunningContainers(ids []string) ([]*docker.Container, error) { - result := []*docker.Container{} - if self.client == nil { +func (dm *DockerManager) GetRunningContainers(ids []string) ([]*docker.Container, error) { + var result []*docker.Container + if dm.client == nil { return nil, fmt.Errorf("unexpected nil docker client.") } for ix := range ids { - status, err := self.client.InspectContainer(ids[ix]) + status, err := dm.client.InspectContainer(ids[ix]) if err != nil { return nil, err } @@ -391,20 +391,20 @@ func (self *DockerManager) GetRunningContainers(ids []string) ([]*docker.Contain return result, nil } -func (self *DockerManager) RunContainer(pod *api.Pod, container *api.Container, opts *kubecontainer.RunContainerOptions) (string, error) { - dockerID, err := self.runContainer(pod, container, opts) +func (dm *DockerManager) RunContainer(pod *api.Pod, container *api.Container, opts *kubecontainer.RunContainerOptions) (string, error) { + dockerID, err := dm.runContainer(pod, container, opts) if err != nil { errString := err.Error() if errString != "" { - self.reasonCache.Add(pod.UID, container.Name, errString) + dm.reasonCache.Add(pod.UID, container.Name, errString) } else { - self.reasonCache.Remove(pod.UID, container.Name) + dm.reasonCache.Remove(pod.UID, container.Name) } } return dockerID, err } -func (self *DockerManager) runContainer(pod *api.Pod, container *api.Container, opts *kubecontainer.RunContainerOptions) (string, error) { +func (dm *DockerManager) runContainer(pod *api.Pod, container *api.Container, opts *kubecontainer.RunContainerOptions) (string, error) { ref, err := kubecontainer.GenerateContainerRef(pod, container) if 
err != nil { glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err) @@ -441,16 +441,16 @@ func (self *DockerManager) runContainer(pod *api.Pod, container *api.Container, glog.V(3).Infof("Container %v/%v/%v: setting entrypoint \"%v\" and command \"%v\"", pod.Namespace, pod.Name, container.Name, dockerOpts.Config.Entrypoint, dockerOpts.Config.Cmd) - dockerContainer, err := self.client.CreateContainer(dockerOpts) + dockerContainer, err := dm.client.CreateContainer(dockerOpts) if err != nil { if ref != nil { - self.recorder.Eventf(ref, "failed", "Failed to create docker container with error: %v", err) + dm.recorder.Eventf(ref, "failed", "Failed to create docker container with error: %v", err) } return "", err } if ref != nil { - self.recorder.Eventf(ref, "created", "Created with docker id %v", dockerContainer.ID) + dm.recorder.Eventf(ref, "created", "Created with docker id %v", dockerContainer.ID) } // The reason we create and mount the log file in here (not in kubelet) is because @@ -495,15 +495,15 @@ func (self *DockerManager) runContainer(pod *api.Pod, container *api.Container, hc.DNSSearch = opts.DNSSearch } - if err = self.client.StartContainer(dockerContainer.ID, hc); err != nil { + if err = dm.client.StartContainer(dockerContainer.ID, hc); err != nil { if ref != nil { - self.recorder.Eventf(ref, "failed", + dm.recorder.Eventf(ref, "failed", "Failed to start with docker id %v with error: %v", dockerContainer.ID, err) } return "", err } if ref != nil { - self.recorder.Eventf(ref, "started", "Started with docker id %v", dockerContainer.ID) + dm.recorder.Eventf(ref, "started", "Started with docker id %v", dockerContainer.ID) } return dockerContainer.ID, nil } @@ -565,11 +565,11 @@ func makeCapabilites(capAdd []api.CapabilityType, capDrop []api.CapabilityType) return addCaps, dropCaps } -func (self *DockerManager) GetPods(all bool) ([]*kubecontainer.Pod, error) { +func (dm *DockerManager) GetPods(all bool) 
([]*kubecontainer.Pod, error) { pods := make(map[types.UID]*kubecontainer.Pod) var result []*kubecontainer.Pod - containers, err := GetKubeletDockerContainers(self.client, all) + containers, err := GetKubeletDockerContainers(dm.client, all) if err != nil { return nil, err } @@ -614,20 +614,20 @@ func (self *DockerManager) GetPods(all bool) ([]*kubecontainer.Pod, error) { return result, nil } -func (self *DockerManager) Pull(image string) error { - return self.Puller.Pull(image) +func (dm *DockerManager) Pull(image string) error { + return dm.Puller.Pull(image) } -func (self *DockerManager) IsImagePresent(image string) (bool, error) { - return self.Puller.IsImagePresent(image) +func (dm *DockerManager) IsImagePresent(image string) (bool, error) { + return dm.Puller.IsImagePresent(image) } // PodInfraContainer returns true if the pod infra container has changed. -func (self *DockerManager) PodInfraContainerChanged(pod *api.Pod, podInfraContainer *kubecontainer.Container) (bool, error) { +func (dm *DockerManager) PodInfraContainerChanged(pod *api.Pod, podInfraContainer *kubecontainer.Container) (bool, error) { networkMode := "" var ports []api.ContainerPort - dockerPodInfraContainer, err := self.client.InspectContainer(string(podInfraContainer.ID)) + dockerPodInfraContainer, err := dm.client.InspectContainer(string(podInfraContainer.ID)) if err != nil { return false, err } @@ -650,7 +650,7 @@ func (self *DockerManager) PodInfraContainerChanged(pod *api.Pod, podInfraContai } expectedPodInfraContainer := &api.Container{ Name: PodInfraContainerName, - Image: self.PodInfraContainerImage, + Image: dm.PodInfraContainerImage, Ports: ports, } return podInfraContainer.Hash != HashContainer(expectedPodInfraContainer), nil diff --git a/pkg/kubelet/handlers.go b/pkg/kubelet/handlers.go index 0f37709e70a..44b9a5045d4 100644 --- a/pkg/kubelet/handlers.go +++ b/pkg/kubelet/handlers.go @@ -22,32 +22,52 @@ import ( "strconv" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" - 
"github.com/GoogleCloudPlatform/kubernetes/pkg/types" + "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/golang/glog" ) -type execActionHandler struct { - kubelet *Kubelet +type HandlerRunner interface { + Run(containerID string, pod *api.Pod, container *api.Container, handler *api.Handler) error } -func (e *execActionHandler) Run(podFullName string, uid types.UID, container *api.Container, handler *api.Handler) error { - _, err := e.kubelet.RunInContainer(podFullName, uid, container.Name, handler.Exec.Command) - return err +type handlerRunner struct { + httpGetter httpGetter + commandRunner dockertools.ContainerCommandRunner + containerManager *dockertools.DockerManager } -type httpActionHandler struct { - kubelet *Kubelet - client httpGetter +// TODO(yifan): Merge commandRunner and containerManager once containerManager implements the ContainerCommandRunner interface. +func NewHandlerRunner(httpGetter httpGetter, commandRunner dockertools.ContainerCommandRunner, containerManager *dockertools.DockerManager) *handlerRunner { + return &handlerRunner{ + httpGetter: httpGetter, + commandRunner: commandRunner, + containerManager: containerManager, + } } -// ResolvePort attempts to turn a IntOrString port reference into a concrete port number. +// TODO(yifan): Use a strong type for containerID. +func (hr *handlerRunner) Run(containerID string, pod *api.Pod, container *api.Container, handler *api.Handler) error { + switch { + case handler.Exec != nil: + _, err := hr.commandRunner.RunInContainer(containerID, handler.Exec.Command) + return err + case handler.HTTPGet != nil: + return hr.runHTTPHandler(pod, container, handler) + default: + err := fmt.Errorf("Invalid handler: %v", handler) + glog.Errorf("Cannot run handler: %v", err) + return err + } +} + +// resolvePort attempts to turn a IntOrString port reference into a concrete port number. 
// If portReference has an int value, it is treated as a literal, and simply returns that value. // If portReference is a string, an attempt is first made to parse it as an integer. If that fails, // an attempt is made to find a port with the same name in the container spec. // If a port with the same name is found, it's ContainerPort value is returned. If no matching // port is found, an error is returned. -func ResolvePort(portReference util.IntOrString, container *api.Container) (int, error) { +func resolvePort(portReference util.IntOrString, container *api.Container) (int, error) { if portReference.Kind == util.IntstrInt { return portReference.IntVal, nil } else { @@ -66,10 +86,10 @@ func ResolvePort(portReference util.IntOrString, container *api.Container) (int, return -1, fmt.Errorf("couldn't find port: %v in %v", portReference, container) } -func (h *httpActionHandler) Run(podFullName string, uid types.UID, container *api.Container, handler *api.Handler) error { +func (hr *handlerRunner) runHTTPHandler(pod *api.Pod, container *api.Container, handler *api.Handler) error { host := handler.HTTPGet.Host if len(host) == 0 { - status, err := h.kubelet.GetPodStatus(podFullName) + status, err := hr.containerManager.GetPodStatus(pod) if err != nil { glog.Errorf("Unable to get pod info, event handlers may be invalid.") return err @@ -84,12 +104,12 @@ func (h *httpActionHandler) Run(podFullName string, uid types.UID, container *ap port = 80 } else { var err error - port, err = ResolvePort(handler.HTTPGet.Port, container) + port, err = resolvePort(handler.HTTPGet.Port, container) if err != nil { return err } } url := fmt.Sprintf("http://%s/%s", net.JoinHostPort(host, strconv.Itoa(port)), handler.HTTPGet.Path) - _, err := h.client.Get(url) + _, err := hr.httpGetter.Get(url) return err } diff --git a/pkg/kubelet/handlers_test.go b/pkg/kubelet/handlers_test.go index 8f4525a4e7c..87d3e551a12 100644 --- a/pkg/kubelet/handlers_test.go +++ b/pkg/kubelet/handlers_test.go @@ 
-25,7 +25,7 @@ import ( func TestResolvePortInt(t *testing.T) { expected := 80 - port, err := ResolvePort(util.IntOrString{Kind: util.IntstrInt, IntVal: expected}, &api.Container{}) + port, err := resolvePort(util.IntOrString{Kind: util.IntstrInt, IntVal: expected}, &api.Container{}) if port != expected { t.Errorf("expected: %d, saw: %d", expected, port) } @@ -42,7 +42,7 @@ func TestResolvePortString(t *testing.T) { {Name: name, ContainerPort: expected}, }, } - port, err := ResolvePort(util.IntOrString{Kind: util.IntstrString, StrVal: name}, container) + port, err := resolvePort(util.IntOrString{Kind: util.IntstrString, StrVal: name}, container) if port != expected { t.Errorf("expected: %d, saw: %d", expected, port) } @@ -59,7 +59,7 @@ func TestResolvePortStringUnknown(t *testing.T) { {Name: "bar", ContainerPort: expected}, }, } - port, err := ResolvePort(util.IntOrString{Kind: util.IntstrString, StrVal: name}, container) + port, err := resolvePort(util.IntOrString{Kind: util.IntstrString, StrVal: name}, container) if port != -1 { t.Errorf("expected: -1, saw: %d", port) } diff --git a/pkg/kubelet/image_manager.go b/pkg/kubelet/image_manager.go index a62cc0a5529..c37c7f6f3c7 100644 --- a/pkg/kubelet/image_manager.go +++ b/pkg/kubelet/image_manager.go @@ -112,16 +112,16 @@ func newImageManager(dockerClient dockertools.DockerInterface, cadvisorInterface return im, nil } -func (self *realImageManager) start() error { +func (im *realImageManager) start() error { // Initial detection make detected time "unknown" in the past. 
var zero time.Time - err := self.detectImages(zero) + err := im.detectImages(zero) if err != nil { return err } go util.Forever(func() { - err := self.detectImages(time.Now()) + err := im.detectImages(time.Now()) if err != nil { glog.Warningf("[ImageManager] Failed to monitor images: %v", err) } @@ -130,12 +130,12 @@ func (self *realImageManager) start() error { return nil } -func (self *realImageManager) detectImages(detected time.Time) error { - images, err := self.dockerClient.ListImages(docker.ListImagesOptions{}) +func (im *realImageManager) detectImages(detected time.Time) error { + images, err := im.dockerClient.ListImages(docker.ListImagesOptions{}) if err != nil { return err } - containers, err := self.dockerClient.ListContainers(docker.ListContainersOptions{ + containers, err := im.dockerClient.ListContainers(docker.ListContainersOptions{ All: true, }) if err != nil { @@ -151,39 +151,39 @@ func (self *realImageManager) detectImages(detected time.Time) error { // Add new images and record those being used. now := time.Now() currentImages := util.NewStringSet() - self.imageRecordsLock.Lock() - defer self.imageRecordsLock.Unlock() + im.imageRecordsLock.Lock() + defer im.imageRecordsLock.Unlock() for _, image := range images { currentImages.Insert(image.ID) // New image, set it as detected now. - if _, ok := self.imageRecords[image.ID]; !ok { - self.imageRecords[image.ID] = &imageRecord{ + if _, ok := im.imageRecords[image.ID]; !ok { + im.imageRecords[image.ID] = &imageRecord{ detected: detected, } } // Set last used time to now if the image is being used. if isImageUsed(&image, imagesInUse) { - self.imageRecords[image.ID].lastUsed = now + im.imageRecords[image.ID].lastUsed = now } - self.imageRecords[image.ID].size = image.VirtualSize + im.imageRecords[image.ID].size = image.VirtualSize } // Remove old images from our records. 
- for image := range self.imageRecords { + for image := range im.imageRecords { if !currentImages.Has(image) { - delete(self.imageRecords, image) + delete(im.imageRecords, image) } } return nil } -func (self *realImageManager) GarbageCollect() error { +func (im *realImageManager) GarbageCollect() error { // Get disk usage on disk holding images. - fsInfo, err := self.cadvisor.DockerImagesFsInfo() + fsInfo, err := im.cadvisor.DockerImagesFsInfo() if err != nil { return err } @@ -193,23 +193,23 @@ func (self *realImageManager) GarbageCollect() error { // Check valid capacity. if capacity == 0 { err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint) - self.recorder.Eventf(self.nodeRef, "invalidDiskCapacity", err.Error()) + im.recorder.Eventf(im.nodeRef, "invalidDiskCapacity", err.Error()) return err } // If over the max threshold, free enough to place us at the lower threshold. usagePercent := int(usage * 100 / capacity) - if usagePercent >= self.policy.HighThresholdPercent { - amountToFree := usage - (int64(self.policy.LowThresholdPercent) * capacity / 100) - glog.Infof("[ImageManager]: Disk usage on %q (%s) is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes", fsInfo.Device, fsInfo.Mountpoint, usagePercent, self.policy.HighThresholdPercent, amountToFree) - freed, err := self.freeSpace(amountToFree) + if usagePercent >= im.policy.HighThresholdPercent { + amountToFree := usage - (int64(im.policy.LowThresholdPercent) * capacity / 100) + glog.Infof("[ImageManager]: Disk usage on %q (%s) is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes", fsInfo.Device, fsInfo.Mountpoint, usagePercent, im.policy.HighThresholdPercent, amountToFree) + freed, err := im.freeSpace(amountToFree) if err != nil { return err } if freed < amountToFree { err := fmt.Errorf("failed to garbage collect required amount of images. 
Wanted to free %d, but freed %d", amountToFree, freed) - self.recorder.Eventf(self.nodeRef, "freeDiskSpaceFailed", err.Error()) + im.recorder.Eventf(im.nodeRef, "freeDiskSpaceFailed", err.Error()) return err } } @@ -223,19 +223,19 @@ func (self *realImageManager) GarbageCollect() error { // bytes freed is always returned. // Note that error may be nil and the number of bytes free may be less // than bytesToFree. -func (self *realImageManager) freeSpace(bytesToFree int64) (int64, error) { +func (im *realImageManager) freeSpace(bytesToFree int64) (int64, error) { startTime := time.Now() - err := self.detectImages(startTime) + err := im.detectImages(startTime) if err != nil { return 0, err } - self.imageRecordsLock.Lock() - defer self.imageRecordsLock.Unlock() + im.imageRecordsLock.Lock() + defer im.imageRecordsLock.Unlock() // Get all images in eviction order. - images := make([]evictionInfo, 0, len(self.imageRecords)) - for image, record := range self.imageRecords { + images := make([]evictionInfo, 0, len(im.imageRecords)) + for image, record := range im.imageRecords { images = append(images, evictionInfo{ id: image, imageRecord: *record, @@ -254,12 +254,12 @@ func (self *realImageManager) freeSpace(bytesToFree int64) (int64, error) { // Remove image. Continue despite errors. 
glog.Infof("[ImageManager]: Removing image %q to free %d bytes", image.id, image.size) - err := self.dockerClient.RemoveImage(image.id) + err := im.dockerClient.RemoveImage(image.id) if err != nil { lastErr = err continue } - delete(self.imageRecords, image.id) + delete(im.imageRecords, image.id) spaceFreed += image.size if spaceFreed >= bytesToFree { @@ -277,14 +277,14 @@ type evictionInfo struct { type byLastUsedAndDetected []evictionInfo -func (self byLastUsedAndDetected) Len() int { return len(self) } -func (self byLastUsedAndDetected) Swap(i, j int) { self[i], self[j] = self[j], self[i] } -func (self byLastUsedAndDetected) Less(i, j int) bool { +func (ev byLastUsedAndDetected) Len() int { return len(ev) } +func (ev byLastUsedAndDetected) Swap(i, j int) { ev[i], ev[j] = ev[j], ev[i] } +func (ev byLastUsedAndDetected) Less(i, j int) bool { // Sort by last used, break ties by detected. - if self[i].lastUsed.Equal(self[j].lastUsed) { - return self[i].detected.Before(self[j].detected) + if ev[i].lastUsed.Equal(ev[j].lastUsed) { + return ev[i].detected.Before(ev[j].detected) } else { - return self[i].lastUsed.Before(self[j].lastUsed) + return ev[i].lastUsed.Before(ev[j].lastUsed) } } diff --git a/pkg/kubelet/image_manager_test.go b/pkg/kubelet/image_manager_test.go index 26e7d424418..95adc452d7e 100644 --- a/pkg/kubelet/image_manager_test.go +++ b/pkg/kubelet/image_manager_test.go @@ -48,15 +48,15 @@ func newRealImageManager(policy ImageGCPolicy) (*realImageManager, *dockertools. } // Accessors used for thread-safe testing. 
-func (self *realImageManager) imageRecordsLen() int { - self.imageRecordsLock.Lock() - defer self.imageRecordsLock.Unlock() - return len(self.imageRecords) +func (im *realImageManager) imageRecordsLen() int { + im.imageRecordsLock.Lock() + defer im.imageRecordsLock.Unlock() + return len(im.imageRecords) } -func (self *realImageManager) getImageRecord(name string) (*imageRecord, bool) { - self.imageRecordsLock.Lock() - defer self.imageRecordsLock.Unlock() - v, ok := self.imageRecords[name] +func (im *realImageManager) getImageRecord(name string) (*imageRecord, bool) { + im.imageRecordsLock.Lock() + defer im.imageRecordsLock.Unlock() + v, ok := im.imageRecords[name] vCopy := *v return &vCopy, ok } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index d3269151547..c58189628ab 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -91,7 +91,7 @@ type SyncHandler interface { // Syncs current state to match the specified pods. SyncPodType specified what // type of sync is occuring per pod. StartTime specifies the time at which // syncing began (for use in monitoring). 
- SyncPods(pods []api.Pod, podSyncTypes map[types.UID]metrics.SyncPodType, mirrorPods map[string]api.Pod, + SyncPods(pods []*api.Pod, podSyncTypes map[types.UID]metrics.SyncPodType, mirrorPods map[string]*api.Pod, startTime time.Time) error } @@ -203,22 +203,21 @@ func NewMainKubelet( containerManager := dockertools.NewDockerManager(dockerClient, recorder, podInfraContainerImage, pullQPS, pullBurst) klet := &Kubelet{ - hostname: hostname, - dockerClient: dockerClient, - kubeClient: kubeClient, - rootDirectory: rootDirectory, - resyncInterval: resyncInterval, - containerRefManager: kubecontainer.NewRefManager(), - readinessManager: kubecontainer.NewReadinessManager(), - runner: dockertools.NewDockerContainerCommandRunner(dockerClient), - httpClient: &http.Client{}, - sourcesReady: sourcesReady, - clusterDomain: clusterDomain, - clusterDNS: clusterDNS, - serviceLister: serviceLister, - nodeLister: nodeLister, - masterServiceNamespace: masterServiceNamespace, - prober: newProbeHolder(), + hostname: hostname, + dockerClient: dockerClient, + kubeClient: kubeClient, + rootDirectory: rootDirectory, + resyncInterval: resyncInterval, + containerRefManager: kubecontainer.NewRefManager(), + readinessManager: kubecontainer.NewReadinessManager(), + runner: dockertools.NewDockerContainerCommandRunner(dockerClient), + httpClient: &http.Client{}, + sourcesReady: sourcesReady, + clusterDomain: clusterDomain, + clusterDNS: clusterDNS, + serviceLister: serviceLister, + nodeLister: nodeLister, + masterServiceNamespace: masterServiceNamespace, streamingConnectionIdleTimeout: streamingConnectionIdleTimeout, recorder: recorder, cadvisor: cadvisorInterface, @@ -233,6 +232,8 @@ func NewMainKubelet( } klet.podManager = newBasicPodManager(klet.kubeClient) + klet.prober = NewProber(klet.runner, klet.readinessManager, klet.containerRefManager, klet.recorder) + klet.handlerRunner = NewHandlerRunner(klet.httpClient, klet.runner, klet.containerManager) runtimeCache, err := 
kubecontainer.NewRuntimeCache(containerManager) if err != nil { @@ -312,11 +313,15 @@ type Kubelet struct { // Volume plugins. volumePluginMgr volume.VolumePluginMgr - // Network plugin + // Network plugin. networkPlugin network.NetworkPlugin - // Probe runner holder - prober probeHolder + // Healthy check prober. + prober *Prober + + // Container lifecycle handler runner. + handlerRunner HandlerRunner + // Container readiness state manager. readinessManager *kubecontainer.ReadinessManager @@ -597,31 +602,6 @@ func makeBinds(container *api.Container, podVolumes volumeMap) []string { return binds } -// A basic interface that knows how to execute handlers -type actionHandler interface { - Run(podFullName string, uid types.UID, container *api.Container, handler *api.Handler) error -} - -func (kl *Kubelet) newActionHandler(handler *api.Handler) actionHandler { - switch { - case handler.Exec != nil: - return &execActionHandler{kubelet: kl} - case handler.HTTPGet != nil: - return &httpActionHandler{client: kl.httpClient, kubelet: kl} - default: - glog.Errorf("Invalid handler: %v", handler) - return nil - } -} - -func (kl *Kubelet) runHandler(podFullName string, uid types.UID, container *api.Container, handler *api.Handler) error { - actionHandler := kl.newActionHandler(handler) - if actionHandler == nil { - return fmt.Errorf("invalid handler") - } - return actionHandler.Run(podFullName, uid, container, handler) -} - // generateRunContainerOptions generates the RunContainerOptions, which can be used by // the container runtime to set parameters for launching a container. 
func (kl *Kubelet) generateRunContainerOptions(pod *api.Pod, container *api.Container, podVolumes volumeMap, netMode, ipcMode string) (*kubecontainer.RunContainerOptions, error) { @@ -677,7 +657,7 @@ func (kl *Kubelet) runContainer(pod *api.Pod, container *api.Container, podVolum } if container.Lifecycle != nil && container.Lifecycle.PostStart != nil { - handlerErr := kl.runHandler(kubecontainer.GetPodFullName(pod), pod.UID, container, container.Lifecycle.PostStart) + handlerErr := kl.handlerRunner.Run(id, pod, container, container.Lifecycle.PostStart) if handlerErr != nil { kl.killContainerByID(id) return dockertools.DockerID(""), fmt.Errorf("failed to call event handler: %v", handlerErr) @@ -937,17 +917,27 @@ func (kl *Kubelet) pullImage(img string, ref *api.ObjectReference) error { } // Kill all running containers in a pod (includes the pod infra container). -func (kl *Kubelet) killPod(runningPod kubecontainer.Pod) error { +func (kl *Kubelet) killPod(pod kubecontainer.Pod) error { // Send the kills in parallel since they may take a long time. - errs := make(chan error, len(runningPod.Containers)) + errs := make(chan error, len(pod.Containers)) wg := sync.WaitGroup{} - for _, container := range runningPod.Containers { + for _, container := range pod.Containers { wg.Add(1) go func(container *kubecontainer.Container) { defer util.HandleCrash() + // Call the networking plugin for teardown. + // TODO: Handle this without signaling the pod infra container to + // adapt to the generic container runtime. 
+ if container.Name == dockertools.PodInfraContainerName { + err := kl.networkPlugin.TearDownPod(pod.Namespace, pod.Name, dockertools.DockerID(container.ID)) + if err != nil { + glog.Errorf("Failed tearing down the infra container: %v", err) + errs <- err + } + } err := kl.killContainer(container) if err != nil { - glog.Errorf("Failed to delete container: %v; Skipping pod %q", err, runningPod.ID) + glog.Errorf("Failed to delete container: %v; Skipping pod %q", err, pod.ID) errs <- err } wg.Done() @@ -982,7 +972,7 @@ func (kl *Kubelet) makePodDataDirs(pod *api.Pod) error { return nil } -func (kl *Kubelet) shouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatus *api.PodStatus) bool { +func shouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatus *api.PodStatus, readinessManager *kubecontainer.ReadinessManager) bool { podFullName := kubecontainer.GetPodFullName(pod) // Get all dead container status. @@ -996,20 +986,20 @@ func (kl *Kubelet) shouldContainerBeRestarted(container *api.Container, pod *api // Set dead containers to unready state. for _, c := range resultStatus { // TODO(yifan): Unify the format of container ID. (i.e. including docker:// as prefix). - kl.readinessManager.RemoveReadiness(strings.TrimPrefix(c.ContainerID, dockertools.DockerPrefix)) + readinessManager.RemoveReadiness(strings.TrimPrefix(c.ContainerID, dockertools.DockerPrefix)) } // Check RestartPolicy for dead container. if len(resultStatus) > 0 { if pod.Spec.RestartPolicy == api.RestartPolicyNever { - glog.Infof("Already ran container %q of pod %q, do nothing", container.Name, podFullName) + glog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, podFullName) return false } if pod.Spec.RestartPolicy == api.RestartPolicyOnFailure { // Check the exit code of last run. Note: This assumes the result is sorted // by the created time in reverse order. 
if resultStatus[0].State.Termination.ExitCode == 0 { - glog.Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, podFullName) + glog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, podFullName) return false } } @@ -1124,7 +1114,7 @@ func (kl *Kubelet) computePodContainerChanges(pod *api.Pod, runningPod kubeconta c := runningPod.FindContainerByName(container.Name) if c == nil { - if kl.shouldContainerBeRestarted(&container, pod, &podStatus) { + if shouldContainerBeRestarted(&container, pod, &podStatus, kl.readinessManager) { // If we are here it means that the container is dead and should be restarted, or never existed and should // be created. We may be inserting this ID again if the container has changed and it has // RestartPolicy::Always, but it's not a big deal. @@ -1159,7 +1149,7 @@ func (kl *Kubelet) computePodContainerChanges(pod *api.Pod, runningPod kubeconta continue } - result, err := kl.probeContainer(pod, podStatus, container, string(c.ID), c.Created) + result, err := kl.prober.Probe(pod, podStatus, container, string(c.ID), c.Created) if err != nil { // TODO(vmarmol): examine this logic. 
glog.V(2).Infof("probe no-error: %q", container.Name) @@ -1264,7 +1254,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont if err != nil { glog.Errorf("Couldn't make a ref to pod %q: '%v'", podFullName, err) } - glog.Infof("Creating pod infra container for %q", podFullName) + glog.V(4).Infof("Creating pod infra container for %q", podFullName) podInfraContainerID, err = kl.createPodInfraContainer(pod) // Call the networking plugin @@ -1305,7 +1295,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont } if mirrorPod == nil { glog.V(3).Infof("Creating a mirror pod %q", podFullName) - if err := kl.podManager.CreateMirrorPod(*pod); err != nil { + if err := kl.podManager.CreateMirrorPod(pod); err != nil { glog.Errorf("Failed creating a mirror pod %q: %v", podFullName, err) } // Pod status update is edge-triggered. If there is any update of the @@ -1319,7 +1309,7 @@ func (kl *Kubelet) syncPod(pod *api.Pod, mirrorPod *api.Pod, runningPod kubecont // Stores all volumes defined by the set of pods into a map. // Keys for each entry are in the format (POD_ID)/(VOLUME_NAME) -func getDesiredVolumes(pods []api.Pod) map[string]api.Volume { +func getDesiredVolumes(pods []*api.Pod) map[string]api.Volume { desiredVolumes := make(map[string]api.Volume) for _, pod := range pods { for _, volume := range pod.Spec.Volumes { @@ -1330,10 +1320,10 @@ func getDesiredVolumes(pods []api.Pod) map[string]api.Volume { return desiredVolumes } -func (kl *Kubelet) cleanupOrphanedPods(pods []api.Pod) error { +func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*api.Pod) error { desired := util.NewStringSet() - for i := range pods { - desired.Insert(string(pods[i].UID)) + for _, pod := range pods { + desired.Insert(string(pod.UID)) } found, err := kl.listPodsFromDisk() if err != nil { @@ -1353,7 +1343,7 @@ func (kl *Kubelet) cleanupOrphanedPods(pods []api.Pod) error { // Compares the map of current volumes to the map of desired volumes. 
// If an active volume does not have a respective desired volume, clean it up. -func (kl *Kubelet) cleanupOrphanedVolumes(pods []api.Pod, running []*docker.Container) error { +func (kl *Kubelet) cleanupOrphanedVolumes(pods []*api.Pod, running []*docker.Container) error { desiredVolumes := getDesiredVolumes(pods) currentVolumes := kl.getPodVolumesFromDisk() runningSet := util.StringSet{} @@ -1388,8 +1378,8 @@ func (kl *Kubelet) cleanupOrphanedVolumes(pods []api.Pod, running []*docker.Cont } // SyncPods synchronizes the configured list of pods (desired state) with the host current state. -func (kl *Kubelet) SyncPods(allPods []api.Pod, podSyncTypes map[types.UID]metrics.SyncPodType, - mirrorPods map[string]api.Pod, start time.Time) error { +func (kl *Kubelet) SyncPods(allPods []*api.Pod, podSyncTypes map[types.UID]metrics.SyncPodType, + mirrorPods map[string]*api.Pod, start time.Time) error { defer func() { metrics.SyncPodsLatency.Observe(metrics.SinceInMicroseconds(start)) }() @@ -1397,15 +1387,15 @@ func (kl *Kubelet) SyncPods(allPods []api.Pod, podSyncTypes map[types.UID]metric // Remove obsolete entries in podStatus where the pod is no longer considered bound to this node. podFullNames := make(map[string]bool) for _, pod := range allPods { - podFullNames[kubecontainer.GetPodFullName(&pod)] = true + podFullNames[kubecontainer.GetPodFullName(pod)] = true } kl.statusManager.RemoveOrphanedStatuses(podFullNames) // Filter out the rejected pod. They don't have running containers. 
kl.handleNotFittingPods(allPods) - var pods []api.Pod + var pods []*api.Pod for _, pod := range allPods { - status, ok := kl.statusManager.GetPodStatus(kubecontainer.GetPodFullName(&pod)) + status, ok := kl.statusManager.GetPodStatus(kubecontainer.GetPodFullName(pod)) if ok && status.Phase == api.PodFailed { continue } @@ -1423,18 +1413,13 @@ func (kl *Kubelet) SyncPods(allPods []api.Pod, podSyncTypes map[types.UID]metric } // Check for any containers that need starting - for ix := range pods { - pod := &pods[ix] + for _, pod := range pods { podFullName := kubecontainer.GetPodFullName(pod) uid := pod.UID desiredPods[uid] = empty{} // Run the sync in an async manifest worker. - var mirrorPod *api.Pod = nil - if m, ok := mirrorPods[podFullName]; ok { - mirrorPod = &m - } - kl.podWorkers.UpdatePod(pod, mirrorPod, func() { + kl.podWorkers.UpdatePod(pod, mirrorPods[podFullName], func() { metrics.SyncPodLatency.WithLabelValues(podSyncTypes[pod.UID].String()).Observe(metrics.SinceInMicroseconds(start)) }) @@ -1453,48 +1438,25 @@ func (kl *Kubelet) SyncPods(allPods []api.Pod, podSyncTypes map[types.UID]metric return nil } - // Kill any containers we don't need. - killed := []string{} - for _, pod := range runningPods { - if _, found := desiredPods[pod.ID]; found { - // syncPod() will handle this one. - continue - } - - // Kill all the containers in the unidentified pod. 
- for _, c := range pod.Containers { - // call the networking plugin for teardown - if c.Name == dockertools.PodInfraContainerName { - err := kl.networkPlugin.TearDownPod(pod.Namespace, pod.Name, dockertools.DockerID(c.ID)) - if err != nil { - glog.Errorf("Network plugin pre-delete method returned an error: %v", err) - } - } - glog.V(1).Infof("Killing unwanted container %+v", c) - err = kl.killContainer(c) - if err != nil { - glog.Errorf("Error killing container %+v: %v", c, err) - } else { - killed = append(killed, string(c.ID)) - } - } - } - - running, err := kl.containerManager.GetRunningContainers(killed) + // Kill containers associated with unwanted pods and get a list of + // unwanted containers that are still running. + running, err := kl.killUnwantedPods(desiredPods, runningPods) if err != nil { - glog.Errorf("Failed to poll container state: %v", err) + glog.Errorf("Failed killing unwanted containers: %v", err) return err } // Remove any orphaned volumes. err = kl.cleanupOrphanedVolumes(pods, running) if err != nil { + glog.Errorf("Failed cleaning up orphaned volumes: %v", err) return err } - // Remove any orphaned pods. - err = kl.cleanupOrphanedPods(pods) + // Remove any orphaned pod directories. + err = kl.cleanupOrphanedPodDirs(pods) if err != nil { + glog.Errorf("Failed cleaning up orphaned pod directories: %v", err) return err } @@ -1504,7 +1466,68 @@ func (kl *Kubelet) SyncPods(allPods []api.Pod, podSyncTypes map[types.UID]metric return err } -type podsByCreationTime []api.Pod +// killUnwantedPods kills the unwanted, running pods in parallel, and returns +// containers in those pods that it failed to terminate. 
+func (kl *Kubelet) killUnwantedPods(desiredPods map[types.UID]empty, + runningPods []*kubecontainer.Pod) ([]*docker.Container, error) { + type result struct { + containers []*docker.Container + err error + } + ch := make(chan result, len(runningPods)) + defer close(ch) + numWorkers := 0 + for _, pod := range runningPods { + if _, found := desiredPods[pod.ID]; found { + // Per-pod workers will handle the desired pods. + continue + } + numWorkers++ + go func(pod *kubecontainer.Pod, ch chan result) { + defer func() { + // Send the IDs of the containers that we failed to killed. + containers, err := kl.getRunningContainersByPod(pod) + ch <- result{containers: containers, err: err} + }() + glog.V(1).Infof("Killing unwanted pod %q", pod.Name) + // Stop the containers. + err := kl.killPod(*pod) + if err != nil { + glog.Errorf("Failed killing the pod %q: %v", pod.Name, err) + return + } + // Remove the pod directory. + err = os.RemoveAll(kl.getPodDir(pod.ID)) + if err != nil { + glog.Errorf("Failed removing pod directory for %q", pod.Name) + return + } + }(pod, ch) + } + + // Aggregate results from the pod killing workers. + var errs []error + var running []*docker.Container + for i := 0; i < numWorkers; i++ { + m := <-ch + if m.err != nil { + errs = append(errs, m.err) + continue + } + running = append(running, m.containers...) + } + return running, utilErrors.NewAggregate(errs) +} + +func (kl *Kubelet) getRunningContainersByPod(pod *kubecontainer.Pod) ([]*docker.Container, error) { + containerIDs := make([]string, len(pod.Containers)) + for i, c := range pod.Containers { + containerIDs[i] = string(c.ID) + } + return kl.containerManager.GetRunningContainers(containerIDs) +} + +type podsByCreationTime []*api.Pod func (s podsByCreationTime) Len() int { return len(s) @@ -1519,31 +1542,30 @@ func (s podsByCreationTime) Less(i, j int) bool { } // checkHostPortConflicts detects pods with conflicted host ports. 
-func checkHostPortConflicts(pods []api.Pod) (fitting []api.Pod, notFitting []api.Pod) { +func checkHostPortConflicts(pods []*api.Pod) (fitting []*api.Pod, notFitting []*api.Pod) { ports := map[int]bool{} extract := func(p *api.ContainerPort) int { return p.HostPort } // Respect the pod creation order when resolving conflicts. sort.Sort(podsByCreationTime(pods)) - for i := range pods { - pod := &pods[i] + for _, pod := range pods { if errs := validation.AccumulateUniquePorts(pod.Spec.Containers, ports, extract); len(errs) != 0 { glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", kubecontainer.GetPodFullName(pod), errs) - notFitting = append(notFitting, *pod) + notFitting = append(notFitting, pod) continue } - fitting = append(fitting, *pod) + fitting = append(fitting, pod) } return } // checkCapacityExceeded detects pods that exceeds node's resources. -func (kl *Kubelet) checkCapacityExceeded(pods []api.Pod) (fitting []api.Pod, notFitting []api.Pod) { +func (kl *Kubelet) checkCapacityExceeded(pods []*api.Pod) (fitting []*api.Pod, notFitting []*api.Pod) { info, err := kl.GetCachedMachineInfo() if err != nil { glog.Errorf("error getting machine info: %v", err) - return pods, []api.Pod{} + return pods, nil } // Respect the pod creation order when resolving conflicts. @@ -1554,14 +1576,14 @@ func (kl *Kubelet) checkCapacityExceeded(pods []api.Pod) (fitting []api.Pod, not } // checkNodeSelectorMatching detects pods that do not match node's labels. 
-func (kl *Kubelet) checkNodeSelectorMatching(pods []api.Pod) (fitting []api.Pod, notFitting []api.Pod) { +func (kl *Kubelet) checkNodeSelectorMatching(pods []*api.Pod) (fitting []*api.Pod, notFitting []*api.Pod) { node, err := kl.GetNode() if err != nil { glog.Errorf("error getting node: %v", err) - return pods, []api.Pod{} + return pods, nil } for _, pod := range pods { - if !scheduler.PodMatchesNodeLabels(&pod, node) { + if !scheduler.PodMatchesNodeLabels(pod, node) { notFitting = append(notFitting, pod) continue } @@ -1572,25 +1594,25 @@ func (kl *Kubelet) checkNodeSelectorMatching(pods []api.Pod) (fitting []api.Pod, // handleNotfittingPods handles pods that do not fit on the node. // Currently conflicts on Port.HostPort values, matching node's labels and exceeding node's capacity are handled. -func (kl *Kubelet) handleNotFittingPods(pods []api.Pod) { +func (kl *Kubelet) handleNotFittingPods(pods []*api.Pod) { fitting, notFitting := checkHostPortConflicts(pods) for _, pod := range notFitting { - kl.recorder.Eventf(&pod, "hostPortConflict", "Cannot start the pod due to host port conflict.") - kl.statusManager.SetPodStatus(&pod, api.PodStatus{ + kl.recorder.Eventf(pod, "hostPortConflict", "Cannot start the pod due to host port conflict.") + kl.statusManager.SetPodStatus(pod, api.PodStatus{ Phase: api.PodFailed, Message: "Pod cannot be started due to host port conflict"}) } fitting, notFitting = kl.checkNodeSelectorMatching(fitting) for _, pod := range notFitting { - kl.recorder.Eventf(&pod, "nodeSelectorMismatching", "Cannot start the pod due to node selector mismatch.") - kl.statusManager.SetPodStatus(&pod, api.PodStatus{ + kl.recorder.Eventf(pod, "nodeSelectorMismatching", "Cannot start the pod due to node selector mismatch.") + kl.statusManager.SetPodStatus(pod, api.PodStatus{ Phase: api.PodFailed, Message: "Pod cannot be started due to node selector mismatch"}) } fitting, notFitting = kl.checkCapacityExceeded(fitting) for _, pod := range notFitting { - 
kl.recorder.Eventf(&pod, "capacityExceeded", "Cannot start the pod due to exceeded capacity.") - kl.statusManager.SetPodStatus(&pod, api.PodStatus{ + kl.recorder.Eventf(pod, "capacityExceeded", "Cannot start the pod due to exceeded capacity.") + kl.statusManager.SetPodStatus(pod, api.PodStatus{ Phase: api.PodFailed, Message: "Pod cannot be started due to exceeded capacity"}) } @@ -1607,7 +1629,11 @@ func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) { unsyncedPod := false podSyncTypes := make(map[types.UID]metrics.SyncPodType) select { - case u := <-updates: + case u, ok := <-updates: + if !ok { + glog.Errorf("Update channel is closed. Exiting the sync loop.") + return + } kl.podManager.UpdatePods(u, podSyncTypes) unsyncedPod = true case <-time.After(kl.resyncInterval): @@ -1625,7 +1651,6 @@ func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) { unsyncedPod = false } } - pods, mirrorPods := kl.podManager.GetPodsAndMirrorMap() if err := handler.SyncPods(pods, podSyncTypes, mirrorPods, start); err != nil { glog.Errorf("Couldn't sync containers: %v", err) @@ -1634,7 +1659,7 @@ func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) { } // Returns Docker version for this Kubelet. -func (kl *Kubelet) GetDockerVersion() ([]uint, error) { +func (kl *Kubelet) GetDockerVersion() (docker.APIVersion, error) { if kl.dockerClient == nil { return nil, fmt.Errorf("no Docker client") } @@ -1712,7 +1737,7 @@ func (kl *Kubelet) GetHostIP() (net.IP, error) { // GetPods returns all pods bound to the kubelet and their spec, and the mirror // pods. 
-func (kl *Kubelet) GetPods() []api.Pod { +func (kl *Kubelet) GetPods() []*api.Pod { return kl.podManager.GetPods() } diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 7a56906a333..5fc8e740410 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -113,6 +113,9 @@ func newTestKubelet(t *testing.T) *TestKubelet { }, fakeRecorder) kubelet.containerManager.Puller = &dockertools.FakeDockerPuller{} + kubelet.prober = NewProber(nil, kubelet.readinessManager, kubelet.containerRefManager, kubelet.recorder) + kubelet.handlerRunner = NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, kubelet.containerManager) + return &TestKubelet{kubelet, fakeDocker, mockCadvisor, fakeKubeClient, waitGroup, fakeMirrorClient} } @@ -459,7 +462,7 @@ func TestSyncPodsDoesNothing(t *testing.T) { waitGroup := testKubelet.waitGroup container := api.Container{Name: "bar"} - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -482,7 +485,7 @@ func TestSyncPodsDoesNothing(t *testing.T) { }, { // pod infra container - Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_0"}, + Names: []string{"/k8s_POD." 
+ strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"}, ID: "9876", }, } @@ -501,7 +504,7 @@ func TestSyncPodsDoesNothing(t *testing.T) { kubelet.podManager.SetPods(pods) waitGroup.Add(1) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -527,7 +530,7 @@ func TestSyncPodsWithTerminationLog(t *testing.T) { TerminationMessagePath: "/dev/somepath", } fakeDocker.ContainerList = []docker.APIContainers{} - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -543,7 +546,7 @@ func TestSyncPodsWithTerminationLog(t *testing.T) { } kubelet.podManager.SetPods(pods) waitGroup.Add(1) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -586,7 +589,7 @@ func TestSyncPodsCreatesNetAndContainer(t *testing.T) { waitGroup := testKubelet.waitGroup kubelet.containerManager.PodInfraContainerImage = "custom_image_name" fakeDocker.ContainerList = []docker.APIContainers{} - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -602,7 +605,7 @@ func TestSyncPodsCreatesNetAndContainer(t *testing.T) { } kubelet.podManager.SetPods(pods) waitGroup.Add(1) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -649,7 +652,7 @@ func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) { puller.HasImages = []string{} kubelet.containerManager.PodInfraContainerImage = "custom_image_name" fakeDocker.ContainerList = []docker.APIContainers{} - pods := []api.Pod{ + pods := []*api.Pod{ { 
ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -665,7 +668,7 @@ func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) { } waitGroup.Add(1) kubelet.podManager.SetPods(pods) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -702,7 +705,7 @@ func TestSyncPodsWithPodInfraCreatesContainer(t *testing.T) { kubelet := testKubelet.kubelet fakeDocker := testKubelet.fakeDocker waitGroup := testKubelet.waitGroup - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -719,7 +722,7 @@ func TestSyncPodsWithPodInfraCreatesContainer(t *testing.T) { fakeDocker.ContainerList = []docker.APIContainers{ { // pod infra container - Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_0"}, + Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"}, ID: "9876", }, } @@ -732,7 +735,7 @@ func TestSyncPodsWithPodInfraCreatesContainer(t *testing.T) { } waitGroup.Add(1) kubelet.podManager.SetPods(pods) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -765,7 +768,8 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) { waitGroup := testKubelet.waitGroup fakeHttp := fakeHTTP{} kubelet.httpClient = &fakeHttp - pods := []api.Pod{ + kubelet.handlerRunner = NewHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager) + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -793,7 +797,7 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) { fakeDocker.ContainerList = []docker.APIContainers{ { // pod infra 
container - Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_0"}, + Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"}, ID: "9876", }, } @@ -806,7 +810,7 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) { } waitGroup.Add(1) kubelet.podManager.SetPods(pods) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -841,7 +845,7 @@ func TestSyncPodsDeletesWithNoPodInfraContainer(t *testing.T) { fakeDocker := testKubelet.fakeDocker waitGroup := testKubelet.waitGroup - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -880,7 +884,7 @@ func TestSyncPodsDeletesWithNoPodInfraContainer(t *testing.T) { }, { // format is // k8s___ - Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo2_new_87654321_0"}, + Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo2_new_87654321_0"}, ID: "8765", }, } @@ -904,7 +908,7 @@ func TestSyncPodsDeletesWithNoPodInfraContainer(t *testing.T) { waitGroup.Add(2) kubelet.podManager.SetPods(pods) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -966,7 +970,7 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) { ID: "9876", }, } - if err := kubelet.SyncPods([]api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now()); err != nil { + if err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]*api.Pod{}, time.Now()); err != nil { t.Errorf("unexpected error: %v", err) } // Validate nothing happened. 
@@ -974,7 +978,7 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) { fakeDocker.ClearCalls() ready = true - if err := kubelet.SyncPods([]api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now()); err != nil { + if err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]*api.Pod{}, time.Now()); err != nil { t.Errorf("unexpected error: %v", err) } verifyCalls(t, fakeDocker, []string{"list", "stop", "stop", "inspect_container", "inspect_container"}) @@ -1013,7 +1017,7 @@ func TestSyncPodsDeletes(t *testing.T) { ID: "4567", }, } - err := kubelet.SyncPods([]api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1040,7 +1044,7 @@ func TestSyncPodsDeletesDuplicate(t *testing.T) { fakeDocker := testKubelet.fakeDocker waitGroup := testKubelet.waitGroup - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -1063,7 +1067,7 @@ func TestSyncPodsDeletesDuplicate(t *testing.T) { }, { // pod infra container - Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_bar_new_12345678_2222"}, + Names: []string{"/k8s_POD." 
+ strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_bar_new_12345678_2222"}, ID: "9876", }, { @@ -1092,7 +1096,7 @@ func TestSyncPodsDeletesDuplicate(t *testing.T) { kubelet.podManager.SetPods(pods) waitGroup.Add(1) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1121,7 +1125,7 @@ func TestSyncPodsBadHash(t *testing.T) { fakeDocker := testKubelet.fakeDocker waitGroup := testKubelet.waitGroup - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -1144,7 +1148,7 @@ func TestSyncPodsBadHash(t *testing.T) { }, { // pod infra container - Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_42"}, + Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_42"}, ID: "9876", }, } @@ -1163,7 +1167,7 @@ func TestSyncPodsBadHash(t *testing.T) { kubelet.podManager.SetPods(pods) waitGroup.Add(1) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1192,7 +1196,7 @@ func TestSyncPodsUnhealthy(t *testing.T) { fakeDocker := testKubelet.fakeDocker waitGroup := testKubelet.waitGroup - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -1219,7 +1223,7 @@ func TestSyncPodsUnhealthy(t *testing.T) { }, { // pod infra container - Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_42"}, + Names: []string{"/k8s_POD." 
+ strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_42"}, ID: "9876", }, } @@ -1237,7 +1241,7 @@ func TestSyncPodsUnhealthy(t *testing.T) { } kubelet.podManager.SetPods(pods) waitGroup.Add(1) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1593,7 +1597,7 @@ func (f *fakeContainerCommandRunner) RunInContainer(id string, cmd []string) ([] return []byte{}, f.E } -func (f *fakeContainerCommandRunner) GetDockerServerVersion() ([]uint, error) { +func (f *fakeContainerCommandRunner) GetDockerServerVersion() (docker.APIVersion, error) { return nil, nil } @@ -1690,6 +1694,7 @@ func TestRunHandlerExec(t *testing.T) { kubelet := testKubelet.kubelet fakeDocker := testKubelet.fakeDocker kubelet.runner = &fakeCommandRunner + kubelet.handlerRunner = NewHandlerRunner(&fakeHTTP{}, kubelet.runner, kubelet.containerManager) containerID := "abc1234" podName := "podFoo" @@ -1713,7 +1718,12 @@ func TestRunHandlerExec(t *testing.T) { }, }, } - err := kubelet.runHandler(podName+"_"+podNamespace, "", &container, container.Lifecycle.PostStart) + + pod := api.Pod{} + pod.ObjectMeta.Name = podName + pod.ObjectMeta.Namespace = podNamespace + pod.Spec.Containers = []api.Container{container} + err := kubelet.handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1739,7 +1749,9 @@ func TestRunHandlerHttp(t *testing.T) { testKubelet := newTestKubelet(t) kubelet := testKubelet.kubelet kubelet.httpClient = &fakeHttp + kubelet.handlerRunner = NewHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager) + containerID := "abc1234" podName := "podFoo" podNamespace := "nsFoo" containerName := "containerFoo" @@ -1756,7 +1768,12 @@ func TestRunHandlerHttp(t *testing.T) { }, }, } - err := 
kubelet.runHandler(podName+"_"+podNamespace, "", &container, container.Lifecycle.PostStart) + pod := api.Pod{} + pod.ObjectMeta.Name = podName + pod.ObjectMeta.Namespace = podNamespace + pod.Spec.Containers = []api.Container{container} + err := kubelet.handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) + if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1765,35 +1782,28 @@ func TestRunHandlerHttp(t *testing.T) { } } -func TestNewHandler(t *testing.T) { +func TestRunHandlerNil(t *testing.T) { testKubelet := newTestKubelet(t) kubelet := testKubelet.kubelet - handler := &api.Handler{ - HTTPGet: &api.HTTPGetAction{ - Host: "foo", - Port: util.IntOrString{IntVal: 8080, Kind: util.IntstrInt}, - Path: "bar", + + containerID := "abc1234" + podName := "podFoo" + podNamespace := "nsFoo" + containerName := "containerFoo" + + container := api.Container{ + Name: containerName, + Lifecycle: &api.Lifecycle{ + PostStart: &api.Handler{}, }, } - actionHandler := kubelet.newActionHandler(handler) - if actionHandler == nil { - t.Error("unexpected nil action handler.") - } - - handler = &api.Handler{ - Exec: &api.ExecAction{ - Command: []string{"ls", "-l"}, - }, - } - actionHandler = kubelet.newActionHandler(handler) - if actionHandler == nil { - t.Error("unexpected nil action handler.") - } - - handler = &api.Handler{} - actionHandler = kubelet.newActionHandler(handler) - if actionHandler != nil { - t.Errorf("unexpected non-nil action handler: %v", actionHandler) + pod := api.Pod{} + pod.ObjectMeta.Name = podName + pod.ObjectMeta.Namespace = podNamespace + pod.Spec.Containers = []api.Container{container} + err := kubelet.handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) + if err == nil { + t.Errorf("expect error, but got nil") } } @@ -1807,8 +1817,9 @@ func TestSyncPodEventHandlerFails(t *testing.T) { kubelet.httpClient = &fakeHTTP{ err: fmt.Errorf("test error"), } + kubelet.handlerRunner = 
NewHandlerRunner(kubelet.httpClient, &fakeContainerCommandRunner{}, kubelet.containerManager) - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -1836,7 +1847,7 @@ func TestSyncPodEventHandlerFails(t *testing.T) { fakeDocker.ContainerList = []docker.APIContainers{ { // pod infra container - Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_42"}, + Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_42"}, ID: "9876", }, } @@ -1849,7 +1860,7 @@ func TestSyncPodEventHandlerFails(t *testing.T) { } kubelet.podManager.SetPods(pods) waitGroup.Add(1) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1892,7 +1903,7 @@ func TestSyncPodsWithPullPolicy(t *testing.T) { kubelet.containerManager.PodInfraContainerImage = "custom_image_name" fakeDocker.ContainerList = []docker.APIContainers{} - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -1912,7 +1923,7 @@ func TestSyncPodsWithPullPolicy(t *testing.T) { } kubelet.podManager.SetPods(pods) waitGroup.Add(1) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -3005,7 +3016,7 @@ func TestPortForward(t *testing.T) { // Tests that identify the host port conflicts are detected correctly. 
func TestGetHostPortConflicts(t *testing.T) { - pods := []api.Pod{ + pods := []*api.Pod{ {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}}, {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}}, {Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 82}}}}}}, @@ -3018,11 +3029,11 @@ func TestGetHostPortConflicts(t *testing.T) { } // The new pod should cause conflict and be reported. - expected := api.Pod{ + expected := &api.Pod{ Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 81}}}}}, } pods = append(pods, expected) - if _, actual := checkHostPortConflicts(pods); !reflect.DeepEqual(actual, []api.Pod{expected}) { + if _, actual := checkHostPortConflicts(pods); !reflect.DeepEqual(actual, []*api.Pod{expected}) { t.Errorf("expected %#v, Got %#v", expected, actual) } } @@ -3034,7 +3045,7 @@ func TestHandlePortConflicts(t *testing.T) { testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) spec := api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}} - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "123456789", @@ -3056,7 +3067,7 @@ func TestHandlePortConflicts(t *testing.T) { pods[1].CreationTimestamp = util.NewTime(time.Now()) pods[0].CreationTimestamp = util.NewTime(time.Now().Add(1 * time.Second)) // The newer pod should be rejected. - conflictedPodName := kubecontainer.GetPodFullName(&pods[0]) + conflictedPodName := kubecontainer.GetPodFullName(pods[0]) kl.handleNotFittingPods(pods) // Check pod status stored in the status map. 
@@ -3087,7 +3098,7 @@ func TestHandleNodeSelector(t *testing.T) { {ObjectMeta: api.ObjectMeta{Name: "testnode", Labels: map[string]string{"key": "B"}}}, }} testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "123456789", @@ -3106,7 +3117,7 @@ func TestHandleNodeSelector(t *testing.T) { }, } // The first pod should be rejected. - notfittingPodName := kubecontainer.GetPodFullName(&pods[0]) + notfittingPodName := kubecontainer.GetPodFullName(pods[0]) kl.handleNotFittingPods(pods) // Check pod status stored in the status map. @@ -3140,7 +3151,7 @@ func TestHandleMemExceeded(t *testing.T) { "memory": resource.MustParse("90"), }, }}}} - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "123456789", @@ -3162,7 +3173,7 @@ func TestHandleMemExceeded(t *testing.T) { pods[1].CreationTimestamp = util.NewTime(time.Now()) pods[0].CreationTimestamp = util.NewTime(time.Now().Add(1 * time.Second)) // The newer pod should be rejected. - notfittingPodName := kubecontainer.GetPodFullName(&pods[0]) + notfittingPodName := kubecontainer.GetPodFullName(pods[0]) kl.handleNotFittingPods(pods) // Check pod status stored in the status map. @@ -3191,7 +3202,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) { testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) kl := testKubelet.kubelet - pods := []api.Pod{ + pods := []*api.Pod{ {ObjectMeta: api.ObjectMeta{Name: "pod1"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}}, {ObjectMeta: api.ObjectMeta{Name: "pod2"}, Spec: api.PodSpec{Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}}, } @@ -3201,7 +3212,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) { t.Fatalf("expected to have status cached for %q: %v", "pod2", err) } // Sync with empty pods so that the entry in status map will be removed. 
- kl.SyncPods([]api.Pod{}, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + kl.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if _, err := kl.GetPodStatus(kubecontainer.BuildPodFullName("pod2", "")); err == nil { t.Fatalf("expected to not have status cached for %q: %v", "pod2", err) } @@ -3483,7 +3494,7 @@ func TestCreateMirrorPod(t *testing.T) { testKubelet := newTestKubelet(t) kl := testKubelet.kubelet manager := testKubelet.fakeMirrorClient - pod := api.Pod{ + pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "12345678", Name: "bar", @@ -3493,13 +3504,13 @@ func TestCreateMirrorPod(t *testing.T) { }, }, } - pods := []api.Pod{pod} + pods := []*api.Pod{pod} kl.podManager.SetPods(pods) - err := kl.syncPod(&pod, nil, container.Pod{}) + err := kl.syncPod(pod, nil, container.Pod{}) if err != nil { t.Errorf("unexpected error: %v", err) } - podFullName := kubecontainer.GetPodFullName(&pod) + podFullName := kubecontainer.GetPodFullName(pod) if !manager.HasPod(podFullName) { t.Errorf("expected mirror pod %q to be created", podFullName) } @@ -3513,7 +3524,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) { testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) kl := testKubelet.kubelet manager := testKubelet.fakeMirrorClient - pod := api.Pod{ + pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "12345678", Name: "foo", @@ -3529,7 +3540,7 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) { }, } // Mirror pod has an outdated spec. 
- mirrorPod := api.Pod{ + mirrorPod := &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "11111111", Name: "foo", @@ -3546,13 +3557,13 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) { }, } - pods := []api.Pod{pod, mirrorPod} + pods := []*api.Pod{pod, mirrorPod} kl.podManager.SetPods(pods) - err := kl.syncPod(&pod, &mirrorPod, container.Pod{}) + err := kl.syncPod(pod, mirrorPod, container.Pod{}) if err != nil { t.Errorf("unexpected error: %v", err) } - name := kubecontainer.GetPodFullName(&pod) + name := kubecontainer.GetPodFullName(pod) creates, deletes := manager.GetCounts(name) if creates != 0 || deletes != 1 { t.Errorf("expected 0 creation and 1 deletion of %q, got %d, %d", name, creates, deletes) @@ -3564,7 +3575,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) { testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) kl := testKubelet.kubelet manager := testKubelet.fakeMirrorClient - orphanPods := []api.Pod{ + orphanPods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -3600,7 +3611,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) { t.Errorf("expected zero mirror pods, got %v", manager.GetPods()) } for _, pod := range orphanPods { - name := kubecontainer.GetPodFullName(&pod) + name := kubecontainer.GetPodFullName(pod) creates, deletes := manager.GetCounts(name) if creates != 0 || deletes != 1 { t.Errorf("expected 0 creation and one deletion of %q, got %d, %d", name, creates, deletes) @@ -3611,7 +3622,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) { func TestGetContainerInfoForMirrorPods(t *testing.T) { // pods contain one static and one mirror pod with the same name but // different UIDs. 
- pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "1234", @@ -3685,7 +3696,7 @@ func TestDoNotCacheStatusForStaticPods(t *testing.T) { kubelet := testKubelet.kubelet waitGroup := testKubelet.waitGroup - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -3704,12 +3715,12 @@ func TestDoNotCacheStatusForStaticPods(t *testing.T) { } kubelet.podManager.SetPods(pods) waitGroup.Add(1) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("unexpected error: %v", err) } waitGroup.Wait() - podFullName := kubecontainer.GetPodFullName(&pods[0]) + podFullName := kubecontainer.GetPodFullName(pods[0]) status, ok := kubelet.statusManager.GetPodStatus(podFullName) if ok { t.Errorf("unexpected status %#v found for static pod %q", status, podFullName) @@ -3739,7 +3750,7 @@ func TestHostNetworkAllowed(t *testing.T) { HostNetwork: true, }, } - kubelet.podManager.SetPods([]api.Pod{*pod}) + kubelet.podManager.SetPods([]*api.Pod{pod}) err := kubelet.syncPod(pod, nil, container.Pod{}) if err != nil { t.Errorf("expected pod infra creation to succeed: %v", err) @@ -3786,7 +3797,7 @@ func TestSyncPodsWithRestartPolicy(t *testing.T) { {Name: "succeeded"}, {Name: "failed"}, } - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -3802,7 +3813,7 @@ func TestSyncPodsWithRestartPolicy(t *testing.T) { runningAPIContainers := []docker.APIContainers{ { // pod infra container - Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_0"}, + Names: []string{"/k8s_POD." 
+ strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"}, ID: "9876", }, } @@ -3910,7 +3921,7 @@ func TestSyncPodsWithRestartPolicy(t *testing.T) { kubelet.podManager.SetPods(pods) waitGroup.Add(1) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("%d: unexpected error: %v", i, err) } @@ -4019,7 +4030,7 @@ func TestGetPodStatusWithLastTermination(t *testing.T) { fakeDocker.ExitedContainerList = exitedAPIContainers fakeDocker.ContainerMap = containerMap fakeDocker.ClearCalls() - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", @@ -4035,20 +4046,20 @@ func TestGetPodStatusWithLastTermination(t *testing.T) { fakeDocker.ContainerList = []docker.APIContainers{ { // pod infra container - Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(&pods[0]), 16) + "_foo_new_12345678_0"}, + Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"}, ID: "9876", }, } kubelet.podManager.SetPods(pods) waitGroup.Add(1) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]api.Pod{}, time.Now()) + err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) if err != nil { t.Errorf("%d: unexpected error: %v", i, err) } waitGroup.Wait() // Check if we can retrieve the pod status from GetPodStatus(). 
- podName := kubecontainer.GetPodFullName(&pods[0]) + podName := kubecontainer.GetPodFullName(pods[0]) status, err := kubelet.GetPodStatus(podName) if err != nil { t.Fatalf("unable to retrieve pod status for pod %q: %#v.", podName, err) @@ -4084,7 +4095,7 @@ func TestGetPodCreationFailureReason(t *testing.T) { "create": fmt.Errorf("%s", failureReason), } fakeDocker.ContainerList = []docker.APIContainers{} - pod := api.Pod{ + pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "12345678", Name: "bar", @@ -4096,13 +4107,13 @@ func TestGetPodCreationFailureReason(t *testing.T) { }, }, } - pods := []api.Pod{pod} + pods := []*api.Pod{pod} kubelet.podManager.SetPods(pods) - _, err := kubelet.runContainer(&pod, &pod.Spec.Containers[0], make(map[string]volume.Volume), "", "") + _, err := kubelet.runContainer(pod, &pod.Spec.Containers[0], make(map[string]volume.Volume), "", "") if err == nil { t.Errorf("expected error, found nil") } - status, err := kubelet.GetPodStatus(kubecontainer.GetPodFullName(&pod)) + status, err := kubelet.GetPodStatus(kubecontainer.GetPodFullName(pod)) if err != nil { t.Errorf("unexpected error %v", err) } diff --git a/pkg/kubelet/metrics/instrumented_docker.go b/pkg/kubelet/metrics/instrumented_docker.go index 8f464e2c706..6cb233a1570 100644 --- a/pkg/kubelet/metrics/instrumented_docker.go +++ b/pkg/kubelet/metrics/instrumented_docker.go @@ -36,114 +36,114 @@ func NewInstrumentedDockerInterface(dockerClient dockertools.DockerInterface) do } } -func (self instrumentedDockerInterface) ListContainers(options docker.ListContainersOptions) ([]docker.APIContainers, error) { +func (in instrumentedDockerInterface) ListContainers(options docker.ListContainersOptions) ([]docker.APIContainers, error) { start := time.Now() defer func() { DockerOperationsLatency.WithLabelValues("list_containers").Observe(SinceInMicroseconds(start)) }() - return self.client.ListContainers(options) + return in.client.ListContainers(options) } -func (self 
instrumentedDockerInterface) InspectContainer(id string) (*docker.Container, error) { +func (in instrumentedDockerInterface) InspectContainer(id string) (*docker.Container, error) { start := time.Now() defer func() { DockerOperationsLatency.WithLabelValues("inspect_container").Observe(SinceInMicroseconds(start)) }() - return self.client.InspectContainer(id) + return in.client.InspectContainer(id) } -func (self instrumentedDockerInterface) CreateContainer(opts docker.CreateContainerOptions) (*docker.Container, error) { +func (in instrumentedDockerInterface) CreateContainer(opts docker.CreateContainerOptions) (*docker.Container, error) { start := time.Now() defer func() { DockerOperationsLatency.WithLabelValues("create_container").Observe(SinceInMicroseconds(start)) }() - return self.client.CreateContainer(opts) + return in.client.CreateContainer(opts) } -func (self instrumentedDockerInterface) StartContainer(id string, hostConfig *docker.HostConfig) error { +func (in instrumentedDockerInterface) StartContainer(id string, hostConfig *docker.HostConfig) error { start := time.Now() defer func() { DockerOperationsLatency.WithLabelValues("start_container").Observe(SinceInMicroseconds(start)) }() - return self.client.StartContainer(id, hostConfig) + return in.client.StartContainer(id, hostConfig) } -func (self instrumentedDockerInterface) StopContainer(id string, timeout uint) error { +func (in instrumentedDockerInterface) StopContainer(id string, timeout uint) error { start := time.Now() defer func() { DockerOperationsLatency.WithLabelValues("stop_container").Observe(SinceInMicroseconds(start)) }() - return self.client.StopContainer(id, timeout) + return in.client.StopContainer(id, timeout) } -func (self instrumentedDockerInterface) RemoveContainer(opts docker.RemoveContainerOptions) error { +func (in instrumentedDockerInterface) RemoveContainer(opts docker.RemoveContainerOptions) error { start := time.Now() defer func() { 
DockerOperationsLatency.WithLabelValues("remove_container").Observe(SinceInMicroseconds(start)) }() - return self.client.RemoveContainer(opts) + return in.client.RemoveContainer(opts) } -func (self instrumentedDockerInterface) InspectImage(image string) (*docker.Image, error) { +func (in instrumentedDockerInterface) InspectImage(image string) (*docker.Image, error) { start := time.Now() defer func() { DockerOperationsLatency.WithLabelValues("inspect_image").Observe(SinceInMicroseconds(start)) }() - return self.client.InspectImage(image) + return in.client.InspectImage(image) } -func (self instrumentedDockerInterface) ListImages(opts docker.ListImagesOptions) ([]docker.APIImages, error) { +func (in instrumentedDockerInterface) ListImages(opts docker.ListImagesOptions) ([]docker.APIImages, error) { start := time.Now() defer func() { DockerOperationsLatency.WithLabelValues("list_images").Observe(SinceInMicroseconds(start)) }() - return self.client.ListImages(opts) + return in.client.ListImages(opts) } -func (self instrumentedDockerInterface) PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) error { +func (in instrumentedDockerInterface) PullImage(opts docker.PullImageOptions, auth docker.AuthConfiguration) error { start := time.Now() defer func() { DockerOperationsLatency.WithLabelValues("pull_image").Observe(SinceInMicroseconds(start)) }() - return self.client.PullImage(opts, auth) + return in.client.PullImage(opts, auth) } -func (self instrumentedDockerInterface) RemoveImage(image string) error { +func (in instrumentedDockerInterface) RemoveImage(image string) error { start := time.Now() defer func() { DockerOperationsLatency.WithLabelValues("remove_image").Observe(SinceInMicroseconds(start)) }() - return self.client.RemoveImage(image) + return in.client.RemoveImage(image) } -func (self instrumentedDockerInterface) Logs(opts docker.LogsOptions) error { +func (in instrumentedDockerInterface) Logs(opts docker.LogsOptions) error { start := 
time.Now() defer func() { DockerOperationsLatency.WithLabelValues("logs").Observe(SinceInMicroseconds(start)) }() - return self.client.Logs(opts) + return in.client.Logs(opts) } -func (self instrumentedDockerInterface) Version() (*docker.Env, error) { +func (in instrumentedDockerInterface) Version() (*docker.Env, error) { start := time.Now() defer func() { DockerOperationsLatency.WithLabelValues("version").Observe(SinceInMicroseconds(start)) }() - return self.client.Version() + return in.client.Version() } -func (self instrumentedDockerInterface) CreateExec(opts docker.CreateExecOptions) (*docker.Exec, error) { +func (in instrumentedDockerInterface) CreateExec(opts docker.CreateExecOptions) (*docker.Exec, error) { start := time.Now() defer func() { DockerOperationsLatency.WithLabelValues("create_exec").Observe(SinceInMicroseconds(start)) }() - return self.client.CreateExec(opts) + return in.client.CreateExec(opts) } -func (self instrumentedDockerInterface) StartExec(startExec string, opts docker.StartExecOptions) error { +func (in instrumentedDockerInterface) StartExec(startExec string, opts docker.StartExecOptions) error { start := time.Now() defer func() { DockerOperationsLatency.WithLabelValues("start_exec").Observe(SinceInMicroseconds(start)) }() - return self.client.StartExec(startExec, opts) + return in.client.StartExec(startExec, opts) } diff --git a/pkg/kubelet/metrics/metrics.go b/pkg/kubelet/metrics/metrics.go index c4debd45b7e..6452642fd2c 100644 --- a/pkg/kubelet/metrics/metrics.go +++ b/pkg/kubelet/metrics/metrics.go @@ -90,8 +90,8 @@ const ( SyncPodSync ) -func (self SyncPodType) String() string { - switch self { +func (sp SyncPodType) String() string { + switch sp { case SyncPodCreate: return "create" case SyncPodUpdate: @@ -132,13 +132,13 @@ var ( nil, nil) ) -func (self *podAndContainerCollector) Describe(ch chan<- *prometheus.Desc) { +func (pc *podAndContainerCollector) Describe(ch chan<- *prometheus.Desc) { ch <- runningPodCountDesc ch <- 
runningContainerCountDesc } -func (self *podAndContainerCollector) Collect(ch chan<- prometheus.Metric) { - runningPods, err := self.containerCache.GetPods() +func (pc *podAndContainerCollector) Collect(ch chan<- prometheus.Metric) { + runningPods, err := pc.containerCache.GetPods() if err != nil { glog.Warning("Failed to get running container information while collecting metrics: %v", err) return diff --git a/pkg/kubelet/mirror_client.go b/pkg/kubelet/mirror_client.go index 378eede7263..caa12de3007 100644 --- a/pkg/kubelet/mirror_client.go +++ b/pkg/kubelet/mirror_client.go @@ -28,7 +28,7 @@ import ( // Mirror client is used to create/delete a mirror pod. type mirrorClient interface { - CreateMirrorPod(api.Pod) error + CreateMirrorPod(*api.Pod) error DeleteMirrorPod(string) error } @@ -43,19 +43,19 @@ func newBasicMirrorClient(apiserverClient client.Interface) *basicMirrorClient { } // Creates a mirror pod. -func (self *basicMirrorClient) CreateMirrorPod(pod api.Pod) error { - if self.apiserverClient == nil { +func (mc *basicMirrorClient) CreateMirrorPod(pod *api.Pod) error { + if mc.apiserverClient == nil { return nil } pod.Annotations[ConfigMirrorAnnotationKey] = MirrorType - _, err := self.apiserverClient.Pods(NamespaceDefault).Create(&pod) + _, err := mc.apiserverClient.Pods(NamespaceDefault).Create(pod) return err } // Deletes a mirror pod. 
-func (self *basicMirrorClient) DeleteMirrorPod(podFullName string) error { - if self.apiserverClient == nil { +func (mc *basicMirrorClient) DeleteMirrorPod(podFullName string) error { + if mc.apiserverClient == nil { return nil } name, namespace, err := kubecontainer.ParsePodFullName(podFullName) @@ -64,7 +64,7 @@ func (self *basicMirrorClient) DeleteMirrorPod(podFullName string) error { return err } glog.V(4).Infof("Deleting a mirror pod %q", podFullName) - if err := self.apiserverClient.Pods(namespace).Delete(name); err != nil { + if err := mc.apiserverClient.Pods(namespace).Delete(name); err != nil { glog.Errorf("Failed deleting a mirror pod %q: %v", podFullName, err) } return nil diff --git a/pkg/kubelet/mirror_client_test.go b/pkg/kubelet/mirror_client_test.go index e0a549b8539..78fd607eb8f 100644 --- a/pkg/kubelet/mirror_client_test.go +++ b/pkg/kubelet/mirror_client_test.go @@ -34,20 +34,20 @@ type fakeMirrorClient struct { deleteCounts map[string]int } -func (self *fakeMirrorClient) CreateMirrorPod(pod api.Pod) error { - self.mirrorPodLock.Lock() - defer self.mirrorPodLock.Unlock() - podFullName := kubecontainer.GetPodFullName(&pod) - self.mirrorPods.Insert(podFullName) - self.createCounts[podFullName]++ +func (fmc *fakeMirrorClient) CreateMirrorPod(pod *api.Pod) error { + fmc.mirrorPodLock.Lock() + defer fmc.mirrorPodLock.Unlock() + podFullName := kubecontainer.GetPodFullName(pod) + fmc.mirrorPods.Insert(podFullName) + fmc.createCounts[podFullName]++ return nil } -func (self *fakeMirrorClient) DeleteMirrorPod(podFullName string) error { - self.mirrorPodLock.Lock() - defer self.mirrorPodLock.Unlock() - self.mirrorPods.Delete(podFullName) - self.deleteCounts[podFullName]++ +func (fmc *fakeMirrorClient) DeleteMirrorPod(podFullName string) error { + fmc.mirrorPodLock.Lock() + defer fmc.mirrorPodLock.Unlock() + fmc.mirrorPods.Delete(podFullName) + fmc.deleteCounts[podFullName]++ return nil } @@ -59,28 +59,28 @@ func newFakeMirrorClient() *fakeMirrorClient { 
return &m } -func (self *fakeMirrorClient) HasPod(podFullName string) bool { - self.mirrorPodLock.RLock() - defer self.mirrorPodLock.RUnlock() - return self.mirrorPods.Has(podFullName) +func (fmc *fakeMirrorClient) HasPod(podFullName string) bool { + fmc.mirrorPodLock.RLock() + defer fmc.mirrorPodLock.RUnlock() + return fmc.mirrorPods.Has(podFullName) } -func (self *fakeMirrorClient) NumOfPods() int { - self.mirrorPodLock.RLock() - defer self.mirrorPodLock.RUnlock() - return self.mirrorPods.Len() +func (fmc *fakeMirrorClient) NumOfPods() int { + fmc.mirrorPodLock.RLock() + defer fmc.mirrorPodLock.RUnlock() + return fmc.mirrorPods.Len() } -func (self *fakeMirrorClient) GetPods() []string { - self.mirrorPodLock.RLock() - defer self.mirrorPodLock.RUnlock() - return self.mirrorPods.List() +func (fmc *fakeMirrorClient) GetPods() []string { + fmc.mirrorPodLock.RLock() + defer fmc.mirrorPodLock.RUnlock() + return fmc.mirrorPods.List() } -func (self *fakeMirrorClient) GetCounts(podFullName string) (int, int) { - self.mirrorPodLock.RLock() - defer self.mirrorPodLock.RUnlock() - return self.createCounts[podFullName], self.deleteCounts[podFullName] +func (fmc *fakeMirrorClient) GetCounts(podFullName string) (int, int) { + fmc.mirrorPodLock.RLock() + defer fmc.mirrorPodLock.RUnlock() + return fmc.createCounts[podFullName], fmc.deleteCounts[podFullName] } func TestParsePodFullName(t *testing.T) { diff --git a/pkg/kubelet/pod_manager.go b/pkg/kubelet/pod_manager.go index 1f261e87641..1b0b1e6dc1c 100644 --- a/pkg/kubelet/pod_manager.go +++ b/pkg/kubelet/pod_manager.go @@ -43,11 +43,11 @@ import ( // also be removed. 
type podManager interface { - GetPods() []api.Pod + GetPods() []*api.Pod GetPodByFullName(podFullName string) (*api.Pod, bool) GetPodByName(namespace, name string) (*api.Pod, bool) - GetPodsAndMirrorMap() ([]api.Pod, map[string]api.Pod) - SetPods(pods []api.Pod) + GetPodsAndMirrorMap() ([]*api.Pod, map[string]*api.Pod) + SetPods(pods []*api.Pod) UpdatePods(u PodUpdate, podSyncTypes map[types.UID]metrics.SyncPodType) DeleteOrphanedMirrorPods() TranslatePodUID(uid types.UID) types.UID @@ -72,34 +72,34 @@ type basicPodManager struct { mirrorPodByFullName map[string]*api.Pod // A mirror pod client to create/delete mirror pods. - mirrorClient mirrorClient + mirrorClient } func newBasicPodManager(apiserverClient client.Interface) *basicPodManager { pm := &basicPodManager{} pm.mirrorClient = newBasicMirrorClient(apiserverClient) - pm.SetPods([]api.Pod{}) + pm.SetPods(nil) return pm } // Update the internal pods with those provided by the update. -func (self *basicPodManager) UpdatePods(u PodUpdate, podSyncTypes map[types.UID]metrics.SyncPodType) { - self.lock.Lock() - defer self.lock.Unlock() +func (pm *basicPodManager) UpdatePods(u PodUpdate, podSyncTypes map[types.UID]metrics.SyncPodType) { + pm.lock.Lock() + defer pm.lock.Unlock() switch u.Op { case SET: glog.V(3).Infof("SET: Containers changed") // Store the new pods. Don't worry about filtering host ports since those // pods will never be looked up. existingPods := make(map[types.UID]struct{}) - for uid := range self.podByUID { + for uid := range pm.podByUID { existingPods[uid] = struct{}{} } // Update the internal pods. 
- self.setPods(u.Pods) + pm.setPods(u.Pods) - for uid := range self.podByUID { + for uid := range pm.podByUID { if _, ok := existingPods[uid]; !ok { podSyncTypes[uid] = metrics.SyncPodCreate } @@ -112,14 +112,14 @@ func (self *basicPodManager) UpdatePods(u PodUpdate, podSyncTypes map[types.UID] for i := range u.Pods { podSyncTypes[u.Pods[i].UID] = metrics.SyncPodUpdate } - allPods := applyUpdates(u.Pods, self.getAllPods()) - self.setPods(allPods) + allPods := applyUpdates(u.Pods, pm.getAllPods()) + pm.setPods(allPods) default: panic("syncLoop does not support incremental changes") } // Mark all remaining pods as sync. - for uid := range self.podByUID { + for uid := range pm.podByUID { if _, ok := podSyncTypes[uid]; !ok { podSyncTypes[uid] = metrics.SyncPodSync } @@ -127,51 +127,48 @@ func (self *basicPodManager) UpdatePods(u PodUpdate, podSyncTypes map[types.UID] } // Set the internal pods based on the new pods. -func (self *basicPodManager) SetPods(newPods []api.Pod) { - self.lock.Lock() - defer self.lock.Unlock() - self.setPods(newPods) +func (pm *basicPodManager) SetPods(newPods []*api.Pod) { + pm.lock.Lock() + defer pm.lock.Unlock() + pm.setPods(newPods) } -func (self *basicPodManager) setPods(newPods []api.Pod) { +func (pm *basicPodManager) setPods(newPods []*api.Pod) { podByUID := make(map[types.UID]*api.Pod) mirrorPodByUID := make(map[types.UID]*api.Pod) podByFullName := make(map[string]*api.Pod) mirrorPodByFullName := make(map[string]*api.Pod) - for i := range newPods { - pod := newPods[i] - podFullName := kubecontainer.GetPodFullName(&pod) - if isMirrorPod(&pod) { - mirrorPodByUID[pod.UID] = &pod - mirrorPodByFullName[podFullName] = &pod + for _, pod := range newPods { + podFullName := kubecontainer.GetPodFullName(pod) + if isMirrorPod(pod) { + mirrorPodByUID[pod.UID] = pod + mirrorPodByFullName[podFullName] = pod } else { - podByUID[pod.UID] = &pod - podByFullName[podFullName] = &pod + podByUID[pod.UID] = pod + podByFullName[podFullName] = pod } } - 
self.podByUID = podByUID - self.podByFullName = podByFullName - self.mirrorPodByUID = mirrorPodByUID - self.mirrorPodByFullName = mirrorPodByFullName + pm.podByUID = podByUID + pm.podByFullName = podByFullName + pm.mirrorPodByUID = mirrorPodByUID + pm.mirrorPodByFullName = mirrorPodByFullName } -func applyUpdates(changed []api.Pod, current []api.Pod) []api.Pod { - updated := []api.Pod{} +func applyUpdates(changed []*api.Pod, current []*api.Pod) []*api.Pod { + updated := []*api.Pod{} m := map[types.UID]*api.Pod{} - for i := range changed { - pod := &changed[i] + for _, pod := range changed { m[pod.UID] = pod } - for i := range current { - pod := ¤t[i] + for _, pod := range current { if m[pod.UID] != nil { - updated = append(updated, *m[pod.UID]) + updated = append(updated, m[pod.UID]) glog.V(4).Infof("pod with UID: %q has a new spec %+v", pod.UID, *m[pod.UID]) } else { - updated = append(updated, *pod) + updated = append(updated, pod) glog.V(4).Infof("pod with UID: %q stay with the same spec %+v", pod.UID, *pod) } } @@ -179,82 +176,72 @@ func applyUpdates(changed []api.Pod, current []api.Pod) []api.Pod { return updated } -func (self *basicPodManager) convertMapToPods(UIDMap map[types.UID]*api.Pod) []api.Pod { - pods := make([]api.Pod, 0, len(UIDMap)) - for _, pod := range UIDMap { - pods = append(pods, *pod) - } - return pods -} - // GetPods returns the regular pods bound to the kubelet and their spec. -func (self *basicPodManager) GetPods() []api.Pod { - self.lock.RLock() - defer self.lock.RUnlock() - return self.convertMapToPods(self.podByUID) +func (pm *basicPodManager) GetPods() []*api.Pod { + pm.lock.RLock() + defer pm.lock.RUnlock() + return podsMapToPods(pm.podByUID) } // Returns all pods (including mirror pods). -func (self *basicPodManager) getAllPods() []api.Pod { - return append(self.convertMapToPods(self.podByUID), self.convertMapToPods(self.mirrorPodByUID)...) 
+func (pm *basicPodManager) getAllPods() []*api.Pod { + return append(podsMapToPods(pm.podByUID), podsMapToPods(pm.mirrorPodByUID)...) } // GetPodsAndMirrorMap returns the a copy of the regular pods and the mirror // pods indexed by full name. -func (self *basicPodManager) GetPodsAndMirrorMap() ([]api.Pod, map[string]api.Pod) { - self.lock.RLock() - defer self.lock.RUnlock() - mirrorPods := make(map[string]api.Pod) - for key, pod := range self.mirrorPodByFullName { - mirrorPods[key] = *pod +func (pm *basicPodManager) GetPodsAndMirrorMap() ([]*api.Pod, map[string]*api.Pod) { + pm.lock.RLock() + defer pm.lock.RUnlock() + mirrorPods := make(map[string]*api.Pod) + for key, pod := range pm.mirrorPodByFullName { + mirrorPods[key] = pod } - return self.convertMapToPods(self.podByUID), mirrorPods + return podsMapToPods(pm.podByUID), mirrorPods } // GetPodByName provides the (non-mirror) pod that matches namespace and name, // as well as whether the pod was found. -func (self *basicPodManager) GetPodByName(namespace, name string) (*api.Pod, bool) { +func (pm *basicPodManager) GetPodByName(namespace, name string) (*api.Pod, bool) { podFullName := kubecontainer.BuildPodFullName(name, namespace) - return self.GetPodByFullName(podFullName) + return pm.GetPodByFullName(podFullName) } // GetPodByName returns the (non-mirror) pod that matches full name, as well as // whether the pod was found. -func (self *basicPodManager) GetPodByFullName(podFullName string) (*api.Pod, bool) { - self.lock.RLock() - defer self.lock.RUnlock() - if pod, ok := self.podByFullName[podFullName]; ok { - return pod, true - } - return nil, false +func (pm *basicPodManager) GetPodByFullName(podFullName string) (*api.Pod, bool) { + pm.lock.RLock() + defer pm.lock.RUnlock() + pod, ok := pm.podByFullName[podFullName] + return pod, ok } // If the UID belongs to a mirror pod, maps it to the UID of its static pod. // Otherwise, return the original UID. 
All public-facing functions should // perform this translation for UIDs because user may provide a mirror pod UID, // which is not recognized by internal Kubelet functions. -func (self *basicPodManager) TranslatePodUID(uid types.UID) types.UID { +func (pm *basicPodManager) TranslatePodUID(uid types.UID) types.UID { if uid == "" { return uid } - self.lock.RLock() - defer self.lock.RUnlock() - if mirrorPod, ok := self.mirrorPodByUID[uid]; ok { + pm.lock.RLock() + defer pm.lock.RUnlock() + if mirrorPod, ok := pm.mirrorPodByUID[uid]; ok { podFullName := kubecontainer.GetPodFullName(mirrorPod) - if pod, ok := self.podByFullName[podFullName]; ok { + if pod, ok := pm.podByFullName[podFullName]; ok { return pod.UID } } return uid } -func (self *basicPodManager) getOrphanedMirrorPodNames() []string { - self.lock.RLock() - defer self.lock.RUnlock() +func (pm *basicPodManager) getOrphanedMirrorPodNames() []string { + pm.lock.RLock() + defer pm.lock.RUnlock() var podFullNames []string - for podFullName := range self.mirrorPodByFullName { - if _, ok := self.podByFullName[podFullName]; !ok { + for podFullName := range pm.mirrorPodByFullName { + if _, ok := pm.podByFullName[podFullName]; !ok { podFullNames = append(podFullNames, podFullName) } } @@ -264,28 +251,26 @@ func (self *basicPodManager) getOrphanedMirrorPodNames() []string { // Delete all mirror pods which do not have associated static pods. This method // sends deletion requets to the API server, but does NOT modify the internal // pod storage in basicPodManager. -func (self *basicPodManager) DeleteOrphanedMirrorPods() { - podFullNames := self.getOrphanedMirrorPodNames() +func (pm *basicPodManager) DeleteOrphanedMirrorPods() { + podFullNames := pm.getOrphanedMirrorPodNames() for _, podFullName := range podFullNames { - self.mirrorClient.DeleteMirrorPod(podFullName) + pm.mirrorClient.DeleteMirrorPod(podFullName) } } -// Creates a mirror pod for the given pod. 
-func (self *basicPodManager) CreateMirrorPod(pod api.Pod) error { - return self.mirrorClient.CreateMirrorPod(pod) -} - -// Delete a mirror pod by name. -func (self *basicPodManager) DeleteMirrorPod(podFullName string) error { - return self.mirrorClient.DeleteMirrorPod(podFullName) -} - // Returns true if mirrorPod is a correct representation of pod; false otherwise. -func (self *basicPodManager) IsMirrorPodOf(mirrorPod, pod *api.Pod) bool { +func (pm *basicPodManager) IsMirrorPodOf(mirrorPod, pod *api.Pod) bool { // Check name and namespace first. if pod.Name != mirrorPod.Name || pod.Namespace != mirrorPod.Namespace { return false } return api.Semantic.DeepEqual(&pod.Spec, &mirrorPod.Spec) } + +func podsMapToPods(UIDMap map[types.UID]*api.Pod) []*api.Pod { + pods := make([]*api.Pod, 0, len(UIDMap)) + for _, pod := range UIDMap { + pods = append(pods, pod) + } + return pods +} diff --git a/pkg/kubelet/pod_manager_test.go b/pkg/kubelet/pod_manager_test.go index 1e4c88b64b6..90fbdee50be 100644 --- a/pkg/kubelet/pod_manager_test.go +++ b/pkg/kubelet/pod_manager_test.go @@ -34,7 +34,7 @@ func newFakePodManager() (*basicPodManager, *fakeMirrorClient) { // Tests that pods/maps are properly set after the pod update, and the basic // methods work correctly. func TestGetSetPods(t *testing.T) { - mirrorPod := api.Pod{ + mirrorPod := &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "987654321", Name: "bar", @@ -45,7 +45,7 @@ func TestGetSetPods(t *testing.T) { }, }, } - staticPod := api.Pod{ + staticPod := &api.Pod{ ObjectMeta: api.ObjectMeta{ UID: "123456789", Name: "bar", @@ -54,7 +54,7 @@ func TestGetSetPods(t *testing.T) { }, } - expectedPods := []api.Pod{ + expectedPods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "999999999", @@ -98,11 +98,11 @@ func TestGetSetPods(t *testing.T) { // Test the basic Get methods. 
actualPod, ok := podManager.GetPodByFullName("bar_default") - if !ok || !reflect.DeepEqual(actualPod, &staticPod) { + if !ok || !reflect.DeepEqual(actualPod, staticPod) { t.Errorf("unable to get pod by full name; expected: %#v, got: %#v", staticPod, actualPod) } actualPod, ok = podManager.GetPodByName("default", "bar") - if !ok || !reflect.DeepEqual(actualPod, &staticPod) { + if !ok || !reflect.DeepEqual(actualPod, staticPod) { t.Errorf("unable to get pod by name; expected: %#v, got: %#v", staticPod, actualPod) } diff --git a/pkg/kubelet/probe.go b/pkg/kubelet/probe.go index 4607e0c136e..cc8ac163dbb 100644 --- a/pkg/kubelet/probe.go +++ b/pkg/kubelet/probe.go @@ -22,7 +22,9 @@ import ( "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record" kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container" + "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools" "github.com/GoogleCloudPlatform/kubernetes/pkg/probe" execprobe "github.com/GoogleCloudPlatform/kubernetes/pkg/probe/exec" httprobe "github.com/GoogleCloudPlatform/kubernetes/pkg/probe/http" @@ -35,50 +37,83 @@ import ( const maxProbeRetries = 3 -// probeContainer probes the liveness/readiness of the given container. +// Prober helps to check the liveness/readiness of a container. +// TODO(yifan): Replace the concrete type with interface later. +type Prober struct { + exec execprobe.ExecProber + http httprobe.HTTPProber + tcp tcprobe.TCPProber + runner dockertools.ContainerCommandRunner + + readinessManager *kubecontainer.ReadinessManager + refManager *kubecontainer.RefManager + recorder record.EventRecorder +} + +// NewProber creates a Prober, it takes a command runner and +// several container info managers. 
+func NewProber( + runner dockertools.ContainerCommandRunner, + readinessManager *kubecontainer.ReadinessManager, + refManager *kubecontainer.RefManager, + recorder record.EventRecorder) *Prober { + + return &Prober{ + exec: execprobe.New(), + http: httprobe.New(), + tcp: tcprobe.New(), + runner: runner, + + readinessManager: readinessManager, + refManager: refManager, + recorder: recorder, + } +} + +// Probe checks the liveness/readiness of the given container. // If the container's liveness probe is unsuccessful, set readiness to false. // If liveness is successful, do a readiness check and set readiness accordingly. -func (kl *Kubelet) probeContainer(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) { +func (pb *Prober) Probe(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) { // Probe liveness. - live, err := kl.probeContainerLiveness(pod, status, container, createdAt) + live, err := pb.probeLiveness(pod, status, container, containerID, createdAt) if err != nil { glog.V(1).Infof("Liveness probe errored: %v", err) - kl.readinessManager.SetReadiness(containerID, false) + pb.readinessManager.SetReadiness(containerID, false) return probe.Unknown, err } if live != probe.Success { glog.V(1).Infof("Liveness probe unsuccessful: %v", live) - kl.readinessManager.SetReadiness(containerID, false) + pb.readinessManager.SetReadiness(containerID, false) return live, nil } // Probe readiness. 
- ready, err := kl.probeContainerReadiness(pod, status, container, createdAt) + ready, err := pb.probeReadiness(pod, status, container, containerID, createdAt) if err == nil && ready == probe.Success { glog.V(3).Infof("Readiness probe successful: %v", ready) - kl.readinessManager.SetReadiness(containerID, true) + pb.readinessManager.SetReadiness(containerID, true) return probe.Success, nil } glog.V(1).Infof("Readiness probe failed/errored: %v, %v", ready, err) - kl.readinessManager.SetReadiness(containerID, false) + pb.readinessManager.SetReadiness(containerID, false) - ref, ok := kl.containerRefManager.GetRef(containerID) + ref, ok := pb.refManager.GetRef(containerID) if !ok { glog.Warningf("No ref for pod '%v' - '%v'", containerID, container.Name) return probe.Success, err } if ready != probe.Success { - kl.recorder.Eventf(ref, "unhealthy", "Readiness Probe Failed %v - %v", containerID, container.Name) + pb.recorder.Eventf(ref, "unhealthy", "Readiness Probe Failed %v - %v", containerID, container.Name) } return probe.Success, nil } -// probeContainerLiveness probes the liveness of a container. +// probeLiveness probes the liveness of a container. // If the initalDelay since container creation on liveness probe has not passed the probe will return probe.Success. 
-func (kl *Kubelet) probeContainerLiveness(pod *api.Pod, status api.PodStatus, container api.Container, createdAt int64) (probe.Result, error) { +func (pb *Prober) probeLiveness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) { p := container.LivenessProbe if p == nil { return probe.Success, nil @@ -86,12 +121,12 @@ func (kl *Kubelet) probeContainerLiveness(pod *api.Pod, status api.PodStatus, co if time.Now().Unix()-createdAt < p.InitialDelaySeconds { return probe.Success, nil } - return kl.runProbeWithRetries(p, pod, status, container, maxProbeRetries) + return pb.runProbeWithRetries(p, pod, status, container, containerID, maxProbeRetries) } -// probeContainerReadiness probes the readiness of a container. +// probeReadiness probes the readiness of a container. // If the initial delay on the readiness probe has not passed the probe will return probe.Failure. -func (kl *Kubelet) probeContainerReadiness(pod *api.Pod, status api.PodStatus, container api.Container, createdAt int64) (probe.Result, error) { +func (pb *Prober) probeReadiness(pod *api.Pod, status api.PodStatus, container api.Container, containerID string, createdAt int64) (probe.Result, error) { p := container.ReadinessProbe if p == nil { return probe.Success, nil @@ -99,16 +134,16 @@ func (kl *Kubelet) probeContainerReadiness(pod *api.Pod, status api.PodStatus, c if time.Now().Unix()-createdAt < p.InitialDelaySeconds { return probe.Failure, nil } - return kl.runProbeWithRetries(p, pod, status, container, maxProbeRetries) + return pb.runProbeWithRetries(p, pod, status, container, containerID, maxProbeRetries) } // runProbeWithRetries tries to probe the container in a finite loop, it returns the last result // if it never succeeds. 
-func (kl *Kubelet) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, retires int) (probe.Result, error) { +func (pb *Prober) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID string, retires int) (probe.Result, error) { var err error var result probe.Result for i := 0; i < retires; i++ { - result, err = kl.runProbe(p, pod, status, container) + result, err = pb.runProbe(p, pod, status, container, containerID) if result == probe.Success { return probe.Success, nil } @@ -116,11 +151,11 @@ func (kl *Kubelet) runProbeWithRetries(p *api.Probe, pod *api.Pod, status api.Po return result, err } -func (kl *Kubelet) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container) (probe.Result, error) { +func (pb *Prober) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, container api.Container, containerID string) (probe.Result, error) { timeout := time.Duration(p.TimeoutSeconds) * time.Second if p.Exec != nil { glog.V(4).Infof("Exec-Probe Pod: %v, Container: %v", pod, container) - return kl.prober.exec.Probe(kl.newExecInContainer(pod, container)) + return pb.exec.Probe(pb.newExecInContainer(pod, container, containerID)) } if p.HTTPGet != nil { port, err := extractPort(p.HTTPGet.Port, container) @@ -129,7 +164,7 @@ func (kl *Kubelet) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, co } host, port, path := extractGetParams(p.HTTPGet, status, port) glog.V(4).Infof("HTTP-Probe Host: %v, Port: %v, Path: %v", host, port, path) - return kl.prober.http.Probe(host, port, path, timeout) + return pb.http.Probe(host, port, path, timeout) } if p.TCPSocket != nil { port, err := extractPort(p.TCPSocket.Port, container) @@ -137,7 +172,7 @@ func (kl *Kubelet) runProbe(p *api.Probe, pod *api.Pod, status api.PodStatus, co return probe.Unknown, err } glog.V(4).Infof("TCP-Probe PodIP: %v, Port: %v, Timeout: %v", status.PodIP, port, timeout) - return 
kl.prober.tcp.Probe(status.PodIP, port, timeout) + return pb.tcp.Probe(status.PodIP, port, timeout) } glog.Warningf("Failed to find probe builder for %s %+v", container.Name, container.LivenessProbe) return probe.Unknown, nil @@ -193,11 +228,9 @@ type execInContainer struct { run func() ([]byte, error) } -func (kl *Kubelet) newExecInContainer(pod *api.Pod, container api.Container) exec.Cmd { - uid := pod.UID - podFullName := kubecontainer.GetPodFullName(pod) +func (p *Prober) newExecInContainer(pod *api.Pod, container api.Container, containerID string) exec.Cmd { return execInContainer{func() ([]byte, error) { - return kl.RunInContainer(podFullName, uid, container.Name, container.LivenessProbe.Exec.Command) + return p.runner.RunInContainer(containerID, container.LivenessProbe.Exec.Command) }} } @@ -208,17 +241,3 @@ func (eic execInContainer) CombinedOutput() ([]byte, error) { func (eic execInContainer) SetDir(dir string) { //unimplemented } - -func newProbeHolder() probeHolder { - return probeHolder{ - exec: execprobe.New(), - http: httprobe.New(), - tcp: tcprobe.New(), - } -} - -type probeHolder struct { - exec execprobe.ExecProber - http httprobe.HTTPProber - tcp tcprobe.TCPProber -} diff --git a/pkg/kubelet/probe_test.go b/pkg/kubelet/probe_test.go index a8f3692f32e..b87878cb391 100644 --- a/pkg/kubelet/probe_test.go +++ b/pkg/kubelet/probe_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record" kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container" "github.com/GoogleCloudPlatform/kubernetes/pkg/probe" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" @@ -146,16 +147,21 @@ func (p fakeExecProber) Probe(_ exec.Cmd) (probe.Result, error) { } func makeTestKubelet(result probe.Result, err error) *Kubelet { - return &Kubelet{ - readinessManager: kubecontainer.NewReadinessManager(), - prober: probeHolder{ - exec: fakeExecProber{ - result: 
result, - err: err, - }, - }, + kl := &Kubelet{ + readinessManager: kubecontainer.NewReadinessManager(), containerRefManager: kubecontainer.NewRefManager(), } + + kl.prober = &Prober{ + exec: fakeExecProber{ + result: result, + err: err, + }, + readinessManager: kl.readinessManager, + refManager: kl.containerRefManager, + recorder: &record.FakeRecorder{}, + } + return kl } // TestProbeContainer tests the functionality of probeContainer. @@ -402,7 +408,7 @@ func TestProbeContainer(t *testing.T) { } else { kl = makeTestKubelet(test.expectedResult, nil) } - result, err := kl.probeContainer(&api.Pod{}, api.PodStatus{}, test.testContainer, dc.ID, dc.Created) + result, err := kl.prober.Probe(&api.Pod{}, api.PodStatus{}, test.testContainer, dc.ID, dc.Created) if test.expectError && err == nil { t.Error("Expected error but did no error was returned.") } diff --git a/pkg/kubelet/runonce.go b/pkg/kubelet/runonce.go index 67f53de042e..f040c5faa62 100644 --- a/pkg/kubelet/runonce.go +++ b/pkg/kubelet/runonce.go @@ -51,16 +51,15 @@ func (kl *Kubelet) RunOnce(updates <-chan PodUpdate) ([]RunPodResult, error) { } // runOnce runs a given set of pods and returns their status. -func (kl *Kubelet) runOnce(pods []api.Pod, retryDelay time.Duration) (results []RunPodResult, err error) { +func (kl *Kubelet) runOnce(pods []*api.Pod, retryDelay time.Duration) (results []RunPodResult, err error) { kl.handleNotFittingPods(pods) ch := make(chan RunPodResult) - for i := range pods { - pod := pods[i] // Make a copy - go func() { + for _, pod := range pods { + go func(pod *api.Pod) { err := kl.runPod(pod, retryDelay) - ch <- RunPodResult{&pod, err} - }() + ch <- RunPodResult{pod, err} + }(pod) } glog.Infof("waiting for %d pods", len(pods)) @@ -84,7 +83,7 @@ func (kl *Kubelet) runOnce(pods []api.Pod, retryDelay time.Duration) (results [] } // runPod runs a single pod and wait until all containers are running. 
-func (kl *Kubelet) runPod(pod api.Pod, retryDelay time.Duration) error { +func (kl *Kubelet) runPod(pod *api.Pod, retryDelay time.Duration) error { delay := retryDelay retry := 0 for { @@ -104,7 +103,7 @@ func (kl *Kubelet) runPod(pod api.Pod, retryDelay time.Duration) error { glog.Infof("pod %q containers not running: syncing", pod.Name) // We don't create mirror pods in this mode; pass a dummy boolean value // to sycnPod. - if err = kl.syncPod(&pod, nil, p); err != nil { + if err = kl.syncPod(pod, nil, p); err != nil { return fmt.Errorf("error syncing pod: %v", err) } if retry >= RunOnceMaxRetries { @@ -119,7 +118,7 @@ func (kl *Kubelet) runPod(pod api.Pod, retryDelay time.Duration) error { } // isPodRunning returns true if all containers of a manifest are running. -func (kl *Kubelet) isPodRunning(pod api.Pod, runningPod container.Pod) (bool, error) { +func (kl *Kubelet) isPodRunning(pod *api.Pod, runningPod container.Pod) (bool, error) { for _, container := range pod.Spec.Containers { c := runningPod.FindContainerByName(container.Name) if c == nil { diff --git a/pkg/kubelet/runonce_test.go b/pkg/kubelet/runonce_test.go index 75ddab52b69..264ff3b3365 100644 --- a/pkg/kubelet/runonce_test.go +++ b/pkg/kubelet/runonce_test.go @@ -148,7 +148,7 @@ func TestRunOnce(t *testing.T) { kb.containerManager = dockertools.NewDockerManager(kb.dockerClient, kb.recorder, dockertools.PodInfraContainerImage, 0, 0) kb.containerManager.Puller = &dockertools.FakeDockerPuller{} - pods := []api.Pod{ + pods := []*api.Pod{ { ObjectMeta: api.ObjectMeta{ UID: "12345678", diff --git a/pkg/kubelet/server.go b/pkg/kubelet/server.go index c051e9f414f..4b98f0232cb 100644 --- a/pkg/kubelet/server.go +++ b/pkg/kubelet/server.go @@ -41,6 +41,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/util/flushwriter" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream/spdy" + "github.com/fsouza/go-dockerclient" 
"github.com/golang/glog" cadvisorApi "github.com/google/cadvisor/info/v1" "github.com/prometheus/client_golang/prometheus" @@ -100,9 +101,9 @@ func ListenAndServeKubeletReadOnlyServer(host HostInterface, address net.IP, por type HostInterface interface { GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) GetRootInfo(req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) - GetDockerVersion() ([]uint, error) + GetDockerVersion() (docker.APIVersion, error) GetCachedMachineInfo() (*cadvisorApi.MachineInfo, error) - GetPods() []api.Pod + GetPods() []*api.Pod GetPodByName(namespace, name string) (*api.Pod, bool) GetPodStatus(name string) (api.PodStatus, error) RunInContainer(name string, uid types.UID, container string, cmd []string) ([]byte, error) @@ -159,31 +160,18 @@ func (s *Server) error(w http.ResponseWriter, err error) { http.Error(w, msg, http.StatusInternalServerError) } -func isValidDockerVersion(ver []uint) (bool, string) { - minAllowedVersion := []uint{1, 15} - for i := 0; i < len(ver) && i < len(minAllowedVersion); i++ { - if ver[i] != minAllowedVersion[i] { - if ver[i] < minAllowedVersion[i] { - versions := make([]string, len(ver)) - for i, v := range ver { - versions[i] = fmt.Sprint(v) - } - return false, strings.Join(versions, ".") - } - return true, "" - } - } - return true, "" +func isValidDockerVersion(ver docker.APIVersion) bool { + minAllowedVersion, _ := docker.NewAPIVersion("1.15") + return ver.GreaterThanOrEqualTo(minAllowedVersion) } func (s *Server) dockerHealthCheck(req *http.Request) error { - versions, err := s.host.GetDockerVersion() + version, err := s.host.GetDockerVersion() if err != nil { return errors.New("unknown Docker version") } - valid, version := isValidDockerVersion(versions) - if !valid { - return fmt.Errorf("Docker version is too old (%v)", version) + if !isValidDockerVersion(version) { + return 
fmt.Errorf("Docker version is too old (%v)", version.String()) } return nil } @@ -279,8 +267,9 @@ func (s *Server) handleContainerLogs(w http.ResponseWriter, req *http.Request) { // handlePods returns a list of pod bound to the Kubelet and their spec func (s *Server) handlePods(w http.ResponseWriter, req *http.Request) { pods := s.host.GetPods() - podList := &api.PodList{ - Items: pods, + podList := new(api.PodList) + for _, pod := range pods { + podList.Items = append(podList.Items, *pod) } data, err := latest.Codec.Encode(podList) if err != nil { diff --git a/pkg/kubelet/server_test.go b/pkg/kubelet/server_test.go index 935cd6df408..712ffdd9b2b 100644 --- a/pkg/kubelet/server_test.go +++ b/pkg/kubelet/server_test.go @@ -35,6 +35,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/types" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream/spdy" + "github.com/fsouza/go-dockerclient" cadvisorApi "github.com/google/cadvisor/info/v1" ) @@ -44,10 +45,10 @@ type fakeKubelet struct { containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) rootInfoFunc func(query *cadvisorApi.ContainerInfoRequest) (*cadvisorApi.ContainerInfo, error) machineInfoFunc func() (*cadvisorApi.MachineInfo, error) - podsFunc func() []api.Pod + podsFunc func() []*api.Pod logFunc func(w http.ResponseWriter, req *http.Request) runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) - dockerVersionFunc func() ([]uint, error) + dockerVersionFunc func() (docker.APIVersion, error) execFunc func(pod string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool) error portForwardFunc func(name string, uid types.UID, port uint16, stream io.ReadWriteCloser) error containerLogsFunc func(podFullName, containerName, tail string, follow bool, 
stdout, stderr io.Writer) error @@ -71,7 +72,7 @@ func (fk *fakeKubelet) GetRootInfo(req *cadvisorApi.ContainerInfoRequest) (*cadv return fk.rootInfoFunc(req) } -func (fk *fakeKubelet) GetDockerVersion() ([]uint, error) { +func (fk *fakeKubelet) GetDockerVersion() (docker.APIVersion, error) { return fk.dockerVersionFunc() } @@ -79,7 +80,7 @@ func (fk *fakeKubelet) GetCachedMachineInfo() (*cadvisorApi.MachineInfo, error) return fk.machineInfoFunc() } -func (fk *fakeKubelet) GetPods() []api.Pod { +func (fk *fakeKubelet) GetPods() []*api.Pod { return fk.podsFunc() } @@ -449,8 +450,8 @@ func TestPodsInfo(t *testing.T) { func TestHealthCheck(t *testing.T) { fw := newServerTest() - fw.fakeKubelet.dockerVersionFunc = func() ([]uint, error) { - return []uint{1, 15}, nil + fw.fakeKubelet.dockerVersionFunc = func() (docker.APIVersion, error) { + return docker.NewAPIVersion("1.15") } fw.fakeKubelet.hostnameFunc = func() string { return "127.0.0.1" @@ -489,8 +490,8 @@ func TestHealthCheck(t *testing.T) { } //Test with old docker version - fw.fakeKubelet.dockerVersionFunc = func() ([]uint, error) { - return []uint{1, 1}, nil + fw.fakeKubelet.dockerVersionFunc = func() (docker.APIVersion, error) { + return docker.NewAPIVersion("1.1") } resp, err = http.Get(fw.testHTTPServer.URL + "/healthz") diff --git a/pkg/kubelet/types.go b/pkg/kubelet/types.go index 5d3aa9cc25f..cb0fdba2b69 100644 --- a/pkg/kubelet/types.go +++ b/pkg/kubelet/types.go @@ -64,7 +64,7 @@ const ( // functionally similar, this helps our unit tests properly check that the correct PodUpdates // are generated. 
type PodUpdate struct { - Pods []api.Pod + Pods []*api.Pod Op PodOperation Source string } diff --git a/pkg/master/master.go b/pkg/master/master.go index a310bd17668..0b4c582f978 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -43,6 +43,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/master/ports" + "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/componentstatus" controlleretcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/controller/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/endpoint" endpointsetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/endpoint/etcd" @@ -416,6 +417,8 @@ func (m *Master) init(c *Config) { "persistentVolumes/status": persistentVolumeStatusStorage, "persistentVolumeClaims": persistentVolumeClaimStorage, "persistentVolumeClaims/status": persistentVolumeClaimStatusStorage, + + "componentStatuses": componentstatus.NewStorage(func() map[string]apiserver.Server { return m.getServersToValidate(c) }), } apiVersions := []string{"v1beta1", "v1beta2"} diff --git a/pkg/namespace/namespace_controller.go b/pkg/namespace/namespace_controller.go index cb3bff21b39..cf8412d5649 100644 --- a/pkg/namespace/namespace_controller.go +++ b/pkg/namespace/namespace_controller.go @@ -62,14 +62,24 @@ func NewNamespaceManager(kubeClient client.Interface, resyncPeriod time.Duration ) return &NamespaceManager{ - controller: controller, - StopEverything: make(chan struct{}), + controller: controller, } } // Run begins observing the system. It starts a goroutine and returns immediately. 
func (nm *NamespaceManager) Run() { - go nm.controller.Run(nm.StopEverything) + if nm.StopEverything == nil { + nm.StopEverything = make(chan struct{}) + go nm.controller.Run(nm.StopEverything) + } +} + +// Stop gracefully shutsdown this controller +func (nm *NamespaceManager) Stop() { + if nm.StopEverything != nil { + close(nm.StopEverything) + nm.StopEverything = nil + } } // finalized returns true if the spec.finalizers is empty list diff --git a/pkg/namespace/namespace_controller_test.go b/pkg/namespace/namespace_controller_test.go index cb133309701..53fb4daa02b 100644 --- a/pkg/namespace/namespace_controller_test.go +++ b/pkg/namespace/namespace_controller_test.go @@ -18,8 +18,10 @@ package namespace import ( "testing" + "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" ) @@ -131,3 +133,25 @@ func TestSyncNamespaceThatIsActive(t *testing.T) { t.Errorf("Expected no action from controller, but got: %v", actionSet) } } + +func TestRunStop(t *testing.T) { + o := testclient.NewObjects(api.Scheme) + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + nsMgr := NewNamespaceManager(client, 1*time.Second) + + if nsMgr.StopEverything != nil { + t.Errorf("Non-running manager should not have a stop channel. Got %v", nsMgr.StopEverything) + } + + nsMgr.Run() + + if nsMgr.StopEverything == nil { + t.Errorf("Running manager should have a stop channel. Got nil") + } + + nsMgr.Stop() + + if nsMgr.StopEverything != nil { + t.Errorf("Non-running manager should not have a stop channel. 
Got %v", nsMgr.StopEverything) + } +} diff --git a/www/partials/groups.html b/pkg/registry/componentstatus/doc.go similarity index 54% rename from www/partials/groups.html rename to pkg/registry/componentstatus/doc.go index 8b86e978ea1..ac9f8457305 100644 --- a/www/partials/groups.html +++ b/pkg/registry/componentstatus/doc.go @@ -1,5 +1,5 @@ - -
-
{{selector}}
-
X
-
-
+*/ + +// Package componentstatus provides interfaces and implementation for retrieving cluster +// component status. +package componentstatus diff --git a/pkg/registry/componentstatus/rest.go b/pkg/registry/componentstatus/rest.go new file mode 100644 index 00000000000..79c988fad67 --- /dev/null +++ b/pkg/registry/componentstatus/rest.go @@ -0,0 +1,110 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package componentstatus + +import ( + "fmt" + "net/http" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver" + "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" + "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" + "github.com/GoogleCloudPlatform/kubernetes/pkg/probe" + "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" +) + +type REST struct { + GetServersToValidate func() map[string]apiserver.Server + rt http.RoundTripper +} + +// NewStorage returns a new REST. +func NewStorage(serverRetriever func() map[string]apiserver.Server) *REST { + return &REST{ + GetServersToValidate: serverRetriever, + rt: http.DefaultTransport, + } +} + +func (rs *REST) New() runtime.Object { + return &api.ComponentStatus{} +} + +func (rs *REST) NewList() runtime.Object { + return &api.ComponentStatusList{} +} + +// Returns the list of component status. Note that the label and field are both ignored. +// Note that this call doesn't support labels or selectors. 
+func (rs *REST) List(ctx api.Context, label labels.Selector, field fields.Selector) (runtime.Object, error) { + servers := rs.GetServersToValidate() + + // TODO: This should be parallelized. + reply := []api.ComponentStatus{} + for name, server := range servers { + status := rs.getComponentStatus(name, server) + reply = append(reply, *status) + } + return &api.ComponentStatusList{Items: reply}, nil +} + +func (rs *REST) Get(ctx api.Context, name string) (runtime.Object, error) { + servers := rs.GetServersToValidate() + + if server, ok := servers[name]; !ok { + return nil, fmt.Errorf("Component not found: %s", name) + } else { + return rs.getComponentStatus(name, server), nil + } +} + +func ToConditionStatus(s probe.Result) api.ConditionStatus { + switch s { + case probe.Success: + return api.ConditionTrue + case probe.Failure: + return api.ConditionFalse + default: + return api.ConditionUnknown + } +} + +func (rs *REST) getComponentStatus(name string, server apiserver.Server) *api.ComponentStatus { + transport := rs.rt + status, msg, err := server.DoServerCheck(transport) + var errorMsg string + if err != nil { + errorMsg = err.Error() + } else { + errorMsg = "nil" + } + + c := &api.ComponentCondition{ + Type: api.ComponentHealthy, + Status: ToConditionStatus(status), + Message: msg, + Error: errorMsg, + } + + retVal := &api.ComponentStatus{ + Conditions: []api.ComponentCondition{*c}, + } + retVal.Name = name + + return retVal +} diff --git a/pkg/registry/componentstatus/rest_test.go b/pkg/registry/componentstatus/rest_test.go new file mode 100644 index 00000000000..e618b805f45 --- /dev/null +++ b/pkg/registry/componentstatus/rest_test.go @@ -0,0 +1,144 @@ +/* +Copyright 2015 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package componentstatus + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver" + "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" + "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" +) + +type fakeRoundTripper struct { + err error + resp *http.Response + url string +} + +func (f *fakeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + f.url = req.URL.String() + return f.resp, f.err +} + +type testResponse struct { + code int + data string + err error +} + +func NewTestREST(resp testResponse) *REST { + return &REST{ + GetServersToValidate: func() map[string]apiserver.Server { + return map[string]apiserver.Server{ + "test1": {Addr: "testserver1", Port: 8000, Path: "/healthz"}, + } + }, + rt: &fakeRoundTripper{ + err: resp.err, + resp: &http.Response{ + Body: ioutil.NopCloser(bytes.NewBufferString(resp.data)), + StatusCode: resp.code, + }, + }, + } +} + +func createTestStatus(name string, status api.ConditionStatus, msg string, err string) *api.ComponentStatus { + retVal := &api.ComponentStatus{ + Conditions: []api.ComponentCondition{ + {Type: api.ComponentHealthy, Status: status, Message: msg, Error: err}, + }, + } + retVal.Name = name + return retVal +} + +func TestList_NoError(t *testing.T) { + r := NewTestREST(testResponse{code: 200, data: "ok"}) + got, err := r.List(api.NewContext(), labels.Everything(), fields.Everything()) + 
if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + expect := &api.ComponentStatusList{ + Items: []api.ComponentStatus{*(createTestStatus("test1", api.ConditionTrue, "ok", "nil"))}, + } + if e, a := expect, got; !reflect.DeepEqual(e, a) { + t.Errorf("Got unexpected object. Diff: %s", util.ObjectDiff(e, a)) + } +} + +func TestList_FailedCheck(t *testing.T) { + r := NewTestREST(testResponse{code: 500, data: ""}) + got, err := r.List(api.NewContext(), labels.Everything(), fields.Everything()) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + expect := &api.ComponentStatusList{ + Items: []api.ComponentStatus{ + *(createTestStatus("test1", api.ConditionFalse, "", "unhealthy http status code: 500 ()"))}, + } + if e, a := expect, got; !reflect.DeepEqual(e, a) { + t.Errorf("Got unexpected object. Diff: %s", util.ObjectDiff(e, a)) + } +} + +func TestList_UnknownError(t *testing.T) { + r := NewTestREST(testResponse{code: 500, data: "", err: fmt.Errorf("fizzbuzz error")}) + got, err := r.List(api.NewContext(), labels.Everything(), fields.Everything()) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + expect := &api.ComponentStatusList{ + Items: []api.ComponentStatus{ + *(createTestStatus("test1", api.ConditionUnknown, "", "Get http://testserver1:8000/healthz: fizzbuzz error"))}, + } + if e, a := expect, got; !reflect.DeepEqual(e, a) { + t.Errorf("Got unexpected object. Diff: %s", util.ObjectDiff(e, a)) + } +} + +func TestGet_NoError(t *testing.T) { + r := NewTestREST(testResponse{code: 200, data: "ok"}) + got, err := r.Get(api.NewContext(), "test1") + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + expect := createTestStatus("test1", api.ConditionTrue, "ok", "nil") + if e, a := expect, got; !reflect.DeepEqual(e, a) { + t.Errorf("Got unexpected object. 
Diff: %s", util.ObjectDiff(e, a)) + } +} + +func TestGet_BadName(t *testing.T) { + r := NewTestREST(testResponse{code: 200, data: "ok"}) + _, err := r.Get(api.NewContext(), "invalidname") + if err == nil { + t.Fatalf("Expected error, but did not get one") + } + if !strings.Contains(err.Error(), "Component not found: invalidname") { + t.Fatalf("Got unexpected error: %v", err) + } +} diff --git a/pkg/registry/etcd/etcd.go b/pkg/registry/etcd/etcd.go index 08f0f169f5c..5ae6da989f1 100644 --- a/pkg/registry/etcd/etcd.go +++ b/pkg/registry/etcd/etcd.go @@ -271,7 +271,8 @@ func (r *Registry) WatchServices(ctx api.Context, label labels.Selector, field f if err != nil { return nil, err } - return r.Watch(key, version), nil + // TODO: use generic.SelectionPredicate + return r.Watch(key, version, tools.Everything) } if field.Empty() { return r.WatchList(makeServiceListKey(ctx), version, tools.Everything) diff --git a/pkg/registry/generic/etcd/etcd.go b/pkg/registry/generic/etcd/etcd.go index aa98f392fe4..5bb51f54917 100644 --- a/pkg/registry/generic/etcd/etcd.go +++ b/pkg/registry/generic/etcd/etcd.go @@ -429,18 +429,7 @@ func (e *Etcd) WatchPredicate(ctx api.Context, m generic.Matcher, resourceVersio return nil, err } - var watchKey string - if name, ok := m.MatchesSingle(); ok { - key, err := e.KeyFunc(ctx, name) - if err != nil { - return nil, err - } - watchKey = key - } else { - watchKey = e.KeyRootFunc(ctx) - } - - return e.Helper.WatchList(watchKey, version, func(obj runtime.Object) bool { + filterFunc := func(obj runtime.Object) bool { matches, err := m.Matches(obj) if err != nil { glog.Errorf("unable to match watch: %v", err) @@ -453,5 +442,15 @@ func (e *Etcd) WatchPredicate(ctx api.Context, m generic.Matcher, resourceVersio } } return matches - }) + } + + if name, ok := m.MatchesSingle(); ok { + key, err := e.KeyFunc(ctx, name) + if err != nil { + return nil, err + } + return e.Helper.Watch(key, version, filterFunc) + } + + return 
e.Helper.WatchList(e.KeyRootFunc(ctx), version, filterFunc) } diff --git a/pkg/registry/generic/etcd/etcd_test.go b/pkg/registry/generic/etcd/etcd_test.go index 1e586e55637..2725cddd59c 100644 --- a/pkg/registry/generic/etcd/etcd_test.go +++ b/pkg/registry/generic/etcd/etcd_test.go @@ -690,11 +690,16 @@ func TestEtcdWatch(t *testing.T) { for name, m := range table { podA := &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, - Spec: api.PodSpec{Host: "machine"}, + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: api.NamespaceDefault, + ResourceVersion: "1", + }, + Spec: api.PodSpec{Host: "machine"}, } respWithPodA := &etcd.Response{ Node: &etcd.Node{ + Key: "/registry/pods/default/foo", Value: runtime.EncodeOrDie(testapi.Codec(), podA), ModifiedIndex: 1, CreatedIndex: 1, diff --git a/pkg/registry/minion/rest.go b/pkg/registry/minion/rest.go index 1c4c013e484..be64f5d7074 100644 --- a/pkg/registry/minion/rest.go +++ b/pkg/registry/minion/rest.go @@ -24,12 +24,14 @@ import ( "strconv" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/fielderrors" ) @@ -129,12 +131,21 @@ func MatchNode(label labels.Selector, field fields.Selector) generic.Matcher { // ResourceLocation returns a URL to which one can send traffic for the specified node. 
func ResourceLocation(getter ResourceGetter, connection client.ConnectionInfoGetter, ctx api.Context, id string) (*url.URL, http.RoundTripper, error) { - nodeObj, err := getter.Get(ctx, id) + name, portReq, valid := util.SplitPort(id) + if !valid { + return nil, nil, errors.NewBadRequest(fmt.Sprintf("invalid node request %q", id)) + } + + nodeObj, err := getter.Get(ctx, name) if err != nil { return nil, nil, err } node := nodeObj.(*api.Node) - host := node.Name + host := node.Name // TODO: use node's IP, don't expect the name to resolve. + + if portReq != "" { + return &url.URL{Host: net.JoinHostPort(host, portReq)}, nil, nil + } scheme, port, transport, err := connection.GetConnectionInfo(host) if err != nil { diff --git a/pkg/registry/pod/rest.go b/pkg/registry/pod/rest.go index 5f81e9c8436..33738073cd8 100644 --- a/pkg/registry/pod/rest.go +++ b/pkg/registry/pod/rest.go @@ -21,7 +21,6 @@ import ( "net" "net/http" "net/url" - "strings" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" @@ -31,6 +30,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/fielderrors" ) @@ -150,16 +150,11 @@ func getPod(getter ResourceGetter, ctx api.Context, name string) (*api.Pod, erro func ResourceLocation(getter ResourceGetter, ctx api.Context, id string) (*url.URL, http.RoundTripper, error) { // Allow ID as "podname" or "podname:port". If port is not specified, // try to use the first defined port on the pod. 
- parts := strings.Split(id, ":") - if len(parts) > 2 { + name, port, valid := util.SplitPort(id) + if !valid { return nil, nil, errors.NewBadRequest(fmt.Sprintf("invalid pod request %q", id)) } - name := parts[0] - port := "" - if len(parts) == 2 { - // TODO: if port is not a number but a "(container)/(portname)", do a name lookup. - port = parts[1] - } + // TODO: if port is not a number but a "(container)/(portname)", do a name lookup. pod, err := getPod(getter, ctx, name) if err != nil { @@ -205,7 +200,7 @@ func LogLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, ct return nil, nil, errors.NewBadRequest(fmt.Sprintf("a container name must be specified for pod %s", name)) } } - nodeHost := pod.Status.HostIP + nodeHost := pod.Spec.Host if len(nodeHost) == 0 { // If pod has not been assigned a host, return an empty location return nil, nil, nil @@ -245,7 +240,7 @@ func ExecLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, c return nil, nil, errors.NewBadRequest(fmt.Sprintf("a container name must be specified for pod %s", name)) } } - nodeHost := pod.Status.HostIP + nodeHost := pod.Spec.Host if len(nodeHost) == 0 { // If pod has not been assigned a host, return an empty location return nil, nil, fmt.Errorf("pod %s does not have a host assigned", name) @@ -285,7 +280,7 @@ func PortForwardLocation(getter ResourceGetter, connInfo client.ConnectionInfoGe return nil, nil, err } - nodeHost := pod.Status.HostIP + nodeHost := pod.Spec.Host if len(nodeHost) == 0 { // If pod has not been assigned a host, return an empty location return nil, nil, errors.NewBadRequest(fmt.Sprintf("pod %s does not have a host assigned", name)) diff --git a/pkg/registry/registrytest/scheduler.go b/pkg/registry/registrytest/scheduler.go index 2e44ccbaa32..6a463775320 100644 --- a/pkg/registry/registrytest/scheduler.go +++ b/pkg/registry/registrytest/scheduler.go @@ -23,11 +23,11 @@ import ( type Scheduler struct { Err error - Pod api.Pod + Pod *api.Pod 
Machine string } -func (s *Scheduler) Schedule(pod api.Pod, lister scheduler.MinionLister) (string, error) { +func (s *Scheduler) Schedule(pod *api.Pod, lister scheduler.MinionLister) (string, error) { s.Pod = pod return s.Machine, s.Err } diff --git a/pkg/registry/service/rest.go b/pkg/registry/service/rest.go index fb5b1195ae8..30e4d4ba600 100644 --- a/pkg/registry/service/rest.go +++ b/pkg/registry/service/rest.go @@ -23,7 +23,6 @@ import ( "net/http" "net/url" "strconv" - "strings" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" @@ -34,6 +33,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/endpoint" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/minion" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/fielderrors" "github.com/GoogleCloudPlatform/kubernetes/pkg/watch" "github.com/golang/glog" @@ -206,15 +206,10 @@ var _ = rest.Redirector(&REST{}) // ResourceLocation returns a URL to which one can send traffic for the specified service. func (rs *REST) ResourceLocation(ctx api.Context, id string) (*url.URL, http.RoundTripper, error) { // Allow ID as "svcname" or "svcname:port". 
- parts := strings.Split(id, ":") - if len(parts) > 2 { + svcName, portStr, valid := util.SplitPort(id) + if !valid { return nil, nil, errors.NewBadRequest(fmt.Sprintf("invalid service request %q", id)) } - svcName := parts[0] - portStr := "" - if len(parts) == 2 { - portStr = parts[1] - } eps, err := rs.endpoints.GetEndpoints(ctx, svcName) if err != nil { diff --git a/pkg/resourcequota/resource_quota_controller.go b/pkg/resourcequota/resource_quota_controller.go index 93006679b88..8998773bb15 100644 --- a/pkg/resourcequota/resource_quota_controller.go +++ b/pkg/resourcequota/resource_quota_controller.go @@ -23,6 +23,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" + "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/golang/glog" @@ -81,9 +82,10 @@ func (rm *ResourceQuotaManager) synchronize() { // pods that have a restart policy of always are always returned // pods that are in a failed state, but have a restart policy of on failure are always returned // pods that are not in a success state or a failure state are included in quota -func FilterQuotaPods(pods []api.Pod) []api.Pod { - var result []api.Pod - for _, value := range pods { +func FilterQuotaPods(pods []api.Pod) []*api.Pod { + var result []*api.Pod + for i := range pods { + value := &pods[i] // a pod that has a restart policy always no matter its state counts against usage if value.Spec.RestartPolicy == api.RestartPolicyAlways { result = append(result, value) @@ -108,10 +110,13 @@ func FilterQuotaPods(pods []api.Pod) []api.Pod { // syncResourceQuota runs a complete sync of current status func (rm *ResourceQuotaManager) syncResourceQuota(quota api.ResourceQuota) (err error) { + // quota is dirty if any part of spec hard limits differs from the status 
hard limits + dirty := !api.Semantic.DeepEqual(quota.Spec.Hard, quota.Status.Hard) + // dirty tracks if the usage status differs from the previous sync, // if so, we send a new usage with latest status // if this is our first sync, it will be dirty by default, since we need track usage - dirty := quota.Status.Hard == nil || quota.Status.Used == nil + dirty = dirty || (quota.Status.Hard == nil || quota.Status.Used == nil) // Create a usage object that is based on the quota resource version usage := api.ResourceQuota{ @@ -167,14 +172,14 @@ func (rm *ResourceQuotaManager) syncResourceQuota(quota api.ResourceQuota) (err value = resource.NewQuantity(int64(len(filteredPods)), resource.DecimalSI) case api.ResourceMemory: val := int64(0) - for i := range filteredPods { - val = val + PodMemory(&filteredPods[i]).Value() + for _, pod := range filteredPods { + val = val + PodMemory(pod).Value() } value = resource.NewQuantity(int64(val), resource.DecimalSI) case api.ResourceCPU: val := int64(0) - for i := range filteredPods { - val = val + PodCPU(&filteredPods[i]).MilliValue() + for _, pod := range filteredPods { + val = val + PodCPU(pod).MilliValue() } value = resource.NewMilliQuantity(int64(val), resource.DecimalSI) case api.ResourceServices: @@ -195,6 +200,18 @@ func (rm *ResourceQuotaManager) syncResourceQuota(quota api.ResourceQuota) (err return err } value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI) + case api.ResourceSecrets: + items, err := rm.kubeClient.Secrets(usage.Namespace).List(labels.Everything(), fields.Everything()) + if err != nil { + return err + } + value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI) + case api.ResourcePersistentVolumeClaims: + items, err := rm.kubeClient.PersistentVolumeClaims(usage.Namespace).List(labels.Everything(), fields.Everything()) + if err != nil { + return err + } + value = resource.NewQuantity(int64(len(items.Items)), resource.DecimalSI) } // ignore fields we do not understand (assume 
another controller is tracking it) diff --git a/pkg/resourcequota/resource_quota_controller_test.go b/pkg/resourcequota/resource_quota_controller_test.go index 45813dcaa32..8d03ab44c34 100644 --- a/pkg/resourcequota/resource_quota_controller_test.go +++ b/pkg/resourcequota/resource_quota_controller_test.go @@ -179,3 +179,91 @@ func TestSyncResourceQuota(t *testing.T) { } } + +func TestSyncResourceQuotaSpecChange(t *testing.T) { + quota := api.ResourceQuota{ + Spec: api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("4"), + }, + }, + Status: api.ResourceQuotaStatus{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("3"), + }, + Used: api.ResourceList{ + api.ResourceCPU: resource.MustParse("0"), + }, + }, + } + + expectedUsage := api.ResourceQuota{ + Status: api.ResourceQuotaStatus{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("4"), + }, + Used: api.ResourceList{ + api.ResourceCPU: resource.MustParse("0"), + }, + }, + } + + kubeClient := testclient.NewSimpleFake("a) + + resourceQuotaManager := NewResourceQuotaManager(kubeClient) + err := resourceQuotaManager.syncResourceQuota(quota) + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + + usage := kubeClient.Actions[1].Value.(*api.ResourceQuota) + + // ensure hard and used limits are what we expected + for k, v := range expectedUsage.Status.Hard { + actual := usage.Status.Hard[k] + actualValue := actual.String() + expectedValue := v.String() + if expectedValue != actualValue { + t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue) + } + } + for k, v := range expectedUsage.Status.Used { + actual := usage.Status.Used[k] + actualValue := actual.String() + expectedValue := v.String() + if expectedValue != actualValue { + t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue) + } + } + +} + +func TestSyncResourceQuotaNoChange(t *testing.T) { + quota := api.ResourceQuota{ + 
Spec: api.ResourceQuotaSpec{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("4"), + }, + }, + Status: api.ResourceQuotaStatus{ + Hard: api.ResourceList{ + api.ResourceCPU: resource.MustParse("4"), + }, + Used: api.ResourceList{ + api.ResourceCPU: resource.MustParse("0"), + }, + }, + } + + kubeClient := testclient.NewSimpleFake(&api.PodList{}, "a) + + resourceQuotaManager := NewResourceQuotaManager(kubeClient) + err := resourceQuotaManager.syncResourceQuota(quota) + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + + if len(kubeClient.Actions) != 1 && kubeClient.Actions[0].Action != "list-pods" { + t.Errorf("SyncResourceQuota made an unexpected client action when state was not dirty: %v", kubeClient.Actions) + } +} diff --git a/pkg/runtime/helper.go b/pkg/runtime/helper.go index 3d4df08091e..beff579144a 100644 --- a/pkg/runtime/helper.go +++ b/pkg/runtime/helper.go @@ -149,3 +149,25 @@ func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error { } return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), v.Type()) } + +// MultiObjectTyper returns the types of objects across multiple schemes in order. 
+type MultiObjectTyper []ObjectTyper + +func (m MultiObjectTyper) DataVersionAndKind(data []byte) (version, kind string, err error) { + for _, t := range m { + version, kind, err = t.DataVersionAndKind(data) + if err == nil { + return + } + } + return +} +func (m MultiObjectTyper) ObjectVersionAndKind(obj Object) (version, kind string, err error) { + for _, t := range m { + version, kind, err = t.ObjectVersionAndKind(obj) + if err == nil { + return + } + } + return +} diff --git a/pkg/runtime/scheme.go b/pkg/runtime/scheme.go index 0e0ed67070b..b8047c7dc3c 100644 --- a/pkg/runtime/scheme.go +++ b/pkg/runtime/scheme.go @@ -18,9 +18,10 @@ package runtime import ( "fmt" - "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" "net/url" "reflect" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" ) // Scheme defines methods for serializing and deserializing API objects. It @@ -147,8 +148,9 @@ func (self *Scheme) rawExtensionToEmbeddedObject(in *RawExtension, out *Embedded } // runtimeObjectToRawExtensionArray takes a list of objects and encodes them as RawExtension in the output version -// defined by the conversion.Scope. If objects must be encoded to different schema versions you should set them as -// runtime.Unknown in the internal version instead. +// defined by the conversion.Scope. If objects must be encoded to different schema versions than the default, you +// should encode them yourself with runtime.Unknown, or convert the object prior to invoking conversion. Objects +// outside of the current scheme must be added as runtime.Unknown. 
func (self *Scheme) runtimeObjectToRawExtensionArray(in *[]Object, out *[]RawExtension, s conversion.Scope) error { src := *in dest := make([]RawExtension, len(src)) @@ -160,7 +162,12 @@ func (self *Scheme) runtimeObjectToRawExtensionArray(in *[]Object, out *[]RawExt case *Unknown: dest[i].RawJSON = t.RawJSON default: - data, err := scheme.EncodeToVersion(src[i], outVersion) + version := outVersion + // if the object exists + if inVersion, _, err := scheme.ObjectVersionAndKind(src[i]); err == nil && len(inVersion) != 0 { + version = inVersion + } + data, err := scheme.EncodeToVersion(src[i], version) if err != nil { return err } diff --git a/pkg/scheduler/generic_scheduler.go b/pkg/scheduler/generic_scheduler.go index 7a2542daa0a..3d55baa5e83 100644 --- a/pkg/scheduler/generic_scheduler.go +++ b/pkg/scheduler/generic_scheduler.go @@ -30,7 +30,7 @@ import ( type FailedPredicateMap map[string]util.StringSet type FitError struct { - Pod api.Pod + Pod *api.Pod FailedPredicates FailedPredicateMap } @@ -51,7 +51,7 @@ type genericScheduler struct { randomLock sync.Mutex } -func (g *genericScheduler) Schedule(pod api.Pod, minionLister MinionLister) (string, error) { +func (g *genericScheduler) Schedule(pod *api.Pod, minionLister MinionLister) (string, error) { minions, err := minionLister.List() if err != nil { return "", err @@ -97,7 +97,7 @@ func (g *genericScheduler) selectHost(priorityList HostPriorityList) (string, er // Filters the minions to find the ones that fit based on the given predicate functions // Each minion is passed through the predicate functions to determine if it is a fit -func findNodesThatFit(pod api.Pod, podLister PodLister, predicates map[string]FitPredicate, nodes api.NodeList) (api.NodeList, FailedPredicateMap, error) { +func findNodesThatFit(pod *api.Pod, podLister PodLister, predicates map[string]FitPredicate, nodes api.NodeList) (api.NodeList, FailedPredicateMap, error) { filtered := []api.Node{} machineToPods, err := 
MapPodsToMachines(podLister) failedPredicateMap := FailedPredicateMap{} @@ -133,7 +133,7 @@ func findNodesThatFit(pod api.Pod, podLister PodLister, predicates map[string]Fi // Each priority function can also have its own weight // The minion scores returned by the priority function are multiplied by the weights to get weighted scores // All scores are finally combined (added) to get the total weighted scores of all minions -func prioritizeNodes(pod api.Pod, podLister PodLister, priorityConfigs []PriorityConfig, minionLister MinionLister) (HostPriorityList, error) { +func prioritizeNodes(pod *api.Pod, podLister PodLister, priorityConfigs []PriorityConfig, minionLister MinionLister) (HostPriorityList, error) { result := HostPriorityList{} // If no priority configs are provided, then the EqualPriority function is applied @@ -177,7 +177,7 @@ func getBestHosts(list HostPriorityList) []string { } // EqualPriority is a prioritizer function that gives an equal weight of one to all nodes -func EqualPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { +func EqualPriority(_ *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { nodes, err := minionLister.List() if err != nil { fmt.Errorf("failed to list nodes: %v", err) diff --git a/pkg/scheduler/generic_scheduler_test.go b/pkg/scheduler/generic_scheduler_test.go index b9cfd2f41c6..8b507bccb83 100644 --- a/pkg/scheduler/generic_scheduler_test.go +++ b/pkg/scheduler/generic_scheduler_test.go @@ -27,19 +27,19 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/util" ) -func falsePredicate(pod api.Pod, existingPods []api.Pod, node string) (bool, error) { +func falsePredicate(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { return false, nil } -func truePredicate(pod api.Pod, existingPods []api.Pod, node string) (bool, error) { +func truePredicate(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { return true, 
nil } -func matchesPredicate(pod api.Pod, existingPods []api.Pod, node string) (bool, error) { +func matchesPredicate(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { return pod.Name == node, nil } -func numericPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { +func numericPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { nodes, err := minionLister.List() result := []HostPriority{} @@ -60,7 +60,7 @@ func numericPriority(pod api.Pod, podLister PodLister, minionLister MinionLister return result, nil } -func reverseNumericPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { +func reverseNumericPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { var maxScore float64 minScore := math.MaxFloat64 reverseResult := []HostPriority{} @@ -165,7 +165,7 @@ func TestGenericScheduler(t *testing.T) { predicates map[string]FitPredicate prioritizers []PriorityConfig nodes []string - pod api.Pod + pod *api.Pod expectedHost string expectsErr bool }{ @@ -189,7 +189,7 @@ func TestGenericScheduler(t *testing.T) { predicates: map[string]FitPredicate{"matches": matchesPredicate}, prioritizers: []PriorityConfig{{Function: EqualPriority, Weight: 1}}, nodes: []string{"machine1", "machine2"}, - pod: api.Pod{ObjectMeta: api.ObjectMeta{Name: "machine2"}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "machine2"}}, expectedHost: "machine2", name: "test 3", }, @@ -204,7 +204,7 @@ func TestGenericScheduler(t *testing.T) { predicates: map[string]FitPredicate{"matches": matchesPredicate}, prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}}, nodes: []string{"3", "2", "1"}, - pod: api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, expectedHost: "2", name: "test 5", }, @@ -212,7 +212,7 @@ func TestGenericScheduler(t 
*testing.T) { predicates: map[string]FitPredicate{"true": truePredicate}, prioritizers: []PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}}, nodes: []string{"3", "2", "1"}, - pod: api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, expectedHost: "1", name: "test 6", }, @@ -227,7 +227,7 @@ func TestGenericScheduler(t *testing.T) { for _, test := range tests { random := rand.New(rand.NewSource(0)) - scheduler := NewGenericScheduler(test.predicates, test.prioritizers, FakePodLister([]api.Pod{}), random) + scheduler := NewGenericScheduler(test.predicates, test.prioritizers, FakePodLister([]*api.Pod{}), random) machine, err := scheduler.Schedule(test.pod, FakeMinionLister(makeNodeList(test.nodes))) if test.expectsErr { if err == nil { @@ -247,7 +247,7 @@ func TestGenericScheduler(t *testing.T) { func TestFindFitAllError(t *testing.T) { nodes := []string{"3", "2", "1"} predicates := map[string]FitPredicate{"true": truePredicate, "false": falsePredicate} - _, predicateMap, err := findNodesThatFit(api.Pod{}, FakePodLister([]api.Pod{}), predicates, makeNodeList(nodes)) + _, predicateMap, err := findNodesThatFit(&api.Pod{}, FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes)) if err != nil { t.Errorf("unexpected error: %v", err) @@ -271,8 +271,8 @@ func TestFindFitAllError(t *testing.T) { func TestFindFitSomeError(t *testing.T) { nodes := []string{"3", "2", "1"} predicates := map[string]FitPredicate{"true": truePredicate, "match": matchesPredicate} - pod := api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}} - _, predicateMap, err := findNodesThatFit(pod, FakePodLister([]api.Pod{}), predicates, makeNodeList(nodes)) + pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}} + _, predicateMap, err := findNodesThatFit(pod, FakePodLister([]*api.Pod{}), predicates, makeNodeList(nodes)) if err != nil { t.Errorf("unexpected error: %v", err) diff --git a/pkg/scheduler/listers.go 
b/pkg/scheduler/listers.go index 10d887c9417..470e3aeebb5 100644 --- a/pkg/scheduler/listers.go +++ b/pkg/scheduler/listers.go @@ -39,14 +39,14 @@ func (f FakeMinionLister) List() (api.NodeList, error) { // PodLister interface represents anything that can list pods for a scheduler. type PodLister interface { // TODO: make this exactly the same as client's Pods(ns).List() method, by returning a api.PodList - List(labels.Selector) ([]api.Pod, error) + List(labels.Selector) ([]*api.Pod, error) } // FakePodLister implements PodLister on an []api.Pods for test purposes. -type FakePodLister []api.Pod +type FakePodLister []*api.Pod -// List returns []api.Pod matching a query. -func (f FakePodLister) List(s labels.Selector) (selected []api.Pod, err error) { +// List returns []*api.Pod matching a query. +func (f FakePodLister) List(s labels.Selector) (selected []*api.Pod, err error) { for _, pod := range f { if s.Matches(labels.Set(pod.Labels)) { selected = append(selected, pod) @@ -60,7 +60,7 @@ type ServiceLister interface { // Lists all the services List() (api.ServiceList, error) // Gets the services for the given pod - GetPodServices(api.Pod) ([]api.Service, error) + GetPodServices(*api.Pod) ([]api.Service, error) } // FakeServiceLister implements ServiceLister on []api.Service for test purposes. 
@@ -72,7 +72,7 @@ func (f FakeServiceLister) List() (api.ServiceList, error) { } // GetPodServices gets the services that have the selector that match the labels on the given pod -func (f FakeServiceLister) GetPodServices(pod api.Pod) (services []api.Service, err error) { +func (f FakeServiceLister) GetPodServices(pod *api.Pod) (services []api.Service, err error) { var selector labels.Selector for _, service := range f { diff --git a/pkg/scheduler/predicates.go b/pkg/scheduler/predicates.go index b8a8e043b3a..c20f3c6969b 100644 --- a/pkg/scheduler/predicates.go +++ b/pkg/scheduler/predicates.go @@ -80,11 +80,11 @@ func isVolumeConflict(volume api.Volume, pod *api.Pod) bool { // are exclusive so if there is already a volume mounted on that node, another pod can't schedule // there. This is GCE specific for now. // TODO: migrate this into some per-volume specific code? -func NoDiskConflict(pod api.Pod, existingPods []api.Pod, node string) (bool, error) { +func NoDiskConflict(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { manifest := &(pod.Spec) for ix := range manifest.Volumes { for podIx := range existingPods { - if isVolumeConflict(manifest.Volumes[ix], &existingPods[podIx]) { + if isVolumeConflict(manifest.Volumes[ix], existingPods[podIx]) { return false, nil } } @@ -111,31 +111,31 @@ func getResourceRequest(pod *api.Pod) resourceRequest { return result } -func CheckPodsExceedingCapacity(pods []api.Pod, capacity api.ResourceList) (fitting []api.Pod, notFitting []api.Pod) { +func CheckPodsExceedingCapacity(pods []*api.Pod, capacity api.ResourceList) (fitting []*api.Pod, notFitting []*api.Pod) { totalMilliCPU := capacity.Cpu().MilliValue() totalMemory := capacity.Memory().Value() milliCPURequested := int64(0) memoryRequested := int64(0) - for ix := range pods { - podRequest := getResourceRequest(&pods[ix]) + for _, pod := range pods { + podRequest := getResourceRequest(pod) fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= 
podRequest.milliCPU fitsMemory := totalMemory == 0 || (totalMemory-memoryRequested) >= podRequest.memory if !fitsCPU || !fitsMemory { // the pod doesn't fit - notFitting = append(notFitting, pods[ix]) + notFitting = append(notFitting, pod) continue } // the pod fits milliCPURequested += podRequest.milliCPU memoryRequested += podRequest.memory - fitting = append(fitting, pods[ix]) + fitting = append(fitting, pod) } return } // PodFitsResources calculates fit based on requested, rather than used resources -func (r *ResourceFit) PodFitsResources(pod api.Pod, existingPods []api.Pod, node string) (bool, error) { - podRequest := getResourceRequest(&pod) +func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { + podRequest := getResourceRequest(pod) if podRequest.milliCPU == 0 && podRequest.memory == 0 { // no resources requested always fits. return true, nil @@ -144,7 +144,7 @@ func (r *ResourceFit) PodFitsResources(pod api.Pod, existingPods []api.Pod, node if err != nil { return false, err } - pods := []api.Pod{} + pods := []*api.Pod{} copy(pods, existingPods) pods = append(existingPods, pod) _, exceeding := CheckPodsExceedingCapacity(pods, info.Status.Capacity) @@ -180,15 +180,15 @@ type NodeSelector struct { info NodeInfo } -func (n *NodeSelector) PodSelectorMatches(pod api.Pod, existingPods []api.Pod, node string) (bool, error) { +func (n *NodeSelector) PodSelectorMatches(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { minion, err := n.info.GetNodeInfo(node) if err != nil { return false, err } - return PodMatchesNodeLabels(&pod, minion), nil + return PodMatchesNodeLabels(pod, minion), nil } -func PodFitsHost(pod api.Pod, existingPods []api.Pod, node string) (bool, error) { +func PodFitsHost(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { if len(pod.Spec.Host) == 0 { return true, nil } @@ -222,7 +222,7 @@ func NewNodeLabelPredicate(info NodeInfo, labels []string, presence bool) 
FitPre // Alternately, eliminating minions that have a certain label, regardless of value, is also useful // A minion may have a label with "retiring" as key and the date as the value // and it may be desirable to avoid scheduling new pods on this minion -func (n *NodeLabelChecker) CheckNodeLabelPresence(pod api.Pod, existingPods []api.Pod, node string) (bool, error) { +func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { var exists bool minion, err := n.info.GetNodeInfo(node) if err != nil { @@ -264,7 +264,7 @@ func NewServiceAffinityPredicate(podLister PodLister, serviceLister ServiceListe // - L is listed in the ServiceAffinity object that is passed into the function // - the pod does not have any NodeSelector for L // - some other pod from the same service is already scheduled onto a minion that has value V for label L -func (s *ServiceAffinity) CheckServiceAffinity(pod api.Pod, existingPods []api.Pod, node string) (bool, error) { +func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { var affinitySelector labels.Selector // check if the pod being scheduled has the affinity labels specified in its NodeSelector @@ -292,7 +292,7 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod api.Pod, existingPods []api.P return false, err } // consider only the pods that belong to the same namespace - nsServicePods := []api.Pod{} + nsServicePods := []*api.Pod{} for _, nsPod := range servicePods { if nsPod.Namespace == pod.Namespace { nsServicePods = append(nsServicePods, nsPod) @@ -333,7 +333,7 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod api.Pod, existingPods []api.P return affinitySelector.Matches(labels.Set(minion.Labels)), nil } -func PodFitsPorts(pod api.Pod, existingPods []api.Pod, node string) (bool, error) { +func PodFitsPorts(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { existingPorts := 
getUsedPorts(existingPods...) wantPorts := getUsedPorts(pod) for wport := range wantPorts { @@ -347,7 +347,7 @@ func PodFitsPorts(pod api.Pod, existingPods []api.Pod, node string) (bool, error return true, nil } -func getUsedPorts(pods ...api.Pod) map[int]bool { +func getUsedPorts(pods ...*api.Pod) map[int]bool { ports := make(map[int]bool) for _, pod := range pods { for _, container := range pod.Spec.Containers { @@ -361,12 +361,12 @@ func getUsedPorts(pods ...api.Pod) map[int]bool { // MapPodsToMachines obtains a list of pods and pivots that list into a map where the keys are host names // and the values are the list of pods running on that host. -func MapPodsToMachines(lister PodLister) (map[string][]api.Pod, error) { - machineToPods := map[string][]api.Pod{} +func MapPodsToMachines(lister PodLister) (map[string][]*api.Pod, error) { + machineToPods := map[string][]*api.Pod{} // TODO: perform more targeted query... pods, err := lister.List(labels.Everything()) if err != nil { - return map[string][]api.Pod{}, err + return map[string][]*api.Pod{}, err } for _, scheduledPod := range pods { host := scheduledPod.Spec.Host diff --git a/pkg/scheduler/predicates_test.go b/pkg/scheduler/predicates_test.go index e8833c28c5d..56a135b7dd3 100644 --- a/pkg/scheduler/predicates_test.go +++ b/pkg/scheduler/predicates_test.go @@ -52,7 +52,7 @@ func makeResources(milliCPU int64, memory int64) api.NodeResources { } } -func newResourcePod(usage ...resourceRequest) api.Pod { +func newResourcePod(usage ...resourceRequest) *api.Pod { containers := []api.Container{} for _, req := range usage { containers = append(containers, api.Container{ @@ -64,7 +64,7 @@ func newResourcePod(usage ...resourceRequest) api.Pod { }, }) } - return api.Pod{ + return &api.Pod{ Spec: api.PodSpec{ Containers: containers, }, @@ -73,14 +73,14 @@ func newResourcePod(usage ...resourceRequest) api.Pod { func TestPodFitsResources(t *testing.T) { tests := []struct { - pod api.Pod - existingPods []api.Pod + pod 
*api.Pod + existingPods []*api.Pod fits bool test string }{ { - pod: api.Pod{}, - existingPods: []api.Pod{ + pod: &api.Pod{}, + existingPods: []*api.Pod{ newResourcePod(resourceRequest{milliCPU: 10, memory: 20}), }, fits: true, @@ -88,7 +88,7 @@ func TestPodFitsResources(t *testing.T) { }, { pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), - existingPods: []api.Pod{ + existingPods: []*api.Pod{ newResourcePod(resourceRequest{milliCPU: 10, memory: 20}), }, fits: false, @@ -96,7 +96,7 @@ func TestPodFitsResources(t *testing.T) { }, { pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}), - existingPods: []api.Pod{ + existingPods: []*api.Pod{ newResourcePod(resourceRequest{milliCPU: 5, memory: 5}), }, fits: true, @@ -104,7 +104,7 @@ func TestPodFitsResources(t *testing.T) { }, { pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 2}), - existingPods: []api.Pod{ + existingPods: []*api.Pod{ newResourcePod(resourceRequest{milliCPU: 5, memory: 19}), }, fits: false, @@ -112,7 +112,7 @@ func TestPodFitsResources(t *testing.T) { }, { pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}), - existingPods: []api.Pod{ + existingPods: []*api.Pod{ newResourcePod(resourceRequest{milliCPU: 5, memory: 19}), }, fits: true, @@ -135,19 +135,19 @@ func TestPodFitsResources(t *testing.T) { func TestPodFitsHost(t *testing.T) { tests := []struct { - pod api.Pod + pod *api.Pod node string fits bool test string }{ { - pod: api.Pod{}, + pod: &api.Pod{}, node: "foo", fits: true, test: "no host specified", }, { - pod: api.Pod{ + pod: &api.Pod{ Spec: api.PodSpec{ Host: "foo", }, @@ -157,7 +157,7 @@ func TestPodFitsHost(t *testing.T) { test: "host matches", }, { - pod: api.Pod{ + pod: &api.Pod{ Spec: api.PodSpec{ Host: "bar", }, @@ -169,7 +169,7 @@ func TestPodFitsHost(t *testing.T) { } for _, test := range tests { - result, err := PodFitsHost(test.pod, []api.Pod{}, test.node) + result, err := PodFitsHost(test.pod, []*api.Pod{}, test.node) if err != nil { 
t.Errorf("unexpected error: %v", err) } @@ -181,20 +181,20 @@ func TestPodFitsHost(t *testing.T) { func TestPodFitsPorts(t *testing.T) { tests := []struct { - pod api.Pod - existingPods []api.Pod + pod *api.Pod + existingPods []*api.Pod fits bool test string }{ { - pod: api.Pod{}, - existingPods: []api.Pod{}, + pod: &api.Pod{}, + existingPods: []*api.Pod{}, fits: true, test: "nothing running", }, { pod: newPod("m1", 8080), - existingPods: []api.Pod{ + existingPods: []*api.Pod{ newPod("m1", 9090), }, fits: true, @@ -202,7 +202,7 @@ func TestPodFitsPorts(t *testing.T) { }, { pod: newPod("m1", 8080), - existingPods: []api.Pod{ + existingPods: []*api.Pod{ newPod("m1", 8080), }, fits: false, @@ -210,7 +210,7 @@ func TestPodFitsPorts(t *testing.T) { }, { pod: newPod("m1", 8000, 8080), - existingPods: []api.Pod{ + existingPods: []*api.Pod{ newPod("m1", 8080), }, fits: false, @@ -218,7 +218,7 @@ func TestPodFitsPorts(t *testing.T) { }, { pod: newPod("m1", 8000, 8080), - existingPods: []api.Pod{ + existingPods: []*api.Pod{ newPod("m1", 8001, 8080), }, fits: false, @@ -238,25 +238,25 @@ func TestPodFitsPorts(t *testing.T) { func TestGetUsedPorts(t *testing.T) { tests := []struct { - pods []api.Pod + pods []*api.Pod ports map[int]bool }{ { - []api.Pod{ + []*api.Pod{ newPod("m1", 9090), }, map[int]bool{9090: true}, }, { - []api.Pod{ + []*api.Pod{ newPod("m1", 9090), newPod("m1", 9091), }, map[int]bool{9090: true, 9091: true}, }, { - []api.Pod{ + []*api.Pod{ newPod("m1", 9090), newPod("m2", 9091), }, @@ -296,15 +296,15 @@ func TestDiskConflicts(t *testing.T) { }, } tests := []struct { - pod api.Pod - existingPods []api.Pod + pod *api.Pod + existingPods []*api.Pod isOk bool test string }{ - {api.Pod{}, []api.Pod{}, true, "nothing"}, - {api.Pod{}, []api.Pod{{Spec: volState}}, true, "one state"}, - {api.Pod{Spec: volState}, []api.Pod{{Spec: volState}}, false, "same state"}, - {api.Pod{Spec: volState2}, []api.Pod{{Spec: volState}}, true, "different state"}, + {&api.Pod{}, 
[]*api.Pod{}, true, "nothing"}, + {&api.Pod{}, []*api.Pod{{Spec: volState}}, true, "one state"}, + {&api.Pod{Spec: volState}, []*api.Pod{{Spec: volState}}, false, "same state"}, + {&api.Pod{Spec: volState2}, []*api.Pod{{Spec: volState}}, true, "different state"}, } for _, test := range tests { @@ -345,15 +345,15 @@ func TestAWSDiskConflicts(t *testing.T) { }, } tests := []struct { - pod api.Pod - existingPods []api.Pod + pod *api.Pod + existingPods []*api.Pod isOk bool test string }{ - {api.Pod{}, []api.Pod{}, true, "nothing"}, - {api.Pod{}, []api.Pod{{Spec: volState}}, true, "one state"}, - {api.Pod{Spec: volState}, []api.Pod{{Spec: volState}}, false, "same state"}, - {api.Pod{Spec: volState2}, []api.Pod{{Spec: volState}}, true, "different state"}, + {&api.Pod{}, []*api.Pod{}, true, "nothing"}, + {&api.Pod{}, []*api.Pod{{Spec: volState}}, true, "one state"}, + {&api.Pod{Spec: volState}, []*api.Pod{{Spec: volState}}, false, "same state"}, + {&api.Pod{Spec: volState2}, []*api.Pod{{Spec: volState}}, true, "different state"}, } for _, test := range tests { @@ -372,18 +372,18 @@ func TestAWSDiskConflicts(t *testing.T) { func TestPodFitsSelector(t *testing.T) { tests := []struct { - pod api.Pod + pod *api.Pod labels map[string]string fits bool test string }{ { - pod: api.Pod{}, + pod: &api.Pod{}, fits: true, test: "no selector", }, { - pod: api.Pod{ + pod: &api.Pod{ Spec: api.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", @@ -394,7 +394,7 @@ func TestPodFitsSelector(t *testing.T) { test: "missing labels", }, { - pod: api.Pod{ + pod: &api.Pod{ Spec: api.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", @@ -408,7 +408,7 @@ func TestPodFitsSelector(t *testing.T) { test: "same labels", }, { - pod: api.Pod{ + pod: &api.Pod{ Spec: api.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", @@ -423,7 +423,7 @@ func TestPodFitsSelector(t *testing.T) { test: "node labels are superset", }, { - pod: api.Pod{ + pod: &api.Pod{ Spec: api.PodSpec{ NodeSelector: 
map[string]string{ "foo": "bar", @@ -442,7 +442,7 @@ func TestPodFitsSelector(t *testing.T) { node := api.Node{ObjectMeta: api.ObjectMeta{Labels: test.labels}} fit := NodeSelector{FakeNodeInfo(node)} - fits, err := fit.PodSelectorMatches(test.pod, []api.Pod{}, "machine") + fits, err := fit.PodSelectorMatches(test.pod, []*api.Pod{}, "machine") if err != nil { t.Errorf("unexpected error: %v", err) } @@ -455,8 +455,8 @@ func TestPodFitsSelector(t *testing.T) { func TestNodeLabelPresence(t *testing.T) { label := map[string]string{"foo": "bar", "bar": "foo"} tests := []struct { - pod api.Pod - existingPods []api.Pod + pod *api.Pod + existingPods []*api.Pod labels []string presence bool fits bool @@ -536,8 +536,8 @@ func TestServiceAffinity(t *testing.T) { node4 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labels4}} node5 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labels4}} tests := []struct { - pod api.Pod - pods []api.Pod + pod *api.Pod + pods []*api.Pod services []api.Service node string labels []string @@ -545,28 +545,29 @@ func TestServiceAffinity(t *testing.T) { test string }{ { + pod: new(api.Pod), node: "machine1", fits: true, labels: []string{"region"}, test: "nothing scheduled", }, { - pod: api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r1"}}}, + pod: &api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r1"}}}, node: "machine1", fits: true, labels: []string{"region"}, test: "pod with region label match", }, { - pod: api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r2"}}}, + pod: &api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r2"}}}, node: "machine1", fits: false, labels: []string{"region"}, test: "pod with region label mismatch", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []api.Pod{{Spec: api.PodSpec{Host: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pod: &api.Pod{ObjectMeta: 
api.ObjectMeta{Labels: selector}}, + pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, fits: true, @@ -574,8 +575,8 @@ func TestServiceAffinity(t *testing.T) { test: "service pod on same minion", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []api.Pod{{Spec: api.PodSpec{Host: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, + pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, fits: true, @@ -583,8 +584,8 @@ func TestServiceAffinity(t *testing.T) { test: "service pod on different minion, region match", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, + pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, fits: false, @@ -592,8 +593,8 @@ func TestServiceAffinity(t *testing.T) { test: "service pod on different minion, region mismatch", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}, - pods: []api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}, + pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns2"}}}, 
fits: true, @@ -601,8 +602,8 @@ func TestServiceAffinity(t *testing.T) { test: "service in different namespace, region mismatch", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}, - pods: []api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns2"}}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}, + pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns2"}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}}, fits: true, @@ -610,8 +611,8 @@ func TestServiceAffinity(t *testing.T) { test: "pod in different namespace, region mismatch", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}, - pods: []api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}, + pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}}, fits: false, @@ -619,8 +620,8 @@ func TestServiceAffinity(t *testing.T) { test: "service and pod in same namespace, region mismatch", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []api.Pod{{Spec: api.PodSpec{Host: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, + pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, fits: false, @@ -628,8 +629,8 @@ func TestServiceAffinity(t *testing.T) { test: "service pod 
on different minion, multiple labels, not all match", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []api.Pod{{Spec: api.PodSpec{Host: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, + pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, node: "machine4", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, fits: true, @@ -641,7 +642,7 @@ func TestServiceAffinity(t *testing.T) { for _, test := range tests { nodes := []api.Node{node1, node2, node3, node4, node5} serviceAffinity := ServiceAffinity{FakePodLister(test.pods), FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels} - fits, err := serviceAffinity.CheckServiceAffinity(test.pod, []api.Pod{}, test.node) + fits, err := serviceAffinity.CheckServiceAffinity(test.pod, []*api.Pod{}, test.node) if err != nil { t.Errorf("unexpected error: %v", err) } diff --git a/pkg/scheduler/priorities.go b/pkg/scheduler/priorities.go index eab28e21cb8..16c2214aa64 100644 --- a/pkg/scheduler/priorities.go +++ b/pkg/scheduler/priorities.go @@ -31,7 +31,7 @@ func calculateScore(requested, capacity int64, node string) int { return 0 } if requested > capacity { - glog.Errorf("Combined requested resources from existing pods exceeds capacity on minion: %s", node) + glog.Infof("Combined requested resources from existing pods exceeds capacity on minion: %s", node) return 0 } return int(((capacity - requested) * 10) / capacity) @@ -39,7 +39,7 @@ func calculateScore(requested, capacity int64, node string) int { // Calculate the occupancy on a node. 'node' has information about the resources on the node. // 'pods' is a list of pods currently scheduled on the node. 
-func calculateOccupancy(pod api.Pod, node api.Node, pods []api.Pod) HostPriority { +func calculateOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) HostPriority { totalMilliCPU := int64(0) totalMemory := int64(0) for _, existingPod := range pods { @@ -61,7 +61,7 @@ func calculateOccupancy(pod api.Pod, node api.Node, pods []api.Pod) HostPriority cpuScore := calculateScore(totalMilliCPU, capacityMilliCPU, node.Name) memoryScore := calculateScore(totalMemory, capacityMemory, node.Name) glog.V(4).Infof( - "%v -> %v: Least Requested Priority, AbsoluteRequested: (%d, %d) / (%d, %d) Score: (%d, %d)", + "%v -> %v: Least Requested Priority, Absolute/Requested: (%d, %d) / (%d, %d) Score: (%d, %d)", pod.Name, node.Name, totalMilliCPU, totalMemory, capacityMilliCPU, capacityMemory, @@ -78,7 +78,7 @@ func calculateOccupancy(pod api.Pod, node api.Node, pods []api.Pod) HostPriority // It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes // based on the minimum of the average of the fraction of requested to capacity. // Details: (Sum(requested cpu) / Capacity + Sum(requested memory) / Capacity) * 50 -func LeastRequestedPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { +func LeastRequestedPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { nodes, err := minionLister.List() if err != nil { return HostPriorityList{}, err @@ -108,7 +108,7 @@ func NewNodeLabelPriority(label string, presence bool) PriorityFunction { // CalculateNodeLabelPriority checks whether a particular label exists on a minion or not, regardless of its value. // If presence is true, prioritizes minions that have the specified label, regardless of value. // If presence is false, prioritizes minions that do not have the specified label. 
-func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { +func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { var score int minions, err := minionLister.List() if err != nil { @@ -141,7 +141,7 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod api.Pod, podLister // close the two metrics are to each other. // Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by: // "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization" -func BalancedResourceAllocation(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { +func BalancedResourceAllocation(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { nodes, err := minionLister.List() if err != nil { return HostPriorityList{}, err @@ -155,7 +155,7 @@ func BalancedResourceAllocation(pod api.Pod, podLister PodLister, minionLister M return list, nil } -func calculateBalancedResourceAllocation(pod api.Pod, node api.Node, pods []api.Pod) HostPriority { +func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*api.Pod) HostPriority { totalMilliCPU := int64(0) totalMemory := int64(0) score := int(0) diff --git a/pkg/scheduler/priorities_test.go b/pkg/scheduler/priorities_test.go index 88c5aa31f41..5cddfc05730 100644 --- a/pkg/scheduler/priorities_test.go +++ b/pkg/scheduler/priorities_test.go @@ -98,8 +98,8 @@ func TestLeastRequested(t *testing.T) { }, } tests := []struct { - pod api.Pod - pods []api.Pod + pod *api.Pod + pods []*api.Pod nodes []api.Node expectedList HostPriorityList test string @@ -116,7 +116,7 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((10000 - 0) *10) / 10000 = 10 Minion2 Score: (10 + 10) / 2 = 10 */ - pod: api.Pod{Spec: 
noResources}, + pod: &api.Pod{Spec: noResources}, nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)}, expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}}, test: "nothing scheduled, nothing requested", @@ -133,7 +133,7 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((10000 - 5000) *10) / 10000 = 5 Minion2 Score: (5 + 5) / 2 = 5 */ - pod: api.Pod{Spec: cpuAndMemory}, + pod: &api.Pod{Spec: cpuAndMemory}, nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)}, expectedList: []HostPriority{{"machine1", 3}, {"machine2", 5}}, test: "nothing scheduled, resources requested, differently sized machines", @@ -150,11 +150,11 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((10000 - 0) *10) / 10000 = 10 Minion2 Score: (10 + 10) / 2 = 10 */ - pod: api.Pod{Spec: noResources}, + pod: &api.Pod{Spec: noResources}, nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)}, expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}}, test: "no resources requested, pods scheduled", - pods: []api.Pod{ + pods: []*api.Pod{ {Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, @@ -173,11 +173,11 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((20000 - 5000) *10) / 20000 = 7.5 Minion2 Score: (4 + 7.5) / 2 = 5 */ - pod: api.Pod{Spec: noResources}, + pod: &api.Pod{Spec: noResources}, nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)}, expectedList: []HostPriority{{"machine1", 7}, {"machine2", 5}}, test: "no resources requested, pods scheduled with resources", - pods: []api.Pod{ + pods: []*api.Pod{ {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: cpuOnly2, ObjectMeta: 
api.ObjectMeta{Labels: labels1}}, @@ -196,11 +196,11 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((20000 - 10000) *10) / 20000 = 5 Minion2 Score: (4 + 5) / 2 = 4 */ - pod: api.Pod{Spec: cpuAndMemory}, + pod: &api.Pod{Spec: cpuAndMemory}, nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)}, expectedList: []HostPriority{{"machine1", 5}, {"machine2", 4}}, test: "resources requested, pods scheduled with resources", - pods: []api.Pod{ + pods: []*api.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, @@ -217,11 +217,11 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((50000 - 10000) *10) / 50000 = 8 Minion2 Score: (4 + 8) / 2 = 6 */ - pod: api.Pod{Spec: cpuAndMemory}, + pod: &api.Pod{Spec: cpuAndMemory}, nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)}, expectedList: []HostPriority{{"machine1", 5}, {"machine2", 6}}, test: "resources requested, pods scheduled with resources, differently sized machines", - pods: []api.Pod{ + pods: []*api.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, @@ -238,21 +238,21 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((10000 - 5000) *10) / 10000 = 5 Minion2 Score: (0 + 5) / 2 = 2 */ - pod: api.Pod{Spec: cpuOnly}, + pod: &api.Pod{Spec: cpuOnly}, nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)}, expectedList: []HostPriority{{"machine1", 5}, {"machine2", 2}}, test: "requested resources exceed minion capacity", - pods: []api.Pod{ + pods: []*api.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, }, { - pod: api.Pod{Spec: noResources}, + pod: &api.Pod{Spec: noResources}, nodes: []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)}, expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}}, test: "zero minion resources, pods scheduled with resources", - pods: []api.Pod{ + pods: []*api.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, @@ -275,8 +275,6 @@ func 
TestNewNodeLabelPriority(t *testing.T) { label2 := map[string]string{"bar": "foo"} label3 := map[string]string{"bar": "baz"} tests := []struct { - pod api.Pod - pods []api.Pod nodes []api.Node label string presence bool @@ -356,7 +354,7 @@ func TestNewNodeLabelPriority(t *testing.T) { label: test.label, presence: test.presence, } - list, err := prioritizer.CalculateNodeLabelPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(api.NodeList{Items: test.nodes})) + list, err := prioritizer.CalculateNodeLabelPriority(nil, nil, FakeMinionLister(api.NodeList{Items: test.nodes})) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -430,8 +428,8 @@ func TestBalancedResourceAllocation(t *testing.T) { }, } tests := []struct { - pod api.Pod - pods []api.Pod + pod *api.Pod + pods []*api.Pod nodes []api.Node expectedList HostPriorityList test string @@ -448,7 +446,7 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction: 0 / 10000 = 0% Minion2 Score: 10 - (0-0)*10 = 10 */ - pod: api.Pod{Spec: noResources}, + pod: &api.Pod{Spec: noResources}, nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)}, expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}}, test: "nothing scheduled, nothing requested", @@ -465,7 +463,7 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction: 5000/10000 = 50% Minion2 Score: 10 - (0.5-0.5)*10 = 10 */ - pod: api.Pod{Spec: cpuAndMemory}, + pod: &api.Pod{Spec: cpuAndMemory}, nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)}, expectedList: []HostPriority{{"machine1", 7}, {"machine2", 10}}, test: "nothing scheduled, resources requested, differently sized machines", @@ -482,11 +480,11 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction: 0 / 10000 = 0% Minion2 Score: 10 - (0-0)*10 = 10 */ - pod: api.Pod{Spec: noResources}, + pod: &api.Pod{Spec: noResources}, nodes: []api.Node{makeMinion("machine1", 4000, 
10000), makeMinion("machine2", 4000, 10000)}, expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}}, test: "no resources requested, pods scheduled", - pods: []api.Pod{ + pods: []*api.Pod{ {Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, @@ -505,11 +503,11 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction: 5000 / 20000 = 25% Minion2 Score: 10 - (0.6-0.25)*10 = 6 */ - pod: api.Pod{Spec: noResources}, + pod: &api.Pod{Spec: noResources}, nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)}, expectedList: []HostPriority{{"machine1", 4}, {"machine2", 6}}, test: "no resources requested, pods scheduled with resources", - pods: []api.Pod{ + pods: []*api.Pod{ {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}}, @@ -528,11 +526,11 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction: 10000 / 20000 = 50% Minion2 Score: 10 - (0.6-0.5)*10 = 9 */ - pod: api.Pod{Spec: cpuAndMemory}, + pod: &api.Pod{Spec: cpuAndMemory}, nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)}, expectedList: []HostPriority{{"machine1", 6}, {"machine2", 9}}, test: "resources requested, pods scheduled with resources", - pods: []api.Pod{ + pods: []*api.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, @@ -549,11 +547,11 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction: 10000 / 50000 = 20% Minion2 Score: 10 - (0.6-0.2)*10 = 6 */ - pod: api.Pod{Spec: cpuAndMemory}, + pod: &api.Pod{Spec: cpuAndMemory}, nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)}, expectedList: []HostPriority{{"machine1", 6}, {"machine2", 6}}, test: "resources 
requested, pods scheduled with resources, differently sized machines", - pods: []api.Pod{ + pods: []*api.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, @@ -570,21 +568,21 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction 5000 / 10000 = 50% Minion2 Score: 0 */ - pod: api.Pod{Spec: cpuOnly}, + pod: &api.Pod{Spec: cpuOnly}, nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)}, expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}}, test: "requested resources exceed minion capacity", - pods: []api.Pod{ + pods: []*api.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, }, { - pod: api.Pod{Spec: noResources}, + pod: &api.Pod{Spec: noResources}, nodes: []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)}, expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}}, test: "zero minion resources, pods scheduled with resources", - pods: []api.Pod{ + pods: []*api.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 34e645fbe81..8aea816d788 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -23,5 +23,5 @@ import ( // Scheduler is an interface implemented by things that know how to schedule pods // onto machines. type Scheduler interface { - Schedule(api.Pod, MinionLister) (selectedMachine string, err error) + Schedule(*api.Pod, MinionLister) (selectedMachine string, err error) } diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index c4285664487..72d37a8ce68 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -31,7 +31,7 @@ type schedulerTester struct { } // Call if you know exactly where pod should get scheduled. 
-func (st *schedulerTester) expectSchedule(pod api.Pod, expected string) { +func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) { actual, err := st.scheduler.Schedule(pod, st.minionLister) if err != nil { st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod) @@ -43,7 +43,7 @@ func (st *schedulerTester) expectSchedule(pod api.Pod, expected string) { } // Call if you can't predict where pod will be scheduled. -func (st *schedulerTester) expectSuccess(pod api.Pod) { +func (st *schedulerTester) expectSuccess(pod *api.Pod) { _, err := st.scheduler.Schedule(pod, st.minionLister) if err != nil { st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod) @@ -52,19 +52,19 @@ func (st *schedulerTester) expectSuccess(pod api.Pod) { } // Call if pod should *not* schedule. -func (st *schedulerTester) expectFailure(pod api.Pod) { +func (st *schedulerTester) expectFailure(pod *api.Pod) { _, err := st.scheduler.Schedule(pod, st.minionLister) if err == nil { st.t.Error("Unexpected non-error") } } -func newPod(host string, hostPorts ...int) api.Pod { +func newPod(host string, hostPorts ...int) *api.Pod { networkPorts := []api.ContainerPort{} for _, port := range hostPorts { networkPorts = append(networkPorts, api.ContainerPort{HostPort: port}) } - return api.Pod{ + return &api.Pod{ Spec: api.PodSpec{ Host: host, Containers: []api.Container{ diff --git a/pkg/scheduler/spreading.go b/pkg/scheduler/spreading.go index 43322703504..711cb7387df 100644 --- a/pkg/scheduler/spreading.go +++ b/pkg/scheduler/spreading.go @@ -34,9 +34,9 @@ func NewServiceSpreadPriority(serviceLister ServiceLister) PriorityFunction { // CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service // on the same machine. 
-func (s *ServiceSpread) CalculateSpreadPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { +func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { var maxCount int - var nsServicePods []api.Pod + var nsServicePods []*api.Pod services, err := s.serviceLister.GetPodServices(pod) if err == nil { @@ -101,8 +101,8 @@ func NewServiceAntiAffinityPriority(serviceLister ServiceLister, label string) P // CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service // on machines with the same value for a particular label. // The label to be considered is provided to the struct (ServiceAntiAffinity). -func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { - var nsServicePods []api.Pod +func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) { + var nsServicePods []*api.Pod services, err := s.serviceLister.GetPodServices(pod) if err == nil { diff --git a/pkg/scheduler/spreading_test.go b/pkg/scheduler/spreading_test.go index 00bb7c5b5b6..921fed0248c 100644 --- a/pkg/scheduler/spreading_test.go +++ b/pkg/scheduler/spreading_test.go @@ -40,36 +40,37 @@ func TestServiceSpreadPriority(t *testing.T) { Host: "machine2", } tests := []struct { - pod api.Pod - pods []api.Pod + pod *api.Pod + pods []*api.Pod nodes []string services []api.Service expectedList HostPriorityList test string }{ { + pod: new(api.Pod), nodes: []string{"machine1", "machine2"}, expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}}, test: "nothing scheduled", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{{Spec: zone1Spec}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{{Spec: 
zone1Spec}}, nodes: []string{"machine1", "machine2"}, expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}}, test: "no services", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}}, nodes: []string{"machine1", "machine2"}, services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}}, test: "different services", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{ + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, }, @@ -79,8 +80,8 @@ func TestServiceSpreadPriority(t *testing.T) { test: "two pods, one service pod", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{ + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, @@ -93,8 +94,8 @@ func TestServiceSpreadPriority(t *testing.T) { test: "five pods, one service pod in no namespace", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, - pods: []api.Pod{ + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, + pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, {Spec: zone2Spec, ObjectMeta: 
api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, @@ -106,8 +107,8 @@ func TestServiceSpreadPriority(t *testing.T) { test: "four pods, one service pod in default namespace", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, - pods: []api.Pod{ + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, + pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns2"}}, @@ -120,8 +121,8 @@ func TestServiceSpreadPriority(t *testing.T) { test: "five pods, one service pod in specific namespace", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{ + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, @@ -132,8 +133,8 @@ func TestServiceSpreadPriority(t *testing.T) { test: "three pods, two service pods on different machines", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{ + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, @@ -145,8 +146,8 @@ func TestServiceSpreadPriority(t *testing.T) { test: "four pods, three service pods", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{ + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: 
labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, @@ -203,14 +204,15 @@ func TestZoneSpreadPriority(t *testing.T) { "machine21": zone2, "machine22": zone2, } tests := []struct { - pod api.Pod - pods []api.Pod + pod *api.Pod + pods []*api.Pod nodes map[string]map[string]string services []api.Service expectedList HostPriorityList test string }{ { + pod: new(api.Pod), nodes: labeledNodes, expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10}, {"machine21", 10}, {"machine22", 10}, @@ -218,8 +220,8 @@ func TestZoneSpreadPriority(t *testing.T) { test: "nothing scheduled", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{{Spec: zone1Spec}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{{Spec: zone1Spec}}, nodes: labeledNodes, expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10}, {"machine21", 10}, {"machine22", 10}, @@ -227,8 +229,8 @@ func TestZoneSpreadPriority(t *testing.T) { test: "no services", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}}, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}}, nodes: labeledNodes, services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10}, @@ -237,8 +239,8 @@ func TestZoneSpreadPriority(t *testing.T) { test: "different services", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{ + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{ {Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, @@ -251,8 +253,8 @@ func 
TestZoneSpreadPriority(t *testing.T) { test: "three pods, one service pod", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{ + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, @@ -265,8 +267,8 @@ func TestZoneSpreadPriority(t *testing.T) { test: "three pods, two service pods on different machines", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, - pods: []api.Pod{ + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, + pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, @@ -280,8 +282,8 @@ func TestZoneSpreadPriority(t *testing.T) { test: "three service label match pods in different namespaces", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{ + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, @@ -295,8 +297,8 @@ func TestZoneSpreadPriority(t *testing.T) { test: "four pods, three service pods", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{ + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{ {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, @@ -309,8 +311,8 @@ func 
TestZoneSpreadPriority(t *testing.T) { test: "service with partial pod label matches", }, { - pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []api.Pod{ + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{ {Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, diff --git a/pkg/scheduler/types.go b/pkg/scheduler/types.go index 471529b491d..c2481a73346 100644 --- a/pkg/scheduler/types.go +++ b/pkg/scheduler/types.go @@ -21,7 +21,7 @@ import ( ) // FitPredicate is a function that indicates if a pod fits into an existing node. -type FitPredicate func(pod api.Pod, existingPods []api.Pod, node string) (bool, error) +type FitPredicate func(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) // HostPriority represents the priority of scheduling to a particular host, lower priority is better. type HostPriority struct { @@ -46,7 +46,7 @@ func (h HostPriorityList) Swap(i, j int) { h[i], h[j] = h[j], h[i] } -type PriorityFunction func(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) +type PriorityFunction func(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) type PriorityConfig struct { Function PriorityFunction diff --git a/pkg/service/endpoints_controller.go b/pkg/service/endpoints_controller.go index 12f12557981..b5c0f317998 100644 --- a/pkg/service/endpoints_controller.go +++ b/pkg/service/endpoints_controller.go @@ -19,6 +19,7 @@ package service import ( "fmt" "reflect" + "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/endpoints" @@ -26,135 +27,347 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" + 
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache" + "github.com/GoogleCloudPlatform/kubernetes/pkg/controller/framework" + "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" + "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/workqueue" + "github.com/GoogleCloudPlatform/kubernetes/pkg/watch" "github.com/golang/glog" ) -// EndpointController manages selector-based service endpoints. -type EndpointController struct { - client *client.Client -} +const ( + // We'll attempt to recompute EVERY service's endpoints at least this + // often. Higher numbers = lower CPU/network load; lower numbers = + // shorter amount of time before a mistaken endpoint is corrected. + FullServiceResyncPeriod = 30 * time.Second + + // We'll keep pod watches open up to this long. In the unlikely case + // that a watch misdelivers info about a pod, it'll take this long for + // that mistake to be rectified. + PodRelistPeriod = 5 * time.Minute +) + +var ( + keyFunc = framework.DeletionHandlingMetaNamespaceKeyFunc +) // NewEndpointController returns a new *EndpointController. 
func NewEndpointController(client *client.Client) *EndpointController { - return &EndpointController{ + e := &EndpointController{ client: client, + queue: workqueue.New(), + } + + e.serviceStore.Store, e.serviceController = framework.NewInformer( + &cache.ListWatch{ + ListFunc: func() (runtime.Object, error) { + return e.client.Services(api.NamespaceAll).List(labels.Everything()) + }, + WatchFunc: func(rv string) (watch.Interface, error) { + return e.client.Services(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv) + }, + }, + &api.Service{}, + FullServiceResyncPeriod, + framework.ResourceEventHandlerFuncs{ + AddFunc: e.enqueueService, + UpdateFunc: func(old, cur interface{}) { + e.enqueueService(cur) + }, + DeleteFunc: e.enqueueService, + }, + ) + + e.podStore.Store, e.podController = framework.NewInformer( + &cache.ListWatch{ + ListFunc: func() (runtime.Object, error) { + return e.client.Pods(api.NamespaceAll).List(labels.Everything()) + }, + WatchFunc: func(rv string) (watch.Interface, error) { + return e.client.Pods(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv) + }, + }, + &api.Pod{}, + PodRelistPeriod, + framework.ResourceEventHandlerFuncs{ + AddFunc: e.addPod, + UpdateFunc: e.updatePod, + DeleteFunc: e.deletePod, + }, + ) + + return e +} + +// EndpointController manages selector-based service endpoints. +type EndpointController struct { + client *client.Client + + serviceStore cache.StoreToServiceLister + podStore cache.StoreToPodLister + + // Services that need to be updated. A channel is inappropriate here, + // because it allows services with lots of pods to be serviced much + // more often than services with few pods; it also would cause a + // service that's inserted multiple times to be processed more than + // necessary. + queue *workqueue.Type + + // Since we join two objects, we'll watch both of them with + // controllers. 
+ serviceController *framework.Controller + podController *framework.Controller +} + +// Runs e; will not return until stopCh is closed. workers determines how many +// endpoints will be handled in parallel. +func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) { + defer util.HandleCrash() + go e.serviceController.Run(stopCh) + go e.podController.Run(stopCh) + for i := 0; i < workers; i++ { + go util.Until(e.worker, time.Second, stopCh) + } + <-stopCh + e.queue.ShutDown() +} + +func (e *EndpointController) getPodServiceMemberships(pod *api.Pod) (util.StringSet, error) { + set := util.StringSet{} + services, err := e.serviceStore.GetPodServices(pod) + if err != nil { + // don't log this error because this function makes pointless + // errors when no services match. + return set, nil + } + for i := range services { + key, err := keyFunc(&services[i]) + if err != nil { + return nil, err + } + set.Insert(key) + } + return set, nil +} + +// When a pod is added, figure out what services it will be a member of and +// enqueue them. obj must have *api.Pod type. +func (e *EndpointController) addPod(obj interface{}) { + pod := obj.(*api.Pod) + services, err := e.getPodServiceMemberships(pod) + if err != nil { + glog.Errorf("Unable to get pod %v/%v's service memberships: %v", pod.Namespace, pod.Name, err) + return + } + for key := range services { + e.queue.Add(key) } } -// SyncServiceEndpoints syncs endpoints for services with selectors. -func (e *EndpointController) SyncServiceEndpoints() error { - services, err := e.client.Services(api.NamespaceAll).List(labels.Everything()) - if err != nil { - glog.Errorf("Failed to list services: %v", err) - return err +// When a pod is updated, figure out what services it used to be a member of +// and what services it will be a member of, and enqueue the union of these. +// old and cur must be *api.Pod types. 
+func (e *EndpointController) updatePod(old, cur interface{}) { + if api.Semantic.DeepEqual(old, cur) { + return + } + newPod := old.(*api.Pod) + services, err := e.getPodServiceMemberships(newPod) + if err != nil { + glog.Errorf("Unable to get pod %v/%v's service memberships: %v", newPod.Namespace, newPod.Name, err) + return } - var resultErr error - for i := range services.Items { - service := &services.Items[i] - if service.Spec.Selector == nil { - // services without a selector receive no endpoints from this controller; - // these services will receive the endpoints that are created out-of-band via the REST API. - continue - } - - glog.V(5).Infof("About to update endpoints for service %s/%s", service.Namespace, service.Name) - pods, err := e.client.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelector()) + oldPod := cur.(*api.Pod) + // Only need to get the old services if the labels changed. + if !reflect.DeepEqual(newPod.Labels, oldPod.Labels) { + oldServices, err := e.getPodServiceMemberships(oldPod) if err != nil { - glog.Errorf("Error syncing service: %s/%s, skipping", service.Namespace, service.Name) - resultErr = err - continue + glog.Errorf("Unable to get pod %v/%v's service memberships: %v", oldPod.Namespace, oldPod.Name, err) + return } + services = services.Union(oldServices) + } + for key := range services { + e.queue.Add(key) + } +} - subsets := []api.EndpointSubset{} - for i := range pods.Items { - pod := &pods.Items[i] +// When a pod is deleted, enqueue the services the pod used to be a member of. +// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item. +func (e *EndpointController) deletePod(obj interface{}) { + if _, ok := obj.(*api.Pod); ok { + // Enqueue all the services that the pod used to be a member + // of. This happens to be exactly the same thing we do when a + // pod is added. 
+ e.addPod(obj) + return + } + podKey, err := keyFunc(obj) + if err != nil { + glog.Errorf("Couldn't get key for object %+v: %v", obj, err) + } + glog.Infof("Pod %q was deleted but we don't have a record of its final state, so it will take up to %v before it will be removed from all endpoint records.", podKey, FullServiceResyncPeriod) - for i := range service.Spec.Ports { - servicePort := &service.Spec.Ports[i] + // TODO: keep a map of pods to services to handle this condition. +} - // TODO: Once v1beta1 and v1beta2 are EOL'ed, - // this can safely assume that TargetPort is - // populated, and findPort() can be removed. - _ = v1beta1.Dependency - _ = v1beta2.Dependency +// obj could be an *api.Service, or a DeletionFinalStateUnknown marker item. +func (e *EndpointController) enqueueService(obj interface{}) { + key, err := keyFunc(obj) + if err != nil { + glog.Errorf("Couldn't get key for object %+v: %v", obj, err) + } - portName := servicePort.Name - portProto := servicePort.Protocol - portNum, err := findPort(pod, servicePort) - if err != nil { - glog.Errorf("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err) - continue - } - if len(pod.Status.PodIP) == 0 { - glog.Errorf("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name) - continue - } + e.queue.Add(key) +} - inService := false - for _, c := range pod.Status.Conditions { - if c.Type == api.PodReady && c.Status == api.ConditionTrue { - inService = true - break - } - } - if !inService { - glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name) - continue - } - - epp := api.EndpointPort{Name: portName, Port: portNum, Protocol: portProto} - epa := api.EndpointAddress{IP: pod.Status.PodIP, TargetRef: &api.ObjectReference{ - Kind: "Pod", - Namespace: pod.ObjectMeta.Namespace, - Name: pod.ObjectMeta.Name, - UID: pod.ObjectMeta.UID, - ResourceVersion: pod.ObjectMeta.ResourceVersion, - }} - subsets = append(subsets, api.EndpointSubset{Addresses: 
[]api.EndpointAddress{epa}, Ports: []api.EndpointPort{epp}}) +// worker runs a worker thread that just dequeues items, processes them, and +// marks them done. You may run as many of these in parallel as you wish; the +// workqueue guarantees that they will not end up processing the same service +// at the same time. +func (e *EndpointController) worker() { + for { + func() { + key, quit := e.queue.Get() + if quit { + return } - } - subsets = endpoints.RepackSubsets(subsets) + // Use defer: in the unlikely event that there's a + // panic, we'd still like this to get marked done-- + // otherwise the controller will not be able to sync + // this service again until it is restarted. + defer e.queue.Done(key) + e.syncService(key.(string)) + }() + } +} - // See if there's actually an update here. - currentEndpoints, err := e.client.Endpoints(service.Namespace).Get(service.Name) +func (e *EndpointController) syncService(key string) { + startTime := time.Now() + defer func() { + glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime)) + }() + obj, exists, err := e.serviceStore.Store.GetByKey(key) + if err != nil || !exists { + // Delete the corresponding endpoint, as the service has been deleted. + // TODO: Please note that this will delete an endpoint when a + // service is deleted. However, if we're down at the time when + // the service is deleted, we will miss that deletion, so this + // doesn't completely solve the problem. See #6877. + namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - if errors.IsNotFound(err) { - currentEndpoints = &api.Endpoints{ - ObjectMeta: api.ObjectMeta{ - Name: service.Name, - Labels: service.Labels, - }, - } - } else { - glog.Errorf("Error getting endpoints: %v", err) + glog.Errorf("Need to delete endpoint with key %q, but couldn't understand the key: %v", key, err) + // Don't retry, as the key isn't going to magically become understandable. 
+ return + } + err = e.client.Endpoints(namespace).Delete(name) + if err != nil && !errors.IsNotFound(err) { + glog.Errorf("Error deleting endpoint %q: %v", key, err) + e.queue.Add(key) // Retry + } + return + } + + service := obj.(*api.Service) + if service.Spec.Selector == nil { + // services without a selector receive no endpoints from this controller; + // these services will receive the endpoints that are created out-of-band via the REST API. + return + } + + glog.V(5).Infof("About to update endpoints for service %q", key) + pods, err := e.podStore.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelector()) + if err != nil { + // Since we're getting stuff from a local cache, it is + // basically impossible to get this error. + glog.Errorf("Error syncing service %q: %v", key, err) + e.queue.Add(key) // Retry + return + } + + subsets := []api.EndpointSubset{} + for i := range pods.Items { + pod := &pods.Items[i] + + for i := range service.Spec.Ports { + servicePort := &service.Spec.Ports[i] + + // TODO: Once v1beta1 and v1beta2 are EOL'ed, + // this can safely assume that TargetPort is + // populated, and findPort() can be removed. 
+ _ = v1beta1.Dependency + _ = v1beta2.Dependency + + portName := servicePort.Name + portProto := servicePort.Protocol + portNum, err := findPort(pod, servicePort) + if err != nil { + glog.Errorf("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err) + continue + } + if len(pod.Status.PodIP) == 0 { + glog.Errorf("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name) continue } - } - if reflect.DeepEqual(currentEndpoints.Subsets, subsets) && reflect.DeepEqual(currentEndpoints.Labels, service.Labels) { - glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name) - continue - } - newEndpoints := currentEndpoints - newEndpoints.Subsets = subsets - newEndpoints.Labels = service.Labels - if len(currentEndpoints.ResourceVersion) == 0 { - // No previous endpoints, create them - _, err = e.client.Endpoints(service.Namespace).Create(newEndpoints) - } else { - // Pre-existing - _, err = e.client.Endpoints(service.Namespace).Update(newEndpoints) - } - if err != nil { - glog.Errorf("Error updating endpoints: %v", err) - continue + if !api.IsPodReady(pod) { + glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name) + continue + } + + epp := api.EndpointPort{Name: portName, Port: portNum, Protocol: portProto} + epa := api.EndpointAddress{IP: pod.Status.PodIP, TargetRef: &api.ObjectReference{ + Kind: "Pod", + Namespace: pod.ObjectMeta.Namespace, + Name: pod.ObjectMeta.Name, + UID: pod.ObjectMeta.UID, + ResourceVersion: pod.ObjectMeta.ResourceVersion, + }} + subsets = append(subsets, api.EndpointSubset{Addresses: []api.EndpointAddress{epa}, Ports: []api.EndpointPort{epp}}) } } - return resultErr + subsets = endpoints.RepackSubsets(subsets) + + // See if there's actually an update here. 
+ currentEndpoints, err := e.client.Endpoints(service.Namespace).Get(service.Name) + if err != nil { + if errors.IsNotFound(err) { + currentEndpoints = &api.Endpoints{ + ObjectMeta: api.ObjectMeta{ + Name: service.Name, + Labels: service.Labels, + }, + } + } else { + glog.Errorf("Error getting endpoints: %v", err) + e.queue.Add(key) // Retry + return + } + } + if reflect.DeepEqual(currentEndpoints.Subsets, subsets) && reflect.DeepEqual(currentEndpoints.Labels, service.Labels) { + glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name) + return + } + newEndpoints := currentEndpoints + newEndpoints.Subsets = subsets + newEndpoints.Labels = service.Labels + + if len(currentEndpoints.ResourceVersion) == 0 { + // No previous endpoints, create them + _, err = e.client.Endpoints(service.Namespace).Create(newEndpoints) + } else { + // Pre-existing + _, err = e.client.Endpoints(service.Namespace).Update(newEndpoints) + } + if err != nil { + glog.Errorf("Error updating endpoints: %v", err) + e.queue.Add(key) // Retry + } } func findDefaultPort(pod *api.Pod, servicePort int, proto api.Protocol) int { diff --git a/pkg/service/endpoints_controller_test.go b/pkg/service/endpoints_controller_test.go index 0794b1b56e5..ade1ce34c79 100644 --- a/pkg/service/endpoints_controller_test.go +++ b/pkg/service/endpoints_controller_test.go @@ -27,16 +27,20 @@ import ( _ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" ) -func newPodList(nPods int, nPorts int) *api.PodList { - pods := []api.Pod{} +func addPods(store cache.Store, namespace string, nPods int, nPorts int) { for i := 0; i < nPods; i++ { - p := api.Pod{ - TypeMeta: api.TypeMeta{APIVersion: 
testapi.Version()}, - ObjectMeta: api.ObjectMeta{Name: fmt.Sprintf("pod%d", i)}, + p := &api.Pod{ + TypeMeta: api.TypeMeta{APIVersion: testapi.Version()}, + ObjectMeta: api.ObjectMeta{ + Namespace: namespace, + Name: fmt.Sprintf("pod%d", i), + Labels: map[string]string{"foo": "bar"}, + }, Spec: api.PodSpec{ Containers: []api.Container{{Ports: []api.ContainerPort{}}}, }, @@ -54,11 +58,7 @@ func newPodList(nPods int, nPorts int) *api.PodList { p.Spec.Containers[0].Ports = append(p.Spec.Containers[0].Ports, api.ContainerPort{Name: fmt.Sprintf("port%d", i), ContainerPort: 8080 + j}) } - pods = append(pods, p) - } - return &api.PodList{ - TypeMeta: api.TypeMeta{APIVersion: testapi.Version(), Kind: "PodList"}, - Items: pods, + store.Add(p) } } @@ -222,22 +222,12 @@ type serverResponse struct { obj interface{} } -func makeTestServer(t *testing.T, namespace string, podResponse, serviceResponse, endpointsResponse serverResponse) (*httptest.Server, *util.FakeHandler) { - fakePodHandler := util.FakeHandler{ - StatusCode: podResponse.statusCode, - ResponseBody: runtime.EncodeOrDie(testapi.Codec(), podResponse.obj.(runtime.Object)), - } - fakeServiceHandler := util.FakeHandler{ - StatusCode: serviceResponse.statusCode, - ResponseBody: runtime.EncodeOrDie(testapi.Codec(), serviceResponse.obj.(runtime.Object)), - } +func makeTestServer(t *testing.T, namespace string, endpointsResponse serverResponse) (*httptest.Server, *util.FakeHandler) { fakeEndpointsHandler := util.FakeHandler{ StatusCode: endpointsResponse.statusCode, ResponseBody: runtime.EncodeOrDie(testapi.Codec(), endpointsResponse.obj.(runtime.Object)), } mux := http.NewServeMux() - mux.Handle(testapi.ResourcePath("pods", namespace, ""), &fakePodHandler) - mux.Handle(testapi.ResourcePath("services", "", ""), &fakeServiceHandler) mux.Handle(testapi.ResourcePath("endpoints", namespace, ""), &fakeEndpointsHandler) mux.Handle(testapi.ResourcePath("endpoints/", namespace, ""), &fakeEndpointsHandler) mux.HandleFunc("/", 
func(res http.ResponseWriter, req *http.Request) { @@ -247,47 +237,13 @@ func makeTestServer(t *testing.T, namespace string, podResponse, serviceResponse return httptest.NewServer(mux), &fakeEndpointsHandler } -func TestSyncEndpointsEmpty(t *testing.T) { - testServer, _ := makeTestServer(t, api.NamespaceDefault, - serverResponse{http.StatusOK, newPodList(0, 0)}, - serverResponse{http.StatusOK, &api.ServiceList{}}, - serverResponse{http.StatusOK, &api.Endpoints{}}) - defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) - endpoints := NewEndpointController(client) - if err := endpoints.SyncServiceEndpoints(); err != nil { - t.Errorf("unexpected error: %v", err) - } -} - -func TestSyncEndpointsError(t *testing.T) { - testServer, _ := makeTestServer(t, api.NamespaceDefault, - serverResponse{http.StatusOK, newPodList(0, 0)}, - serverResponse{http.StatusInternalServerError, &api.ServiceList{}}, - serverResponse{http.StatusOK, &api.Endpoints{}}) - defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) - endpoints := NewEndpointController(client) - if err := endpoints.SyncServiceEndpoints(); err == nil { - t.Errorf("unexpected non-error") - } -} - func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { - serviceList := api.ServiceList{ - Items: []api.Service{ - { - ObjectMeta: api.ObjectMeta{Name: "foo"}, - Spec: api.ServiceSpec{Ports: []api.ServicePort{{Port: 80}}}, - }, - }, - } - testServer, endpointsHandler := makeTestServer(t, api.NamespaceDefault, - serverResponse{http.StatusOK, newPodList(0, 0)}, - serverResponse{http.StatusOK, &serviceList}, + ns := api.NamespaceDefault + testServer, endpointsHandler := makeTestServer(t, ns, serverResponse{http.StatusOK, &api.Endpoints{ ObjectMeta: api.ObjectMeta{ Name: "foo", + Namespace: ns, ResourceVersion: "1", }, Subsets: []api.EndpointSubset{{ @@ -298,30 +254,21 @@ func 
TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { defer testServer.Close() client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) endpoints := NewEndpointController(client) - if err := endpoints.SyncServiceEndpoints(); err != nil { - t.Errorf("unexpected error: %v", err) - } + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{Ports: []api.ServicePort{{Port: 80}}}, + }) + endpoints.syncService(ns + "/foo") endpointsHandler.ValidateRequestCount(t, 0) } func TestSyncEndpointsProtocolTCP(t *testing.T) { - serviceList := api.ServiceList{ - Items: []api.Service{ - { - ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "other"}, - Spec: api.ServiceSpec{ - Selector: map[string]string{}, - Ports: []api.ServicePort{{Port: 80}}, - }, - }, - }, - } - testServer, endpointsHandler := makeTestServer(t, "other", - serverResponse{http.StatusOK, newPodList(0, 0)}, - serverResponse{http.StatusOK, &serviceList}, + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns, serverResponse{http.StatusOK, &api.Endpoints{ ObjectMeta: api.ObjectMeta{ Name: "foo", + Namespace: ns, ResourceVersion: "1", }, Subsets: []api.EndpointSubset{{ @@ -332,30 +279,24 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) { defer testServer.Close() client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) endpoints := NewEndpointController(client) - if err := endpoints.SyncServiceEndpoints(); err != nil { - t.Errorf("unexpected error: %v", err) - } + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{ + Selector: map[string]string{}, + Ports: []api.ServicePort{{Port: 80}}, + }, + }) + endpoints.syncService(ns + "/foo") endpointsHandler.ValidateRequestCount(t, 0) } func TestSyncEndpointsProtocolUDP(t *testing.T) { - serviceList := api.ServiceList{ - Items: 
[]api.Service{ - { - ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "other"}, - Spec: api.ServiceSpec{ - Selector: map[string]string{}, - Ports: []api.ServicePort{{Port: 80}}, - }, - }, - }, - } - testServer, endpointsHandler := makeTestServer(t, "other", - serverResponse{http.StatusOK, newPodList(0, 0)}, - serverResponse{http.StatusOK, &serviceList}, + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns, serverResponse{http.StatusOK, &api.Endpoints{ ObjectMeta: api.ObjectMeta{ Name: "foo", + Namespace: ns, ResourceVersion: "1", }, Subsets: []api.EndpointSubset{{ @@ -366,30 +307,24 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) { defer testServer.Close() client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) endpoints := NewEndpointController(client) - if err := endpoints.SyncServiceEndpoints(); err != nil { - t.Errorf("unexpected error: %v", err) - } + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{ + Selector: map[string]string{}, + Ports: []api.ServicePort{{Port: 80}}, + }, + }) + endpoints.syncService(ns + "/foo") endpointsHandler.ValidateRequestCount(t, 0) } func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) { - serviceList := api.ServiceList{ - Items: []api.Service{ - { - ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "other"}, - Spec: api.ServiceSpec{ - Selector: map[string]string{}, - Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}}, - }, - }, - }, - } - testServer, endpointsHandler := makeTestServer(t, "other", - serverResponse{http.StatusOK, newPodList(1, 1)}, - serverResponse{http.StatusOK, &serviceList}, + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns, serverResponse{http.StatusOK, &api.Endpoints{ ObjectMeta: api.ObjectMeta{ Name: "foo", + Namespace: ns, ResourceVersion: "1", }, Subsets: []api.EndpointSubset{}, @@ -397,40 
+332,36 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) { defer testServer.Close() client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) endpoints := NewEndpointController(client) - if err := endpoints.SyncServiceEndpoints(); err != nil { - t.Errorf("unexpected error: %v", err) - } + addPods(endpoints.podStore.Store, ns, 1, 1) + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{ + Selector: map[string]string{}, + Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}}, + }, + }) + endpoints.syncService(ns + "/foo") data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{ ObjectMeta: api.ObjectMeta{ Name: "foo", + Namespace: ns, ResourceVersion: "1", }, Subsets: []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0"}}}, + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, }}, }) - endpointsHandler.ValidateRequest(t, testapi.ResourcePathWithQueryParams("endpoints", "other", "foo"), "PUT", &data) + endpointsHandler.ValidateRequest(t, testapi.ResourcePathWithQueryParams("endpoints", ns, "foo"), "PUT", &data) } func TestSyncEndpointsItemsPreexisting(t *testing.T) { - serviceList := api.ServiceList{ - Items: []api.Service{ - { - ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}, - Spec: api.ServiceSpec{ - Selector: map[string]string{"foo": "bar"}, - Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}}, - }, - }, - }, - } - testServer, endpointsHandler := makeTestServer(t, "bar", - serverResponse{http.StatusOK, newPodList(1, 1)}, - serverResponse{http.StatusOK, &serviceList}, + ns := "bar" + testServer, endpointsHandler := 
makeTestServer(t, ns, serverResponse{http.StatusOK, &api.Endpoints{ ObjectMeta: api.ObjectMeta{ Name: "foo", + Namespace: ns, ResourceVersion: "1", }, Subsets: []api.EndpointSubset{{ @@ -441,85 +372,83 @@ func TestSyncEndpointsItemsPreexisting(t *testing.T) { defer testServer.Close() client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) endpoints := NewEndpointController(client) - if err := endpoints.SyncServiceEndpoints(); err != nil { - t.Errorf("unexpected error: %v", err) - } + addPods(endpoints.podStore.Store, ns, 1, 1) + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"foo": "bar"}, + Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}}, + }, + }) + endpoints.syncService(ns + "/foo") data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{ ObjectMeta: api.ObjectMeta{ Name: "foo", + Namespace: ns, ResourceVersion: "1", }, Subsets: []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0"}}}, + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, }}, }) - endpointsHandler.ValidateRequest(t, testapi.ResourcePathWithQueryParams("endpoints", "bar", "foo"), "PUT", &data) + endpointsHandler.ValidateRequest(t, testapi.ResourcePathWithQueryParams("endpoints", ns, "foo"), "PUT", &data) } func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) { - serviceList := api.ServiceList{ - Items: []api.Service{ - { - ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, - Spec: api.ServiceSpec{ - Selector: map[string]string{"foo": "bar"}, - Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}}, - }, - }, - }, 
- } + ns := api.NamespaceDefault testServer, endpointsHandler := makeTestServer(t, api.NamespaceDefault, - serverResponse{http.StatusOK, newPodList(1, 1)}, - serverResponse{http.StatusOK, &serviceList}, serverResponse{http.StatusOK, &api.Endpoints{ ObjectMeta: api.ObjectMeta{ ResourceVersion: "1", + Name: "foo", + Namespace: ns, }, Subsets: []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0"}}}, + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, }}, }}) defer testServer.Close() client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) endpoints := NewEndpointController(client) - if err := endpoints.SyncServiceEndpoints(); err != nil { - t.Errorf("unexpected error: %v", err) - } + addPods(endpoints.podStore.Store, api.NamespaceDefault, 1, 1) + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"foo": "bar"}, + Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}}, + }, + }) + endpoints.syncService(ns + "/foo") endpointsHandler.ValidateRequest(t, testapi.ResourcePathWithQueryParams("endpoints", api.NamespaceDefault, "foo"), "GET", nil) } func TestSyncEndpointsItems(t *testing.T) { - serviceList := api.ServiceList{ - Items: []api.Service{ - { - ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "other"}, - Spec: api.ServiceSpec{ - Selector: map[string]string{"foo": "bar"}, - Ports: []api.ServicePort{ - {Name: "port0", Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}, - {Name: "port1", Port: 88, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8088)}, - }, - }, - }, - }, - } - testServer, endpointsHandler := 
makeTestServer(t, "other", - serverResponse{http.StatusOK, newPodList(3, 2)}, - serverResponse{http.StatusOK, &serviceList}, + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns, serverResponse{http.StatusOK, &api.Endpoints{}}) defer testServer.Close() client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) endpoints := NewEndpointController(client) - if err := endpoints.SyncServiceEndpoints(); err != nil { - t.Errorf("unexpected error: %v", err) - } + addPods(endpoints.podStore.Store, ns, 3, 2) + addPods(endpoints.podStore.Store, "blah", 5, 2) // make sure these aren't found! + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"foo": "bar"}, + Ports: []api.ServicePort{ + {Name: "port0", Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}, + {Name: "port1", Port: 88, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8088)}, + }, + }, + }) + endpoints.syncService("other/foo") expectedSubsets := []api.EndpointSubset{{ Addresses: []api.EndpointAddress{ - {IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0"}}, - {IP: "1.2.3.5", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1"}}, - {IP: "1.2.3.6", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2"}}, + {IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}, + {IP: "1.2.3.5", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}}, + {IP: "1.2.3.6", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}}, }, Ports: []api.EndpointPort{ {Name: "port0", Port: 8080, Protocol: "TCP"}, @@ -534,69 +463,38 @@ func TestSyncEndpointsItems(t *testing.T) { }) // endpointsHandler should get 2 requests - one for "GET" and the next for "POST". 
endpointsHandler.ValidateRequestCount(t, 2) - endpointsHandler.ValidateRequest(t, testapi.ResourcePathWithQueryParams("endpoints", "other", ""), "POST", &data) -} - -func TestSyncEndpointsPodError(t *testing.T) { - serviceList := api.ServiceList{ - Items: []api.Service{ - { - ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, - Spec: api.ServiceSpec{ - Selector: map[string]string{"foo": "bar"}, - Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}}, - }, - }, - }, - } - testServer, _ := makeTestServer(t, api.NamespaceDefault, - serverResponse{http.StatusInternalServerError, &api.PodList{}}, - serverResponse{http.StatusOK, &serviceList}, - serverResponse{http.StatusOK, &api.Endpoints{}}) - defer testServer.Close() - client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) - endpoints := NewEndpointController(client) - if err := endpoints.SyncServiceEndpoints(); err == nil { - t.Error("Unexpected non-error") - } + endpointsHandler.ValidateRequest(t, testapi.ResourcePathWithQueryParams("endpoints", ns, ""), "POST", &data) } func TestSyncEndpointsItemsWithLabels(t *testing.T) { - serviceList := api.ServiceList{ - Items: []api.Service{ - { - ObjectMeta: api.ObjectMeta{ - Name: "foo", - Namespace: "other", - Labels: map[string]string{ - "foo": "bar", - }, - }, - Spec: api.ServiceSpec{ - Selector: map[string]string{"foo": "bar"}, - Ports: []api.ServicePort{ - {Name: "port0", Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}, - {Name: "port1", Port: 88, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8088)}, - }, - }, - }, - }, - } - testServer, endpointsHandler := makeTestServer(t, "other", - serverResponse{http.StatusOK, newPodList(3, 2)}, - serverResponse{http.StatusOK, &serviceList}, + ns := "other" + testServer, endpointsHandler := makeTestServer(t, ns, serverResponse{http.StatusOK, &api.Endpoints{}}) defer testServer.Close() client 
:= client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) endpoints := NewEndpointController(client) - if err := endpoints.SyncServiceEndpoints(); err != nil { - t.Errorf("unexpected error: %v", err) - } + addPods(endpoints.podStore.Store, ns, 3, 2) + serviceLabels := map[string]string{"foo": "bar"} + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + Labels: serviceLabels, + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{"foo": "bar"}, + Ports: []api.ServicePort{ + {Name: "port0", Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}, + {Name: "port1", Port: 88, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8088)}, + }, + }, + }) + endpoints.syncService(ns + "/foo") expectedSubsets := []api.EndpointSubset{{ Addresses: []api.EndpointAddress{ - {IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0"}}, - {IP: "1.2.3.5", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1"}}, - {IP: "1.2.3.6", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2"}}, + {IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}, + {IP: "1.2.3.5", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}}, + {IP: "1.2.3.6", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}}, }, Ports: []api.EndpointPort{ {Name: "port0", Port: 8080, Protocol: "TCP"}, @@ -606,39 +504,22 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) { data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{ ObjectMeta: api.ObjectMeta{ ResourceVersion: "", - Labels: serviceList.Items[0].Labels, + Labels: serviceLabels, }, Subsets: endptspkg.SortSubsets(expectedSubsets), }) // endpointsHandler should get 2 requests - one for "GET" and the next for "POST". 
endpointsHandler.ValidateRequestCount(t, 2) - endpointsHandler.ValidateRequest(t, testapi.ResourcePathWithQueryParams("endpoints", "other", ""), "POST", &data) + endpointsHandler.ValidateRequest(t, testapi.ResourcePathWithQueryParams("endpoints", ns, ""), "POST", &data) } func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) { - serviceList := api.ServiceList{ - Items: []api.Service{ - { - ObjectMeta: api.ObjectMeta{ - Name: "foo", - Namespace: "bar", - Labels: map[string]string{ - "baz": "blah", - }, - }, - Spec: api.ServiceSpec{ - Selector: map[string]string{"foo": "bar"}, - Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}}, - }, - }, - }, - } - testServer, endpointsHandler := makeTestServer(t, "bar", - serverResponse{http.StatusOK, newPodList(1, 1)}, - serverResponse{http.StatusOK, &serviceList}, + ns := "bar" + testServer, endpointsHandler := makeTestServer(t, ns, serverResponse{http.StatusOK, &api.Endpoints{ ObjectMeta: api.ObjectMeta{ Name: "foo", + Namespace: ns, ResourceVersion: "1", Labels: map[string]string{ "foo": "bar", @@ -652,19 +533,31 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) { defer testServer.Close() client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()}) endpoints := NewEndpointController(client) - if err := endpoints.SyncServiceEndpoints(); err != nil { - t.Errorf("unexpected error: %v", err) - } + addPods(endpoints.podStore.Store, ns, 1, 1) + serviceLabels := map[string]string{"baz": "blah"} + endpoints.serviceStore.Store.Add(&api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: "foo", + Namespace: ns, + Labels: serviceLabels, + }, + Spec: api.ServiceSpec{ + Selector: map[string]string{"foo": "bar"}, + Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: util.NewIntOrStringFromInt(8080)}}, + }, + }) + endpoints.syncService(ns + "/foo") data := runtime.EncodeOrDie(testapi.Codec(), &api.Endpoints{ ObjectMeta: 
api.ObjectMeta{ Name: "foo", + Namespace: ns, ResourceVersion: "1", - Labels: serviceList.Items[0].Labels, + Labels: serviceLabels, }, Subsets: []api.EndpointSubset{{ - Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0"}}}, + Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, }}, }) - endpointsHandler.ValidateRequest(t, testapi.ResourcePathWithQueryParams("endpoints", "bar", "foo"), "PUT", &data) + endpointsHandler.ValidateRequest(t, testapi.ResourcePathWithQueryParams("endpoints", ns, "foo"), "PUT", &data) } diff --git a/pkg/tools/etcd_helper.go b/pkg/tools/etcd_helper.go index c1911bf129b..2da0bcc9f13 100644 --- a/pkg/tools/etcd_helper.go +++ b/pkg/tools/etcd_helper.go @@ -160,7 +160,10 @@ func (h *EtcdHelper) ExtractObjToList(key string, listObj runtime.Object) error } response, err := h.Client.Get(key, false, false) - if err != nil && !IsEtcdNotFound(err) { + if err != nil { + if IsEtcdNotFound(err) { + return nil + } return err } diff --git a/pkg/tools/etcd_helper_watch.go b/pkg/tools/etcd_helper_watch.go index f7081134941..0bc6b818792 100644 --- a/pkg/tools/etcd_helper_watch.go +++ b/pkg/tools/etcd_helper_watch.go @@ -79,8 +79,10 @@ func (h *EtcdHelper) WatchList(key string, resourceVersion uint64, filter Filter // Watch begins watching the specified key. Events are decoded into // API objects and sent down the returned watch.Interface. // Errors will be sent down the channel. 
-func (h *EtcdHelper) Watch(key string, resourceVersion uint64) watch.Interface { - return h.WatchAndTransform(key, resourceVersion, nil) +func (h *EtcdHelper) Watch(key string, resourceVersion uint64, filter FilterFunc) (watch.Interface, error) { + w := newEtcdWatcher(false, nil, filter, h.Codec, h.Versioner, nil) + go w.etcdWatch(h.Client, key, resourceVersion) + return w, nil } // WatchAndTransform begins watching the specified key. Events are decoded into diff --git a/pkg/tools/etcd_helper_watch_test.go b/pkg/tools/etcd_helper_watch_test.go index ebc2f59569b..406b81404cc 100644 --- a/pkg/tools/etcd_helper_watch_test.go +++ b/pkg/tools/etcd_helper_watch_test.go @@ -207,7 +207,13 @@ func TestWatchEtcdError(t *testing.T) { fakeClient.WatchImmediateError = fmt.Errorf("immediate error") h := EtcdHelper{fakeClient, codec, versioner} - got := <-h.Watch("/some/key", 4).ResultChan() + watching, err := h.Watch("/some/key", 4, Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + defer watching.Stop() + + got := <-watching.ResultChan() if got.Type != watch.Error { t.Fatalf("Unexpected non-error") } @@ -229,7 +235,10 @@ func TestWatch(t *testing.T) { fakeClient.expectNotFoundGetSet["/some/key"] = struct{}{} h := EtcdHelper{fakeClient, codec, versioner} - watching := h.Watch("/some/key", 0) + watching, err := h.Watch("/some/key", 0, Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } fakeClient.WaitForWatchCompletion() // when server returns not found, the watch index starts at the next value (1) @@ -398,7 +407,11 @@ func TestWatchEtcdState(t *testing.T) { fakeClient.Data[key] = value } h := EtcdHelper{fakeClient, codec, versioner} - watching := h.Watch("/somekey/foo", testCase.From) + watching, err := h.Watch("/somekey/foo", testCase.From, Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + fakeClient.WaitForWatchCompletion() t.Logf("Testing %v", k) @@ -466,7 +479,10 @@ func TestWatchFromZeroIndex(t 
*testing.T) { fakeClient.Data["/some/key"] = testCase.Response h := EtcdHelper{fakeClient, codec, versioner} - watching := h.Watch("/some/key", 0) + watching, err := h.Watch("/some/key", 0, Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } fakeClient.WaitForWatchCompletion() if e, a := testCase.Response.R.EtcdIndex+1, fakeClient.WatchIndex; e != a { @@ -612,7 +628,10 @@ func TestWatchFromNotFound(t *testing.T) { } h := EtcdHelper{fakeClient, codec, versioner} - watching := h.Watch("/some/key", 0) + watching, err := h.Watch("/some/key", 0, Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } fakeClient.WaitForWatchCompletion() if fakeClient.WatchIndex != 3 { @@ -635,7 +654,10 @@ func TestWatchFromOtherError(t *testing.T) { } h := EtcdHelper{fakeClient, codec, versioner} - watching := h.Watch("/some/key", 0) + watching, err := h.Watch("/some/key", 0, Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } errEvent := <-watching.ResultChan() if e, a := watch.Error, errEvent.Type; e != a { @@ -665,7 +687,11 @@ func TestWatchPurposefulShutdown(t *testing.T) { fakeClient.expectNotFoundGetSet["/some/key"] = struct{}{} // Test purposeful shutdown - watching := h.Watch("/some/key", 0) + watching, err := h.Watch("/some/key", 0, Everything) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + fakeClient.WaitForWatchCompletion() watching.Stop() diff --git a/pkg/ui/datafile.go b/pkg/ui/datafile.go index 5c77e6c7d01..bfb4d4b9b57 100644 --- a/pkg/ui/datafile.go +++ b/pkg/ui/datafile.go @@ -59,1437 +59,17711 @@ func (fi bindata_file_info) Sys() interface{} { return nil } -var _www_box_ng = []byte(` -
-
- {{ groupName }} - -
-
-
-
-
- {{ item.id }} - {{ item.id }} - {{ item.id }} - {{item.id}} -
-
-
-
-
- - - - - -
-
- {{ item.id }} - {{ item.id }} - {{ item.id }} - {{item.id}} -
-
-
- {{key}} : - {{value}} -
-
-
-
-
-
-
-
-box -list -reset -
-
+var _www_app_assets_css_app_css = []byte(`.nav-back { + width: 80px; + font-size: 14px; + padding-left: 14px; + line-height: 15px; +} +.nav-back { + /* :before */ + /* content: ""; */ + background: url('assets/img/icons/arrow-back.png'); + background-size: 14px 14px; + background-repeat: no-repeat; + display: block; +} +a { + text-decoration: none; +} +.main-fab { + position: absolute; + z-index: 20; + font-size: 30px; + top: 100px; + left: 24px; + transform: scale(0.88, 0.88); +} +.md-breadcrumb { + padding-left: 16px; +} +.md-table { + min-width: 100%; + border-collapse: collapse; +} +.md-table tbody tr:hover, +.md-table tbody tr:focus { + cursor: pointer; + background-color: rgba(63, 81, 181, 0.2); +} +.md-table-header { + border-bottom: 1px solid #e6e6e6; + color: #828282; + text-align: left; + font-size: 0.75em; + font-weight: 700; + padding: 16px 16px 16px 0; +} +.md-table-header a { + text-decoration: none; + color: inherit; +} +.md-table-caret { + display: inline-block; + vertical-align: middle; +} +.md-table-content { + font-size: 0.8em; + padding: 16px 16px 16px 0; + height: 72px; +} +.md-table-td-more { + max-width: 72px; + width: 72px; + padding: 16px; +} +.md-table-thumbs { + max-width: 104px; + width: 104px; + padding: 16px 32px; +} +.md-table-thumbs div { + overflow: hidden; + width: 40px; + height: 40px; + border-radius: 20px; + border: 1px solid rgba(0, 0, 0, 0.2); + background-size: cover; + box-shadow: 0 8px 10px rgba(0, 0, 0, 0.3); + -webkit-box-shadow: 0 8px 10px rgba(0, 0, 0, 0.1); +} +.md-table-footer { + height: 40px; +} +.md-table-count-info { + line-height: 40px; + font-size: .75em; +} +.md-table-footer-item { + width: 40px; + height: 40px; + vertical-align: middle; +} +.md-table-active-page { + font-weight: 700; +} +.bold { + font-weight: 700; +} +.grey, +.gray { + color: #888888; +} +md-input-container.md-default-theme .md-input { + color: white; + border-color: white; + margin-top: 24px; +} +.dashboard-subnav { + font-size: 0.9em; + 
min-height: 38px; + max-height: 38px; + background-color: #09c1d1 !important; +} +.dashboard-subnav md-select.md-default-theme:focus .md-select-label { + border-bottom: none; + color: white; +} +.selectSubPages p { + text-align: center; + color: #fff; +} +.selectSubPages .md-default-theme .md-select-label.md-placeholder { + color: #fff; +} +.selectSubPages .md-select-label { + padding-top: 0px; + font-size: 1em; + line-height: 1em; + border-bottom: none; + padding-bottom: 0px; +} +.selectSubPages md-select { + margin-top: 10px; + margin-right: 80px; + padding: 0px; +} +md-select-menu { + max-height: none; +} +.md-toolbar-tools { + padding-left: 8px; + padding-right: 8px; +} +.md-toolbar-small { + height: 38px; + min-height: 38px; +} +.md-toolbar-tools-small { + background-color: #09c1d1; +} +/* Begin kubernetes-ui Menu */ +.kubernetes-ui-menu, +.kubernetes-ui-menu ul { + list-style: none; + padding: 0; +} +.kubernetes-ui-menu li { + margin: 0; +} +.kubernetes-ui-menu > li { + border-top: 1px solid rgba(0, 0, 0, 0.12); +} +.kubernetes-ui-menu .md-button { + border-radius: 0; + color: inherit; + cursor: pointer; + font-weight: 400; + line-height: 40px; + margin: 0; + max-height: 40px; + overflow: hidden; + padding: 0px 16px; + text-align: left; + text-decoration: none; + white-space: normal; + width: 100%; +} +.kubernetes-ui-menu a.md-button { + display: block; +} +.kubernetes-ui-menu button.md-button::-moz-focus-inner { + padding: 0; +} +.kubernetes-ui-menu .md-button.active { + color: #03a9f4; +} +.menu-heading { + color: #888; + display: block; + font-size: inherit; + font-weight: 500; + line-height: 40px; + margin: 0; + padding: 0px 16px; + text-align: left; + width: 100%; +} +.kubernetes-ui-menu li.parentActive, +.kubernetes-ui-menu li.parentActive .menu-toggle-list { + background-color: #f6f6f6; +} +.menu-toggle-list { + background: #fff; + max-height: 999px; + overflow: hidden; + position: relative; + z-index: 1; + -webkit-transition: 0.75s cubic-bezier(0.35, 
0, 0.25, 1); + -webkit-transition-property: max-height; + -moz-transition: 0.75s cubic-bezier(0.35, 0, 0.25, 1); + -moz-transition-property: max-height; + transition: 0.75s cubic-bezier(0.35, 0, 0.25, 1); + transition-property: max-height; +} +.menu-toggle-list.ng-hide { + max-height: 0; +} +.kubernetes-ui-menu .menu-toggle-list a.md-button { + display: block; + padding: 0 16px 0 32px; + text-transform: none; +} +.md-button-toggle .md-toggle-icon { + background: transparent url(assets/img/icons/list_control_down.png) no-repeat center center; + background-size: 100% auto; + display: inline-block; + height: 24px; + margin: auto 0 auto auto; + speak: none; + width: 24px; + transition: transform 0.3s ease-in-out; + -webkit-transition: -webkit-transform 0.3s ease-in-out; +} +.md-button-toggle .md-toggle-icon.toggled { + transform: rotate(180deg); + -webkit-transform: rotate(180deg); +} +/* End kubernetes-ui Menu */ +.menu-icon { + background: none; + border: none; + margin-right: 16px; + padding: 0; +} +.whiteframedemoBasicUsage md-whiteframe { + background: #fff; + margin: 2px; + padding: 2px; +} +.tabsDefaultTabs { + height: 100%; + width: 100%; + /* + * Animation styles + */ +} +.tabsDefaultTabs .remove-tab { + margin-bottom: 40px; +} +.tabsDefaultTabs .home-buttons .md-button { + display: block; + max-height: 30px; +} +.tabsDefaultTabs .home-buttons .md-button.add-tab { + margin-top: 20px; + max-height: 30px !important; +} +.tabsDefaultTabs .demo-tab { + display: block; + position: relative; + background: white; + border: 0px solid black; + min-height: 0px; + width: 100%; +} +.tabsDefaultTabs .tab0, +.tabsDefaultTabs .tab1, +.tabsDefaultTabs .tab2, +.tabsDefaultTabs .tab3 { + background-color: #bbdefb; +} +.tabsDefaultTabs .md-header { + background-color: #1976D2 !important; +} +.tabsDefaultTabs md-tab { + color: #90caf9 !important; +} +.tabsDefaultTabs md-tab.active, +.tabsDefaultTabs md-tab:focus { + color: white !important; +} +.tabsDefaultTabs md-tab[disabled] { 
+ opacity: 0.5; +} +.tabsDefaultTabs .md-header .md-ripple { + border-color: #FFFF8D !important; +} +.tabsDefaultTabs md-tabs-ink-bar { + background-color: #FFFF8D !important; +} +.tabsDefaultTabs .title { + padding-top: 8px; + padding-right: 8px; + text-align: left; + text-transform: uppercase; + color: #888; + margin-top: 24px; +} +.tabsDefaultTabs [layout-align] > * { + margin-left: 8px; +} +.tabsDefaultTabs form > [layout] > * { + margin-left: 8px; +} +.tabsDefaultTabs .long > input { + width: 264px; +} +.menuBtn { + background-color: transparent; + border: none; + height: 38px; + margin: 16px; + position: absolute; + width: 36px; +} +md-toolbar h1 { + font-size: 1.250em; + font-weight: 400; + margin: auto; +} +md-list .md-button { + color: inherit; + font-weight: 500; + text-align: left; + width: 100%; +} +.visuallyhidden { + border: 0; + clip: rect(0 0 0 0); + height: 1px; + margin: -1px; + overflow: hidden; + padding: 0; + position: absolute; + width: 1px; +} +md-list .md-button { + color: inherit; + font-weight: 500; + text-align: left; + width: 100%; +} +md-list .md-button.selected { + color: #03a9f4; +} +#content { + overflow: hidden; +} +#content md-content { + padding-left: 0px; + padding-right: 0px; + padding-top: 0px; +} +#content .md-button.action { + background-color: transparent; + border: none; + height: 38px; + margin: 8px auto 16px 0; + position: absolute; + top: 10px; + right: 25px; + width: 36px; +} +#content img { + display: block; + height: auto; + max-width: 500px; +} +.content-wrapper { + position: relative; +} +.visuallyhidden { + border: 0; + clip: rect(0 0 0 0); + height: 1px; + margin: -1px; + overflow: hidden; + padding: 0; + position: absolute; + width: 1px; +} +md-toolbar h1 { + font-size: 1.250em; + font-weight: 400; +} +.menuBtn { + background: 
url(data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHhtbG5zOnhsaW5rPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5L3hsaW5rIiB2ZXJzaW9uPSIxLjEiIHg9IjBweCIgeT0iMHB4IiB3aWR0aD0iMjRweCIgaGVpZ2h0PSIyNHB4IiB2aWV3Qm94PSIwIDAgMjQgMjQiIGVuYWJsZS1iYWNrZ3JvdW5kPSJuZXcgMCAwIDI0IDI0IiB4bWw6c3BhY2U9InByZXNlcnZlIj4KPGcgaWQ9IkhlYWRlciI+CiAgICA8Zz4KICAgICAgICA8cmVjdCB4PSItNjE4IiB5PSItMjIzMiIgZmlsbD0ibm9uZSIgd2lkdGg9IjE0MDAiIGhlaWdodD0iMzYwMCIvPgogICAgPC9nPgo8L2c+CjxnIGlkPSJMYWJlbCI+CjwvZz4KPGcgaWQ9Ikljb24iPgogICAgPGc+CiAgICAgICAgPHJlY3QgZmlsbD0ibm9uZSIgd2lkdGg9IjI0IiBoZWlnaHQ9IjI0Ii8+CiAgICAgICAgPHBhdGggZD0iTTMsMThoMTh2LTJIM1YxOHogTTMsMTNoMTh2LTJIM1YxM3ogTTMsNnYyaDE4VjZIM3oiIHN0eWxlPSJmaWxsOiNmM2YzZjM7Ii8+CiAgICA8L2c+CjwvZz4KPGcgaWQ9IkdyaWQiIGRpc3BsYXk9Im5vbmUiPgogICAgPGcgZGlzcGxheT0iaW5saW5lIj4KICAgIDwvZz4KPC9nPgo8L3N2Zz4=) no-repeat center center; +} +.actionBtn { + background: url(data:image/svg+xml;charset=utf-8;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIzNiIgaGVpZ2h0PSIzNiIgdmlld0JveD0iMCAwIDM2IDM2Ij4NCiAgICA8cGF0aCBkPSJNMCAwaDM2djM2aC0zNnoiIGZpbGw9Im5vbmUiLz4NCiAgICA8cGF0aCBkPSJNNCAyN2gyOHYtM2gtMjh2M3ptMC04aDI4di0zaC0yOHYzem0wLTExdjNoMjh2LTNoLTI4eiIvPg0KPC9zdmc+) no-repeat center center; +} +.kubernetes-ui-logo { + background-image: url("../img/kubernetes.svg"); + background-size: 40px 40px; + width: 40px; + height: 40px; +} +.kubernetes-ui-text { + line-height: 40px; + vertical-align: middle; + padding: 2px; +} +.dashboard .body-wrapper { + padding: 25px; +} +.dashboard [flex-align-self="end"] { + -webkit-align-self: flex-end; + -ms-flex-align-self: end; + align-self: flex-end; +} +.dashboard .back { + font-size: 18px; + line-height: 27px; + margin-bottom: 30px; +} +.dashboard .heading { + font-size: 18px; + line-height: 21px; + color: #222222; + margin-bottom: 25px; +} +.dashboard .heading .label { + color: #777777; +} +.dashboard .clear-bg { + background-color: transparent; +} +.dashboard .list-pods .pod-group { + margin: 25px; +} 
+.dashboard .list-pods .pod-group md-grid-list { + margin-top: 50px; + color: white; +} +.dashboard .list-pods .pod-group md-grid-list figcaption { + width: 100%; +} +.dashboard .list-pods .pod-group md-grid-list md-grid-tile-header { + padding-left: 10px; +} +.dashboard .list-pods .pod-group md-grid-list md-grid-tile-header .labels { + width: 100%; +} +.dashboard .list-pods .pod-group md-grid-list md-grid-tile { + transition: all 700ms ease-in 50ms; +} +.dashboard .list-pods .pod-group md-grid-list .inner-box { + padding-left: 10px; + padding-right: 10px; +} +.dashboard .list-pods .pod-group md-grid-list md-grid-tile-footer { + background: rgba(0, 0, 0, 0.5); +} +.dashboard .list-pods .pod-group md-grid-list md-grid-tile-footer .pod-title { + margin-left: 10px; +} +.dashboard .list-pods .pod-group md-grid-list md-grid-tile-footer .pod-host { + text-align: right; + padding-right: 15px; +} +.dashboard .list-pods .pod-group md-grid-list md-grid-tile-footer a { + color: white; +} +.dashboard .list-pods .pod-group md-grid-list .restarts { + width: 100%; + text-align: right; + padding-right: 10px; +} +.dashboard .list-pods .pod-group md-grid-list .restarts .restart-button, +.dashboard .list-pods .pod-group md-grid-list .restarts .restart-button:not([disabled]):hover, +.dashboard .list-pods .pod-group md-grid-list .restarts .restart-button:not([disabled]):focus, +.dashboard .list-pods .pod-group md-grid-list .restarts .restart-button:hover, +.dashboard .list-pods .pod-group md-grid-list .restarts .restart-button:focus { + background-color: #ff1744; + width: 30px; + height: 30px; +} +.dashboard .list-pods .gray { + background: #f5f5f5; +} +.dashboard .list-pods .dark-overlay { + background-color: #292935; + opacity: 0.5; +} +.dashboard .list-pods .light-overlay { + background-color: #FFFFFF; + opacity: 0.2; +} +.dashboard .list-pods .color-1 { + background-color: #2962ff; + fill: #2962ff; + stroke: #2962ff; +} +.dashboard .list-pods md-grid-list.list-color-1 
md-grid-tile.colored { + background-color: #2962ff; +} +.dashboard .list-pods .color-2 { + background-color: #aa00ff; + fill: #aa00ff; + stroke: #aa00ff; +} +.dashboard .list-pods md-grid-list.list-color-2 md-grid-tile.colored { + background-color: #aa00ff; +} +.dashboard .list-pods .color-3 { + background-color: #00c853; + fill: #00c853; + stroke: #00c853; +} +.dashboard .list-pods md-grid-list.list-color-3 md-grid-tile.colored { + background-color: #00c853; +} +.dashboard .list-pods .color-4 { + background-color: #304ffe; + fill: #304ffe; + stroke: #304ffe; +} +.dashboard .list-pods md-grid-list.list-color-4 md-grid-tile.colored { + background-color: #304ffe; +} +.dashboard .list-pods .color-5 { + background-color: #0091ea; + fill: #0091ea; + stroke: #0091ea; +} +.dashboard .list-pods md-grid-list.list-color-5 md-grid-tile.colored { + background-color: #0091ea; +} +.dashboard .list-pods .color-6 { + background-color: #ff6d00; + fill: #ff6d00; + stroke: #ff6d00; +} +.dashboard .list-pods md-grid-list.list-color-6 md-grid-tile.colored { + background-color: #ff6d00; +} +.dashboard .list-pods .color-7 { + background-color: #00bfa5; + fill: #00bfa5; + stroke: #00bfa5; +} +.dashboard .list-pods md-grid-list.list-color-7 md-grid-tile.colored { + background-color: #00bfa5; +} +.dashboard .list-pods .color-8 { + background-color: #c51162; + fill: #c51162; + stroke: #c51162; +} +.dashboard .list-pods md-grid-list.list-color-8 md-grid-tile.colored { + background-color: #c51162; +} +.dashboard .list-pods .color-9 { + background-color: #64dd17; + fill: #64dd17; + stroke: #64dd17; +} +.dashboard .list-pods md-grid-list.list-color-9 md-grid-tile.colored { + background-color: #64dd17; +} +.dashboard .list-pods .color-10 { + background-color: #6200ea; + fill: #6200ea; + stroke: #6200ea; +} +.dashboard .list-pods md-grid-list.list-color-10 md-grid-tile.colored { + background-color: #6200ea; +} +.dashboard .list-pods .color-11 { + background-color: #ffd600; + fill: #ffd600; + 
stroke: #ffd600; +} +.dashboard .list-pods md-grid-list.list-color-11 md-grid-tile.colored { + background-color: #ffd600; +} +.dashboard .list-pods .color-12 { + background-color: #00b8d4; + fill: #00b8d4; + stroke: #00b8d4; +} +.dashboard .list-pods md-grid-list.list-color-12 md-grid-tile.colored { + background-color: #00b8d4; +} +.dashboard .list-pods .color-13 { + background-color: #ffab00; + fill: #ffab00; + stroke: #ffab00; +} +.dashboard .list-pods md-grid-list.list-color-13 md-grid-tile.colored { + background-color: #ffab00; +} +.dashboard .list-pods .color-14 { + background-color: #dd2c00; + fill: #dd2c00; + stroke: #dd2c00; +} +.dashboard .list-pods md-grid-list.list-color-14 md-grid-tile.colored { + background-color: #dd2c00; +} +.dashboard .list-pods .color-15 { + background-color: #2979ff; + fill: #2979ff; + stroke: #2979ff; +} +.dashboard .list-pods md-grid-list.list-color-15 md-grid-tile.colored { + background-color: #2979ff; +} +.dashboard .list-pods .color-16 { + background-color: #d500f9; + fill: #d500f9; + stroke: #d500f9; +} +.dashboard .list-pods md-grid-list.list-color-16 md-grid-tile.colored { + background-color: #d500f9; +} +.dashboard .list-pods .color-17 { + background-color: #00e676; + fill: #00e676; + stroke: #00e676; +} +.dashboard .list-pods md-grid-list.list-color-17 md-grid-tile.colored { + background-color: #00e676; +} +.dashboard .list-pods .color-18 { + background-color: #3d5afe; + fill: #3d5afe; + stroke: #3d5afe; +} +.dashboard .list-pods md-grid-list.list-color-18 md-grid-tile.colored { + background-color: #3d5afe; +} +.dashboard .list-pods .color-19 { + background-color: #00b0ff; + fill: #00b0ff; + stroke: #00b0ff; +} +.dashboard .list-pods md-grid-list.list-color-19 md-grid-tile.colored { + background-color: #00b0ff; +} +.dashboard .list-pods .color-20 { + background-color: #ff9100; + fill: #ff9100; + stroke: #ff9100; +} +.dashboard .list-pods md-grid-list.list-color-20 md-grid-tile.colored { + background-color: #ff9100; +} 
+.dashboard .list-pods .color-21 { + background-color: #1de9b6; + fill: #1de9b6; + stroke: #1de9b6; +} +.dashboard .list-pods md-grid-list.list-color-21 md-grid-tile.colored { + background-color: #1de9b6; +} +.dashboard .list-pods .color-22 { + background-color: #f50057; + fill: #f50057; + stroke: #f50057; +} +.dashboard .list-pods md-grid-list.list-color-22 md-grid-tile.colored { + background-color: #f50057; +} +.dashboard .list-pods .color-23 { + background-color: #76ff03; + fill: #76ff03; + stroke: #76ff03; +} +.dashboard .list-pods md-grid-list.list-color-23 md-grid-tile.colored { + background-color: #76ff03; +} +.dashboard .list-pods .color-24 { + background-color: #651fff; + fill: #651fff; + stroke: #651fff; +} +.dashboard .list-pods md-grid-list.list-color-24 md-grid-tile.colored { + background-color: #651fff; +} +.dashboard .list-pods .color-25 { + background-color: #ffea00; + fill: #ffea00; + stroke: #ffea00; +} +.dashboard .list-pods md-grid-list.list-color-25 md-grid-tile.colored { + background-color: #ffea00; +} +.dashboard .list-pods .color-26 { + background-color: #00e5ff; + fill: #00e5ff; + stroke: #00e5ff; +} +.dashboard .list-pods md-grid-list.list-color-26 md-grid-tile.colored { + background-color: #00e5ff; +} +.dashboard .list-pods .color-27 { + background-color: #ffc400; + fill: #ffc400; + stroke: #ffc400; +} +.dashboard .list-pods md-grid-list.list-color-27 md-grid-tile.colored { + background-color: #ffc400; +} +.dashboard .list-pods .color-28 { + background-color: #ff3d00; + fill: #ff3d00; + stroke: #ff3d00; +} +.dashboard .list-pods md-grid-list.list-color-28 md-grid-tile.colored { + background-color: #ff3d00; +} +.dashboard .list-pods .color-29 { + background-color: #448aff; + fill: #448aff; + stroke: #448aff; +} +.dashboard .list-pods md-grid-list.list-color-29 md-grid-tile.colored { + background-color: #448aff; +} +.dashboard .list-pods .color-30 { + background-color: #e040fb; + fill: #e040fb; + stroke: #e040fb; +} +.dashboard .list-pods 
md-grid-list.list-color-30 md-grid-tile.colored { + background-color: #e040fb; +} +.dashboard .list-pods .color-31 { + background-color: #69f0ae; + fill: #69f0ae; + stroke: #69f0ae; +} +.dashboard .list-pods md-grid-list.list-color-31 md-grid-tile.colored { + background-color: #69f0ae; +} +.dashboard .list-pods .color-32 { + background-color: #536dfe; + fill: #536dfe; + stroke: #536dfe; +} +.dashboard .list-pods md-grid-list.list-color-32 md-grid-tile.colored { + background-color: #536dfe; +} +.dashboard .list-pods .color-33 { + background-color: #40c4ff; + fill: #40c4ff; + stroke: #40c4ff; +} +.dashboard .list-pods md-grid-list.list-color-33 md-grid-tile.colored { + background-color: #40c4ff; +} +.dashboard .list-pods .color-34 { + background-color: #ffab40; + fill: #ffab40; + stroke: #ffab40; +} +.dashboard .list-pods md-grid-list.list-color-34 md-grid-tile.colored { + background-color: #ffab40; +} +.dashboard .list-pods .color-35 { + background-color: #64ffda; + fill: #64ffda; + stroke: #64ffda; +} +.dashboard .list-pods md-grid-list.list-color-35 md-grid-tile.colored { + background-color: #64ffda; +} +.dashboard .list-pods .color-36 { + background-color: #ff4081; + fill: #ff4081; + stroke: #ff4081; +} +.dashboard .list-pods md-grid-list.list-color-36 md-grid-tile.colored { + background-color: #ff4081; +} +.dashboard .list-pods .color-37 { + background-color: #b2ff59; + fill: #b2ff59; + stroke: #b2ff59; +} +.dashboard .list-pods md-grid-list.list-color-37 md-grid-tile.colored { + background-color: #b2ff59; +} +.dashboard .list-pods .color-38 { + background-color: #7c4dff; + fill: #7c4dff; + stroke: #7c4dff; +} +.dashboard .list-pods md-grid-list.list-color-38 md-grid-tile.colored { + background-color: #7c4dff; +} +.dashboard .list-pods .color-39 { + background-color: #ffff00; + fill: #ffff00; + stroke: #ffff00; +} +.dashboard .list-pods md-grid-list.list-color-39 md-grid-tile.colored { + background-color: #ffff00; +} +.dashboard .list-pods .color-40 { + 
background-color: #18ffff; + fill: #18ffff; + stroke: #18ffff; +} +.dashboard .list-pods md-grid-list.list-color-40 md-grid-tile.colored { + background-color: #18ffff; +} +.dashboard .list-pods .color-41 { + background-color: #ffd740; + fill: #ffd740; + stroke: #ffd740; +} +.dashboard .list-pods md-grid-list.list-color-41 md-grid-tile.colored { + background-color: #ffd740; +} +.dashboard .list-pods .color-42 { + background-color: #ff6e40; + fill: #ff6e40; + stroke: #ff6e40; +} +.dashboard .list-pods md-grid-list.list-color-42 md-grid-tile.colored { + background-color: #ff6e40; +} +.dashboard .list-pods .color-warning { + background-color: #ff9800 !important; + border-color: #ff9800 !important; + fill: #ff9800 !important; + stroke: #ff9800 !important; +} +.dashboard .list-pods .color-critical { + background-color: #f44336 !important; + border-color: #f44336 !important; + fill: #f44336 !important; + stroke: #f44336 !important; +} +.dashboard .list-pods .status-waiting { + background-color: #2e2e3b !important; + border-color: #dad462 !important; + border-width: 2px !important; + border-style: solid !important; +} +.dashboard .list-pods .status-terminated, +.dashboard .list-pods .status-unknown { + background-color: #ff1744 !important; + border-color: #e3002c !important; + border-width: 1px !important; + border-style: solid !important; +} +.dashboard .dash-table { + min-width: 100%; + border-collapse: collapse; +} +.dashboard .dash-table tbody tr:hover:not(.no-link), +.dashboard .dash-table tbody tr:focus:not(.no-link) { + cursor: pointer; + background-color: rgba(63, 81, 181, 0.2); +} +.dashboard .dash-table .dash-table-header { + border-bottom: 1px solid #e6e6e6; + color: #828282; + text-align: left; + font-size: 0.75em; + font-weight: 700; + padding: 16px 16px 16px 0; +} +.dashboard .dash-table .dash-table-header a { + text-decoration: none; + color: inherit; +} +.dashboard .dash-table .dash-table-caret { + display: inline-block; + vertical-align: middle; +} 
+.dashboard .dash-table .dash-table-content { + font-size: 0.8em; + padding: 16px 16px 16px 0; + height: 72px; +} +.dashboard .dash-table .dash-table-td-more { + max-width: 72px; + width: 72px; + padding: 16px; +} +.dashboard .dash-table .dash-table-thumbs { + max-width: 104px; + width: 104px; + padding: 16px 32px; +} +.dashboard .dash-table .dash-table-thumbs div { + overflow: hidden; + width: 40px; + height: 40px; + border-radius: 20px; + border: 1px solid rgba(0, 0, 0, 0.2); + background-size: cover; + box-shadow: 0 8px 10px rgba(0, 0, 0, 0.3); + -webkit-box-shadow: 0 8px 10px rgba(0, 0, 0, 0.1); +} +.dashboard .dash-table .dash-table-footer { + height: 40px; +} +.dashboard .dash-table .dash-table-count-info { + line-height: 40px; + font-size: .75em; +} +.dashboard .dash-table .dash-table-footer-item { + width: 40px; + height: 40px; + vertical-align: middle; +} +.dashboard .dash-table .dash-table-active-page { + font-weight: 700; +} +.dashboard .dash-table .bold { + font-weight: 700; +} +.dashboard .dash-table .grey { + color: grey; +} +.dashboard .dash-table md-input-container.md-default-theme .md-input { + color: white; + border-color: white; + margin-top: 24px; +} +.dashboard .server-overview .dark-overlay { + background-color: #292935; + opacity: 0.5; +} +.dashboard .server-overview .light-overlay { + background-color: #FFFFFF; + opacity: 0.2; +} +.dashboard .server-overview .color-1 { + background-color: #2962ff; + fill: #2962ff; + stroke: #2962ff; +} +.dashboard .server-overview md-grid-list.list-color-1 md-grid-tile.colored { + background-color: #2962ff; +} +.dashboard .server-overview .color-2 { + background-color: #aa00ff; + fill: #aa00ff; + stroke: #aa00ff; +} +.dashboard .server-overview md-grid-list.list-color-2 md-grid-tile.colored { + background-color: #aa00ff; +} +.dashboard .server-overview .color-3 { + background-color: #00c853; + fill: #00c853; + stroke: #00c853; +} +.dashboard .server-overview md-grid-list.list-color-3 md-grid-tile.colored { + 
background-color: #00c853; +} +.dashboard .server-overview .color-4 { + background-color: #304ffe; + fill: #304ffe; + stroke: #304ffe; +} +.dashboard .server-overview md-grid-list.list-color-4 md-grid-tile.colored { + background-color: #304ffe; +} +.dashboard .server-overview .color-5 { + background-color: #0091ea; + fill: #0091ea; + stroke: #0091ea; +} +.dashboard .server-overview md-grid-list.list-color-5 md-grid-tile.colored { + background-color: #0091ea; +} +.dashboard .server-overview .color-6 { + background-color: #ff6d00; + fill: #ff6d00; + stroke: #ff6d00; +} +.dashboard .server-overview md-grid-list.list-color-6 md-grid-tile.colored { + background-color: #ff6d00; +} +.dashboard .server-overview .color-7 { + background-color: #00bfa5; + fill: #00bfa5; + stroke: #00bfa5; +} +.dashboard .server-overview md-grid-list.list-color-7 md-grid-tile.colored { + background-color: #00bfa5; +} +.dashboard .server-overview .color-8 { + background-color: #c51162; + fill: #c51162; + stroke: #c51162; +} +.dashboard .server-overview md-grid-list.list-color-8 md-grid-tile.colored { + background-color: #c51162; +} +.dashboard .server-overview .color-9 { + background-color: #64dd17; + fill: #64dd17; + stroke: #64dd17; +} +.dashboard .server-overview md-grid-list.list-color-9 md-grid-tile.colored { + background-color: #64dd17; +} +.dashboard .server-overview .color-10 { + background-color: #6200ea; + fill: #6200ea; + stroke: #6200ea; +} +.dashboard .server-overview md-grid-list.list-color-10 md-grid-tile.colored { + background-color: #6200ea; +} +.dashboard .server-overview .color-11 { + background-color: #ffd600; + fill: #ffd600; + stroke: #ffd600; +} +.dashboard .server-overview md-grid-list.list-color-11 md-grid-tile.colored { + background-color: #ffd600; +} +.dashboard .server-overview .color-12 { + background-color: #00b8d4; + fill: #00b8d4; + stroke: #00b8d4; +} +.dashboard .server-overview md-grid-list.list-color-12 md-grid-tile.colored { + background-color: #00b8d4; +} 
+.dashboard .server-overview .color-13 { + background-color: #ffab00; + fill: #ffab00; + stroke: #ffab00; +} +.dashboard .server-overview md-grid-list.list-color-13 md-grid-tile.colored { + background-color: #ffab00; +} +.dashboard .server-overview .color-14 { + background-color: #dd2c00; + fill: #dd2c00; + stroke: #dd2c00; +} +.dashboard .server-overview md-grid-list.list-color-14 md-grid-tile.colored { + background-color: #dd2c00; +} +.dashboard .server-overview .color-15 { + background-color: #2979ff; + fill: #2979ff; + stroke: #2979ff; +} +.dashboard .server-overview md-grid-list.list-color-15 md-grid-tile.colored { + background-color: #2979ff; +} +.dashboard .server-overview .color-16 { + background-color: #d500f9; + fill: #d500f9; + stroke: #d500f9; +} +.dashboard .server-overview md-grid-list.list-color-16 md-grid-tile.colored { + background-color: #d500f9; +} +.dashboard .server-overview .color-17 { + background-color: #00e676; + fill: #00e676; + stroke: #00e676; +} +.dashboard .server-overview md-grid-list.list-color-17 md-grid-tile.colored { + background-color: #00e676; +} +.dashboard .server-overview .color-18 { + background-color: #3d5afe; + fill: #3d5afe; + stroke: #3d5afe; +} +.dashboard .server-overview md-grid-list.list-color-18 md-grid-tile.colored { + background-color: #3d5afe; +} +.dashboard .server-overview .color-19 { + background-color: #00b0ff; + fill: #00b0ff; + stroke: #00b0ff; +} +.dashboard .server-overview md-grid-list.list-color-19 md-grid-tile.colored { + background-color: #00b0ff; +} +.dashboard .server-overview .color-20 { + background-color: #ff9100; + fill: #ff9100; + stroke: #ff9100; +} +.dashboard .server-overview md-grid-list.list-color-20 md-grid-tile.colored { + background-color: #ff9100; +} +.dashboard .server-overview .color-21 { + background-color: #1de9b6; + fill: #1de9b6; + stroke: #1de9b6; +} +.dashboard .server-overview md-grid-list.list-color-21 md-grid-tile.colored { + background-color: #1de9b6; +} +.dashboard 
.server-overview .color-22 { + background-color: #f50057; + fill: #f50057; + stroke: #f50057; +} +.dashboard .server-overview md-grid-list.list-color-22 md-grid-tile.colored { + background-color: #f50057; +} +.dashboard .server-overview .color-23 { + background-color: #76ff03; + fill: #76ff03; + stroke: #76ff03; +} +.dashboard .server-overview md-grid-list.list-color-23 md-grid-tile.colored { + background-color: #76ff03; +} +.dashboard .server-overview .color-24 { + background-color: #651fff; + fill: #651fff; + stroke: #651fff; +} +.dashboard .server-overview md-grid-list.list-color-24 md-grid-tile.colored { + background-color: #651fff; +} +.dashboard .server-overview .color-25 { + background-color: #ffea00; + fill: #ffea00; + stroke: #ffea00; +} +.dashboard .server-overview md-grid-list.list-color-25 md-grid-tile.colored { + background-color: #ffea00; +} +.dashboard .server-overview .color-26 { + background-color: #00e5ff; + fill: #00e5ff; + stroke: #00e5ff; +} +.dashboard .server-overview md-grid-list.list-color-26 md-grid-tile.colored { + background-color: #00e5ff; +} +.dashboard .server-overview .color-27 { + background-color: #ffc400; + fill: #ffc400; + stroke: #ffc400; +} +.dashboard .server-overview md-grid-list.list-color-27 md-grid-tile.colored { + background-color: #ffc400; +} +.dashboard .server-overview .color-28 { + background-color: #ff3d00; + fill: #ff3d00; + stroke: #ff3d00; +} +.dashboard .server-overview md-grid-list.list-color-28 md-grid-tile.colored { + background-color: #ff3d00; +} +.dashboard .server-overview .color-29 { + background-color: #448aff; + fill: #448aff; + stroke: #448aff; +} +.dashboard .server-overview md-grid-list.list-color-29 md-grid-tile.colored { + background-color: #448aff; +} +.dashboard .server-overview .color-30 { + background-color: #e040fb; + fill: #e040fb; + stroke: #e040fb; +} +.dashboard .server-overview md-grid-list.list-color-30 md-grid-tile.colored { + background-color: #e040fb; +} +.dashboard .server-overview 
.color-31 { + background-color: #69f0ae; + fill: #69f0ae; + stroke: #69f0ae; +} +.dashboard .server-overview md-grid-list.list-color-31 md-grid-tile.colored { + background-color: #69f0ae; +} +.dashboard .server-overview .color-32 { + background-color: #536dfe; + fill: #536dfe; + stroke: #536dfe; +} +.dashboard .server-overview md-grid-list.list-color-32 md-grid-tile.colored { + background-color: #536dfe; +} +.dashboard .server-overview .color-33 { + background-color: #40c4ff; + fill: #40c4ff; + stroke: #40c4ff; +} +.dashboard .server-overview md-grid-list.list-color-33 md-grid-tile.colored { + background-color: #40c4ff; +} +.dashboard .server-overview .color-34 { + background-color: #ffab40; + fill: #ffab40; + stroke: #ffab40; +} +.dashboard .server-overview md-grid-list.list-color-34 md-grid-tile.colored { + background-color: #ffab40; +} +.dashboard .server-overview .color-35 { + background-color: #64ffda; + fill: #64ffda; + stroke: #64ffda; +} +.dashboard .server-overview md-grid-list.list-color-35 md-grid-tile.colored { + background-color: #64ffda; +} +.dashboard .server-overview .color-36 { + background-color: #ff4081; + fill: #ff4081; + stroke: #ff4081; +} +.dashboard .server-overview md-grid-list.list-color-36 md-grid-tile.colored { + background-color: #ff4081; +} +.dashboard .server-overview .color-37 { + background-color: #b2ff59; + fill: #b2ff59; + stroke: #b2ff59; +} +.dashboard .server-overview md-grid-list.list-color-37 md-grid-tile.colored { + background-color: #b2ff59; +} +.dashboard .server-overview .color-38 { + background-color: #7c4dff; + fill: #7c4dff; + stroke: #7c4dff; +} +.dashboard .server-overview md-grid-list.list-color-38 md-grid-tile.colored { + background-color: #7c4dff; +} +.dashboard .server-overview .color-39 { + background-color: #ffff00; + fill: #ffff00; + stroke: #ffff00; +} +.dashboard .server-overview md-grid-list.list-color-39 md-grid-tile.colored { + background-color: #ffff00; +} +.dashboard .server-overview .color-40 { + 
background-color: #18ffff; + fill: #18ffff; + stroke: #18ffff; +} +.dashboard .server-overview md-grid-list.list-color-40 md-grid-tile.colored { + background-color: #18ffff; +} +.dashboard .server-overview .color-41 { + background-color: #ffd740; + fill: #ffd740; + stroke: #ffd740; +} +.dashboard .server-overview md-grid-list.list-color-41 md-grid-tile.colored { + background-color: #ffd740; +} +.dashboard .server-overview .color-42 { + background-color: #ff6e40; + fill: #ff6e40; + stroke: #ff6e40; +} +.dashboard .server-overview md-grid-list.list-color-42 md-grid-tile.colored { + background-color: #ff6e40; +} +.dashboard .server-overview .color-warning { + background-color: #ff9800 !important; + border-color: #ff9800 !important; + fill: #ff9800 !important; + stroke: #ff9800 !important; +} +.dashboard .server-overview .color-critical { + background-color: #f44336 !important; + border-color: #f44336 !important; + fill: #f44336 !important; + stroke: #f44336 !important; +} +.dashboard .server-overview .status-waiting { + background-color: #2e2e3b !important; + border-color: #dad462 !important; + border-width: 2px !important; + border-style: solid !important; +} +.dashboard .server-overview .status-terminated, +.dashboard .server-overview .status-unknown { + background-color: #ff1744 !important; + border-color: #e3002c !important; + border-width: 1px !important; + border-style: solid !important; +} +.dashboard .server-overview .color-1 { + background-color: #512DA8; + border-color: #512DA8; + fill: #512DA8; + stroke: #512DA8; +} +.dashboard .server-overview .color-2 { + background-color: #9C27B0; + border-color: #9C27B0; + fill: #9C27B0; + stroke: #9C27B0; +} +.dashboard .server-overview .color-3 { + background-color: #00BCD4; + border-color: #00BCD4; + fill: #00BCD4; + stroke: #00BCD4; +} +.dashboard .server-overview .color-max-1 { + background-color: #c5b6eb; + border-color: #c5b6eb; + fill: #c5b6eb; +} +.dashboard .server-overview .color-max-2 { + background-color: 
#e6b5ee; + border-color: #e6b5ee; + fill: #e6b5ee; +} +.dashboard .server-overview .color-max-3 { + background-color: #a1f4ff; + border-color: #a1f4ff; + fill: #a1f4ff; +} +.dashboard .server-overview .color-max-warning { + background-color: #ffd699 !important; + border-color: #ffd699 !important; + fill: #ffd699 !important; +} +.dashboard .server-overview .color-max-critical { + background-color: #fccbc7 !important; + border-color: #fccbc7 !important; + fill: #fccbc7 !important; +} +.dashboard .server-overview .max_tick_arc { + stroke: #FFF !important; +} +.dashboard .server-overview .concentricchart .bg-circle { + background: #F9F9F9; + fill: #F9F9F9; + stroke: #FFFFFF; + stroke-width: 1px; +} +.dashboard .server-overview .concentricchart text { + font-size: 12px; + font-family: 'Roboto', sans-serif; +} +.dashboard .server-overview .concentricchart .value_group { + fill: white; +} +.dashboard .server-overview .concentricchart .legend_group rect { + opacity: 0.8; +} +.dashboard .server-overview svg.legend { + height: 115px; +} +.dashboard .server-overview svg.legend text { + font-size: 12px; + font-family: 'Roboto', sans-serif; +} +.dashboard .server-overview svg.legend .hostName { + font-size: 16px; +} +.dashboard .server-overview .minion-name { + text-align: center; + vertical-align: bottom; + width: 100%; +} +.dashboard .server-overview .chart_area { + width: 325px; + height: 425px; +} +.dashboard .groups { + font-size: 13px; +} +.dashboard .groups .header { + line-height: 21px; +} +.dashboard .groups .header a { + padding-left: 5px; + padding-right: 5px; +} +.dashboard .groups .header .selector-area .filter-text { + font-size: 13px; + margin-left: 10px; +} +.dashboard .groups .header .selector-area .cancel-button { + width: 18px; + height: 18px; + padding: 0; +} +.dashboard .groups .header .selector-area .cancel-button:focus, +.dashboard .groups .header .selector-area .cancel-button:hover { + background-color: none !important; +} +.dashboard .groups .header 
.selector-area .cancel-icon { + width: 15px; + height: 15px; + fill: #777777; +} +.dashboard .groups .select-group-by { + min-width: 110px; + margin-left: 10px; + margin-right: 40px; +} +.dashboard .groups .select-group-by .md-select-label { + padding-top: 6px; + font-size: 13px; +} +.dashboard .groups .group-item { + padding-top: 20px; +} +.dashboard .groups .group-item .filter-button { + height: 18px; + width: 18px; +} +.dashboard .groups .group-item .filter-button .filter-icon { + width: 18px; + height: 18px; +} +.dashboard .groups .icon-area { + min-width: 34px; +} +.dashboard .groups .icon-area .group-icon { + border-radius: 21px; + width: 21px; + height: 21px; +} +.dashboard .groups .group-main-area .subtype { + line-height: 21px; +} +.dashboard .groups md-divider { + margin-top: 40px; + margin-bottom: 30px; +} +.dashboard .groups .group-name { + padding-top: 10px; +} +.dashboard .groups .selectFilter { + padding-top: 10px; + margin-right: 30px; +} +.dashboard .groups .selectFilter .md-select-label { + border-bottom: none !important; + width: 17px; + min-width: 17px; + padding-right: 0; +} +.dashboard .groups md-select-menu { + min-height: 40px; + max-height: 40px; +} +.dashboard .groups .group-link-area { + padding-left: 15px; + padding-bottom: 15px; +} +.dashboard .groups .group-link-area button { + line-height: 12px; +} +.dashboard .groups .group-type-circle { + width: 21px; + height: 21px; +} +.dashboard .groups md-select { + margin-top: 0px; +} +.dashboard .detail { + color: #222222; +} +.dashboard .detail .back { + font-size: 18px; + line-height: 27px; + margin-bottom: 30px; +} +.dashboard .detail .heading { + font-size: 18px; + line-height: 21px; + color: #222222; + margin-bottom: 25px; +} +.dashboard .detail .heading .label { + color: #777777; +} +.dashboard .detail td.name { + font-size: 14px; + color: #222222; + line-height: 24px; +} +.dashboard .detail td.value { + margin-left: 50px; + font-size: 14px; + color: #888888; + line-height: 24px; +} 
+.dashboard .detail .containerTable td { + padding-right: 20px; +} `) -func www_box_ng_bytes() ([]byte, error) { - return _www_box_ng, nil +func www_app_assets_css_app_css_bytes() ([]byte, error) { + return _www_app_assets_css_app_css, nil } -func www_box_ng() (*asset, error) { - bytes, err := www_box_ng_bytes() +func www_app_assets_css_app_css() (*asset, error) { + bytes, err := www_app_assets_css_app_css_bytes() if err != nil { return nil, err } - info := bindata_file_info{name: "www/box.ng", size: 2526, mode: os.FileMode(416), modTime: time.Unix(1422479417, 0)} + info := bindata_file_info{name: "www/app/assets/css/app.css", size: 37053, mode: os.FileMode(436), modTime: time.Unix(1429574520, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _www_index_html = []byte(` - - - Kubernetes - - - - - - - - - -