diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 98922b6fad6..c3b67b69c39 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -157,8 +157,8 @@ }, { "ImportPath": "github.com/emicklei/go-restful", - "Comment": "v1.1.3-45-gd487287", - "Rev": "d4872876992d385f0e69b007f154e5633bdb40af" + "Comment": "v1.1.3-54-gbdfb7d4", + "Rev": "bdfb7d41639a84ea7c36df648e5865cd9fbf21e2" }, { "ImportPath": "github.com/evanphx/json-patch", @@ -475,7 +475,7 @@ }, { "ImportPath": "golang.org/x/oauth2", - "Rev": "2e66694fea36dc820636630792a55cdc6987e05b" + "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9" }, { "ImportPath": "google.golang.org/appengine", diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-swagger.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-swagger.go new file mode 100644 index 00000000000..7746b5b0705 --- /dev/null +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/examples/restful-swagger.go @@ -0,0 +1,61 @@ +package main + +import ( + "log" + "net/http" + + "github.com/emicklei/go-restful" + "github.com/emicklei/go-restful/swagger" +) + +type Book struct { + Title string + Author string +} + +func main() { + ws := new(restful.WebService) + ws.Path("/books") + ws.Consumes(restful.MIME_JSON, restful.MIME_XML) + ws.Produces(restful.MIME_JSON, restful.MIME_XML) + restful.Add(ws) + + ws.Route(ws.GET("/{medium}").To(noop). + Doc("Search all books"). + Param(ws.PathParameter("medium", "digital or paperback").DataType("string")). + Param(ws.QueryParameter("language", "en,nl,de").DataType("string")). + Param(ws.HeaderParameter("If-Modified-Since", "last known timestamp").DataType("datetime")). + Do(returns200, returns500)) + + ws.Route(ws.PUT("/{medium}").To(noop). + Doc("Add a new book"). + Param(ws.PathParameter("medium", "digital or paperback").DataType("string")). + Reads(Book{})) + + // You can install the Swagger Service which provides a nice Web UI on your REST API + // You need to download the Swagger HTML5 assets and change the FilePath location in the config below. + // Open http://localhost:8080/apidocs and enter http://localhost:8080/apidocs.json in the api input field. 
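+ // Note that SwaggerFilePath below points at a checkout on the author's machine; replace it with your own swagger-ui/dist location.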
+ config := swagger.Config{ + WebServices: restful.DefaultContainer.RegisteredWebServices(), // you control what services are visible + WebServicesUrl: "http://localhost:8080", + ApiPath: "/apidocs.json", + + // Optionally, specify where the UI is located + SwaggerPath: "/apidocs/", + SwaggerFilePath: "/Users/emicklei/xProjects/swagger-ui/dist"} + swagger.RegisterSwaggerService(config, restful.DefaultContainer) + + log.Printf("start listening on localhost:8080") + server := &http.Server{Addr: ":8080", Handler: restful.DefaultContainer} + log.Fatal(server.ListenAndServe()) +} + +func noop(req *restful.Request, resp *restful.Response) {} + +func returns200(b *restful.RouteBuilder) { + b.Returns(http.StatusOK, "OK", Book{}) +} + +func returns500(b *restful.RouteBuilder) { + b.Returns(http.StatusInternalServerError, "Bummer, something went wrong", nil) +} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/parameter.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/parameter.go index 7f38a0a6478..05a9987600f 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/parameter.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/parameter.go @@ -35,6 +35,7 @@ type ParameterData struct { Required bool AllowableValues map[string]string AllowMultiple bool + DefaultValue string } // Data returns the state of the Parameter @@ -70,26 +71,32 @@ func (p *Parameter) beForm() *Parameter { return p } -// Required sets the required field and return the receiver +// Required sets the required field and returns the receiver func (p *Parameter) Required(required bool) *Parameter { p.data.Required = required return p } -// AllowMultiple sets the allowMultiple field and return the receiver +// AllowMultiple sets the allowMultiple field and returns the receiver func (p *Parameter) AllowMultiple(multiple bool) *Parameter { p.data.AllowMultiple = multiple return p } -// AllowableValues sets the allowableValues field and return the receiver +// AllowableValues sets the allowableValues field and returns the receiver func (p *Parameter) AllowableValues(values map[string]string) *Parameter { p.data.AllowableValues = values return p } -// DataType sets the dataType field and return the receiver +// DataType sets the dataType field and returns the receiver func (p *Parameter) DataType(typeName string) *Parameter { p.data.DataType = typeName return p } + +// DefaultValue sets the default value field and returns the receiver +func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter { + p.data.DefaultValue = stringRepresentation + return p +} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/route_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/route_test.go index 78b3be88fd3..6a104aff8ed 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/route_test.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/route_test.go @@ -4,6 +4,14 @@ import ( "testing" ) +// accept should match produces +func TestMatchesAcceptPlainTextWhenProducePlainTextAsLast(t *testing.T) { + r := Route{Produces: []string{"application/json", "text/plain"}} + if !r.matchesAccept("text/plain") { + t.Errorf("accept should match text/plain") + } +} + // accept should match produces func TestMatchesAcceptStar(t *testing.T) { r := Route{Produces: []string{"application/xml"}} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/CHANGES.md b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/CHANGES.md index
6a392cf2500..c9b49044565 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/CHANGES.md +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/CHANGES.md @@ -1,5 +1,10 @@ Change history of swagger = +2015-05-25 +- (api break) changed the type of Properties in Model +- (api break) changed the type of Models in ApiDeclaration +- (api break) changed the parameter type of PostBuildDeclarationMapFunc + 2015-04-09 - add ModelBuildable interface for customization of Model diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/README.md b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/README.md index 2efe8f3a054..9376fc10af6 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/README.md +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/README.md @@ -23,6 +23,6 @@ Now, you can install the Swagger WebService for serving the Swagger specificatio Notes -- -- Use RouteBuilder.Operation(..) to set the Nickname field of the API spec +- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..) - The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints. - Use tag "description" to annotate a struct field with a description to show in the UI \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/api_declaration_list.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/api_declaration_list.go new file mode 100644 index 00000000000..9f4c3690acb --- /dev/null +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/api_declaration_list.go @@ -0,0 +1,64 @@ +package swagger + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bytes" + "encoding/json" +) + +// ApiDeclarationList maintains an ordered list of ApiDeclaration. 
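+// It replaces the map[string]ApiDeclaration used previously (see the config.go change below), keeping declarations in insertion order. A minimal usage sketch (an editor's illustration, not part of the upstream file): +// +// var list ApiDeclarationList +// list.Put("/books", ApiDeclaration{ResourcePath: "/books"}) +// decl, ok := list.At("/books") // decl.ResourcePath == "/books", ok == true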
+type ApiDeclarationList struct { + List []ApiDeclaration +} + +// At returns the ApiDeclaration with this path; ok is false if it is absent +func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) { + for _, each := range l.List { + if each.ResourcePath == path { + return each, true + } + } + return a, false +} + +// Put adds or replaces an ApiDeclaration with this path +func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) { + // maybe replace existing + for i, each := range l.List { + if each.ResourcePath == path { + // replace + l.List[i] = a + return + } + } + // add + l.List = append(l.List, a) +} + +// Do enumerates all the declarations, each with its resource path +func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) { + for _, each := range l.List { + block(each.ResourcePath, each) + } +} + +// MarshalJSON writes the ApiDeclarationList as if it was a map[string]ApiDeclaration +func (l ApiDeclarationList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + encoder := json.NewEncoder(&buf) + buf.WriteString("{\n") + for i, each := range l.List { + buf.WriteString("\"") + buf.WriteString(each.ResourcePath) + buf.WriteString("\": ") + encoder.Encode(each) + if i < len(l.List)-1 { + buf.WriteString(",\n") + } + } + buf.WriteString("}") + return buf.Bytes(), nil +} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/config.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/config.go index c08586bb5ce..b272b7bface 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/config.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/config.go @@ -7,7 +7,7 @@ import ( ) // PostBuildDeclarationMapFunc can be used to modify the api declaration map. -type PostBuildDeclarationMapFunc func(apiDeclarationMap map[string]ApiDeclaration) +type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList) type Config struct { // url where the services are available, e.g.
http://localhost:8080 diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_builder.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_builder.go index 631ad53a334..2ecc49a6c16 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_builder.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_builder.go @@ -13,7 +13,7 @@ type ModelBuildable interface { } type modelBuilder struct { - Models map[string]Model + Models *ModelList } // addModelFrom creates and adds a Model to the builder and detects and calls @@ -23,7 +23,7 @@ func (b modelBuilder) addModelFrom(sample interface{}) { // allow customizations if buildable, ok := sample.(ModelBuildable); ok { modelOrNil = buildable.PostBuildModel(modelOrNil) - b.Models[modelOrNil.Id] = *modelOrNil + b.Models.Put(modelOrNil.Id, *modelOrNil) } } } @@ -38,16 +38,16 @@ func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model { return nil } // see if we already have visited this model - if _, ok := b.Models[modelName]; ok { + if _, ok := b.Models.At(modelName); ok { return nil } sm := Model{ Id: modelName, Required: []string{}, - Properties: map[string]ModelProperty{}} + Properties: ModelPropertyList{}} // reference the model before further initializing (enables recursive structs) - b.Models[modelName] = sm + b.Models.Put(modelName, sm) // check for slice or array if st.Kind() == reflect.Slice || st.Kind() == reflect.Array { @@ -70,11 +70,11 @@ func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model { if b.isPropertyRequired(field) { sm.Required = append(sm.Required, jsonName) } - sm.Properties[jsonName] = prop + sm.Properties.Put(jsonName, prop) } } // update model builder with completed model - b.Models[modelName] = sm + b.Models.Put(modelName, sm) return &sm } @@ -179,13 +179,13 @@ func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonNam if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) { // embedded struct - sub := modelBuilder{map[string]Model{}} + sub := modelBuilder{new(ModelList)} sub.addModel(fieldType, "") subKey := sub.keyFrom(fieldType) // merge properties from sub - subModel := sub.Models[subKey] - for k, v := range subModel.Properties { - model.Properties[k] = v + subModel, _ := sub.Models.At(subKey) + subModel.Properties.Do(func(k string, v ModelProperty) { + model.Properties.Put(k, v) // if subModel says this property is required then include it required := false for _, each := range subModel.Required { @@ -197,15 +197,15 @@ func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonNam if required { model.Required = append(model.Required, k) } - } + }) // add all new referenced models - for key, sub := range sub.Models { + sub.Models.Do(func(key string, sub Model) { if key != subKey { - if _, ok := b.Models[key]; !ok { - b.Models[key] = sub + if _, ok := b.Models.At(key); !ok { + b.Models.Put(key, sub) } } - } + }) // empty name signals skip property return "", prop } diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_list.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_list.go new file mode 100644 index 00000000000..9bb6cb67850 --- /dev/null +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_list.go @@ -0,0 +1,86 @@ +package swagger + +// Copyright 2015 Ernest Micklei. All rights reserved. 
+// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bytes" + "encoding/json" +) + +// NamedModel associates a name with a Model (not using its Id) +type NamedModel struct { + Name string + Model Model +} + +// ModelList encapsulates a list of NamedModel (association) +type ModelList struct { + List []NamedModel +} + +// Put adds or replaces a Model by its name +func (l *ModelList) Put(name string, model Model) { + for i, each := range l.List { + if each.Name == name { + // replace + l.List[i] = NamedModel{name, model} + return + } + } + // add + l.List = append(l.List, NamedModel{name, model}) +} + +// At returns a Model by its name, ok is false if absent +func (l *ModelList) At(name string) (m Model, ok bool) { + for _, each := range l.List { + if each.Name == name { + return each.Model, true + } + } + return m, false +} + +// Do enumerates all the models, each with its assigned name +func (l *ModelList) Do(block func(name string, value Model)) { + for _, each := range l.List { + block(each.Name, each.Model) + } +} + +// MarshalJSON writes the ModelList as if it was a map[string]Model +func (l ModelList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + encoder := json.NewEncoder(&buf) + buf.WriteString("{\n") + for i, each := range l.List { + buf.WriteString("\"") + buf.WriteString(each.Name) + buf.WriteString("\": ") + encoder.Encode(each.Model) + if i < len(l.List)-1 { + buf.WriteString(",\n") + } + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +// UnmarshalJSON reads back a ModelList. This is an expensive operation. +func (l *ModelList) UnmarshalJSON(data []byte) error { + raw := map[string]interface{}{} + json.NewDecoder(bytes.NewReader(data)).Decode(&raw) + for k, v := range raw { + // produces JSON bytes for each value + data, err := json.Marshal(v) + if err != nil { + return err + } + var m Model + json.NewDecoder(bytes.NewReader(data)).Decode(&m) + l.Put(k, m) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_list_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_list_test.go new file mode 100644 index 00000000000..9a9ab919b48 --- /dev/null +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_list_test.go @@ -0,0 +1,48 @@ +package swagger + +import ( + "encoding/json" + "testing" +) + +func TestModelList(t *testing.T) { + m := Model{} + m.Id = "m" + l := ModelList{} + l.Put("m", m) + k, ok := l.At("m") + if !ok { + t.Error("want model back") + } + if got, want := k.Id, "m"; got != want { + t.Errorf("got %v want %v", got, want) + } +} + +func TestModelList_Marshal(t *testing.T) { + l := ModelList{} + m := Model{Id: "myid"} + l.Put("myid", m) + data, err := json.Marshal(l) + if err != nil { + t.Error(err) + } + if got, want := string(data), `{"myid":{"id":"myid","properties":{}}}`; got != want { + t.Errorf("got %v want %v", got, want) + } +} + +func TestModelList_Unmarshal(t *testing.T) { + data := `{"myid":{"id":"myid","properties":{}}}` + l := ModelList{} + if err := json.Unmarshal([]byte(data), &l); err != nil { + t.Error(err) + } + m, ok := l.At("myid") + if !ok { + t.Error("expected myid") + } + if got, want := m.Id, "myid"; got != want { + t.Errorf("got %v want %v", got, want) + } +} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_property_list.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_property_list.go new file mode 100644 index 
00000000000..3babb194489 --- /dev/null +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_property_list.go @@ -0,0 +1,87 @@ +package swagger + +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +import ( + "bytes" + "encoding/json" +) + +// NamedModelProperty associates a name to a ModelProperty +type NamedModelProperty struct { + Name string + Property ModelProperty +} + +// ModelPropertyList encapsulates a list of NamedModelProperty (association) +type ModelPropertyList struct { + List []NamedModelProperty +} + +// At returns the ModelProperty with this name; ok is false if it is absent +func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) { + for _, each := range l.List { + if each.Name == name { + return each.Property, true + } + } + return p, false +} + +// Put adds or replaces a ModelProperty with this name +func (l *ModelPropertyList) Put(name string, prop ModelProperty) { + // maybe replace existing + for i, each := range l.List { + if each.Name == name { + // replace + l.List[i] = NamedModelProperty{Name: name, Property: prop} + return + } + } + // add + l.List = append(l.List, NamedModelProperty{Name: name, Property: prop}) +} + +// Do enumerates all the properties, each with its assigned name +func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) { + for _, each := range l.List { + block(each.Name, each.Property) + } +} + +// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty +func (l ModelPropertyList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + encoder := json.NewEncoder(&buf) + buf.WriteString("{\n") + for i, each := range l.List { + buf.WriteString("\"") + buf.WriteString(each.Name) + buf.WriteString("\": ") + encoder.Encode(each.Property) + if i < len(l.List)-1 { + buf.WriteString(",\n") + } + } + buf.WriteString("}") + return buf.Bytes(), nil +} + +// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
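+// Each value is marshaled back to JSON bytes and decoded again, hence the cost. A round-trip sketch (an editor's illustration, matching the tests below): +// +// var l ModelPropertyList +// l.Put("p", ModelProperty{Description: "d"}) +// data, _ := json.Marshal(l) // {"p":{"description":"d"}} +// var back ModelPropertyList +// _ = json.Unmarshal(data, &back) // back.At("p") yields the property again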
+func (l *ModelPropertyList) UnmarshalJSON(data []byte) error { + raw := map[string]interface{}{} + json.NewDecoder(bytes.NewReader(data)).Decode(&raw) + for k, v := range raw { + // produces JSON bytes for each value + data, err := json.Marshal(v) + if err != nil { + return err + } + var m ModelProperty + json.NewDecoder(bytes.NewReader(data)).Decode(&m) + l.Put(k, m) + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_property_list_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_property_list_test.go new file mode 100644 index 00000000000..2833ad8fdaa --- /dev/null +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/model_property_list_test.go @@ -0,0 +1,47 @@ +package swagger + +import ( + "encoding/json" + "testing" +) + +func TestModelPropertyList(t *testing.T) { + l := ModelPropertyList{} + p := ModelProperty{Description: "d"} + l.Put("p", p) + q, ok := l.At("p") + if !ok { + t.Error("expected p") + } + if got, want := q.Description, "d"; got != want { + t.Errorf("got %v want %v", got, want) + } +} + +func TestModelPropertyList_Marshal(t *testing.T) { + l := ModelPropertyList{} + p := ModelProperty{Description: "d"} + l.Put("p", p) + data, err := json.Marshal(l) + if err != nil { + t.Error(err) + } + if got, want := string(data), `{"p":{"description":"d"}}`; got != want { + t.Errorf("got %v want %v", got, want) + } +} + +func TestModelPropertyList_Unmarshal(t *testing.T) { + data := `{"p":{"description":"d"}}` + l := ModelPropertyList{} + if err := json.Unmarshal([]byte(data), &l); err != nil { + t.Error(err) + } + m, ok := l.At("p") + if !ok { + t.Error("expected p") + } + if got, want := m.Description, "d"; got != want { + t.Errorf("got %v want %v", got, want) + } +} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/ordered_route_map.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/ordered_route_map.go index f57163136ad..b33ccfbeb9e 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/ordered_route_map.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/ordered_route_map.go @@ -1,5 +1,9 @@ package swagger +// Copyright 2015 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + import "github.com/emicklei/go-restful" type orderedRouteMap struct { diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter.go deleted file mode 100644 index 813007b7d7a..00000000000 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter.go +++ /dev/null @@ -1,29 +0,0 @@ -package swagger - -// Copyright 2014 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -type ParameterSorter []Parameter - -func (s ParameterSorter) Len() int { - return len(s) -} -func (s ParameterSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -var typeToSortKey = map[string]string{ - "path": "A", - "query": "B", - "form": "C", - "header": "D", - "body": "E", -} - -func (s ParameterSorter) Less(i, j int) bool { - // use ordering path,query,form,header,body - pi := s[i] - pj := s[j] - return typeToSortKey[pi.ParamType]+pi.Name < typeToSortKey[pj.ParamType]+pj.Name -} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter_test.go deleted file mode 100644 index ef6d9ebd195..00000000000 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/param_sorter_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package swagger - -import ( - "bytes" - "sort" - "testing" -) - -func TestSortParameters(t *testing.T) { - unsorted := []Parameter{ - Parameter{ - Name: "form2", - ParamType: "form", - }, - Parameter{ - Name: "header1", - ParamType: "header", - }, - Parameter{ - Name: "path2", - ParamType: "path", - }, - Parameter{ - Name: "body", - ParamType: "body", - }, - Parameter{ - Name: "path1", - ParamType: "path", - }, - Parameter{ - Name: "form1", - ParamType: "form", - }, - Parameter{ - Name: "query2", - ParamType: "query", - }, - Parameter{ - Name: "query1", - ParamType: "query", - }, - } - sort.Sort(ParameterSorter(unsorted)) - var b bytes.Buffer - for _, p := range unsorted { - b.WriteString(p.Name + ".") - } - if "path1.path2.query1.query2.form1.form2.header1.body." != b.String() { - t.Fatal("sorting has changed:" + b.String()) - } -} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/postbuild_model_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/postbuild_model_test.go index 200b061486d..3e20d2f5b9d 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/postbuild_model_test.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/postbuild_model_test.go @@ -14,12 +14,12 @@ func (b Boat) PostBuildModel(m *Model) *Model { // add model property (just to test is can be added; is this a real usecase?) extraType := "string" - m.Properties["extra"] = ModelProperty{ + m.Properties.Put("extra", ModelProperty{ Description: "extra description", DataTypeFields: DataTypeFields{ Type: &extraType, }, - } + }) return m } diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/resource_sorter.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/resource_sorter.go deleted file mode 100644 index e842b4c290d..00000000000 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/resource_sorter.go +++ /dev/null @@ -1,19 +0,0 @@ -package swagger - -// Copyright 2014 Ernest Micklei. All rights reserved. -// Use of this source code is governed by a license -// that can be found in the LICENSE file. 
- -type ResourceSorter []Resource - -func (s ResourceSorter) Len() int { - return len(s) -} - -func (s ResourceSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s ResourceSorter) Less(i, j int) bool { - return s[i].Path < s[j].Path -} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger.go index 4aad3eebf05..288aec67ef6 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger.go @@ -114,15 +114,15 @@ type TokenEndpoint struct { // 5.2 API Declaration type ApiDeclaration struct { - SwaggerVersion string `json:"swaggerVersion"` - ApiVersion string `json:"apiVersion"` - BasePath string `json:"basePath"` - ResourcePath string `json:"resourcePath"` // must start with / - Apis []Api `json:"apis,omitempty"` - Models map[string]Model `json:"models,omitempty"` - Produces []string `json:"produces,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Authorizations []Authorization `json:"authorizations,omitempty"` + SwaggerVersion string `json:"swaggerVersion"` + ApiVersion string `json:"apiVersion"` + BasePath string `json:"basePath"` + ResourcePath string `json:"resourcePath"` // must start with / + Apis []Api `json:"apis,omitempty"` + Models ModelList `json:"models,omitempty"` + Produces []string `json:"produces,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Authorizations []Authorization `json:"authorizations,omitempty"` } // 5.2.2 API Object @@ -166,12 +166,12 @@ type ResponseMessage struct { // 5.2.6, 5.2.7 Models Object type Model struct { - Id string `json:"id"` - Description string `json:"description,omitempty"` - Required []string `json:"required,omitempty"` - Properties map[string]ModelProperty `json:"properties"` - SubTypes []string `json:"subTypes,omitempty"` - Discriminator string `json:"discriminator,omitempty"` + Id string `json:"id"` + Description string `json:"description,omitempty"` + Required []string `json:"required,omitempty"` + Properties ModelPropertyList `json:"properties"` + SubTypes []string `json:"subTypes,omitempty"` + Discriminator string `json:"discriminator,omitempty"` } // 5.2.8 Properties Object diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_test.go index e810f2f35ef..cf38a760e5a 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_test.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_test.go @@ -26,7 +26,7 @@ func TestServiceToApi(t *testing.T) { WebServicesUrl: "http://here.com", ApiPath: "/apipath", WebServices: []*restful.WebService{ws}, - PostBuildHandler: func(in map[string]ApiDeclaration) {}, + PostBuildHandler: func(in *ApiDeclarationList) {}, } sws := newSwaggerService(cfg) decl := sws.composeDeclaration(ws, "/tests") @@ -73,7 +73,7 @@ func TestComposeResponseMessages(t *testing.T) { responseErrors[400] = restful.ResponseError{Code: 400, Message: "Bad Request", Model: TestItem{}} route := restful.Route{ResponseErrors: responseErrors} decl := new(ApiDeclaration) - decl.Models = map[string]Model{} + decl.Models = ModelList{} msgs := composeResponseMessages(route, decl) if msgs[0].ResponseModel != "swagger.TestItem" { t.Errorf("got %s want swagger.TestItem", msgs[0].ResponseModel) @@ -86,7 +86,7 @@ func TestComposeResponseMessageArray(t *testing.T) { 
responseErrors[400] = restful.ResponseError{Code: 400, Message: "Bad Request", Model: []TestItem{}} route := restful.Route{ResponseErrors: responseErrors} decl := new(ApiDeclaration) - decl.Models = map[string]Model{} + decl.Models = ModelList{} msgs := composeResponseMessages(route, decl) if msgs[0].ResponseModel != "array[swagger.TestItem]" { t.Errorf("got %s want swagger.TestItem", msgs[0].ResponseModel) @@ -95,23 +95,23 @@ func TestComposeResponseMessageArray(t *testing.T) { func TestIssue78(t *testing.T) { sws := newSwaggerService(Config{}) - models := map[string]Model{} + models := new(ModelList) sws.addModelFromSampleTo(&Operation{}, true, Response{Items: &[]TestItem{}}, models) - model, ok := models["swagger.Response"] + model, ok := models.At("swagger.Response") if !ok { t.Fatal("missing response model") } if "swagger.Response" != model.Id { t.Fatal("wrong model id:" + model.Id) } - code, ok := model.Properties["Code"] + code, ok := model.Properties.At("Code") if !ok { t.Fatal("missing code") } if "integer" != *code.Type { t.Fatal("wrong code type:" + *code.Type) } - items, ok := model.Properties["Items"] + items, ok := model.Properties.At("Items") if !ok { t.Fatal("missing items") } diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_webservice.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_webservice.go index 9ac56c2de4e..885f9dc7316 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_webservice.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/swagger_webservice.go @@ -15,13 +15,13 @@ import ( type SwaggerService struct { config Config - apiDeclarationMap map[string]ApiDeclaration + apiDeclarationMap *ApiDeclarationList } func newSwaggerService(config Config) *SwaggerService { return &SwaggerService{ config: config, - apiDeclarationMap: map[string]ApiDeclaration{}} + apiDeclarationMap: new(ApiDeclarationList)} } // LogInfo is the function that is called when this package needs to log. 
It defaults to log.Printf @@ -66,13 +66,13 @@ func RegisterSwaggerService(config Config, wsContainer *restful.Container) { // use routes for _, route := range each.Routes() { entry := staticPathFromRoute(route) - _, exists := sws.apiDeclarationMap[entry] + _, exists := sws.apiDeclarationMap.At(entry) if !exists { - sws.apiDeclarationMap[entry] = sws.composeDeclaration(each, entry) + sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry)) } } } else { // use root path - sws.apiDeclarationMap[each.RootPath()] = sws.composeDeclaration(each, each.RootPath()) + sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath())) } } } @@ -139,19 +139,22 @@ func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.Fil func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) { listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion} - for k, v := range sws.apiDeclarationMap { + sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) { ref := Resource{Path: k} if len(v.Apis) > 0 { // use description of first (could still be empty) ref.Description = v.Apis[0].Description } listing.Apis = append(listing.Apis, ref) - } - sort.Sort(ResourceSorter(listing.Apis)) + }) resp.WriteAsJson(listing) } func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) { - decl := sws.apiDeclarationMap[composeRootPath(req)] + decl, ok := sws.apiDeclarationMap.At(composeRootPath(req)) + if !ok { + resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found") + return + } // unless WebServicesUrl is given if len(sws.config.WebServicesUrl) == 0 { // update base path from the actual request @@ -180,7 +183,7 @@ func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix SwaggerVersion: swaggerVersion, BasePath: sws.config.WebServicesUrl, ResourcePath: ws.RootPath(), - Models: map[string]Model{}, + Models: ModelList{}, ApiVersion: ws.Version()} // collect any path parameters @@ -218,8 +221,6 @@ func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix for _, param := range route.ParameterDocs { operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data())) } - // sort parameters - sort.Sort(ParameterSorter(operation.Parameters)) sws.addModelsFromRouteTo(&operation, route, &decl) api.Operations = append(api.Operations, operation) @@ -253,7 +254,7 @@ func composeResponseMessages(route restful.Route, decl *ApiDeclaration) (message if isCollection { modelName = "array[" + modelName + "]" } - modelBuilder{decl.Models}.addModel(st, "") + modelBuilder{&decl.Models}.addModel(st, "") // reference the model message.ResponseModel = modelName } @@ -265,10 +266,10 @@ func composeResponseMessages(route restful.Route, decl *ApiDeclaration) (message // addModelsFromRoute takes any read or write sample from the Route and creates a Swagger model from it. 
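// For example (an editor's note, inferred from the tests in this diff; Book in package main is a hypothetical type): a route whose ReadSample is a Book contributes a "main.Book" model to decl.Models, and a []Book sample is referenced as "array[main.Book]".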
func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) { if route.ReadSample != nil { - sws.addModelFromSampleTo(operation, false, route.ReadSample, decl.Models) + sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models) } if route.WriteSample != nil { - sws.addModelFromSampleTo(operation, true, route.WriteSample, decl.Models) + sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models) } } @@ -289,7 +290,7 @@ func detectCollectionType(st reflect.Type) (bool, reflect.Type) { } // addModelFromSample creates and adds (or overwrites) a Model from a sample resource -func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models map[string]Model) { +func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) { st := reflect.TypeOf(sample) isCollection, st := detectCollectionType(st) modelName := modelBuilder{}.keyFrom(st) @@ -305,8 +306,9 @@ func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse func asSwaggerParameter(param restful.ParameterData) Parameter { return Parameter{ DataTypeFields: DataTypeFields{ - Type: ¶m.DataType, - Format: asFormat(param.DataType), + Type: ¶m.DataType, + Format: asFormat(param.DataType), + DefaultValue: Special(param.DefaultValue), }, Name: param.Name, Description: param.Description, diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/utils_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/utils_test.go index b0544481e1b..6799174db4f 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/utils_test.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/swagger/utils_test.go @@ -15,8 +15,8 @@ func testJsonFromStruct(t *testing.T, sample interface{}, expectedJson string) b return compareJson(t, string(data), expectedJson) } -func modelsFromStruct(sample interface{}) map[string]Model { - models := map[string]Model{} +func modelsFromStruct(sample interface{}) *ModelList { + models := new(ModelList) builder := modelBuilder{models} builder.addModelFrom(sample) return models @@ -28,12 +28,12 @@ func compareJson(t *testing.T, actualJsonAsString string, expectedJsonAsString s var expectedMap map[string]interface{} json.Unmarshal([]byte(expectedJsonAsString), &expectedMap) if !reflect.DeepEqual(actualMap, expectedMap) { - fmt.Println("---- expected -----") - fmt.Println(withLineNumbers(expectedJsonAsString)) - fmt.Println("---- actual -----") - fmt.Println(withLineNumbers(actualJsonAsString)) - fmt.Println("---- raw -----") - fmt.Println(actualJsonAsString) + t.Log("---- expected -----") + t.Log(withLineNumbers(expectedJsonAsString)) + t.Log("---- actual -----") + t.Log(withLineNumbers(actualJsonAsString)) + t.Log("---- raw -----") + t.Log(actualJsonAsString) t.Error("there are differences") return false } diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/tracer_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/tracer_test.go new file mode 100644 index 00000000000..60c1e9fc09d --- /dev/null +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/tracer_test.go @@ -0,0 +1,18 @@ +package restful + +import "testing" + +// Use like this: +// +// TraceLogger(testLogger{t}) +type testLogger struct { + t *testing.T +} + +func (l testLogger) Print(v ...interface{}) { + l.t.Log(v...) 
+} + +func (l testLogger) Printf(format string, v ...interface{}) { + l.t.Logf(format, v...) +} diff --git a/Godeps/_workspace/src/github.com/emicklei/go-restful/web_service_test.go b/Godeps/_workspace/src/github.com/emicklei/go-restful/web_service_test.go index 7d060279c95..876740de5d7 100644 --- a/Godeps/_workspace/src/github.com/emicklei/go-restful/web_service_test.go +++ b/Godeps/_workspace/src/github.com/emicklei/go-restful/web_service_test.go @@ -108,6 +108,20 @@ func TestContentType415_POST_Issue170(t *testing.T) { } } +// go test -v -test.run TestContentType406PlainJson ...restful +func TestContentType406PlainJson(t *testing.T) { + tearDown() + TraceLogger(testLogger{t}) + Add(newGetPlainTextOrJsonService()) + httpRequest, _ := http.NewRequest("GET", "http://here.com/get", nil) + httpRequest.Header.Set("Accept", "text/plain") + httpWriter := httptest.NewRecorder() + DefaultContainer.dispatch(httpWriter, httpRequest) + if got, want := httpWriter.Code, 200; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + // go test -v -test.run TestContentTypeOctet_Issue170 ...restful func TestContentTypeOctet_Issue170(t *testing.T) { tearDown() @@ -155,6 +169,13 @@ func newGetOnlyJsonOnlyService() *WebService { return ws } +func newGetPlainTextOrJsonService() *WebService { + ws := new(WebService).Path("") + ws.Produces("text/plain", "application/json") + ws.Route(ws.GET("/get").To(doNothing)) + return ws +} + func newGetConsumingOctetStreamService() *WebService { ws := new(WebService).Path("") ws.Consumes("application/octet-stream") diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml b/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml index 01bb8d44ee9..a035125c358 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml +++ b/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml @@ -8,7 +8,7 @@ install: - export GOPATH="$HOME/gopath" - mkdir -p "$GOPATH/src/golang.org/x" - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" - - go get -v -t -d -tags='appengine appenginevm' golang.org/x/oauth2/... + - go get -v -t -d golang.org/x/oauth2/... script: - - go test -v -tags='appengine appenginevm' golang.org/x/oauth2/... + - go test -v golang.org/x/oauth2/... diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md index d76faef21a8..46aa2b12dda 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md +++ b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md @@ -1,25 +1,31 @@ -# Contributing +# Contributing to Go -We don't use GitHub pull requests but use Gerrit for code reviews, -similar to the Go project. +Go is an open source project. -1. Sign one of the contributor license agreements below. -2. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. -3. Get the package by running `go get -d golang.org/x/oauth2`. -Make changes and create a change by running `git codereview change `, provide a command message, and use `git codereview mail` to create a Gerrit CL. -Keep amending to the change and mail as your recieve feedback. +It is the work of hundreds of contributors. We appreciate your help! -For more information about the workflow, see Go's [Contribution Guidelines](https://golang.org/doc/contribute.html). 
-Before we can accept any pull requests -we have to jump through a couple of legal hurdles, -primarily a Contributor License Agreement (CLA): +## Filing issues -- **If you are an individual writing original source code** - and you're sure you own the intellectual property, - then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html). -- **If you work for a company that wants to allow you to contribute your work**, - then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html). +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. -You can sign these electronically (just scroll to the bottom). -After that, we'll be able to accept your pull requests. diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/README.md b/Godeps/_workspace/src/golang.org/x/oauth2/README.md index ecf9c4e022f..0d5141733f5 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/README.md +++ b/Godeps/_workspace/src/golang.org/x/oauth2/README.md @@ -16,3 +16,49 @@ See godoc for further documentation and examples. * [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) +## App Engine + +In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor +of the [`context.Context`](https://golang.org/x/net/context#Context) type from +the `golang.org/x/net/context` package. + +This means it's no longer possible to use the "Classic App Engine" +`appengine.Context` type with the `oauth2` package. (You're using +Classic App Engine if you import the package `"appengine"`.) + +To work around this, you may use the new `"google.golang.org/appengine"` +package. This package has almost the same API as the `"appengine"` package, +but it can be fetched with `go get` and used on "Managed VMs" as well as +Classic App Engine. + +See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app) +for information on updating your app. + +If you don't want to update your entire app to use the new App Engine packages, +you may use both sets of packages in parallel, using only the new packages +with the `oauth2` package.
+ + import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + newappengine "google.golang.org/appengine" + newurlfetch "google.golang.org/appengine/urlfetch" + + "appengine" + ) + + func handler(w http.ResponseWriter, r *http.Request) { + var c appengine.Context = appengine.NewContext(r) + c.Infof("Logging a message with the old package") + + var ctx context.Context = newappengine.NewContext(r) + client := &http.Client{ + Transport: &oauth2.Transport{ + Source: google.AppEngineTokenSource(ctx, "scope"), + Base: &newurlfetch.Transport{Context: ctx}, + }, + } + client.Get("...") + } + diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go index d9ce8045ba4..4a554cb9bf6 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go @@ -2,38 +2,24 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build appengine,!appenginevm +// +build appengine appenginevm // App Engine hooks. package oauth2 import ( - "log" "net/http" - "sync" - "appengine" - "appengine/urlfetch" + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" + "google.golang.org/appengine/urlfetch" ) -var warnOnce sync.Once - func init() { - registerContextClientFunc(contextClientAppEngine) + internal.RegisterContextClientFunc(contextClientAppEngine) } -func contextClientAppEngine(ctx Context) (*http.Client, error) { - if actx, ok := ctx.(appengine.Context); ok { - return urlfetch.Client(actx), nil - } - // The user did it wrong. We'll log once (and hope they see it - // in dev_appserver), but stil return (nil, nil) in case some - // other contextClientFunc hook finds a way to proceed. - warnOnce.Do(gaeDoingItWrongHelp) - return nil, nil -} - -func gaeDoingItWrongHelp() { - log.Printf("WARNING: you attempted to use the oauth2 package without passing a valid appengine.Context or *http.Request as the oauth2.Context. App Engine requires that all service RPCs (including urlfetch) be associated with an *http.Request/appengine.Context.") +func contextClientAppEngine(ctx context.Context) (*http.Client, error) { + return urlfetch.Client(ctx), nil } diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go new file mode 100644 index 00000000000..452fb8c1246 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -0,0 +1,112 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package clientcredentials implements the OAuth2.0 "client credentials" token flow, +// also known as the "two-legged OAuth 2.0". +// +// This should be used when the client is acting on its own behalf or when the client +// is the resource owner. It may also be used when requesting access to protected +// resources based on an authorization previously arranged with the authorization +// server. +// +// See http://tools.ietf.org/html/draft-ietf-oauth-v2-31#section-4.4 +package clientcredentials + +import ( + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +// tokenFromInternal maps an *internal.Token struct into +// an *oauth2.Token struct. 
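+// Any provider-specific fields carried in t.Raw are preserved through the WithExtra call below, so they remain readable via Token.Extra afterwards.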
+func tokenFromInternal(t *internal.Token) *oauth2.Token { + if t == nil { + return nil + } + tk := &oauth2.Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + } + return tk.WithExtra(t.Raw) +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. +// This token is then mapped from *internal.Token into an *oauth2.Token which is +// returned along with an error. +func retrieveToken(ctx context.Context, c *Config, v url.Values) (*oauth2.Token, error) { + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.TokenURL, v) + if err != nil { + return nil, err + } + return tokenFromInternal(tk), nil +} + +// Config describes a 2-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // TokenURL is the resource server's token endpoint + // URL. This is a constant specific to each server. + TokenURL string + + // Scopes specifies optional requested permissions. + Scopes []string +} + +// Token uses client credentials to retrieve a token. +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"client_credentials"}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context and the +// client ID and client secret. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + source := &tokenSource{ + ctx: ctx, + conf: c, + } + return oauth2.ReuseTokenSource(nil, source) +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +// Token refreshes the token by using a new client credentials request. +// Tokens received this way do not include a refresh token. +func (c *tokenSource) Token() (*oauth2.Token, error) { + return retrieveToken(c.ctx, c.conf, url.Values{ + "grant_type": {"client_credentials"}, + "scope": internal.CondVal(strings.Join(c.conf.Scopes, " ")), + }) +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go new file mode 100644 index 00000000000..ab319e0828c --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go @@ -0,0 +1,96 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package clientcredentials + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + "golang.org/x/oauth2" +) + +func newConf(url string) *Config { + return &Config{ + ClientID: "CLIENT_ID", + ClientSecret: "CLIENT_SECRET", + Scopes: []string{"scope1", "scope2"}, + TokenURL: url + "/token", + } +} + +type mockTransport struct { + rt func(req *http.Request) (resp *http.Response, err error) +} + +func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + return t.rt(req) +} + +func TestTokenRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/token" { + t.Errorf("authenticate client request URL = %q; want %q", r.URL, "/token") + } + headerAuth := r.Header.Get("Authorization") + if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" { + t.Errorf("Unexpected authorization header, %v is found.", headerAuth) + } + if got, want := r.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; got != want { + t.Errorf("Content-Type header = %q; want %q", got, want) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + r.Body.Close() + } + if err != nil { + t.Errorf("failed reading request body: %s.", err) + } + if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" { + t.Errorf("payload = %q; want %q", string(body), "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2") + } + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.Token(oauth2.NoContext) + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("token invalid. got: %#v", tok) + } + if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" { + t.Errorf("Access token = %q; want %q", tok.AccessToken, "90d64460d14870c08c81352a05dedd3465940a7c") + } + if tok.TokenType != "bearer" { + t.Errorf("token type = %q; want %q", tok.TokenType, "bearer") + } +} + +func TestTokenRefreshRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() == "/somethingelse" { + return + } + if r.URL.String() != "/token" { + t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL) + } + headerContentType := r.Header.Get("Content-Type") + if headerContentType != "application/x-www-form-urlencoded" { + t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType) + } + body, _ := ioutil.ReadAll(r.Body) + if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" { + t.Errorf("Unexpected refresh token payload, %v is found.", string(body)) + } + })) + defer ts.Close() + conf := newConf(ts.URL) + c := conf.Client(oauth2.NoContext) + c.Get(ts.URL + "/somethingelse") +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go index e4fef7d772a..8be27885567 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go @@ -7,15 +7,10 @@ package oauth2_test import ( "fmt" "log" - "testing" "golang.org/x/oauth2" ) -// TODO(jbd): Remove after Go 1.4. 
-// Related to https://codereview.appspot.com/107320046 -func TestA(t *testing.T) {} - func ExampleConfig() { conf := &oauth2.Config{ ClientID: "YOUR_CLIENT_ID", diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go b/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go new file mode 100644 index 00000000000..9c816ff8058 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/facebook/facebook.go @@ -0,0 +1,16 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package facebook provides constants for using OAuth2 to access Facebook. +package facebook + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Facebook's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.facebook.com/dialog/oauth", + TokenURL: "https://graph.facebook.com/oauth/access_token", +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go index c6213d9cea3..65dc347314d 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go @@ -2,36 +2,82 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build appengine,!appenginevm - package google import ( + "sort" + "strings" + "sync" "time" - "appengine" - + "golang.org/x/net/context" "golang.org/x/oauth2" ) +// Set at init time by appengine_hook.go. If nil, we're not on App Engine. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + // AppEngineTokenSource returns a token source that fetches tokens // issued to the current App Engine application's service account. // If you are implementing a 3-legged OAuth 2.0 flow on App Engine // that involves user accounts, see oauth2.Config instead. // -// You are required to provide a valid appengine.Context as context. -func AppEngineTokenSource(ctx appengine.Context, scope ...string) oauth2.TokenSource { +// The provided context must have come from appengine.NewContext. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + scopes := append([]string{}, scope...) + sort.Strings(scopes) return &appEngineTokenSource{ - ctx: ctx, - scopes: scope, - fetcherFunc: aeFetcherFunc, + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), } } -var aeFetcherFunc = func(ctx oauth2.Context, scope ...string) (string, time.Time, error) { - c, ok := ctx.(appengine.Context) - if !ok { - return "", time.Time{}, errInvalidContext - } - return appengine.AccessToken(c, scope...) +// aeTokens helps the fetched tokens to be reused until their expiration. 
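+// The cache key is the sorted, space-separated scope set built by AppEngineTokenSource, so (an editor's illustration) AppEngineTokenSource(ctx, "a", "b") and AppEngineTokenSource(ctx, "b", "a") both use the key "a b" and share one cached token.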
+var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type appEngineTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { + if appengineTokenFunc == nil { + panic("google: AppEngineTokenSource can only be used on App Engine.") + } + + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil } diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go new file mode 100644 index 00000000000..2f9b15432fa --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine_hook.go @@ -0,0 +1,13 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine appenginevm + +package google + +import "google.golang.org/appengine" + +func init() { + appengineTokenFunc = appengine.AccessToken +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/appenginevm.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/appenginevm.go deleted file mode 100644 index 12af742d2f5..00000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/appenginevm.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build appenginevm !appengine - -package google - -import ( - "time" - - "golang.org/x/oauth2" - "google.golang.org/appengine" -) - -// AppEngineTokenSource returns a token source that fetches tokens -// issued to the current App Engine application's service account. -// If you are implementing a 3-legged OAuth 2.0 flow on App Engine -// that involves user accounts, see oauth2.Config instead. -// -// You are required to provide a valid appengine.Context as context. -func AppEngineTokenSource(ctx appengine.Context, scope ...string) oauth2.TokenSource { - return &appEngineTokenSource{ - ctx: ctx, - scopes: scope, - fetcherFunc: aeVMFetcherFunc, - } -} - -var aeVMFetcherFunc = func(ctx oauth2.Context, scope ...string) (string, time.Time, error) { - c, ok := ctx.(appengine.Context) - if !ok { - return "", time.Time{}, errInvalidContext - } - return appengine.AccessToken(c, scope...) -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go new file mode 100644 index 00000000000..78f8089853f --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/default.go @@ -0,0 +1,154 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package google + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/jwt" + "google.golang.org/cloud/compute/metadata" +) + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +// +// This client should be used when developing services +// that run on Google App Engine or Google Compute Engine +// and use "Application Default Credentials." +// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource is a token source that uses +// "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine, it fetches credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +// +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + ts, err := tokenSourceFromFile(ctx, filename, scope) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return ts, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + _, err := os.Stat(filename) + if err == nil { + ts, err2 := tokenSourceFromFile(ctx, filename, scope) + if err2 == nil { + return ts, nil + } + err = err2 + } else if os.IsNotExist(err) { + err = nil // ignore this error + } + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on Google App Engine use those credentials. + if appengineTokenFunc != nil { + return AppEngineTokenSource(ctx, scope...), nil + } + + // Fourth, if we're on Google Compute Engine use the metadata server. + if metadata.OnGCE() { + return ComputeTokenSource(""), nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. 
See %v for more information.", url) +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var d struct { + // Common fields + Type string + ClientID string `json:"client_id"` + + // User Credential fields + ClientSecret string `json:"client_secret"` + RefreshToken string `json:"refresh_token"` + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + } + if err := json.Unmarshal(b, &d); err != nil { + return nil, err + } + switch d.Type { + case "authorized_user": + cfg := &oauth2.Config{ + ClientID: d.ClientID, + ClientSecret: d.ClientSecret, + Scopes: append([]string{}, scopes...), // copy + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: d.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "service_account": + cfg := &jwt.Config{ + Email: d.ClientEmail, + PrivateKey: []byte(d.PrivateKey), + Scopes: append([]string{}, scopes...), // copy + TokenURL: JWTTokenURL, + } + return cfg.TokenSource(ctx), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", d.Type) + } +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go index 2958692cefc..17262802a91 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go @@ -11,7 +11,6 @@ import ( "io/ioutil" "log" "net/http" - "testing" "golang.org/x/oauth2" "golang.org/x/oauth2/google" @@ -20,9 +19,14 @@ import ( "google.golang.org/appengine/urlfetch" ) -// Remove after Go 1.4. -// Related to https://codereview.appspot.com/107320046 -func TestA(t *testing.T) {} +func ExampleDefaultClient() { + client, err := google.DefaultClient(oauth2.NoContext, + "https://www.googleapis.com/auth/devstorage.full_control") + if err != nil { + log.Fatal(err) + } + client.Get("...") +} func Example_webServer() { // Your credentials should be obtained from the Google @@ -74,6 +78,19 @@ func ExampleJWTConfigFromJSON() { client.Get("...") } +func ExampleSDKConfig() { + // The credentials will be obtained from the first account that + // has been authorized with `gcloud auth login`. + conf, err := google.NewSDKConfig("") + if err != nil { + log.Fatal(err) + } + // Initiate an http.Client. The following GET request will be + // authorized and authenticated on behalf of the SDK user. + client := conf.Client(oauth2.NoContext) + client.Get("...") +} + func Example_serviceAccount() { // Your credentials should be obtained from the Google // Developer Console (https://console.developers.google.com).
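The Application Default Credentials search documented above can also be driven entirely from the environment. A compressed sketch of the environment-variable path; the key file location and scope URL are assumptions for illustration only:

package main

import (
	"log"
	"os"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// With this variable unset, the search falls through to the gcloud
	// well-known file, then App Engine, then the GCE metadata server,
	// in the order documented above. The path here is an assumed example.
	os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", "/path/to/key.json")

	ts, err := google.DefaultTokenSource(oauth2.NoContext,
		"https://www.googleapis.com/auth/devstorage.read_only") // assumed scope
	if err != nil {
		log.Fatal(err)
	}
	tok, err := ts.Token()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("token expires at %v", tok.Expiry)
}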
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go index 09206c7a8a1..2077d9866fa 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go @@ -2,15 +2,16 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package google provides support for making -// OAuth2 authorized and authenticated HTTP requests -// to Google APIs. It supports Web server, client-side, -// service accounts, Google Compute Engine service accounts, -// and Google App Engine service accounts authorization -// and authentications flows: +// Package google provides support for making OAuth2 authorized and +// authenticated HTTP requests to Google APIs. +// It supports the Web server flow, client-side credentials, service accounts, +// Google Compute Engine service accounts, and Google App Engine service +// accounts. // // For more information, please read -// https://developers.google.com/accounts/docs/OAuth2. +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. package google import ( @@ -25,9 +26,6 @@ import ( "google.golang.org/cloud/compute/metadata" ) -// TODO(bradfitz,jbd): import "google.golang.org/cloud/compute/metadata" instead of -// the metaClient and metadata.google.internal stuff below. - // Endpoint is Google's OAuth 2.0 endpoint. var Endpoint = oauth2.Endpoint{ AuthURL: "https://accounts.google.com/o/oauth2/auth", @@ -37,6 +35,50 @@ var Endpoint = oauth2.Endpoint{ // JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloaded from https://console.developers.google.com, +// under "APIs & Auth" > "Credentials". Download the Web application credentials in the +// JSON format and provide the contents of the file as jsonKey. +func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + // JWTConfigFromJSON uses a Google Developers service account JSON key file to read // the credentials that authorize and authenticate the requests.
// Create a service account on "Credentials" page under "APIs & Auth" for your diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go new file mode 100644 index 00000000000..4cc01884b2c --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/google_test.go @@ -0,0 +1,67 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "strings" + "testing" +) + +var webJSONKey = []byte(` +{ + "web": { + "auth_uri": "https://google.com/o/oauth2/auth", + "client_secret": "3Oknc4jS_wA2r9i", + "token_uri": "https://google.com/o/oauth2/token", + "client_email": "222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com", + "redirect_uris": ["https://www.example.com/oauth2callback"], + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com", + "client_id": "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "javascript_origins": ["https://www.example.com"] + } +}`) + +var installedJSONKey = []byte(`{ + "installed": { + "client_id": "222-installed.apps.googleusercontent.com", + "redirect_uris": ["https://www.example.com/oauth2callback"] + } +}`) + +func TestConfigFromJSON(t *testing.T) { + conf, err := ConfigFromJSON(webJSONKey, "scope1", "scope2") + if err != nil { + t.Error(err) + } + if got, want := conf.ClientID, "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com"; got != want { + t.Errorf("ClientID = %q; want %q", got, want) + } + if got, want := conf.ClientSecret, "3Oknc4jS_wA2r9i"; got != want { + t.Errorf("ClientSecret = %q; want %q", got, want) + } + if got, want := conf.RedirectURL, "https://www.example.com/oauth2callback"; got != want { + t.Errorf("RedirectURL = %q; want %q", got, want) + } + if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want { + t.Errorf("Scopes = %q; want %q", got, want) + } + if got, want := conf.Endpoint.AuthURL, "https://google.com/o/oauth2/auth"; got != want { + t.Errorf("AuthURL = %q; want %q", got, want) + } + if got, want := conf.Endpoint.TokenURL, "https://google.com/o/oauth2/token"; got != want { + t.Errorf("TokenURL = %q; want %q", got, want) + } +} + +func TestConfigFromJSON_Installed(t *testing.T) { + conf, err := ConfigFromJSON(installedJSONKey) + if err != nil { + t.Error(err) + } + if got, want := conf.ClientID, "222-installed.apps.googleusercontent.com"; got != want { + t.Errorf("ClientID = %q; want %q", got, want) + } +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 00000000000..01ba0ecb008 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,168 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package google + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. +func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := internal.ParseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: "oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP client using Google Cloud SDK credentials to +// authorize 
requests. The token will auto-refresh as necessary. The +// underlying http.RoundTripper will be obtained using the provided +// context. The returned client and its Transport should not be +// modified. +func (c *SDKConfig) Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &oauth2.Transport{ + Source: c.TokenSource(ctx), + }, + } +} + +// TokenSource returns an oauth2.TokenSource that retrieves tokens from +// Google Cloud SDK credentials using the provided context. +// It will return the current access token stored in the credentials, +// and refresh it when it expires, but it won't update the credentials +// with the new access token. +func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { + return c.conf.TokenSource(ctx, c.initialToken) +} + +// Scopes are the OAuth 2.0 scopes the current account is authorized for. +func (c *SDKConfig) Scopes() []string { + return c.conf.Scopes +} + +// sdkConfigPath tries to guess where the gcloud config is located. +// It can be overridden during tests. +var sdkConfigPath = func() (string, error) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil + } + homeDir := guessUnixHomeDir() + if homeDir == "" { + return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") + } + return filepath.Join(homeDir, ".config", "gcloud"), nil +} + +func guessUnixHomeDir() string { + usr, err := user.Current() + if err == nil { + return usr.HomeDir + } + return os.Getenv("HOME") +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go new file mode 100644 index 00000000000..79df8896443 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/sdk_test.go @@ -0,0 +1,46 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import "testing" + +func TestSDKConfig(t *testing.T) { + sdkConfigPath = func() (string, error) { + return "testdata/gcloud", nil + } + + tests := []struct { + account string + accessToken string + err bool + }{ + {"", "bar_access_token", false}, + {"foo@example.com", "foo_access_token", false}, + {"bar@example.com", "bar_access_token", false}, + {"baz@serviceaccount.example.com", "", true}, + } + for _, tt := range tests { + c, err := NewSDKConfig(tt.account) + if got, want := err != nil, tt.err; got != want { + if !tt.err { + t.Errorf("expected no error, got error: %v", err) + } else { + t.Errorf("expected error, got none") + } + continue + } + if err != nil { + continue + } + tok := c.initialToken + if tok == nil { + t.Errorf("expected token %q, got: nil", tt.accessToken) + continue + } + if tok.AccessToken != tt.accessToken { + t.Errorf("expected token %q, got: %q", tt.accessToken, tok.AccessToken) + } + } +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/source_appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/source_appengine.go deleted file mode 100644 index d0eb3da0c6a..00000000000 --- a/Godeps/_workspace/src/golang.org/x/oauth2/google/source_appengine.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2014 The oauth2 Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file.
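Putting the SDKConfig pieces above together: a short sketch that reuses credentials created by `gcloud auth login` for a named account. The account address is an assumed example, and, per the TokenSource note above, the refreshed token is never written back to the SDK's credential store:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// Passing "" would select the account marked active in the SDK
	// properties file; "alice@example.com" is an assumed account name.
	conf, err := google.NewSDKConfig("alice@example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("authorized scopes:", conf.Scopes())

	// Requests made through this client are authorized as the SDK user.
	client := conf.Client(oauth2.NoContext)
	client.Get("...")
}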
- -package google - -import ( - "errors" - "sort" - "strings" - "sync" - "time" - - "golang.org/x/oauth2" -) - -var ( - aeTokensMu sync.Mutex // guards aeTokens and appEngineTokenSource.key - - // aeTokens helps the fetched tokens to be reused until their expiration. - aeTokens = make(map[string]*tokenLock) // key is '\0'-separated scopes -) - -var errInvalidContext = errors.New("oauth2: a valid appengine.Context is required") - -type tokenLock struct { - mu sync.Mutex // guards t; held while updating t - t *oauth2.Token -} - -type appEngineTokenSource struct { - ctx oauth2.Context - - // fetcherFunc makes the actual RPC to fetch a new access - // token with an expiry time. Provider of this function is - // responsible to assert that the given context is valid. - fetcherFunc func(ctx oauth2.Context, scope ...string) (accessToken string, expiry time.Time, err error) - - // scopes and key are guarded by the package-level mutex aeTokensMu - scopes []string - key string -} - -func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) { - aeTokensMu.Lock() - if ts.key == "" { - sort.Sort(sort.StringSlice(ts.scopes)) - ts.key = strings.Join(ts.scopes, string(0)) - } - tok, ok := aeTokens[ts.key] - if !ok { - tok = &tokenLock{} - aeTokens[ts.key] = tok - } - aeTokensMu.Unlock() - - tok.mu.Lock() - defer tok.mu.Unlock() - if tok.t.Valid() { - return tok.t, nil - } - access, exp, err := ts.fetcherFunc(ts.ctx, ts.scopes...) - if err != nil { - return nil, err - } - tok.t = &oauth2.Token{ - AccessToken: access, - Expiry: exp, - } - return tok.t, nil -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials new file mode 100644 index 00000000000..ff5eefbd0a8 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/credentials @@ -0,0 +1,122 @@ +{ + "data": [ + { + "credential": { + "_class": "OAuth2Credentials", + "_module": "oauth2client.client", + "access_token": "foo_access_token", + "client_id": "foo_client_id", + "client_secret": "foo_client_secret", + "id_token": { + "at_hash": "foo_at_hash", + "aud": "foo_aud", + "azp": "foo_azp", + "cid": "foo_cid", + "email": "foo@example.com", + "email_verified": true, + "exp": 1420573614, + "iat": 1420569714, + "id": "1337", + "iss": "accounts.google.com", + "sub": "1337", + "token_hash": "foo_token_hash", + "verified_email": true + }, + "invalid": false, + "refresh_token": "foo_refresh_token", + "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "token_expiry": "2015-01-09T00:51:51Z", + "token_response": { + "access_token": "foo_access_token", + "expires_in": 3600, + "id_token": "foo_id_token", + "token_type": "Bearer" + }, + "token_uri": "https://accounts.google.com/o/oauth2/token", + "user_agent": "Cloud SDK Command Line Tool" + }, + "key": { + "account": "foo@example.com", + "clientId": "foo_client_id", + "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + }, + { + "credential": { + "_class": "OAuth2Credentials", + "_module": "oauth2client.client", 
+ "access_token": "bar_access_token", + "client_id": "bar_client_id", + "client_secret": "bar_client_secret", + "id_token": { + "at_hash": "bar_at_hash", + "aud": "bar_aud", + "azp": "bar_azp", + "cid": "bar_cid", + "email": "bar@example.com", + "email_verified": true, + "exp": 1420573614, + "iat": 1420569714, + "id": "1337", + "iss": "accounts.google.com", + "sub": "1337", + "token_hash": "bar_token_hash", + "verified_email": true + }, + "invalid": false, + "refresh_token": "bar_refresh_token", + "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "token_expiry": "2015-01-09T00:51:51Z", + "token_response": { + "access_token": "bar_access_token", + "expires_in": 3600, + "id_token": "bar_id_token", + "token_type": "Bearer" + }, + "token_uri": "https://accounts.google.com/o/oauth2/token", + "user_agent": "Cloud SDK Command Line Tool" + }, + "key": { + "account": "bar@example.com", + "clientId": "bar_client_id", + "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + }, + { + "credential": { + "_class": "ServiceAccountCredentials", + "_kwargs": {}, + "_module": "oauth2client.client", + "_private_key_id": "00000000000000000000000000000000", + "_private_key_pkcs8_text": "-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQCt3fpiynPSaUhWSIKMGV331zudwJ6GkGmvQtwsoK2S2LbvnSwU\nNxgj4fp08kIDR5p26wF4+t/HrKydMwzftXBfZ9UmLVJgRdSswmS5SmChCrfDS5OE\nvFFcN5+6w1w8/Nu657PF/dse8T0bV95YrqyoR0Osy8WHrUOMSIIbC3hRuwIDAQAB\nAoGAJrGE/KFjn0sQ7yrZ6sXmdLawrM3mObo/2uI9T60+k7SpGbBX0/Pi6nFrJMWZ\nTVONG7P3Mu5aCPzzuVRYJB0j8aldSfzABTY3HKoWCczqw1OztJiEseXGiYz4QOyr\nYU3qDyEpdhS6q6wcoLKGH+hqRmz6pcSEsc8XzOOu7s4xW8kCQQDkc75HjhbarCnd\nJJGMe3U76+6UGmdK67ltZj6k6xoB5WbTNChY9TAyI2JC+ppYV89zv3ssj4L+02u3\nHIHFGxsHAkEAwtU1qYb1tScpchPobnYUFiVKJ7KA8EZaHVaJJODW/cghTCV7BxcJ\nbgVvlmk4lFKn3lPKAgWw7PdQsBTVBUcCrQJATPwoIirizrv3u5soJUQxZIkENAqV\nxmybZx9uetrzP7JTrVbFRf0SScMcyN90hdLJiQL8+i4+gaszgFht7sNMnwJAAbfj\nq0UXcauQwALQ7/h2oONfTg5S+MuGC/AxcXPSMZbMRGGoPh3D5YaCv27aIuS/ukQ+\n6dmm/9AGlCb64fsIWQJAPaokbjIifo+LwC5gyK73Mc4t8nAOSZDenzd/2f6TCq76\nS1dcnKiPxaED7W/y6LJiuBT2rbZiQ2L93NJpFZD/UA==\n-----END RSA PRIVATE KEY-----\n", + "_revoke_uri": "https://accounts.google.com/o/oauth2/revoke", + "_scopes": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "_service_account_email": "baz@serviceaccount.example.com", + "_service_account_id": "baz.serviceaccount.example.com", + "_token_uri": "https://accounts.google.com/o/oauth2/token", + "_user_agent": "Cloud SDK Command Line Tool", + "access_token": null, + "assertion_type": null, + "client_id": null, + "client_secret": null, + "id_token": null, + "invalid": false, + "refresh_token": null, + "revoke_uri": 
"https://accounts.google.com/o/oauth2/revoke", + "service_account_name": "baz@serviceaccount.example.com", + "token_expiry": null, + "token_response": null, + "user_agent": "Cloud SDK Command Line Tool" + }, + "key": { + "account": "baz@serviceaccount.example.com", + "clientId": "baz_client_id", + "scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting", + "type": "google-cloud-sdk" + } + } + ], + "file_version": 1 +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties new file mode 100644 index 00000000000..025de886cf7 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/testdata/gcloud/properties @@ -0,0 +1,2 @@ +[core] +account = bar@example.com \ No newline at end of file diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go index 47c8f14317c..dc8ebfc4f76 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go @@ -6,10 +6,14 @@ package internal import ( + "bufio" "crypto/rsa" "crypto/x509" "encoding/pem" "errors" + "fmt" + "io" + "strings" ) // ParseKey converts the binary contents of a private key file @@ -26,12 +30,47 @@ func ParseKey(key []byte) (*rsa.PrivateKey, error) { if err != nil { parsedKey, err = x509.ParsePKCS1PrivateKey(key) if err != nil { - return nil, err + return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err) } } parsed, ok := parsedKey.(*rsa.PrivateKey) if !ok { - return nil, errors.New("oauth2: private key is invalid") + return nil, errors.New("private key is invalid") } return parsed, nil } + +func ParseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": map[string]string{}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. + continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +func CondVal(v string) []string { + if v == "" { + return nil + } + return []string{v} +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go new file mode 100644 index 00000000000..014a351e006 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2_test.go @@ -0,0 +1,62 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "reflect" + "strings" + "testing" +) + +func TestParseINI(t *testing.T) { + tests := []struct { + ini string + want map[string]map[string]string + }{ + { + `root = toor +[foo] +bar = hop +ini = nin +`, + map[string]map[string]string{ + "": map[string]string{"root": "toor"}, + "foo": map[string]string{"bar": "hop", "ini": "nin"}, + }, + }, + { + `[empty] +[section] +empty= +`, + map[string]map[string]string{ + "": map[string]string{}, + "empty": map[string]string{}, + "section": map[string]string{"empty": ""}, + }, + }, + { + `ignore +[invalid +=stuff +;comment=true +`, + map[string]map[string]string{ + "": map[string]string{}, + }, + }, + } + for _, tt := range tests { + result, err := ParseINI(strings.NewReader(tt.ini)) + if err != nil { + t.Errorf("ParseINI(%q) error %v, want: no error", tt.ini, err) + continue + } + if !reflect.DeepEqual(result, tt.want) { + t.Errorf("ParseINI(%q) = %#v, want: %#v", tt.ini, result, tt.want) + } + } +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go new file mode 100644 index 00000000000..ea6716c98c1 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/token.go @@ -0,0 +1,213 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/net/context" +) + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// This type is a mirror of oauth2.Token and exists to break +// an otherwise-circular dependency. Other internal packages +// should convert this Token into an oauth2.Token before use. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token in JSON form.
+type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + if v := e.Expires; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + *e = expirationTime(i) + return nil +} + +var brokenAuthHeaderProviders = []string{ + "https://accounts.google.com/", + "https://www.googleapis.com/", + "https://github.com/", + "https://api.instagram.com/", + "https://www.douban.com/", + "https://api.dropbox.com/", + "https://api.soundcloud.com/", + "https://www.linkedin.com/", + "https://api.twitch.tv/", + "https://oauth.vk.com/", + "https://api.odnoklassniki.ru/", + "https://connect.stripe.com/", + "https://api.pushbullet.com/", + "https://oauth.sandbox.trainingpeaks.com/", + "https://oauth.trainingpeaks.com/", + "https://www.strava.com/oauth/", + "https://app.box.com/", + "https://test-sandbox.auth.corp.google.com", + "https://user.gini.net/", +} + +// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL +// implements the OAuth2 spec correctly. +// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. +// In summary: +// - Reddit only accepts client secret in the Authorization header +// - Dropbox accepts it either in the URL param or the Auth header, but not both. +// - Google only accepts URL param (not spec compliant?), not Auth header +// - Stripe only accepts client secret in Auth header with Bearer method, not Basic +func providerAuthHeaderWorks(tokenURL string) bool { + for _, s := range brokenAuthHeaderProviders { + if strings.HasPrefix(tokenURL, s) { + // Some sites fail to implement the OAuth2 spec fully. + return false + } + } + + // Assume the provider implements the spec properly + // otherwise. We can add more exceptions as they're + // discovered. We will _not_ be adding configurable hooks + // to this package to let users select server bugs.
+ return true +} + +func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) { + hc, err := ContextClient(ctx) + if err != nil { + return nil, err + } + v.Set("client_id", ClientID) + bustedAuth := !providerAuthHeaderWorks(TokenURL) + if bustedAuth && ClientSecret != "" { + v.Set("client_secret", ClientSecret) + } + req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if !bustedAuth { + req.SetBasicAuth(ClientID, ClientSecret) + } + r, err := hc.Do(req) + if err != nil { + return nil, err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + if e == "" { + // TODO(jbd): Facebook's OAuth2 implementation is broken and + // returns expires_in field in expires. Remove the fallback to expires, + // when Facebook fixes their implementation. + e = vals.Get("expires") + } + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, nil +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/token_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/token_test.go new file mode 100644 index 00000000000..864f6fa07ef --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/token_test.go @@ -0,0 +1,28 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. 
+package internal + +import ( + "fmt" + "testing" +) + +func Test_providerAuthHeaderWorks(t *testing.T) { + for _, p := range brokenAuthHeaderProviders { + if providerAuthHeaderWorks(p) { + t.Errorf("URL: %s not found in list", p) + } + p := fmt.Sprintf("%ssomesuffix", p) + if providerAuthHeaderWorks(p) { + t.Errorf("URL: %s not found in list", p) + } + } + p := "https://api.not-in-the-list-example.com/" + if !providerAuthHeaderWorks(p) { + t.Errorf("URL: %s found in list", p) + } + +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 00000000000..521e7b49e75 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,67 @@ +// Copyright 2014 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +// ContextClientFunc is a func which tries to return an *http.Client +// given a Context value. If it returns an error, the search stops +// with that error. If it returns (nil, nil), the search continues +// down the list of registered funcs. +type ContextClientFunc func(context.Context) (*http.Client, error) + +var contextClientFuncs []ContextClientFunc + +func RegisterContextClientFunc(fn ContextClientFunc) { + contextClientFuncs = append(contextClientFuncs, fn) +} + +func ContextClient(ctx context.Context) (*http.Client, error) { + for _, fn := range contextClientFuncs { + c, err := fn(ctx) + if err != nil { + return nil, err + } + if c != nil { + return c, nil + } + } + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc, nil + } + return http.DefaultClient, nil +} + +func ContextTransport(ctx context.Context) http.RoundTripper { + hc, err := ContextClient(ctx) + // This is a rare error case (somebody using nil on App Engine). + if err != nil { + return ErrorTransport{err} + } + return hc.Transport +} + +// ErrorTransport returns the specified error on RoundTrip. +// This RoundTripper should be used in rare error cases where +// error handling can be postponed to response handling time. +type ErrorTransport struct{ Err error } + +func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) { + return nil, t.Err +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go index a8e21388fc6..205d23ed438 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go @@ -18,6 +18,7 @@ import ( "strings" "time" + "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/internal" "golang.org/x/oauth2/jws" @@ -57,7 +58,7 @@ type Config struct { // TokenSource returns a JWT TokenSource using the configuration // in c and the HTTP client from the provided context. 
-func (c *Config) TokenSource(ctx oauth2.Context) oauth2.TokenSource { +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) } @@ -66,14 +67,14 @@ func (c *Config) TokenSource(ctx oauth2.Context) oauth2.TokenSource { // obtained from c. // // The returned client and its Transport should not be modified. -func (c *Config) Client(ctx oauth2.Context) *http.Client { +func (c *Config) Client(ctx context.Context) *http.Client { return oauth2.NewClient(ctx, c.TokenSource(ctx)) } // jwtSource is a source that always does a signed JWT request for a token. // It should typically be wrapped with a reuseTokenSource. type jwtSource struct { - ctx oauth2.Context + ctx context.Context conf *Config } diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go b/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go new file mode 100644 index 00000000000..d93fded6ad5 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/linkedin/linkedin.go @@ -0,0 +1,16 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package linkedin provides constants for using OAuth2 to access LinkedIn. +package linkedin + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is LinkedIn's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.linkedin.com/uas/oauth2/authorization", + TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken", +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go index 53c755328ca..dfcf238d230 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go @@ -9,30 +9,19 @@ package oauth2 import ( "bytes" - "encoding/json" "errors" - "fmt" - "io" - "io/ioutil" - "mime" "net/http" "net/url" - "strconv" "strings" "sync" - "time" "golang.org/x/net/context" + "golang.org/x/oauth2/internal" ) -// Context can be an golang.org/x/net.Context, or an App Engine Context. -// If you don't care and aren't running on App Engine, you may use NoContext. -type Context interface{} - -// NoContext is the default context. If you're not running this code -// on App Engine or not using golang.org/x/net.Context to provide a custom -// HTTP client, you should use NoContext. -var NoContext Context = nil +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +var NoContext = context.TODO() // Config describes a typical 3-legged OAuth2 flow, with both the // client application information and the server's endpoint URLs. @@ -78,28 +67,34 @@ var ( // "access_type" field that gets sent in the URL returned by // AuthCodeURL. // - // Online (the default if neither is specified) is the default. - // If your application needs to refresh access tokens when the - // user is not present at the browser, then use offline. This - // will result in your application obtaining a refresh token - // the first time your application exchanges an authorization + // Online is the default if neither is specified. If your + // application needs to refresh access tokens when the user + // is not present at the browser, then use offline. This will + // result in your application obtaining a refresh token the + // first time your application exchanges an authorization // code for a user. 
- AccessTypeOnline AuthCodeOption = setParam{"access_type", "online"} - AccessTypeOffline AuthCodeOption = setParam{"access_type", "offline"} + AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") + AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") // ApprovalForce forces the users to view the consent dialog // and confirm the permissions request at the URL returned // from AuthCodeURL, even if they've already done so. - ApprovalForce AuthCodeOption = setParam{"approval_prompt", "force"} + ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force") ) +// An AuthCodeOption is passed to Config.AuthCodeURL. +type AuthCodeOption interface { + setValue(url.Values) +} + type setParam struct{ k, v string } func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } -// An AuthCodeOption is passed to Config.AuthCodeURL. -type AuthCodeOption interface { - setValue(url.Values) +// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// to a provider's authorization endpoint. +func SetAuthURLParam(key, value string) AuthCodeOption { + return setParam{key, value} } // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page @@ -118,9 +113,9 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { v := url.Values{ "response_type": {"code"}, "client_id": {c.ClientID}, - "redirect_uri": condVal(c.RedirectURL), - "scope": condVal(strings.Join(c.Scopes, " ")), - "state": condVal(state), + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + "state": internal.CondVal(state), } for _, opt := range opts { opt.setValue(v) @@ -134,118 +129,106 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { return buf.String() } +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The HTTP client to use is derived from the context. +// If nil, http.DefaultClient is used. +func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + return retrieveToken(ctx, c, url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), + }) +} + // Exchange converts an authorization code into a token. // // It is used after a resource provider redirects the user back // to the Redirect URI (the URL obtained from AuthCodeURL). // -// The HTTP client to use is derived from the context. If nil, -// http.DefaultClient is used. See the Context type's documentation. +// The HTTP client to use is derived from the context. +// If a client is not provided via the context, http.DefaultClient is used. // // The code will be in the *http.Request.FormValue("code"). Before // calling Exchange, be sure to validate FormValue("state"). 
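The pieces documented above (AuthCodeURL, the AuthCodeOption helpers, and Exchange) compose into the usual three-legged flow. A compressed sketch with placeholder credentials and endpoints; the extra "prompt" parameter exists only to illustrate the new SetAuthURLParam helper:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",     // placeholder
		ClientSecret: "CLIENT_SECRET", // placeholder
		RedirectURL:  "https://example.com/callback",
		Scopes:       []string{"scope1", "scope2"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/auth",  // assumed endpoint
			TokenURL: "https://provider.example.com/token", // assumed endpoint
		},
	}

	// AccessTypeOffline asks the provider for a refresh token;
	// SetAuthURLParam attaches an arbitrary provider-specific parameter
	// (the name and value here are assumptions for illustration).
	url := conf.AuthCodeURL("state-token",
		oauth2.AccessTypeOffline,
		oauth2.SetAuthURLParam("prompt", "consent"))
	fmt.Println("Visit:", url)

	// After the provider redirects back, validate FormValue("state") and
	// exchange the code for a token; "auth-code" stands in for the real code.
	tok, err := conf.Exchange(oauth2.NoContext, "auth-code")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token type:", tok.TokenType)
}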
-func (c *Config) Exchange(ctx Context, code string) (*Token, error) { +func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) { return retrieveToken(ctx, c, url.Values{ "grant_type": {"authorization_code"}, "code": {code}, - "redirect_uri": condVal(c.RedirectURL), - "scope": condVal(strings.Join(c.Scopes, " ")), + "redirect_uri": internal.CondVal(c.RedirectURL), + "scope": internal.CondVal(strings.Join(c.Scopes, " ")), }) } -// contextClientFunc is a func which tries to return an *http.Client -// given a Context value. If it returns an error, the search stops -// with that error. If it returns (nil, nil), the search continues -// down the list of registered funcs. -type contextClientFunc func(Context) (*http.Client, error) - -var contextClientFuncs []contextClientFunc - -func registerContextClientFunc(fn contextClientFunc) { - contextClientFuncs = append(contextClientFuncs, fn) -} - -func contextClient(ctx Context) (*http.Client, error) { - for _, fn := range contextClientFuncs { - c, err := fn(ctx) - if err != nil { - return nil, err - } - if c != nil { - return c, nil - } - } - if xc, ok := ctx.(context.Context); ok { - if hc, ok := xc.Value(HTTPClient).(*http.Client); ok { - return hc, nil - } - } - return http.DefaultClient, nil -} - -func contextTransport(ctx Context) http.RoundTripper { - hc, err := contextClient(ctx) - if err != nil { - // This is a rare error case (somebody using nil on App Engine), - // so I'd rather not everybody do an error check on this Client - // method. They can get the error that they're doing it wrong - // later, at client.Get/PostForm time. - return errorTransport{err} - } - return hc.Transport -} - // Client returns an HTTP client using the provided token. // The token will auto-refresh as necessary. The underlying // HTTP transport will be obtained using the provided context. // The returned client and its Transport should not be modified. -func (c *Config) Client(ctx Context, t *Token) *http.Client { +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { return NewClient(ctx, c.TokenSource(ctx, t)) } // TokenSource returns a TokenSource that returns t until t expires, // automatically refreshing it as necessary using the provided context. -// See the the Context documentation. // // Most users will use Config.Client instead. -func (c *Config) TokenSource(ctx Context, t *Token) TokenSource { - nwn := &reuseTokenSource{t: t} - nwn.new = tokenRefresher{ - ctx: ctx, - conf: c, - oldToken: &nwn.t, +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, } - return nwn } // tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" // HTTP requests to renew a token using a RefreshToken. type tokenRefresher struct { - ctx Context // used to get HTTP requests - conf *Config - oldToken **Token // pointer to old *Token w/ RefreshToken + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string } -func (tf tokenRefresher) Token() (*Token, error) { - t := *tf.oldToken - if t == nil { - return nil, errors.New("oauth2: attempted use of nil Token") - } - if t.RefreshToken == "" { +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. 
+func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { return nil, errors.New("oauth2: token expired and refresh token is not set") } - return retrieveToken(tf.ctx, tf.conf, url.Values{ + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ "grant_type": {"refresh_token"}, - "refresh_token": {t.RefreshToken}, + "refresh_token": {tf.refreshToken}, }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err } // reuseTokenSource is a TokenSource that holds a single token in memory // and validates its expiry before each call to retrieve it with // Token. If it's expired, it will be auto-refreshed using the // new TokenSource. -// -// The first call to TokenRefresher must be SetToken. type reuseTokenSource struct { new TokenSource // called when t is expired. @@ -270,145 +253,25 @@ func (s *reuseTokenSource) Token() (*Token, error) { return t, nil } -func retrieveToken(ctx Context, c *Config, v url.Values) (*Token, error) { - hc, err := contextClient(ctx) - if err != nil { - return nil, err - } - v.Set("client_id", c.ClientID) - bustedAuth := !providerAuthHeaderWorks(c.Endpoint.TokenURL) - if bustedAuth && c.ClientSecret != "" { - v.Set("client_secret", c.ClientSecret) - } - req, err := http.NewRequest("POST", c.Endpoint.TokenURL, strings.NewReader(v.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - if !bustedAuth && c.ClientSecret != "" { - req.SetBasicAuth(c.ClientID, c.ClientSecret) - } - r, err := hc.Do(req) - if err != nil { - return nil, err - } - defer r.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) - if err != nil { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) - } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body) - } - - var token *Token - content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) - switch content { - case "application/x-www-form-urlencoded", "text/plain": - vals, err := url.ParseQuery(string(body)) - if err != nil { - return nil, err - } - token = &Token{ - AccessToken: vals.Get("access_token"), - TokenType: vals.Get("token_type"), - RefreshToken: vals.Get("refresh_token"), - raw: vals, - } - e := vals.Get("expires_in") - if e == "" { - // TODO(jbd): Facebook's OAuth2 implementation is broken and - // returns expires_in field in expires. Remove the fallback to expires, - // when Facebook fixes their implementation. - e = vals.Get("expires") - } - expires, _ := strconv.Atoi(e) - if expires != 0 { - token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) - } - default: - var tj tokenJSON - if err = json.Unmarshal(body, &tj); err != nil { - return nil, err - } - token = &Token{ - AccessToken: tj.AccessToken, - TokenType: tj.TokenType, - RefreshToken: tj.RefreshToken, - Expiry: tj.expiry(), - raw: make(map[string]interface{}), - } - json.Unmarshal(body, &token.raw) // no error checks for optional fields - } - // Don't overwrite `RefreshToken` with an empty value - // if this was a token refreshing request. - if token.RefreshToken == "" { - token.RefreshToken = v.Get("refresh_token") - } - return token, nil +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. 
+func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} } -// tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. -type tokenJSON struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int32 `json:"expires_in"` - Expires int32 `json:"expires"` // broken Facebook spelling of expires_in +// staticTokenSource is a TokenSource that always returns the same Token. +type staticTokenSource struct { + t *Token } -func (e *tokenJSON) expiry() (t time.Time) { - if v := e.ExpiresIn; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - if v := e.Expires; v != 0 { - return time.Now().Add(time.Duration(v) * time.Second) - } - return -} - -func condVal(v string) []string { - if v == "" { - return nil - } - return []string{v} -} - -// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL -// implements the OAuth2 spec correctly -// See https://code.google.com/p/goauth2/issues/detail?id=31 for background. -// In summary: -// - Reddit only accepts client secret in the Authorization header -// - Dropbox accepts either it in URL param or Auth header, but not both. -// - Google only accepts URL param (not spec compliant?), not Auth header -func providerAuthHeaderWorks(tokenURL string) bool { - if strings.HasPrefix(tokenURL, "https://accounts.google.com/") || - strings.HasPrefix(tokenURL, "https://github.com/") || - strings.HasPrefix(tokenURL, "https://api.instagram.com/") || - strings.HasPrefix(tokenURL, "https://www.douban.com/") || - strings.HasPrefix(tokenURL, "https://api.dropbox.com/") || - strings.HasPrefix(tokenURL, "https://api.soundcloud.com/") || - strings.HasPrefix(tokenURL, "https://www.linkedin.com/") { - // Some sites fail to implement the OAuth2 spec fully. - return false - } - - // Assume the provider implements the spec properly - // otherwise. We can add more exceptions as they're - // discovered. We will _not_ be adding configurable hooks - // to this package to let users select server bugs. - return true +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil } // HTTPClient is the context key to use with golang.org/x/net/context's // WithValue function to associate an *http.Client value with a context. -var HTTPClient contextKey - -// contextKey is just an empty struct. It exists so HTTPClient can be -// an immutable public variable with a unique type. It's immutable -// because nobody else can create a contextKey, being unexported. -type contextKey struct{} +var HTTPClient internal.ContextKey // NewClient creates an *http.Client from a Context and TokenSource. // The returned client is not valid beyond the lifetime of the context. @@ -416,17 +279,17 @@ type contextKey struct{} // As a special case, if src is nil, a non-OAuth2 client is returned // using the provided context. This exists to support related OAuth2 // packages. 
-func NewClient(ctx Context, src TokenSource) *http.Client { +func NewClient(ctx context.Context, src TokenSource) *http.Client { if src == nil { - c, err := contextClient(ctx) + c, err := internal.ContextClient(ctx) if err != nil { - return &http.Client{Transport: errorTransport{err}} + return &http.Client{Transport: internal.ErrorTransport{err}} } return c } return &http.Client{ Transport: &Transport{ - Base: contextTransport(ctx), + Base: internal.ContextTransport(ctx), Source: ReuseTokenSource(nil, src), }, } diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go index 804098ac9df..2f7d731c1b5 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go @@ -5,11 +5,16 @@ package oauth2 import ( + "encoding/json" "errors" + "fmt" "io/ioutil" "net/http" "net/http/httptest" + "reflect" + "strconv" "testing" + "time" "golang.org/x/net/context" ) @@ -56,6 +61,15 @@ func TestAuthCodeURL(t *testing.T) { } } +func TestAuthCodeURL_CustomParam(t *testing.T) { + conf := newConf("server") + param := SetAuthURLParam("foo", "bar") + url := conf.AuthCodeURL("baz", param) + if url != "server/auth?client_id=CLIENT_ID&foo=bar&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=baz" { + t.Errorf("Auth code URL doesn't match the expected, found: %v", url) + } +} + func TestAuthCodeURL_Optional(t *testing.T) { conf := &Config{ ClientID: "CLIENT_ID", @@ -158,6 +172,60 @@ func TestExchangeRequest_JSONResponse(t *testing.T) { } } +const day = 24 * time.Hour + +func TestExchangeRequest_JSONResponse_Expiry(t *testing.T) { + seconds := int32(day.Seconds()) + jsonNumberType := reflect.TypeOf(json.Number("0")) + for _, c := range []struct { + expires string + expect error + }{ + {fmt.Sprintf(`"expires_in": %d`, seconds), nil}, + {fmt.Sprintf(`"expires_in": "%d"`, seconds), nil}, // PayPal case + {fmt.Sprintf(`"expires": %d`, seconds), nil}, // Facebook case + {`"expires": false`, &json.UnmarshalTypeError{Value: "bool", Type: jsonNumberType}}, // wrong type + {`"expires": {}`, &json.UnmarshalTypeError{Value: "object", Type: jsonNumberType}}, // wrong type + {`"expires": "zzz"`, &strconv.NumError{Func: "ParseInt", Num: "zzz", Err: strconv.ErrSyntax}}, // wrong value + } { + testExchangeRequest_JSONResponse_expiry(t, c.expires, c.expect) + } +} + +func testExchangeRequest_JSONResponse_expiry(t *testing.T, exp string, expect error) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(fmt.Sprintf(`{"access_token": "90d", "scope": "user", "token_type": "bearer", %s}`, exp))) + })) + defer ts.Close() + conf := newConf(ts.URL) + t1 := time.Now().Add(day) + tok, err := conf.Exchange(NoContext, "exchange-code") + t2 := time.Now().Add(day) + // Do a fmt.Sprint comparison so either side can be + // nil. fmt.Sprint just stringifies them to "<nil>", and no + // non-nil expected error ever stringifies as "<nil>", so this + // isn't terribly disgusting. We do this because Go 1.4 and + // Go 1.5 return a different deep value for + // json.UnmarshalTypeError. In Go 1.5, the + // json.UnmarshalTypeError contains a new field with a new + // non-zero value. Rather than ignore it here with reflect or + // add new files and +build tags, just look at the strings.
+ if fmt.Sprint(err) != fmt.Sprint(expect) { + t.Errorf("Error = %v; want %v", err, expect) + } + if err != nil { + return + } + if !tok.Valid() { + t.Fatalf("Token invalid. Got: %#v", tok) + } + expiry := tok.Expiry + if expiry.Before(t1) || expiry.After(t2) { + t.Errorf("Unexpected value for Expiry: %v (should be between %v and %v)", expiry, t1, t2) + } +} + func TestExchangeRequest_BadResponse(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") @@ -210,6 +278,53 @@ func TestExchangeRequest_NonBasicAuth(t *testing.T) { conf.Exchange(ctx, "code") } +func TestPasswordCredentialsTokenRequest(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer r.Body.Close() + expected := "/token" + if r.URL.String() != expected { + t.Errorf("URL = %q; want %q", r.URL, expected) + } + headerAuth := r.Header.Get("Authorization") + expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" + if headerAuth != expected { + t.Errorf("Authorization header = %q; want %q", headerAuth, expected) + } + headerContentType := r.Header.Get("Content-Type") + expected = "application/x-www-form-urlencoded" + if headerContentType != expected { + t.Errorf("Content-Type header = %q; want %q", headerContentType, expected) + } + body, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Failed reading request body: %s.", err) + } + expected = "client_id=CLIENT_ID&grant_type=password&password=password1&scope=scope1+scope2&username=user1" + if string(body) != expected { + t.Errorf("res.Body = %q; want %q", string(body), expected) + } + w.Header().Set("Content-Type", "application/x-www-form-urlencoded") + w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer")) + })) + defer ts.Close() + conf := newConf(ts.URL) + tok, err := conf.PasswordCredentialsToken(NoContext, "user1", "password1") + if err != nil { + t.Error(err) + } + if !tok.Valid() { + t.Fatalf("Token invalid.
Got: %#v", tok) + } + expected := "90d64460d14870c08c81352a05dedd3465940a7c" + if tok.AccessToken != expected { + t.Errorf("AccessToken = %q; want %q", tok.AccessToken, expected) + } + expected = "bearer" + if tok.TokenType != expected { + t.Errorf("TokenType = %q; want %q", tok.TokenType, expected) + } +} + func TestTokenRefreshRequest(t *testing.T) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.String() == "/somethingelse" { @@ -258,3 +373,50 @@ func TestFetchWithNoRefreshToken(t *testing.T) { t.Errorf("Fetch should return an error if no refresh token is set") } } + +func TestRefreshToken_RefreshTokenReplacement(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"access_token":"ACCESS TOKEN", "scope": "user", "token_type": "bearer", "refresh_token": "NEW REFRESH TOKEN"}`)) + return + })) + defer ts.Close() + conf := newConf(ts.URL) + tkr := tokenRefresher{ + conf: conf, + ctx: NoContext, + refreshToken: "OLD REFRESH TOKEN", + } + tk, err := tkr.Token() + if err != nil { + t.Errorf("Unexpected refreshToken error returned: %v", err) + return + } + if tk.RefreshToken != tkr.refreshToken { + t.Errorf("tokenRefresher.refresh_token = %s; want %s", tkr.refreshToken, tk.RefreshToken) + } +} + +func TestConfigClientWithToken(t *testing.T) { + tok := &Token{ + AccessToken: "abc123", + } + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), fmt.Sprintf("Bearer %s", tok.AccessToken); got != want { + t.Errorf("Authorization header = %q; want %q", got, want) + } + return + })) + defer ts.Close() + conf := newConf(ts.URL) + + c := conf.Client(NoContext, tok) + req, err := http.NewRequest("GET", ts.URL, nil) + if err != nil { + t.Error(err) + } + _, err = c.Do(req) + if err != nil { + t.Error(err) + } +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go b/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go new file mode 100644 index 00000000000..f0b66f97def --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go @@ -0,0 +1,16 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki. +package odnoklassniki + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is Odnoklassniki's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.odnoklassniki.ru/oauth/authorize", + TokenURL: "https://api.odnoklassniki.ru/oauth/token.do", +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go b/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go new file mode 100644 index 00000000000..a99366b6e24 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/paypal/paypal.go @@ -0,0 +1,22 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package paypal provides constants for using OAuth2 to access PayPal. +package paypal + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment. 
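An illustrative aside, not part of the vendored change: a provider package like this one is consumed by plugging the Endpoint declared just below into an oauth2.Config. A minimal sketch; the client credentials, redirect URL, and scope are placeholders:

package main

import (
	"fmt"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/paypal"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",     // placeholder
		ClientSecret: "CLIENT_SECRET", // placeholder
		Endpoint:     paypal.Endpoint, // or paypal.SandboxEndpoint while testing
		RedirectURL:  "https://example.com/oauth2/callback",
		Scopes:       []string{"openid"}, // placeholder scope
	}
	// Direct the user here to grant access; the provider redirects back
	// with a code that conf.Exchange turns into a token.
	fmt.Println(conf.AuthCodeURL("state-token"))
}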
+var Endpoint = oauth2.Endpoint{ + AuthURL: "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize", + TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice", +} + +// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment. +var SandboxEndpoint = oauth2.Endpoint{ + AuthURL: "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize", + TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice", +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/token.go b/Godeps/_workspace/src/golang.org/x/oauth2/token.go index e04a2dd9cfc..ebbdddbdceb 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/token.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/token.go @@ -7,9 +7,18 @@ package oauth2 import ( "net/http" "net/url" + "strings" "time" + + "golang.org/x/net/context" + "golang.org/x/oauth2/internal" ) +// expiryDelta determines how much earlier a token should be considered +// expired than its actual expiration time. It is used to avoid late +// expirations due to client-server time mismatches. +const expiryDelta = 10 * time.Second + // Token represents the credentials used to authorize // the requests to access protected resources on the OAuth 2.0 // provider's backend. @@ -45,6 +54,15 @@ type Token struct { // Type returns t.TokenType if non-empty, else "Bearer". func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } if t.TokenType != "" { return t.TokenType } @@ -90,10 +108,36 @@ func (t *Token) expired() bool { if t.Expiry.IsZero() { return false } - return t.Expiry.Before(time.Now()) + return t.Expiry.Add(-expiryDelta).Before(time.Now()) } // Valid reports whether t is non-nil, has an AccessToken, and is not expired. func (t *Token) Valid() bool { return t != nil && t.AccessToken != "" && !t.expired() } + +// tokenFromInternal maps an *internal.Token struct into +// a *Token struct. +func tokenFromInternal(t *internal.Token) *Token { + if t == nil { + return nil + } + return &Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + raw: t.Raw, + } +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. +// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along +// with an error.
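An illustrative aside, not part of the patch: the expiryDelta and Type changes above are observable directly on a Token. A small self-contained sketch; token values are placeholders:

package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// Expiring in 5s falls inside the 10-second expiryDelta window, so
	// the token already counts as expired (absorbing clock skew).
	soon := &oauth2.Token{AccessToken: "abc", Expiry: time.Now().Add(5 * time.Second)}
	fmt.Println(soon.Valid()) // false

	later := &oauth2.Token{AccessToken: "abc", Expiry: time.Now().Add(time.Minute)}
	fmt.Println(later.Valid()) // true

	// Type now canonicalizes the common token types case-insensitively.
	fmt.Println((&oauth2.Token{TokenType: "bearer"}).Type()) // "Bearer"
}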
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v) + if err != nil { + return nil, err + } + return tokenFromInternal(tk), nil +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go index 74d6366568a..739eeb2a205 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go @@ -4,7 +4,10 @@ package oauth2 -import "testing" +import ( + "testing" + "time" +) func TestTokenExtra(t *testing.T) { type testCase struct { @@ -28,3 +31,20 @@ func TestTokenExtra(t *testing.T) { } } } + +func TestTokenExpiry(t *testing.T) { + now := time.Now() + cases := []struct { + name string + tok *Token + want bool + }{ + {name: "12 seconds", tok: &Token{Expiry: now.Add(12 * time.Second)}, want: false}, + {name: "10 seconds", tok: &Token{Expiry: now.Add(expiryDelta)}, want: true}, + } + for _, tc := range cases { + if got, want := tc.tok.expired(), tc.want; got != want { + t.Errorf("expired (%q) = %v; want %v", tc.name, got, want) + } + } +} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/transport.go b/Godeps/_workspace/src/golang.org/x/oauth2/transport.go index 10339a0be7d..90db088332b 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/transport.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/transport.go @@ -130,9 +130,3 @@ func (r *onEOFReader) runFunc() { r.fn = nil } } - -type errorTransport struct{ err error } - -func (t errorTransport) RoundTrip(*http.Request) (*http.Response, error) { - return nil, t.err -} diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go index efb8232ac4c..35cb25ed56e 100644 --- a/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go +++ b/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go @@ -32,6 +32,39 @@ func TestTransportTokenSource(t *testing.T) { client.Get(server.URL) } +// Test for case-sensitive token types, per https://github.com/golang/oauth2/issues/113 +func TestTransportTokenSourceTypes(t *testing.T) { + const val = "abc" + tests := []struct { + key string + val string + want string + }{ + {key: "bearer", val: val, want: "Bearer abc"}, + {key: "mac", val: val, want: "MAC abc"}, + {key: "basic", val: val, want: "Basic abc"}, + } + for _, tc := range tests { + ts := &tokenSource{ + token: &Token{ + AccessToken: tc.val, + TokenType: tc.key, + }, + } + tr := &Transport{ + Source: ts, + } + server := newMockServer(func(w http.ResponseWriter, r *http.Request) { + if got, want := r.Header.Get("Authorization"), tc.want; got != want { + t.Errorf("Authorization header (%q) = %q; want %q", val, got, want) + } + }) + defer server.Close() + client := http.Client{Transport: tr} + client.Get(server.URL) + } +} + func TestTokenValidNoAccessToken(t *testing.T) { token := &Token{} if token.Valid() { diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go b/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go new file mode 100644 index 00000000000..00e929357a2 --- /dev/null +++ b/Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go @@ -0,0 +1,16 @@ +// Copyright 2015 The oauth2 Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package vk provides constants for using OAuth2 to access VK.com. 
+package vk + +import ( + "golang.org/x/oauth2" +) + +// Endpoint is VK's OAuth 2.0 endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://oauth.vk.com/authorize", + TokenURL: "https://oauth.vk.com/access_token", +} diff --git a/build/common.sh b/build/common.sh index 07f06325033..022e95227d0 100644 --- a/build/common.sh +++ b/build/common.sh @@ -631,6 +631,9 @@ function kube::release::create_docker_images_for_server() { echo $md5_sum > ${1}/${binary_name}.docker_tag rm -rf ${docker_build_path} + + kube::log::status "Deleting docker image ${docker_image_tag}" + "${DOCKER[@]}" rmi ${docker_image_tag} ) & done diff --git a/build/make-release-notes.sh b/build/make-release-notes.sh index 0e2730639b1..d6359674aa1 100755 --- a/build/make-release-notes.sh +++ b/build/make-release-notes.sh @@ -24,19 +24,18 @@ function pop_dir { } KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. +source "${KUBE_ROOT}/hack/lib/init.sh" if [[ -z "${1:-}" ]]; then - echo "Usage: ${0} " + echo "Usage: ${0} [opts]" exit 1 fi pushd . > /dev/null trap 'pop_dir' INT TERM EXIT -cd ${KUBE_ROOT}/contrib/release-notes -# TODO: vendor these dependencies, but using godep again will be annoying... -GOPATH=$PWD go get github.com/google/go-github/github -GOPATH=$PWD go get github.com/google/go-querystring/query -GOPATH=$PWD go build release-notes.go -./release-notes --last-release-pr=${1} +kube::golang::build_binaries contrib/release-notes +kube::golang::place_bins +releasenotes=$(kube::util::find-binary "release-notes") +"${releasenotes}" --last-release-pr=${1} ${@} diff --git a/cluster/addons/dns/kube2sky/kube2sky.go b/cluster/addons/dns/kube2sky/kube2sky.go index f574174dd00..aea59b6d5fa 100644 --- a/cluster/addons/dns/kube2sky/kube2sky.go +++ b/cluster/addons/dns/kube2sky/kube2sky.go @@ -213,7 +213,7 @@ func (ks *kube2sky) handleEndpointAdd(obj interface{}) { func (ks *kube2sky) generateRecordsForPortalService(subdomain string, service *kapi.Service) error { for i := range service.Spec.Ports { - b, err := json.Marshal(getSkyMsg(service.Spec.PortalIP, service.Spec.Ports[i].Port)) + b, err := json.Marshal(getSkyMsg(service.Spec.ClusterIP, service.Spec.Ports[i].Port)) if err != nil { return err } @@ -229,7 +229,7 @@ func (ks *kube2sky) addDNS(subdomain string, service *kapi.Service) error { if len(service.Spec.Ports) == 0 { glog.Fatalf("unexpected service with no ports: %v", service) } - // if PortalIP is not set, a DNS entry should not be created + // if ClusterIP is not set, a DNS entry should not be created if !kapi.IsServiceIPSet(service) { return ks.newHeadlessService(subdomain, service) } diff --git a/cluster/addons/dns/kube2sky/kube2sky_test.go b/cluster/addons/dns/kube2sky/kube2sky_test.go index 324a634f1a8..5c6d8c68302 100644 --- a/cluster/addons/dns/kube2sky/kube2sky_test.go +++ b/cluster/addons/dns/kube2sky/kube2sky_test.go @@ -94,7 +94,7 @@ type hostPort struct { func getHostPort(service *kapi.Service) *hostPort { return &hostPort{ - Host: service.Spec.PortalIP, + Host: service.Spec.ClusterIP, Port: service.Spec.Ports[0].Port, } } @@ -134,7 +134,7 @@ func TestHeadlessService(t *testing.T) { Namespace: testNamespace, }, Spec: kapi.ServiceSpec{ - PortalIP: "None", + ClusterIP: "None", Ports: []kapi.ServicePort{ {Port: 80}, }, @@ -187,7 +187,7 @@ func TestHeadlessServiceEndpointsUpdate(t *testing.T) { Namespace: testNamespace, }, Spec: kapi.ServiceSpec{ - PortalIP: "None", + ClusterIP: "None", Ports: []kapi.ServicePort{ {Port: 80}, }, @@ -244,7 +244,7 @@ func TestHeadlessServiceWithDelayedEndpointsAddition(t *testing.T) { 
Namespace: testNamespace, }, Spec: kapi.ServiceSpec{ - PortalIP: "None", + ClusterIP: "None", Ports: []kapi.ServicePort{ {Port: 80}, }, @@ -308,7 +308,7 @@ func TestAddSinglePortService(t *testing.T) { Port: 80, }, }, - PortalIP: "1.2.3.4", + ClusterIP: "1.2.3.4", }, } k2s.newService(&service) @@ -334,12 +334,12 @@ func TestUpdateSinglePortService(t *testing.T) { Port: 80, }, }, - PortalIP: "1.2.3.4", + ClusterIP: "1.2.3.4", }, } k2s.newService(&service) assert.Len(t, ec.writes, 2) - service.Spec.PortalIP = "0.0.0.0" + service.Spec.ClusterIP = "0.0.0.0" k2s.newService(&service) expectedValue := getHostPort(&service) assertDnsServiceEntryInEtcd(t, ec, testService, testNamespace, expectedValue) @@ -363,7 +363,7 @@ func TestDeleteSinglePortService(t *testing.T) { Port: 80, }, }, - PortalIP: "1.2.3.4", + ClusterIP: "1.2.3.4", }, } // Add the service diff --git a/cluster/aws/config-default.sh b/cluster/aws/config-default.sh index 4910ce92599..48051c58ea3 100644 --- a/cluster/aws/config-default.sh +++ b/cluster/aws/config-default.sh @@ -41,7 +41,7 @@ MINION_TAG="${INSTANCE_PREFIX}-minion" MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_SCOPES="" POLL_SLEEP_INTERVAL=3 -PORTAL_NET="10.0.0.0/16" +SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" # If set to Elastic IP, master instance will be associated with this IP. # If set to auto, a new Elastic IP will be aquired @@ -75,7 +75,7 @@ DNS_DOMAIN="cluster.local" DNS_REPLICAS=1 # Admission Controllers to invoke prior to persisting objects in cluster -ADMISSION_CONTROL=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota +ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota # Optional: Enable/disable public IP assignment for minions. # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! diff --git a/cluster/aws/config-test.sh b/cluster/aws/config-test.sh index 584e0b29236..80ee7f04da9 100755 --- a/cluster/aws/config-test.sh +++ b/cluster/aws/config-test.sh @@ -37,7 +37,7 @@ MINION_TAG="${INSTANCE_PREFIX}-minion" MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_SCOPES="" POLL_SLEEP_INTERVAL=3 -PORTAL_NET="10.0.0.0/16" +SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" # If set to Elastic IP, master instance will be associated with this IP. # If set to auto, a new Elastic IP will be aquired @@ -72,7 +72,7 @@ DNS_DOMAIN="cluster.local" DNS_REPLICAS=1 # Admission Controllers to invoke prior to persisting objects in cluster -ADMISSION_CONTROL=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota +ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota # Optional: Enable/disable public IP assignment for minions. # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! 
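Stepping back from the cluster configs for a moment: the PortalIP-to-ClusterIP renames in kube2sky above hinge on one predicate. The real helper is kapi.IsServiceIPSet in pkg/api; the stand-in below models its documented behavior (an assumption, not the actual k8s type):

package main

import "fmt"

// Service mirrors only the field kube2sky consults; it is an
// illustrative stand-in, not the real kapi.Service type.
type Service struct {
	ClusterIP string
}

// isServiceIPSet models kapi.IsServiceIPSet: a DNS record for the
// service IP only makes sense when a real cluster IP was allocated.
func isServiceIPSet(s Service) bool {
	return s.ClusterIP != "None" && s.ClusterIP != ""
}

func main() {
	fmt.Println(isServiceIPSet(Service{ClusterIP: "1.2.3.4"})) // true: publish a service-IP record
	fmt.Println(isServiceIPSet(Service{ClusterIP: "None"}))    // false: headless, per-endpoint records
}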
diff --git a/cluster/aws/templates/create-dynamic-salt-files.sh b/cluster/aws/templates/create-dynamic-salt-files.sh index 2e767e329ac..e03d23665c2 100644 --- a/cluster/aws/templates/create-dynamic-salt-files.sh +++ b/cluster/aws/templates/create-dynamic-salt-files.sh @@ -22,7 +22,7 @@ mkdir -p /srv/salt-overlay/pillar cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")' -portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")' +service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")' enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")' enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")' diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index d0e60d2e4c0..96a00c24f14 100644 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -507,7 +507,7 @@ function kube-up { echo "readonly ZONE='${ZONE}'" echo "readonly KUBE_USER='${KUBE_USER}'" echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD}'" - echo "readonly PORTAL_NET='${PORTAL_NET}'" + echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" echo "readonly ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING:-false}'" echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'" echo "readonly ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'" diff --git a/cluster/azure/config-default.sh b/cluster/azure/config-default.sh index 8b8bbb6dfc9..2039ae84829 100644 --- a/cluster/azure/config-default.sh +++ b/cluster/azure/config-default.sh @@ -35,7 +35,7 @@ MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}})) MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_SCOPES="" -PORTAL_NET="10.250.0.0/16" +SERVICE_CLUSTER_IP_RANGE="10.250.0.0/16" # formerly PORTAL_NET # Optional: Install node logging ENABLE_NODE_LOGGING=false @@ -49,4 +49,4 @@ ELASTICSEARCH_LOGGING_REPLICAS=1 ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-true}" # Admission Controllers to invoke prior to persisting objects in cluster -ADMISSION_CONTROL=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota +ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota diff --git a/cluster/azure/templates/create-dynamic-salt-files.sh b/cluster/azure/templates/create-dynamic-salt-files.sh index f8e5a42726e..d946fa1957b 100644 --- a/cluster/azure/templates/create-dynamic-salt-files.sh +++ b/cluster/azure/templates/create-dynamic-salt-files.sh @@ -22,7 +22,7 @@ mkdir -p /srv/salt-overlay/pillar cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' node_instance_prefix: $NODE_INSTANCE_PREFIX -portal_net: $PORTAL_NET +service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")' EOF diff --git a/cluster/azure/util.sh b/cluster/azure/util.sh index 399c13ca59c..df1a1083fa2 100644 --- a/cluster/azure/util.sh +++ b/cluster/azure/util.sh @@ -322,7 +322,7 @@ function kube-up { echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'" echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'" echo "readonly MASTER_HTPASSWD='${htpasswd}'" - echo "readonly PORTAL_NET='${PORTAL_NET}'" + echo "readonly
SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'" grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh" grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/create-dynamic-salt-files.sh" diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh index 0b2d68e1a40..949aa3f5fb7 100755 --- a/cluster/gce/config-default.sh +++ b/cluster/gce/config-default.sh @@ -44,7 +44,7 @@ CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}" MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/monitoring" "https://www.googleapis.com/auth/logging.write") # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default. POLL_SLEEP_INTERVAL=3 -PORTAL_NET="10.0.0.0/16" +SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET ALLOCATE_NODE_CIDRS=true # When set to true, Docker Cache is enabled by default as part of the cluster bring up. @@ -77,4 +77,4 @@ DNS_DOMAIN="cluster.local" DNS_REPLICAS=1 # Admission Controllers to invoke prior to persisting objects in cluster -ADMISSION_CONTROL=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota +ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index 405f05eee26..e74203b6dff 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -44,7 +44,7 @@ MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/logging.write" "https://www.googleapis.com/auth/monitoring") # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default. POLL_SLEEP_INTERVAL=3 -PORTAL_NET="10.0.0.0/16" +SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET # When set to true, Docker Cache is enabled by default as part of the cluster bring up. ENABLE_DOCKER_REGISTRY_CACHE=true @@ -75,4 +75,4 @@ DNS_SERVER_IP="10.0.0.10" DNS_DOMAIN="cluster.local" DNS_REPLICAS=1 -ADMISSION_CONTROL=NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota +ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh index 800d8e92bfa..647ef041c61 100644 --- a/cluster/gce/configure-vm.sh +++ b/cluster/gce/configure-vm.sh @@ -250,7 +250,7 @@ instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")' cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")' -portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")' +service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")' enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")' enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")' @@ -437,15 +437,22 @@ function download-release() { # store it when we download, and then when it's different infer that # a push occurred (otherwise it's a simple reboot). 
- echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)" - download-or-bust "$SERVER_BINARY_TAR_URL" + # In case of failure of unpacking Salt tree (the last command in the + # "until" block) retry downloading both release and Salt tars. + until + echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)" + download-or-bust "$SERVER_BINARY_TAR_URL" - echo "Downloading Salt tar ($SALT_TAR_URL)" - download-or-bust "$SALT_TAR_URL" + echo "Downloading Salt tar ($SALT_TAR_URL)" + download-or-bust "$SALT_TAR_URL" - echo "Unpacking Salt tree" - rm -rf kubernetes - tar xzf "${SALT_TAR_URL##*/}" + echo "Unpacking Salt tree" + rm -rf kubernetes + tar xzf "${SALT_TAR_URL##*/}" + do + sleep 15 + echo "Couldn't unpack Salt tree. Retrying..." + done echo "Running release install script" sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}" diff --git a/cluster/gce/coreos/helper.sh b/cluster/gce/coreos/helper.sh index 526d8e55f17..e8f5c6b24df 100644 --- a/cluster/gce/coreos/helper.sh +++ b/cluster/gce/coreos/helper.sh @@ -31,7 +31,7 @@ NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX}) CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16}) SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL}) SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL}) -PORTAL_NET: $(yaml-quote ${PORTAL_NET}) +SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE}) ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false}) ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none}) ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false}) @@ -64,7 +64,7 @@ ENV_TIMESTAMP=$(yaml-quote $(date -u +%Y-%m-%dT%T%z)) INSTANCE_PREFIX=$(yaml-quote ${INSTANCE_PREFIX}) NODE_INSTANCE_PREFIX=$(yaml-quote ${NODE_INSTANCE_PREFIX}) SERVER_BINARY_TAR_URL=$(yaml-quote ${SERVER_BINARY_TAR_URL}) -PORTAL_NET=$(yaml-quote ${PORTAL_NET}) +SERVICE_CLUSTER_IP_RANGE=$(yaml-quote ${SERVICE_CLUSTER_IP_RANGE}) ENABLE_CLUSTER_MONITORING=$(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none}) ENABLE_NODE_MONITORING=$(yaml-quote ${ENABLE_NODE_MONITORING:-false}) ENABLE_CLUSTER_LOGGING=$(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false}) @@ -121,12 +121,11 @@ function create-master-instance { --image "${MASTER_IMAGE}" \ --tags "${MASTER_TAG}" \ --network "${NETWORK}" \ - --scopes "storage-ro" "compute-rw" \ + --scopes "storage-ro,compute-rw" \ --can-ip-forward \ --metadata-from-file \ - "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" \ - "kube-env=${KUBE_TEMP}/master-kube-env.yaml" \ - --disk name="${MASTER_NAME}-pd" device-name=master-pd mode=rw boot=no auto-delete=no + "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh,kube-env=${KUBE_TEMP}/master-kube-env.yaml" \ + --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no" } # TODO(dawnchen): Check $CONTAINER_RUNTIME to decide which diff --git a/cluster/gce/debian/helper.sh b/cluster/gce/debian/helper.sh index 6ae48475d0c..204318dfd72 100644 --- a/cluster/gce/debian/helper.sh +++ b/cluster/gce/debian/helper.sh @@ -29,7 +29,7 @@ NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX}) CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16}) SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL}) SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL}) -PORTAL_NET: $(yaml-quote ${PORTAL_NET}) +SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE}) ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false}) ENABLE_CLUSTER_MONITORING: $(yaml-quote 
${ENABLE_CLUSTER_MONITORING:-none}) ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false}) @@ -99,12 +99,11 @@ function create-master-instance { --image "${MASTER_IMAGE}" \ --tags "${MASTER_TAG}" \ --network "${NETWORK}" \ - --scopes "storage-ro" "compute-rw" \ + --scopes "storage-ro,compute-rw" \ --can-ip-forward \ --metadata-from-file \ - "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" \ - "kube-env=${KUBE_TEMP}/master-kube-env.yaml" \ - --disk name="${MASTER_NAME}-pd" device-name=master-pd mode=rw boot=no auto-delete=no + "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh,kube-env=${KUBE_TEMP}/master-kube-env.yaml" \ + --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no" } # TODO(mbforbes): Make $1 required. diff --git a/cluster/gce/upgrade.sh b/cluster/gce/upgrade.sh index c8f9b1b72c5..e75ffd6c804 100755 --- a/cluster/gce/upgrade.sh +++ b/cluster/gce/upgrade.sh @@ -159,7 +159,7 @@ function upgrade-nodes() { # TODO(mbforbes): Refactor setting scope flags. local -a scope_flags=() if (( "${#MINION_SCOPES[@]}" > 0 )); then - scope_flags=("--scopes" "${MINION_SCOPES[@]}") + scope_flags=("--scopes" "$(join_csv ${MINION_SCOPES[@]})") else scope_flags=("--no-scopes") fi diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index cc9c243744a..da69733eb15 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -36,6 +36,10 @@ ALLOCATE_NODE_CIDRS=true KUBE_PROMPT_FOR_UPDATE=y KUBE_SKIP_UPDATE=${KUBE_SKIP_UPDATE-"n"} +function join_csv { + local IFS=','; echo "$*"; +} + # Verify prereqs function verify-prereqs { local cmd @@ -145,10 +149,10 @@ function already-staged() { local -r file=$1 local -r newsum=$2 - [[ -e "${file}.sha1" ]] || return 1 + [[ -e "${file}.uploaded.sha1" ]] || return 1 local oldsum - oldsum=$(cat "${file}.sha1") + oldsum=$(cat "${file}.uploaded.sha1") [[ "${oldsum}" == "${newsum}" ]] } @@ -166,6 +170,7 @@ function copy-if-not-staged() { echo "${server_hash}" > "${tar}.sha1" gsutil -m -q -h "Cache-Control:private, max-age=0" cp "${tar}" "${tar}.sha1" "${staging_path}" gsutil -m acl ch -g all:R "${gs_url}" "${gs_url}.sha1" >/dev/null 2>&1 + echo "${server_hash}" > "${tar}.uploaded.sha1" fi } @@ -363,7 +368,7 @@ function create-firewall-rule { --network "${NETWORK}" \ --source-ranges "$2" \ --target-tags "$3" \ - --allow tcp udp icmp esp ah sctp; then + --allow tcp,udp,icmp,esp,ah,sctp; then if (( attempt > 5 )); then echo -e "${color_red}Failed to create firewall rule $1 ${color_norm}" exit 2 @@ -459,7 +464,7 @@ function add-instance-metadata-from-file { if ! gcloud compute instances add-metadata "${instance}" \ --project "${PROJECT}" \ --zone "${ZONE}" \ - --metadata-from-file $(IFS=, ; echo "${kvs[*]}"); then + --metadata-from-file "$(join_csv ${kvs[@]})"; then if (( attempt > 5 )); then echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}" exit 2 @@ -575,7 +580,7 @@ function kube-up { --project "${PROJECT}" \ --network "${NETWORK}" \ --source-ranges "10.0.0.0/8" \ - --allow "tcp:1-65535" "udp:1-65535" "icmp" & + --allow "tcp:1-65535,udp:1-65535,icmp" & fi if ! gcloud compute firewall-rules describe --project "${PROJECT}" "${NETWORK}-default-ssh" &>/dev/null; then @@ -637,7 +642,7 @@ function kube-up { # TODO(mbforbes): Refactor setting scope flags. 
local -a scope_flags=() if (( "${#MINION_SCOPES[@]}" > 0 )); then - scope_flags=("--scopes" "${MINION_SCOPES[@]}") + scope_flags=("--scopes" "$(join_csv ${MINION_SCOPES[@]})") else scope_flags=("--no-scopes") fi @@ -665,8 +670,18 @@ function kube-up { echo " up." echo + # curl in mavericks is borked. + secure="" + if which sw_vers > /dev/null; then + if [[ $(sw_vers | grep ProductVersion | awk '{print $2}') = "10.9."* ]]; then + secure="--insecure" + fi + fi + + until curl --cacert "${CERT_DIR}/pki/ca.crt" \ -H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \ + ${secure} \ --max-time 5 --fail --output /dev/null --silent \ "https://${KUBE_MASTER_IP}/api/v1beta3/pods"; do printf "." diff --git a/cluster/gke/util.sh b/cluster/gke/util.sh index 91dd8cf806e..b79e949ee3c 100755 --- a/cluster/gke/util.sh +++ b/cluster/gke/util.sh @@ -167,7 +167,7 @@ function test-setup() { # collisions here? "${GCLOUD}" compute firewall-rules create \ "${MINION_TAG}-${USER}-http-alt" \ - --allow tcp:80 tcp:8080 \ + --allow tcp:80,tcp:8080 \ --project "${PROJECT}" \ --target-tags "${MINION_TAG}" \ --network="${NETWORK}" diff --git a/cluster/images/hyperkube/master-multi.json b/cluster/images/hyperkube/master-multi.json index 49a34de4560..4b980648387 100644 --- a/cluster/images/hyperkube/master-multi.json +++ b/cluster/images/hyperkube/master-multi.json @@ -23,7 +23,7 @@ "command": [ "/hyperkube", "apiserver", - "--portal_net=10.0.0.1/24", + "--service-cluster-ip-range=10.0.0.1/24", "--address=0.0.0.0", "--etcd_servers=http://127.0.0.1:4001", "--cluster_name=kubernetes", diff --git a/cluster/images/hyperkube/master.json b/cluster/images/hyperkube/master.json index ff94d0f1e1e..13975101b6c 100644 --- a/cluster/images/hyperkube/master.json +++ b/cluster/images/hyperkube/master.json @@ -23,7 +23,7 @@ "command": [ "/hyperkube", "apiserver", - "--portal_net=10.0.0.1/24", + "--service-cluster-ip-range=10.0.0.1/24", "--address=127.0.0.1", "--etcd_servers=http://127.0.0.1:4001", "--cluster_name=kubernetes", diff --git a/cluster/juju/charms/trusty/kubernetes-master/files/apiserver.upstart.tmpl b/cluster/juju/charms/trusty/kubernetes-master/files/apiserver.upstart.tmpl index b45fd6dd839..3f0109eb83a 100644 --- a/cluster/juju/charms/trusty/kubernetes-master/files/apiserver.upstart.tmpl +++ b/cluster/juju/charms/trusty/kubernetes-master/files/apiserver.upstart.tmpl @@ -11,7 +11,7 @@ exec /usr/local/bin/apiserver \ --address=%(api_bind_address)s \ --etcd_servers=%(etcd_servers)s \ --logtostderr=true \ - --portal_net=10.244.240.0/20 + --service-cluster-ip-range=10.244.240.0/20 diff --git a/cluster/kubectl.sh b/cluster/kubectl.sh index df35ab1620c..e62be51c42b 100755 --- a/cluster/kubectl.sh +++ b/cluster/kubectl.sh @@ -59,7 +59,7 @@ case "$(uname -m)" in host_arch=arm ;; i?86*) - host_arch=x86 + host_arch=386 ;; *) echo "Unsupported host arch. Must be x86_64, 386 or arm." >&2 diff --git a/cluster/libvirt-coreos/config-default.sh b/cluster/libvirt-coreos/config-default.sh index 7a87873729a..7cdcca8769a 100644 --- a/cluster/libvirt-coreos/config-default.sh +++ b/cluster/libvirt-coreos/config-default.sh @@ -46,7 +46,7 @@ for ((i=0; i < NUM_MINIONS; i++)) do done MINION_CONTAINER_SUBNETS[$NUM_MINIONS]=$MASTER_CONTAINER_SUBNET -PORTAL_NET=10.11.0.0/16 +SERVICE_CLUSTER_IP_RANGE=10.11.0.0/16 # formerly PORTAL_NET # Optional: Install node monitoring. 
ENABLE_NODE_MONITORING=true diff --git a/cluster/libvirt-coreos/skydns-rc.yaml b/cluster/libvirt-coreos/skydns-rc.yaml index f2e033af540..7af68d93b45 100644 --- a/cluster/libvirt-coreos/skydns-rc.yaml +++ b/cluster/libvirt-coreos/skydns-rc.yaml @@ -1,44 +1,37 @@ +apiVersion: v1beta3 kind: ReplicationController -apiVersion: v1beta1 -id: skydns -namespace: default -labels: - k8s-app: skydns -desiredState: - replicas: ${DNS_REPLICAS} - replicaSelector: +metadata: + labels: k8s-app: skydns - podTemplate: - labels: - k8s-app: skydns - desiredState: - manifest: - version: v1beta2 - id: skydns - dnsPolicy: "Default" # Don't use cluster DNS. - containers: - - name: etcd - image: quay.io/coreos/etcd:latest - command: [ - \"/etcd\", - \"-bind-addr=127.0.0.1\", - \"-peer-bind-addr=127.0.0.1\", - ] - - name: kube2sky - image: kubernetes/kube2sky:1.0 - command: [ - # entrypoint = \"/kube2sky\", - \"-domain=${DNS_DOMAIN}\", - ] - - name: skydns - image: kubernetes/skydns:2014-12-23-001 - command: [ - # entrypoint = \"/skydns\", - \"-machines=http://localhost:4001\", - \"-addr=0.0.0.0:53\", - \"-domain=${DNS_DOMAIN}.\", - ] - ports: - - name: dns - containerPort: 53 - protocol: UDP + name: skydns + namespace: default +spec: + replicas: ${DNS_REPLICAS} + selector: + k8s-app: skydns + template: + metadata: + labels: + k8s-app: skydns + spec: + containers: + - args: + - \"/etcd\" + - \"-bind-addr=127.0.0.1\" + - \"-peer-bind-addr=127.0.0.1\" + image: quay.io/coreos/etcd:latest + name: etcd + - args: + - \"-domain=${DNS_DOMAIN}\" + image: kubernetes/kube2sky:1.0 + name: kube2sky + - args: + - \"-machines=http://localhost:4001\" + - \"-addr=0.0.0.0:53\" + - \"-domain=${DNS_DOMAIN}.\" + image: kubernetes/skydns:2014-12-23-001 + name: skydns + ports: + - containerPort: 53 + name: dns + protocol: UDP diff --git a/cluster/libvirt-coreos/skydns-svc.yaml b/cluster/libvirt-coreos/skydns-svc.yaml index d765f3f3fa9..65e6d43488a 100644 --- a/cluster/libvirt-coreos/skydns-svc.yaml +++ b/cluster/libvirt-coreos/skydns-svc.yaml @@ -1,12 +1,15 @@ +apiVersion: v1beta3 kind: Service -apiVersion: v1beta1 -id: skydns -namespace: default -protocol: UDP -port: 53 -portalIP: ${DNS_SERVER_IP} -containerPort: 53 -labels: - k8s-app: skydns -selector: - k8s-app: skydns +metadata: + labels: + k8s-app: skydns + name: skydns + namespace: default +spec: + portalIP: ${DNS_SERVER_IP} + ports: + - port: 53 + protocol: UDP + targetPort: 53 + selector: + k8s-app: skydns diff --git a/cluster/libvirt-coreos/user_data_master.yml b/cluster/libvirt-coreos/user_data_master.yml index 256c62fa2ce..376c3db8fd9 100644 --- a/cluster/libvirt-coreos/user_data_master.yml +++ b/cluster/libvirt-coreos/user_data_master.yml @@ -18,7 +18,7 @@ coreos: --port=8080 \ --etcd_servers=http://127.0.0.1:4001 \ --kubelet_port=10250 \ - --portal_net=${PORTAL_NET} + --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE} Restart=always RestartSec=2 diff --git a/cluster/rackspace/cloud-config/master-cloud-config.yaml b/cluster/rackspace/cloud-config/master-cloud-config.yaml index 83e09db30b7..1102d3a023a 100644 --- a/cluster/rackspace/cloud-config/master-cloud-config.yaml +++ b/cluster/rackspace/cloud-config/master-cloud-config.yaml @@ -93,7 +93,7 @@ coreos: --etcd_servers=http://127.0.0.1:4001 \ --logtostderr=true \ --port=8080 \ - --portal_net=PORTAL_NET \ + --service-cluster-ip-range=SERVICE_CLUSTER_IP_RANGE \ --token-auth-file=/var/lib/kube-apiserver/known_tokens.csv \ --v=2 Restart=always diff --git a/cluster/rackspace/config-default.sh 
b/cluster/rackspace/config-default.sh index 6cc4fc918aa..ce1e07fac60 100644 --- a/cluster/rackspace/config-default.sh +++ b/cluster/rackspace/config-default.sh @@ -36,7 +36,7 @@ RAX_NUM_MINIONS="${RAX_NUM_MINIONS-4}" MINION_TAG="tags=${INSTANCE_PREFIX}-minion" MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${RAX_NUM_MINIONS}})) KUBE_NETWORK="10.240.0.0/16" -PORTAL_NET="10.0.0.0/16" +SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET # Optional: Install node monitoring. ENABLE_NODE_MONITORING=true diff --git a/cluster/rackspace/util.sh b/cluster/rackspace/util.sh index c72a9952ac6..faa6705f651 100644 --- a/cluster/rackspace/util.sh +++ b/cluster/rackspace/util.sh @@ -164,7 +164,7 @@ rax-boot-master() { -e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\\&}|" \ -e "s|KUBE_USER|${KUBE_USER}|" \ -e "s|KUBE_PASSWORD|${KUBE_PASSWORD}|" \ - -e "s|PORTAL_NET|${PORTAL_NET}|" \ + -e "s|SERVICE_CLUSTER_IP_RANGE|${SERVICE_CLUSTER_IP_RANGE}|" \ -e "s|OS_AUTH_URL|${OS_AUTH_URL}|" \ -e "s|OS_USERNAME|${OS_USERNAME}|" \ -e "s|OS_PASSWORD|${OS_PASSWORD}|" \ diff --git a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest index d5a74e2201a..07e45c639e4 100644 --- a/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest +++ b/cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest @@ -36,9 +36,9 @@ {% set etcd_servers = "--etcd_servers=http://127.0.0.1:4001" -%} -{% set portal_net = "" -%} -{% if pillar['portal_net'] is defined -%} - {% set portal_net = "--portal_net=" + pillar['portal_net'] -%} +{% set service_cluster_ip_range = "" -%} +{% if pillar['service_cluster_ip_range'] is defined -%} + {% set service_cluster_ip_range = "--service-cluster-ip-range=" + pillar['service_cluster_ip_range'] -%} {% endif -%} {% set cert_file = "--tls_cert_file=/srv/kubernetes/server.cert" -%} @@ -74,7 +74,7 @@ {% set runtime_config = "--runtime_config=" + grains.runtime_config -%} {% endif -%} -{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + portal_net + " " + client_ca_file + " " + basic_auth_file -%} +{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file -%} {% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure_port=" + secure_port + " " + token_auth_file + " " + publicAddressOverride + " " + pillar['log_level'] -%} diff --git a/cluster/saltbase/salt/kube-proxy/initd b/cluster/saltbase/salt/kube-proxy/initd index 4e8f72e1b6b..658689949e3 100644 --- a/cluster/saltbase/salt/kube-proxy/initd +++ b/cluster/saltbase/salt/kube-proxy/initd @@ -40,6 +40,13 @@ DAEMON_USER=root # do_start() { + # Avoid a potential race at boot time when both monit and init.d start + # the same service + PIDS=$(pidof $DAEMON) + for PID in ${PIDS}; do + kill -9 $PID + done + # Raise the file descriptor limit - we expect to open a lot of sockets! ulimit -n 65536 diff --git a/cluster/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default index e34369ea1bc..02c39c49821 100644 --- a/cluster/saltbase/salt/kubelet/default +++ b/cluster/saltbase/salt/kubelet/default @@ -58,10 +58,12 @@ {% set configure_cbr0 = "--configure-cbr0=" + pillar['allocate_node_cidrs'] -%} {% endif -%} -# Run containers under the root cgroup. 
+# Run containers under the root cgroup and create a system container. +{% set system_container = "" -%} {% set cgroup_root = "" -%} {% if grains['os_family'] == 'Debian' -%} + {% set system_container = "--system-container=/system" -%} {% set cgroup_root = "--cgroup_root=/" -%} {% endif -%} -DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{hostname_override}} {{cloud_provider}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{configure_cbr0}} {{cgroup_root}}" +DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{hostname_override}} {{cloud_provider}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}}" diff --git a/cluster/saltbase/salt/kubelet/initd b/cluster/saltbase/salt/kubelet/initd index 5af22468ada..5379c8e07d8 100644 --- a/cluster/saltbase/salt/kubelet/initd +++ b/cluster/saltbase/salt/kubelet/initd @@ -39,6 +39,13 @@ DAEMON_USER=root # do_start() { + # Avoid a potential race at boot time when both monit and init.d start + # the same service + PIDS=$(pidof $DAEMON) + for PID in ${PIDS}; do + kill -9 $PID + done + # Return # 0 if daemon has been started # 1 if daemon was already running diff --git a/cluster/ubuntu/config-default.sh b/cluster/ubuntu/config-default.sh index 16850ecac53..0f603d3b390 100755 --- a/cluster/ubuntu/config-default.sh +++ b/cluster/ubuntu/config-default.sh @@ -23,10 +23,10 @@ export nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223" export roles=("ai" "i" "i") # Define minion numbers export NUM_MINIONS=${NUM_MINIONS:-3} -# define the IP range used for service portal. +# define the IP range used for service cluster IPs. # according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here. -export PORTAL_NET=192.168.3.0/24 -# define the IP range used for flannel overlay network, should not conflict with above PORTAL_NET range +export SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24 # formerly PORTAL_NET +# define the IP range used for flannel overlay network, should not conflict with above SERVICE_CLUSTER_IP_RANGE export FLANNEL_NET=172.16.0.0/16 # Admission Controllers to invoke prior to persisting objects in cluster @@ -52,7 +52,7 @@ DOCKER_OPTS="" # Optional: Install cluster DNS. 
ENABLE_CLUSTER_DNS=true -# DNS_SERVER_IP must be a IP in PORTAL_NET range +# DNS_SERVER_IP must be an IP in SERVICE_CLUSTER_IP_RANGE DNS_SERVER_IP="192.168.3.10" DNS_DOMAIN="cluster.local" DNS_REPLICAS=1 diff --git a/cluster/ubuntu/util.sh b/cluster/ubuntu/util.sh index 9ae5129e225..ab74faa4449 100755 --- a/cluster/ubuntu/util.sh +++ b/cluster/ubuntu/util.sh @@ -210,7 +210,7 @@ KUBE_APISERVER_OPTS="--address=0.0.0.0 \ --port=8080 \ --etcd_servers=http://127.0.0.1:4001 \ --logtostderr=true \ ---portal_net=${1}" +--service-cluster-ip-range=${1}" EOF } @@ -377,7 +377,7 @@ function provision-master() { ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \ setClusterInfo; \ create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \ - create-kube-apiserver-opts "${PORTAL_NET}"; \ + create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}"; \ create-kube-controller-manager-opts "${MINION_IPS}"; \ create-kube-scheduler-opts; \ sudo -p '[sudo] password to copy files and start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \ @@ -416,7 +416,7 @@ function provision-masterandminion() { ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \ setClusterInfo; \ create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \ - create-kube-apiserver-opts "${PORTAL_NET}"; \ + create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}"; \ create-kube-controller-manager-opts "${MINION_IPS}"; \ create-kube-scheduler-opts; \ create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}"; @@ -450,4 +450,4 @@ function kube-push { # Perform preparations required to run e2e tests function prepare-e2e() { echo "Ubuntu doesn't need special preparations for e2e tests" 1>&2 -} \ No newline at end of file +} diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh index a59f1b95e0d..7ab47df5e60 100755 --- a/cluster/vagrant/config-default.sh +++ b/cluster/vagrant/config-default.sh @@ -43,14 +43,14 @@ for ((i=0; i < NUM_MINIONS; i++)) do VAGRANT_MINION_NAMES[$i]="minion-$((i+1))" done -PORTAL_NET=10.247.0.0/16 +SERVICE_CLUSTER_IP_RANGE=10.247.0.0/16 # formerly PORTAL_NET # Since this isn't exposed on the network, default to a simple user/passwd MASTER_USER=vagrant MASTER_PASSWD=vagrant # Admission Controllers to invoke prior to persisting objects in cluster -ADMISSION_CONTROL=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota +ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota # Optional: Install node monitoring.
ENABLE_NODE_MONITORING=true diff --git a/cluster/vagrant/provision-master.sh b/cluster/vagrant/provision-master.sh index bdf9ba514d4..33203fd0529 100755 --- a/cluster/vagrant/provision-master.sh +++ b/cluster/vagrant/provision-master.sh @@ -85,7 +85,7 @@ EOF mkdir -p /srv/salt-overlay/pillar cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls - portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")' + service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")' enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")' diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh index b558e0d3d6f..35ef1d31b0e 100644 --- a/cluster/vagrant/util.sh +++ b/cluster/vagrant/util.sh @@ -127,7 +127,7 @@ function create-provision-scripts { echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'" echo "MINION_CONTAINER_NETMASKS='${MINION_CONTAINER_NETMASKS[@]}'" echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})" - echo "PORTAL_NET='${PORTAL_NET}'" + echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" echo "MASTER_USER='${MASTER_USER}'" echo "MASTER_PASSWD='${MASTER_PASSWD}'" echo "ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'" diff --git a/cluster/validate-cluster.sh b/cluster/validate-cluster.sh index b27b19ae8c8..b844b29dd4e 100755 --- a/cluster/validate-cluster.sh +++ b/cluster/validate-cluster.sh @@ -45,7 +45,8 @@ while true; do if (( ${found} == "${NUM_MINIONS}" )) && (( ${ready} == "${NUM_MINIONS}")); then break else - if (( attempt > 20 )); then + # Set the timeout to ~10 minutes (40 x 15 seconds) to avoid timeouts for 100-node clusters. + if (( attempt > 40 )); then echo -e "${color_red}Detected ${ready} ready nodes, found ${found} nodes out of expected ${NUM_MINIONS}. Your cluster may not be working.${color_norm}" cat -n "${MINIONS_FILE}" exit 2 diff --git a/cluster/vsphere/config-default.sh b/cluster/vsphere/config-default.sh index 8d629f63eb4..1ece58e31f9 100755 --- a/cluster/vsphere/config-default.sh +++ b/cluster/vsphere/config-default.sh @@ -31,7 +31,7 @@ MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_MEMORY_MB=2048 MINION_CPU=1 -PORTAL_NET="10.244.240.0/20" +SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET # Optional: Install node monitoring.
ENABLE_NODE_MONITORING=true diff --git a/cluster/vsphere/config-test.sh b/cluster/vsphere/config-test.sh index 09d5c438667..57cbe8e935e 100755 --- a/cluster/vsphere/config-test.sh +++ b/cluster/vsphere/config-test.sh @@ -31,4 +31,4 @@ MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_MEMORY_MB=1024 MINION_CPU=1 -PORTAL_NET="10.244.240.0/20" +SERVICE_CLUSTER_IP_RANGE="10.244.240.0/20" # formerly PORTAL_NET diff --git a/cluster/vsphere/templates/create-dynamic-salt-files.sh b/cluster/vsphere/templates/create-dynamic-salt-files.sh index 41fd1ae15a3..880b24757a9 100755 --- a/cluster/vsphere/templates/create-dynamic-salt-files.sh +++ b/cluster/vsphere/templates/create-dynamic-salt-files.sh @@ -22,7 +22,7 @@ mkdir -p /srv/salt-overlay/pillar cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' node_instance_prefix: $NODE_INSTANCE_PREFIX -portal_net: $PORTAL_NET +service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE enable_cluster_monitoring: $ENABLE_CLUSTER_MONITORING enable_node_monitoring: $ENABLE_NODE_MONITORING enable_cluster_logging: $ENABLE_CLUSTER_LOGGING diff --git a/cluster/vsphere/util.sh b/cluster/vsphere/util.sh index c517b7d0c87..19361515d4d 100755 --- a/cluster/vsphere/util.sh +++ b/cluster/vsphere/util.sh @@ -280,7 +280,7 @@ function kube-up { echo "readonly MASTER_NAME='${MASTER_NAME}'" echo "readonly INSTANCE_PREFIX='${INSTANCE_PREFIX}'" echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'" - echo "readonly PORTAL_NET='${PORTAL_NET}'" + echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'" echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'" echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'" echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'" diff --git a/cmd/integration/integration.go b/cmd/integration/integration.go index 59873eb3fbc..88a949ef835 100644 --- a/cmd/integration/integration.go +++ b/cmd/integration/integration.go @@ -237,8 +237,8 @@ func podsOnMinions(c *client.Client, podNamespace string, labelSelector labels.S for i := range pods.Items { pod := pods.Items[i] podString := fmt.Sprintf("%q/%q", pod.Namespace, pod.Name) - glog.Infof("Check whether pod %q exists on node %q", podString, pod.Spec.Host) - if len(pod.Spec.Host) == 0 { + glog.Infof("Check whether pod %q exists on node %q", podString, pod.Spec.NodeName) + if len(pod.Spec.NodeName) == 0 { glog.Infof("Pod %q is not bound to a host yet", podString) return false, nil } @@ -905,7 +905,7 @@ func runSchedulerNoPhantomPodsTest(client *client.Client) { if err != nil { glog.Fatalf("Failed to create pod: %v, %v", pod, err) } - if err := wait.Poll(time.Second, time.Second*30, podRunning(client, baz.Namespace, baz.Name)); err != nil { + if err := wait.Poll(time.Second, time.Second*60, podRunning(client, baz.Namespace, baz.Name)); err != nil { glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: %v", err) } diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index 7b878d5acfb..cae071cbb55 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -85,8 +85,8 @@ type APIServer struct { OldEtcdPathPrefix string CorsAllowedOriginList util.StringList AllowPrivileged bool - PortalNet util.IPNet // TODO: make this a list - ServiceNodePorts util.PortRange + ServiceClusterIPRange util.IPNet // TODO: make this a list + ServiceNodePortRange util.PortRange
EnableLogsSupport bool MasterServiceNamespace string RuntimeConfig util.ConfigurationMap @@ -183,8 +183,12 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.OldEtcdPathPrefix, "old-etcd-prefix", s.OldEtcdPathPrefix, "The previous prefix for all resource paths in etcd, if any.") fs.Var(&s.CorsAllowedOriginList, "cors-allowed-origins", "List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled.") fs.BoolVar(&s.AllowPrivileged, "allow-privileged", s.AllowPrivileged, "If true, allow privileged containers.") - fs.Var(&s.PortalNet, "portal-net", "A CIDR notation IP range from which to assign portal IPs. This must not overlap with any IP ranges assigned to nodes for pods.") - fs.Var(&s.ServiceNodePorts, "service-node-ports", "A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range.") + fs.Var(&s.ServiceClusterIPRange, "service-cluster-ip-range", "A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods.") + fs.Var(&s.ServiceClusterIPRange, "portal-net", "Deprecated: see --service-cluster-ip-range instead.") + fs.MarkDeprecated("portal-net", "see --service-cluster-ip-range instead.") + fs.Var(&s.ServiceNodePortRange, "service-node-port-range", "A port range to reserve for services with NodePort visibility. Example: '30000-32767'. Inclusive at both ends of the range.") + fs.Var(&s.ServiceNodePortRange, "service-node-ports", "Deprecated: see --service-node-port-range instead.") + fs.MarkDeprecated("service-node-ports", "see --service-node-port-range instead.") fs.StringVar(&s.MasterServiceNamespace, "master-service-namespace", s.MasterServiceNamespace, "The namespace from which the kubernetes master services should be injected into pods") fs.Var(&s.RuntimeConfig, "runtime-config", "A set of key=value pairs that describe runtime configuration that may be passed to the apiserver. api/ key can be used to turn on/off specific api versions. api/all and api/legacy are special keys to control all and legacy api versions respectively.") client.BindKubeletClientConfigFlags(fs, &s.KubeletConfig) @@ -196,9 +200,9 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) { } // TODO: Longer term we should read this from some config store, rather than a flag. -func (s *APIServer) verifyPortalFlags() { - if s.PortalNet.IP == nil { - glog.Fatal("No --portal-net specified") +func (s *APIServer) verifyClusterIPFlags() { + if s.ServiceClusterIPRange.IP == nil { + glog.Fatal("No --service-cluster-ip-range specified") } } @@ -227,7 +231,7 @@ func newEtcd(etcdConfigFile string, etcdServerList util.StringList, storageVersi // Run runs the specified APIServer. This should never exit. 
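The flag rename here keeps the old spellings working by registering the same destination under both names and marking the old one deprecated, which hides it from --help and prints a warning when it is set. A rough sketch of that pattern with github.com/spf13/pflag, using a plain string flag for brevity:

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("apiserver", flag.ExitOnError)

	var ipRange string
	// New, preferred name.
	fs.StringVar(&ipRange, "service-cluster-ip-range", "", "CIDR range for service cluster IPs.")
	// Old name bound to the same variable so existing invocations keep working.
	fs.StringVar(&ipRange, "portal-net", "", "Deprecated: see --service-cluster-ip-range instead.")
	// Hidden from --help; using it prints a deprecation warning.
	fs.MarkDeprecated("portal-net", "see --service-cluster-ip-range instead.")

	fs.Parse([]string{"--portal-net=10.0.0.0/16"})
	fmt.Println(ipRange) // 10.0.0.0/16
}
```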
func (s *APIServer) Run(_ []string) error { - s.verifyPortalFlags() + s.verifyClusterIPFlags() if (s.EtcdConfigFile != "" && len(s.EtcdServerList) != 0) || (s.EtcdConfigFile == "" && len(s.EtcdServerList) == 0) { glog.Fatalf("specify either --etcd-servers or --etcd-config") @@ -302,7 +306,7 @@ func (s *APIServer) Run(_ []string) error { } } - n := net.IPNet(s.PortalNet) + n := net.IPNet(s.ServiceClusterIPRange) // Default to the private server key for service account token signing if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" { @@ -349,7 +353,7 @@ func (s *APIServer) Run(_ []string) error { EtcdHelper: helper, EventTTL: s.EventTTL, KubeletClient: kubeletClient, - PortalNet: &n, + ServiceClusterIPRange: &n, EnableCoreControllers: true, EnableLogsSupport: s.EnableLogsSupport, EnableUISupport: true, diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index e9845700fc7..0c6d75cc30d 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -108,6 +108,7 @@ type KubeletServer struct { CgroupRoot string ContainerRuntime string DockerDaemonContainer string + SystemContainer string ConfigureCBR0 bool MaxPods int @@ -170,6 +171,7 @@ func NewKubeletServer() *KubeletServer { CgroupRoot: "", ContainerRuntime: "docker", DockerDaemonContainer: "/docker-daemon", + SystemContainer: "", ConfigureCBR0: false, } } @@ -228,7 +230,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.ResourceContainer, "resource-container", s.ResourceContainer, "Absolute name of the resource-only container to create and run the Kubelet in (Default: /kubelet).") fs.StringVar(&s.CgroupRoot, "cgroup_root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.") fs.StringVar(&s.ContainerRuntime, "container_runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.") - fs.StringVar(&s.DockerDaemonContainer, "docker-daemon-container", s.DockerDaemonContainer, "Optional resource-only container in which to place the Docker Daemon. Empty for no container (Default: /docker-daemon).") + fs.StringVar(&s.SystemContainer, "system-container", s.SystemContainer, "Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. 
(Default: \"\").") fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.") fs.IntVar(&s.MaxPods, "max-pods", 100, "Number of Pods that can run on this Kubelet.") @@ -347,6 +349,7 @@ func (s *KubeletServer) Run(_ []string) error { ContainerRuntime: s.ContainerRuntime, Mounter: mounter, DockerDaemonContainer: s.DockerDaemonContainer, + SystemContainer: s.SystemContainer, ConfigureCBR0: s.ConfigureCBR0, MaxPods: s.MaxPods, } @@ -513,6 +516,7 @@ func SimpleKubelet(client *client.Client, ContainerRuntime: "docker", Mounter: mount.New(), DockerDaemonContainer: "/docker-daemon", + SystemContainer: "", MaxPods: 32, } return &kcfg @@ -648,6 +652,7 @@ type KubeletConfig struct { ContainerRuntime string Mounter mount.Interface DockerDaemonContainer string + SystemContainer string ConfigureCBR0 bool MaxPods int } @@ -701,6 +706,7 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod kc.ContainerRuntime, kc.Mounter, kc.DockerDaemonContainer, + kc.SystemContainer, kc.ConfigureCBR0, kc.MaxPods) diff --git a/contrib/ansible/roles/master/templates/apiserver.j2 b/contrib/ansible/roles/master/templates/apiserver.j2 index eaabd3bf3e4..c389419c596 100644 --- a/contrib/ansible/roles/master/templates/apiserver.j2 +++ b/contrib/ansible/roles/master/templates/apiserver.j2 @@ -14,7 +14,7 @@ KUBE_API_ADDRESS="--address=0.0.0.0" # KUBELET_PORT="--kubelet_port=10250" # Address range to use for services -KUBE_SERVICE_ADDRESSES="--portal_net={{ kube_service_addresses }}" +KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range={{ kube_service_addresses }}" # Location of the etcd cluster KUBE_ETCD_SERVERS="--etcd_servers=http://{{ groups['etcd'][0] }}:2379" diff --git a/contrib/for-tests/mount-tester/mt.go b/contrib/for-tests/mount-tester/mt.go index 266f39793c4..0a39d7cc86d 100644 --- a/contrib/for-tests/mount-tester/mt.go +++ b/contrib/for-tests/mount-tester/mt.go @@ -48,6 +48,15 @@ func main() { errs = []error{} ) + // NOTE: the ordering of execution of the various command line + // flags is intentional and allows a single command to: + // + // 1. Check the fstype of a path + // 2. Write a new file within that path + // 3. Check that the file's content can be read + // + // Changing the ordering of the following code will break tests. + err = fsType(fsTypePath) if err != nil { errs = append(errs, err) diff --git a/contrib/for-tests/network-tester/Makefile b/contrib/for-tests/network-tester/Makefile index 4834679d471..e456904d60e 100644 --- a/contrib/for-tests/network-tester/Makefile +++ b/contrib/for-tests/network-tester/Makefile @@ -1,7 +1,7 @@ all: push # Set this to the *next* version to prevent accidentally overwriting the existing image. 
-TAG = 1.4 +TAG = 1.5 webserver: webserver.go CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-w' ./webserver.go diff --git a/contrib/for-tests/network-tester/rc.json b/contrib/for-tests/network-tester/rc.json index d52f4343b34..f108cd36eaa 100644 --- a/contrib/for-tests/network-tester/rc.json +++ b/contrib/for-tests/network-tester/rc.json @@ -8,7 +8,7 @@ } }, "spec": { - "replicas": 8, + "replicas": 2, "selector": { "name": "nettest" }, @@ -22,9 +22,13 @@ "containers": [ { "name": "webserver", - "image": "gcr.io/google_containers/nettest:1.1", - "command": [ - "-service=nettest" + "image": "gcr.io/google_containers/nettest:1.4", + "imagePullPolicy": "Always", + "args": [ + "-service=nettest", + "-port=8080", + "-namespace=default", + "-peers=2" ], "ports": [ { diff --git a/contrib/for-tests/network-tester/webserver.go b/contrib/for-tests/network-tester/webserver.go index 384cc9ab9b0..937abc8b633 100644 --- a/contrib/for-tests/network-tester/webserver.go +++ b/contrib/for-tests/network-tester/webserver.go @@ -37,13 +37,12 @@ import ( "io/ioutil" "log" "math/rand" + "net" "net/http" - "net/url" "os" "sync" "time" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" ) @@ -203,12 +202,25 @@ func main() { // Find all sibling pods in the service and post to their /write handler. func contactOthers(state *State) { defer state.doneContactingPeers() - masterRO := url.URL{ - Scheme: "http", - Host: os.Getenv("KUBERNETES_RO_SERVICE_HOST") + ":" + os.Getenv("KUBERNETES_RO_SERVICE_PORT"), - Path: "/api/" + latest.Version, + token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token") + if err != nil { + log.Fatalf("Unable to read service account token: %v", err) + } + cc := client.Config{ + Host: "https://" + net.JoinHostPort(os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")), + Version: "v1beta3", + BearerToken: string(token), + Insecure: true, // TODO: package certs along with the token + } + client, err := client.New(&cc) + if err != nil { + log.Fatalf("Unable to create client:\nconfig: %#v\nerror: %v\n", cc, err) + } + if v, err := client.ServerVersion(); err != nil { + log.Fatalf("Unable to get server version: %v\n", err) + } else { + log.Printf("Server version: %#v\n", v) } - client := &client.Client{client.NewRESTClient(&masterRO, latest.Version, latest.Codec, false, 5, 10)} // Do this repeatedly, in case there's some propagation delay with getting // newly started pods into the endpoints list.
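At the HTTP level, the bearer-token authentication the test webserver switches to amounts to roughly the following; a sketch assuming the standard in-cluster token mount and service environment variables, not the actual client library internals:

```go
package main

import (
	"crypto/tls"
	"io/ioutil"
	"log"
	"net"
	"net/http"
	"os"
)

func main() {
	// Every pod with a service account gets a bearer token mounted here.
	token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
	if err != nil {
		log.Fatalf("unable to read service account token: %v", err)
	}
	host := net.JoinHostPort(os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT"))
	req, err := http.NewRequest("GET", "https://"+host+"/version", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Authorization", "Bearer "+string(token))
	// Skipping cert verification mirrors the Insecure flag above; a real
	// client should package the cluster CA alongside the token instead.
	c := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}
	resp, err := c.Do(req)
	if err != nil {
		log.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	log.Printf("server version: %s", body)
}
```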
diff --git a/contrib/init/systemd/environ/apiserver b/contrib/init/systemd/environ/apiserver index 3196610f8ec..1f5725712d8 100644 --- a/contrib/init/systemd/environ/apiserver +++ b/contrib/init/systemd/environ/apiserver @@ -17,7 +17,7 @@ KUBE_API_ADDRESS="--address=127.0.0.1" KUBE_ETCD_SERVERS="--etcd_servers=http://127.0.0.1:4001" # Address range to use for services -KUBE_SERVICE_ADDRESSES="--portal_net=10.254.0.0/16" +KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" # default admission control policies KUBE_ADMISSION_CONTROL="--admission_control=NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" diff --git a/contrib/release-notes/release-notes.go b/contrib/release-notes/release-notes.go index ea42f657f8d..32e4a95c1f5 100644 --- a/contrib/release-notes/release-notes.go +++ b/contrib/release-notes/release-notes.go @@ -18,24 +18,43 @@ package main import ( "bytes" - "flag" "fmt" + "net/http" "os" "github.com/google/go-github/github" + flag "github.com/spf13/pflag" + "golang.org/x/oauth2" ) -var target = flag.Int("last-release-pr", 0, "The PR number of the last versioned release.") +var ( + target int + token string +) + +func init() { + flag.IntVar(&target, "last-release-pr", 0, "The PR number of the last versioned release.") + flag.StringVar(&token, "api-token", "", "Github api token for rate limiting. See https://developer.github.com/v3/#rate-limiting.") +} func main() { flag.Parse() // Automatically determine this from github. - if *target == 0 { + if target == 0 { fmt.Printf("--last-release-pr is required.\n") os.Exit(1) } + var tc *http.Client - client := github.NewClient(nil) + if len(token) > 0 { + tc = oauth2.NewClient( + oauth2.NoContext, + oauth2.StaticTokenSource( + &oauth2.Token{AccessToken: token}), + ) + } + + client := github.NewClient(tc) done := false @@ -62,7 +81,7 @@ func main() { if result.MergedAt == nil { continue } - if *result.Number == *target { + if *result.Number == target { done = true break } diff --git a/docs/api-conventions.md b/docs/api-conventions.md index b45042bd73f..96b6ffb453f 100644 --- a/docs/api-conventions.md +++ b/docs/api-conventions.md @@ -201,7 +201,7 @@ The API supports three different PATCH operations, determined by their correspon * As defined in [RFC6902](https://tools.ietf.org/html/rfc6902), a JSON Patch is a sequence of operations that are executed on the resource, e.g. `{"op": "add", "path": "/a/b/c", "value": [ "foo", "bar" ]}`. For more details on how to use JSON Patch, see the RFC. * Merge Patch, `Content-Type: application/merge-json-patch+json` * As defined in [RFC7386](https://tools.ietf.org/html/rfc7386), a Merge Patch is essentially a partial representation of the resource. The submitted JSON is "merged" with the current resource to create a new one, then the new one is saved. For more details on how to use Merge Patch, see the RFC. -* Strategic Merge Patch, `Content-Type: application/strategic-merge-json-patch+json` +* Strategic Merge Patch, `Content-Type: application/strategic-merge-patch+json` * Strategic Merge Patch is a custom implementation of Merge Patch. For a detailed explanation of how it works and why it needed to be introduced, see below. #### Strategic Merge Patch @@ -301,7 +301,7 @@ Late Initialization Late initialization is when resource fields are set by a system controller after an object is created/updated. -For example, the scheduler sets the pod.spec.host field after the pod is created. 
+For example, the scheduler sets the pod.spec.nodeName field after the pod is created. Late-initializers should only make the following types of modifications: - Setting previously unset fields diff --git a/docs/design/expansion.md b/docs/design/expansion.md index d15f2501dc0..b3ef161b408 100644 --- a/docs/design/expansion.md +++ b/docs/design/expansion.md @@ -359,9 +359,7 @@ spec: command: [ "/bin/sh", "-c", "env" ] env: - name: PUBLIC_URL - valueFrom: - expansion: - expand: "http://$(GITSERVER_SERVICE_HOST):$(GITSERVER_SERVICE_PORT)" + value: "http://$(GITSERVER_SERVICE_HOST):$(GITSERVER_SERVICE_PORT)" restartPolicy: Never ``` @@ -383,9 +381,7 @@ spec: fieldRef: fieldPath: "metadata.namespace" - name: PUBLIC_URL - valueFrom: - expansion: - expand: "http://gitserver.$(POD_NAMESPACE):$(SERVICE_PORT)" + value: "http://gitserver.$(POD_NAMESPACE):$(SERVICE_PORT)" restartPolicy: Never ``` diff --git a/docs/design/networking.md b/docs/design/networking.md index f351629e881..cd2bd0c5cfb 100644 --- a/docs/design/networking.md +++ b/docs/design/networking.md @@ -83,7 +83,7 @@ We want to be able to assign IP addresses externally from Docker ([Docker issue In addition to enabling self-registration with 3rd-party discovery mechanisms, we'd like to setup DDNS automatically ([Issue #146](https://github.com/GoogleCloudPlatform/kubernetes/issues/146)). hostname, $HOSTNAME, etc. should return a name for the pod ([Issue #298](https://github.com/GoogleCloudPlatform/kubernetes/issues/298)), and gethostbyname should be able to resolve names of other pods. Probably we need to set up a DNS resolver to do the latter ([Docker issue #2267](https://github.com/dotcloud/docker/issues/2267)), so that we don't need to keep /etc/hosts files up to date dynamically. -[Service](http://docs.k8s.io/services.md) endpoints are currently found through environment variables. Both [Docker-links-compatible](https://docs.docker.com/userguide/dockerlinks/) variables and kubernetes-specific variables ({NAME}_SERVICE_HOST and {NAME}_SERVICE_BAR) are supported, and resolve to ports opened by the service proxy. We don't actually use [the Docker ambassador pattern](https://docs.docker.com/articles/ambassador_pattern_linking/) to link containers because we don't require applications to identify all clients at configuration time, yet. While services today are managed by the service proxy, this is an implementation detail that applications should not rely on. Clients should instead use the [service portal IP](http://docs.k8s.io/services.md) (which the above environment variables will resolve to). However, a flat service namespace doesn't scale and environment variables don't permit dynamic updates, which complicates service deployment by imposing implicit ordering constraints. We intend to register each service portal IP in DNS, and for that to become the preferred resolution protocol. +[Service](http://docs.k8s.io/services.md) endpoints are currently found through environment variables. Both [Docker-links-compatible](https://docs.docker.com/userguide/dockerlinks/) variables and kubernetes-specific variables ({NAME}_SERVICE_HOST and {NAME}_SERVICE_BAR) are supported, and resolve to ports opened by the service proxy. We don't actually use [the Docker ambassador pattern](https://docs.docker.com/articles/ambassador_pattern_linking/) to link containers because we don't require applications to identify all clients at configuration time, yet. 
While services today are managed by the service proxy, this is an implementation detail that applications should not rely on. Clients should instead use the [service IP](http://docs.k8s.io/services.md) (which the above environment variables will resolve to). However, a flat service namespace doesn't scale and environment variables don't permit dynamic updates, which complicates service deployment by imposing implicit ordering constraints. We intend to register each service's IP in DNS, and for that to become the preferred resolution protocol. We'd also like to accommodate other load-balancing solutions (e.g., HAProxy), non-load-balanced services ([Issue #260](https://github.com/GoogleCloudPlatform/kubernetes/issues/260)), and other types of groups (worker pools, etc.). Providing the ability to Watch a label selector applied to pod addresses would enable efficient monitoring of group membership, which could be directly consumed or synced with a discovery mechanism. Event hooks ([Issue #140](https://github.com/GoogleCloudPlatform/kubernetes/issues/140)) for join/leave events would probably make this even easier. diff --git a/docs/devel/api_changes.md b/docs/devel/api_changes.md index 4627c6dfd65..17278c6ef5e 100644 --- a/docs/devel/api_changes.md +++ b/docs/devel/api_changes.md @@ -254,6 +254,12 @@ regenerate auto-generated ones. To regenerate them: $ hack/update-generated-conversions.sh ``` +If running the above script is impossible due to compile errors, the easiest +workaround is to comment out the code causing errors and let the script +regenerate it. If the auto-generated conversion methods are not used by the +manually-written ones, it's fine to just remove the whole file and let the +generator create it from scratch. + Unsurprisingly, adding manually written conversion also requires you to add tests to `pkg/api//conversion_test.go`. diff --git a/docs/getting-started-guides/aws/cloud-configs/master.yaml b/docs/getting-started-guides/aws/cloud-configs/master.yaml index 4959f1ad7bd..af8d61078a7 100644 --- a/docs/getting-started-guides/aws/cloud-configs/master.yaml +++ b/docs/getting-started-guides/aws/cloud-configs/master.yaml @@ -118,7 +118,7 @@ coreos: ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-apiserver ExecStart=/opt/bin/kube-apiserver \ --insecure-bind-address=0.0.0.0 \ - --portal-net=10.100.0.0/16 \ + --service-cluster-ip-range=10.100.0.0/16 \ --etcd-servers=http://localhost:2379 Restart=always RestartSec=10 diff --git a/docs/getting-started-guides/aws/cloudformation-template.json b/docs/getting-started-guides/aws/cloudformation-template.json index 0129f990dc3..7617445125c 100644 --- a/docs/getting-started-guides/aws/cloudformation-template.json +++ b/docs/getting-started-guides/aws/cloudformation-template.json @@ -260,7 +260,7 @@ " ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-apiserver\n", " ExecStart=/opt/bin/kube-apiserver \\\n", " --insecure-bind-address=0.0.0.0 \\\n", - " --portal-net=10.100.0.0/16 \\\n", + " --service-cluster-ip-range=10.100.0.0/16 \\\n", " --etcd-servers=http://localhost:2379\n", " Restart=always\n", " RestartSec=10\n", diff --git a/docs/getting-started-guides/azure.md b/docs/getting-started-guides/azure.md index 4f791942763..d2608ae0637 100644 --- a/docs/getting-started-guides/azure.md +++ b/docs/getting-started-guides/azure.md @@ -37,90 +37,10 @@ You can then use the `cluster/kube-*.sh` scripts to manage your azure cluster, s The script above will start (by default) a single master VM along with 4 worker VMs.
You can tweak some of these parameters by editing `cluster/azure/config-default.sh`. -### Running a container (simple version) +### Getting started with your cluster +See [a simple nginx example](../../examples/simple-nginx.md) to try out your new cluster. -Once you have your instances up and running, the `hack/build-go.sh` script sets up -your Go workspace and builds the Go components. - -The `kubectl.sh` line below spins up two containers running -[Nginx](http://nginx.org/en/) running on port 80: - -```bash -cluster/kubectl.sh run my-nginx --image=nginx --replicas=2 --port=80 -``` - -To stop the containers: - -```bash -cluster/kubectl.sh stop rc my-nginx -``` - -To delete the containers: - -```bash -cluster/kubectl.sh delete rc my-nginx -``` - -### Running a container (more complete version) - - -You can create a pod like this: - - -``` -cd kubernetes -cluster/kubectl.sh create -f docs/getting-started-guides/pod.json -``` - -Where pod.json contains something like: - -``` -{ - "id": "php", - "kind": "Pod", - "apiVersion": "v1beta1", - "desiredState": { - "manifest": { - "version": "v1beta1", - "id": "php", - "containers": [{ - "name": "nginx", - "image": "nginx", - "ports": [{ - "containerPort": 80, - "hostPort": 8080 - }], - "livenessProbe": { - "enabled": true, - "type": "http", - "initialDelaySeconds": 30, - "httpGet": { - "path": "/index.html", - "port": 8080 - } - } - }] - } - }, - "labels": { - "name": "foo" - } -} -``` - -You can see your cluster's pods: - -``` -cluster/kubectl.sh get pods -``` - -and delete the pod you just created: - -``` -cluster/kubectl.sh delete pods php -``` - -Look in `api/examples/` for more examples +For more complete applications, please look in the [examples directory](../../examples). ### Tearing down the cluster ``` diff --git a/docs/getting-started-guides/centos/centos_manual_config.md b/docs/getting-started-guides/centos/centos_manual_config.md index f24c3b41b96..c76c4cba543 100644 --- a/docs/getting-started-guides/centos/centos_manual_config.md +++ b/docs/getting-started-guides/centos/centos_manual_config.md @@ -97,7 +97,7 @@ KUBE_MASTER="--master=http://centos-master:8080" KUBELET_PORT="--kubelet_port=10250" # Address range to use for services -KUBE_SERVICE_ADDRESSES="--portal_net=10.254.0.0/16" +KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" # Add your own! 
KUBE_API_ARGS="" diff --git a/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml b/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml index c9988eea217..53dc5d56706 100644 --- a/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml +++ b/docs/getting-started-guides/coreos/azure/cloud_config_templates/kubernetes-cluster-main-nodes-template.yml @@ -273,7 +273,7 @@ coreos: --address=0.0.0.0 \ --port=8080 \ $ETCD_SERVERS \ - --portal_net=10.1.0.0/16 \ + --service-cluster-ip-range=10.1.0.0/16 \ --cloud_provider=vagrant \ --logtostderr=true --v=3 Restart=always diff --git a/docs/getting-started-guides/coreos/bare_metal_offline.md b/docs/getting-started-guides/coreos/bare_metal_offline.md index fd323a42380..1690534c161 100644 --- a/docs/getting-started-guides/coreos/bare_metal_offline.md +++ b/docs/getting-started-guides/coreos/bare_metal_offline.md @@ -363,7 +363,7 @@ On the PXE server make and fill in the variables ```vi /var/www/html/coreos/pxe- ExecStart=/opt/bin/kube-apiserver \ --address=0.0.0.0 \ --port=8080 \ - --portal_net=10.100.0.0/16 \ + --service-cluster-ip-range=10.100.0.0/16 \ --etcd_servers=http://127.0.0.1:4001 \ --logtostderr=true Restart=always @@ -602,46 +602,9 @@ Reboot these servers to get the images PXEd and ready for running containers! ## Creating test pod Now that the CoreOS with Kubernetes installed is up and running lets spin up some Kubernetes pods to demonstrate the system. -Here is a fork where you can do a full walk through by using [Kubernetes docs](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/walkthrough), or use the following example for a quick test. +See [a simple nginx example](../../../examples/simple-nginx.md) to try out your new cluster. - -On the Kubernetes Master node lets create a '''nginx.yml''' - - apiVersion: v1beta1 - kind: Pod - id: www - desiredState: - manifest: - version: v1beta1 - id: www - containers: - - name: nginx - image: nginx - - -Now for the service: ```nginx-service.yml``` - - kind: Service - apiVersion: v1beta1 - # must be a DNS compatible name - id: nginx-example - # the port that this service should serve on - port: 80 - # just like the selector in the replication controller, - # but this time it identifies the set of pods to load balance - # traffic to. - selector: - name: www - # the container on each pod to connect to, can be a name - # (e.g. 'www') or a number (e.g. 80) - containerPort: 80 - -Now add the pod to Kubernetes: - - kubectl create -f nginx.yml - -This might take a while to download depending on the environment. - +For more complete applications, please look in the [examples directory](../../../examples). 
## Helping commands for debugging diff --git a/docs/getting-started-guides/coreos/cloud-configs/master.yaml b/docs/getting-started-guides/coreos/cloud-configs/master.yaml index b58da94ad07..cffee3de510 100644 --- a/docs/getting-started-guides/coreos/cloud-configs/master.yaml +++ b/docs/getting-started-guides/coreos/cloud-configs/master.yaml @@ -116,7 +116,7 @@ coreos: --insecure_port=8080 \ --kubelet_https=true \ --secure_port=6443 \ - --portal_net=10.100.0.0/16 \ + --service-cluster-ip-range=10.100.0.0/16 \ --etcd_servers=http://127.0.0.1:4001 \ --public_address_override=${DEFAULT_IPV4} \ --logtostderr=true diff --git a/docs/getting-started-guides/coreos/cloud-configs/standalone.yaml b/docs/getting-started-guides/coreos/cloud-configs/standalone.yaml index f9fd09560fa..a37b05e37d3 100644 --- a/docs/getting-started-guides/coreos/cloud-configs/standalone.yaml +++ b/docs/getting-started-guides/coreos/cloud-configs/standalone.yaml @@ -84,7 +84,7 @@ coreos: --insecure_port=8080 \ --kubelet_https=true \ --secure_port=6443 \ - --portal_net=10.100.0.0/16 \ + --service-cluster-ip-range=10.100.0.0/16 \ --etcd_servers=http://127.0.0.1:4001 \ --public_address_override=127.0.0.1 \ --logtostderr=true diff --git a/docs/getting-started-guides/fedora/fedora_ansible_config.md b/docs/getting-started-guides/fedora/fedora_ansible_config.md index 379dbf229f0..54d7405901a 100644 --- a/docs/getting-started-guides/fedora/fedora_ansible_config.md +++ b/docs/getting-started-guides/fedora/fedora_ansible_config.md @@ -174,25 +174,27 @@ iptables -nvL ``` cat << EOF > apache.json { - "id": "fedoraapache", "kind": "Pod", - "apiVersion": "v1beta1", - "desiredState": { - "manifest": { - "version": "v1beta1", - "id": "fedoraapache", - "containers": [{ - "name": "fedoraapache", - "image": "fedora/apache", - "ports": [{ - "containerPort": 80, - "hostPort": 80 - }] - }] + "apiVersion": "v1beta3", + "metadata": { + "name": "fedoraapache", + "labels": { + "name": "fedoraapache" } }, - "labels": { - "name": "fedoraapache" + "spec": { + "containers": [ + { + "name": "fedoraapache", + "image": "fedora/apache", + "ports": [ + { + "hostPort": 80, + "containerPort": 80 + } + ] + } + ] } } EOF diff --git a/docs/getting-started-guides/fedora/fedora_manual_config.md b/docs/getting-started-guides/fedora/fedora_manual_config.md index fe1c1d828cc..d72fafeb5bc 100644 --- a/docs/getting-started-guides/fedora/fedora_manual_config.md +++ b/docs/getting-started-guides/fedora/fedora_manual_config.md @@ -61,7 +61,7 @@ systemctl stop iptables-services firewalld **Configure the kubernetes services on the master.** -* Edit /etc/kubernetes/apiserver to appear as such. The portal_net IP addresses must be an unused block of addresses, not used anywhere else. They do not need to be routed or assigned to anything. +* Edit /etc/kubernetes/apiserver to appear as such. The service_cluster_ip_range IP addresses must be an unused block of addresses, not used anywhere else. They do not need to be routed or assigned to anything. ``` # The address on the local server to listen to. @@ -71,7 +71,7 @@ KUBE_API_ADDRESS="--address=0.0.0.0" KUBE_ETCD_SERVERS="--etcd_servers=http://127.0.0.1:4001" # Address range to use for services -KUBE_SERVICE_ADDRESSES="--portal_net=10.254.0.0/16" +KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" # Add your own! 
KUBE_API_ARGS="" diff --git a/docs/getting-started-guides/gce.md b/docs/getting-started-guides/gce.md index 7b7522520cd..463422ff251 100644 --- a/docs/getting-started-guides/gce.md +++ b/docs/getting-started-guides/gce.md @@ -13,10 +13,7 @@ If you want to use custom binaries or pure open source Kubernetes, please contin 1. You need a Google Cloud Platform account with billing enabled. Visit the [Google Developers Console](http://cloud.google.com/console) for more details. 1. Make sure you have the `gcloud preview` command line component installed. Simply run `gcloud preview` at the command line - if it asks to install any components, go ahead and install them. If it simply shows help text, you're good to go. This is required as the cluster setup script uses GCE [Instance Groups](https://cloud.google.com/compute/docs/instance-groups/), which are in the gcloud preview namespace. 1. Make sure that gcloud is set to use the Google Cloud Platform project you want. You can check the current project using `gcloud config list project` and change it via `gcloud config set project `. -1. Make sure you have credentials for GCloud by running -```bash -gcloud auth login -``` +1. Make sure you have credentials for GCloud by running ` gcloud auth login`. 1. Make sure you can start up a GCE VM from the command line. At least make sure you can do the [Create an instance](https://cloud.google.com/compute/docs/quickstart#create_an_instance) part of the GCE Quickstart. 1. Make sure you can ssh into the VM without interactive prompts. See the [Log in to the instance](https://cloud.google.com/compute/docs/quickstart#ssh) part of the GCE Quickstart. @@ -67,7 +64,7 @@ potential issues with client/server version skew. ### Getting started with your cluster See [a simple nginx example](../../examples/simple-nginx.md) to try out your new cluster. -For more complete applications, please look in the [examples directory](../../examples) +For more complete applications, please look in the [examples directory](../../examples). ### Tearing down the cluster To remove/delete/teardown the cluster, use the `kube-down.sh` script. diff --git a/docs/getting-started-guides/juju.md b/docs/getting-started-guides/juju.md index 4eae8d8d2ac..570b473016d 100644 --- a/docs/getting-started-guides/juju.md +++ b/docs/getting-started-guides/juju.md @@ -96,26 +96,24 @@ We'll follow the aws-coreos example. Create a pod manifest: `pod.json` ``` { - "id": "hello", + "apiVersion": "v1beta3", "kind": "Pod", - "apiVersion": "v1beta1", - "desiredState": { - "manifest": { - "version": "v1beta1", - "id": "hello", - "containers": [{ - "name": "hello", - "image": "quay.io/kelseyhightower/hello", - "ports": [{ - "containerPort": 80, - "hostPort": 80 - }] - }] + "metadata": { + "name": "hello", + "labels": { + "name": "hello", + "environment": "testing" } }, - "labels": { - "name": "hello", - "environment": "testing" + "spec": { + "containers": [{ + "name": "hello", + "image": "quay.io/kelseyhightower/hello", + "ports": [{ + "containerPort": 80, + "hostPort": 80 + }] + }] } } ``` diff --git a/docs/getting-started-guides/locally.md b/docs/getting-started-guides/locally.md index 7c332838f9f..e486f03fc05 100644 --- a/docs/getting-started-guides/locally.md +++ b/docs/getting-started-guides/locally.md @@ -87,10 +87,10 @@ Some firewall software that uses iptables may not interact well with kubernetes. If you're having trouble around networking, try disabling any firewall or other iptables-using systems, first. 
-By default the IP range for service portals is 10.0.*.* - depending on your +By default the IP range for service cluster IPs is 10.0.*.* - depending on your docker installation, this may conflict with IPs for containers. If you find containers running with IPs in this range, edit hack/local-cluster-up.sh and -change the portal_net flag to something else. +change the service-cluster-ip-range flag to something else. #### I cannot create a replication controller with replica size greater than 1! What gives? diff --git a/docs/getting-started-guides/mesos.md b/docs/getting-started-guides/mesos.md index a91b9023c62..7c4067ed1d1 100644 --- a/docs/getting-started-guides/mesos.md +++ b/docs/getting-started-guides/mesos.md @@ -69,7 +69,7 @@ $ ./bin/km apiserver \ --address=${servicehost} \ --mesos_master=${mesos_master} \ --etcd_servers=http://${servicehost}:4001 \ - --portal_net=10.10.10.0/24 \ + --service-cluster-ip-range=10.10.10.0/24 \ --port=8888 \ --cloud_provider=mesos \ --v=1 >apiserver.log 2>&1 & @@ -235,7 +235,7 @@ $ mesos ps ``` The number of Kubernetes pods listed earlier (from `bin/kubectl get pods`) should equal to the number active Mesos tasks listed the previous listing (`mesos ps`). -Next, determine the internal IP address of the front end [service portal][8]: +Next, determine the internal IP address of the front end [service][8]: ```bash $ bin/kubectl get services @@ -268,14 +268,14 @@ Or interact with the frontend application via your browser, in 2 steps: First, open the firewall on the master machine. ```bash -# determine the internal port for the frontend service portal +# determine the internal port for the frontend service $ sudo iptables-save|grep -e frontend # -- port 36336 in this case -A KUBE-PORTALS-CONTAINER -d 10.10.10.149/32 -p tcp -m comment --comment frontend -m tcp --dport 9998 -j DNAT --to-destination 10.22.183.23:36336 -A KUBE-PORTALS-CONTAINER -d 10.22.183.23/32 -p tcp -m comment --comment frontend -m tcp --dport 9998 -j DNAT --to-destination 10.22.183.23:36336 -A KUBE-PORTALS-HOST -d 10.10.10.149/32 -p tcp -m comment --comment frontend -m tcp --dport 9998 -j DNAT --to-destination 10.22.183.23:36336 -A KUBE-PORTALS-HOST -d 10.22.183.23/32 -p tcp -m comment --comment frontend -m tcp --dport 9998 -j DNAT --to-destination 10.22.183.23:36336 -# open up access to the internal port for the frontend service portal +# open up access to the internal port for the frontend service $ sudo iptables -A INPUT -i eth0 -p tcp -m state --state NEW,ESTABLISHED -m tcp \ --dport ${internal_frontend_service_port} -j ACCEPT ``` @@ -297,7 +297,7 @@ Now, you can visit the guestbook in your browser! [5]: https://google.mesosphere.com [6]: http://mesosphere.com/docs/getting-started/cloud/google/mesosphere/#vpn-setup [7]: https://github.com/mesosphere/kubernetes-mesos/tree/v0.4.0/examples/guestbook -[8]: https://github.com/GoogleCloudPlatform/kubernetes/blob/v0.11.0/docs/services.md#ips-and-portals +[8]: https://github.com/GoogleCloudPlatform/kubernetes/blob/v0.11.0/docs/services.md#ips-and-vips [9]: mesos/k8s-firewall.png [10]: mesos/k8s-guestbook.png [11]: http://mesos.apache.org/ diff --git a/docs/getting-started-guides/rkt/README.md b/docs/getting-started-guides/rkt/README.md new file mode 100644 index 00000000000..af1c91d3d10 --- /dev/null +++ b/docs/getting-started-guides/rkt/README.md @@ -0,0 +1,65 @@ +# Run Kubernetes with rkt + +This document describes how to run Kubernetes using [rkt](https://github.com/coreos/rkt) as a container runtime. 
+We still have [a bunch of work](https://github.com/GoogleCloudPlatform/kubernetes/issues/8262) to do to make the experience with rkt wonderful; please stay tuned! + +### **Prerequisite** + +- [systemd](http://www.freedesktop.org/wiki/Software/systemd/) should be installed on your machine and should be enabled. The minimum version required at this moment (2015/05/28) is [215](http://lists.freedesktop.org/archives/systemd-devel/2014-July/020903.html). + *(Note that systemd is not required by rkt itself; we are using it here to monitor and manage the pods launched by kubelet.)* + +- Install the latest rkt release according to the instructions [here](https://github.com/coreos/rkt). + The minimum version required for now is [v0.5.6](https://github.com/coreos/rkt/releases/tag/v0.5.6). + +- Make sure the `rkt metadata service` is running, because it is necessary for running pods in private network mode. + More details about the networking of rkt can be found in the [documentation](https://github.com/coreos/rkt/blob/master/Documentation/networking.md). + + To start the `rkt metadata service`, you can simply run: + ```shell + $ sudo rkt metadata-service + ``` + + If you want it to run as a systemd service, then: + ```shell + $ sudo systemd-run rkt metadata-service + ``` + Alternatively, you can use the [rkt-metadata.service](https://github.com/coreos/rkt/blob/master/dist/init/systemd/rkt-metadata.service) and [rkt-metadata.socket](https://github.com/coreos/rkt/blob/master/dist/init/systemd/rkt-metadata.socket) to start the service. + + +### Local cluster + +To use rkt as the container runtime, you just need to set the environment variable `CONTAINER_RUNTIME`: +```shell +$ export CONTAINER_RUNTIME=rkt +$ hack/local-up-cluster.sh +``` + +After this, you can launch some pods in another terminal: +```shell +$ cluster/kubectl.sh create -f example/pod.yaml +``` + +### CoreOS cluster on GCE + +To use rkt as the container runtime for your CoreOS cluster on GCE, you need to specify the OS distribution, project, and image: +```shell +$ export KUBE_OS_DISTRIBUTION=coreos +$ export KUBE_GCE_MINION_IMAGE= +$ export KUBE_GCE_MINION_PROJECT=coreos-cloud +$ export KUBE_CONTAINER_RUNTIME=rkt +``` + +You can optionally choose the version of rkt used by setting `KUBE_RKT_VERSION`: +```shell +$ export KUBE_RKT_VERSION=0.5.6 +``` + +Then you can launch the cluster by: +```shell +$ kube-up.sh +``` + +Note that we are still working on making all of the containerized master components run smoothly in rkt. Until then, we are not able to run the master node with rkt. + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/getting-started-guides/rkt/README.md?pixel)]() diff --git a/docs/getting-started-guides/ubuntu.md b/docs/getting-started-guides/ubuntu.md index d8b76a197b0..dbb30624f24 100644 --- a/docs/getting-started-guides/ubuntu.md +++ b/docs/getting-started-guides/ubuntu.md @@ -48,7 +48,7 @@ export roles=("ai" "i" "i") export NUM_MINIONS=${NUM_MINIONS:-3} -export PORTAL_NET=11.1.1.0/24 +export SERVICE_CLUSTER_IP_RANGE=11.1.1.0/24 export FLANNEL_NET=172.16.0.0/16 @@ -61,7 +61,7 @@ Then the `roles ` variable defines the role of above machine in the same order, The `NUM_MINIONS` variable defines the total number of minions. -The `PORTAL_NET` variable defines the kubernetes service portal ip range. Please make sure that you do have a valid private ip range defined here, because some IaaS provider may reserve private ips.
You can use below three private network range accordin to rfc1918. Besides you'd better not choose the one that conflicts with your own private network range. +The `SERVICE_CLUSTER_IP_RANGE` variable defines the kubernetes service IP range. Please make sure that you do have a valid private ip range defined here, because some IaaS provider may reserve private ips. You can use one of the three private network ranges below, according to rfc1918; also, you'd better not choose one that conflicts with your own private network range. 10.0.0.0 - 10.255.255.255 (10/8 prefix) @@ -69,7 +69,7 @@ The `PORTAL_NET` variable defines the kubernetes service portal ip range. Please 192.168.0.0 - 192.168.255.255 (192.168/16 prefix) -The `FLANNEL_NET` variable defines the IP range used for flannel overlay network, should not conflict with above PORTAL_NET range +The `FLANNEL_NET` variable defines the IP range used for the flannel overlay network, and should not conflict with the above `SERVICE_CLUSTER_IP_RANGE`. After all the above variable being set correctly. We can use below command in cluster/ directory to bring up the whole cluster. @@ -127,7 +127,7 @@ DNS_DOMAIN="kubernetes.local" DNS_REPLICAS=1 ``` -The `DNS_SERVER_IP` is defining the ip of dns server which must be in the portal_net range. +The `DNS_SERVER_IP` defines the ip of the dns server, which must be in the service_cluster_ip_range. The `DNS_REPLICAS` describes how many dns pod running in the cluster. diff --git a/docs/kubectl_get.md b/docs/kubectl_get.md index 9522f4dd4c0..563b6c63f37 100644 --- a/docs/kubectl_get.md +++ b/docs/kubectl_get.md @@ -8,7 +8,9 @@ Display one or many resources Display one or many resources. Possible resources include pods (po), replication controllers (rc), services -(svc), nodes, events (ev), or component statuses (cs). +(svc), nodes, events (ev), component statuses (cs), limit ranges (limits), +minions (mi), persistent volumes (pv), persistent volume claims (pvc) +or resource quotas (quota). By specifying the output as 'template' and providing a Go template as the value of the --template flag, you can filter the attributes of the fetched resource(s). @@ -85,6 +87,6 @@ $ kubectl get rc/web service/frontend pods/web-pod-13je7 ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-05-22 14:24:30.17132893 +0000 UTC +###### Auto generated by spf13/cobra at 2015-05-28 22:43:52.329286408 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/kubectl_get.md?pixel)]() diff --git a/docs/kubectl_label.md b/docs/kubectl_label.md index 9897bdaa331..fa0bbaa83ec 100644 --- a/docs/kubectl_label.md +++ b/docs/kubectl_label.md @@ -7,6 +7,7 @@ Update the labels on a resource Update the labels on a resource. +A label must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to 63 characters. If --overwrite is true, then existing labels can be overwritten, otherwise attempting to overwrite a label will result in an error. If --resource-version is specified, then updates will use this resource version, otherwise the existing resource-version will be used.
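The label rule quoted above translates directly into a validation check. A sketch of the rule as stated (an illustration, not the canonical validation code):

```go
package main

import (
	"fmt"
	"regexp"
)

// Must start with a letter or number; letters, numbers, hyphens, dots,
// and underscores are allowed afterwards. Length is capped separately
// at 63 characters.
var labelRE = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9._-]*$`)

func validLabel(s string) bool {
	return len(s) <= 63 && labelRE.MatchString(s)
}

func main() {
	fmt.Println(validLabel("bar"))     // true
	fmt.Println(validLabel("a_b-c.d")) // true
	fmt.Println(validLabel("-bad"))    // false: must start with a letter or number
}
```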
@@ -80,6 +81,6 @@ $ kubectl label pods foo bar- ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-05-21 10:33:11.210679161 +0000 UTC +###### Auto generated by spf13/cobra at 2015-05-28 08:44:48.996047458 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/kubectl_label.md?pixel)]() diff --git a/docs/kubectl_proxy.md b/docs/kubectl_proxy.md index abf695beaaa..ea38efe158a 100644 --- a/docs/kubectl_proxy.md +++ b/docs/kubectl_proxy.md @@ -18,8 +18,8 @@ kubectl proxy [--port=PORT] [--www=static-dir] [--www-prefix=prefix] [--api-pref $ kubectl proxy --port=8011 --www=./local/www/ // Run a proxy to kubernetes apiserver, changing the api prefix to k8s-api -// This makes e.g. the pods api available at localhost:8011/k8s-api/v1beta1/pods/ -$ kubectl proxy --api-prefix=k8s-api +// This makes e.g. the pods api available at localhost:8011/k8s-api/v1beta3/pods/ +$ kubectl proxy --api-prefix=/k8s-api ``` ### Options @@ -64,6 +64,6 @@ $ kubectl proxy --api-prefix=k8s-api ### SEE ALSO * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-05-21 10:33:11.188518514 +0000 UTC +###### Auto generated by spf13/cobra at 2015-05-28 20:57:46.689818993 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/kubectl_proxy.md?pixel)]() diff --git a/docs/man/kube-apiserver.1.md b/docs/man/kube-apiserver.1.md index 4c44cc69af4..21e4edbda9f 100644 --- a/docs/man/kube-apiserver.1.md +++ b/docs/man/kube-apiserver.1.md @@ -134,8 +134,8 @@ The the kube-apiserver several options. **--port**=8080 DEPRECATED: see --insecure-port instead -**--portal-net**= - A CIDR notation IP range from which to assign portal IPs. This must not overlap with any IP ranges assigned to nodes for pods. +**--service-cluster-ip-range**= + A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods. **--profiling**=true Enable profiling via web interface host:port/debug/pprof/ @@ -184,7 +184,7 @@ The the kube-apiserver several options. # EXAMPLES ``` -/usr/bin/kube-apiserver --logtostderr=true --v=0 --etcd_servers=http://127.0.0.1:4001 --insecure_bind_address=127.0.0.1 --insecure_port=8080 --kubelet_port=10250 --portal_net=11.1.1.0/24 --allow_privileged=false +/usr/bin/kube-apiserver --logtostderr=true --v=0 --etcd_servers=http://127.0.0.1:4001 --insecure_bind_address=127.0.0.1 --insecure_port=8080 --kubelet_port=10250 --service-cluster-ip-range=10.1.1.0/24 --allow_privileged=false ``` # HISTORY diff --git a/docs/man/man1/kube-apiserver.1 b/docs/man/man1/kube-apiserver.1 index fa8ea7b6be6..2fa1600b7ea 100644 --- a/docs/man/man1/kube-apiserver.1 +++ b/docs/man/man1/kube-apiserver.1 @@ -178,8 +178,8 @@ The the kube\-apiserver several options. DEPRECATED: see \-\-insecure\-port instead .PP -\fB\-\-portal\-net\fP= - A CIDR notation IP range from which to assign portal IPs. This must not overlap with any IP ranges assigned to nodes for pods. +\fB\-\-service\-cluster\-ip\-range\fP= + A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods. .PP \fB\-\-profiling\fP=true @@ -246,7 +246,7 @@ The the kube\-apiserver several options. 
.RS .nf -/usr/bin/kube\-apiserver \-\-logtostderr=true \-\-v=0 \-\-etcd\_servers=http://127.0.0.1:4001 \-\-insecure\_bind\_address=127.0.0.1 \-\-insecure\_port=8080 \-\-kubelet\_port=10250 \-\-portal\_net=11.1.1.0/24 \-\-allow\_privileged=false +/usr/bin/kube\-apiserver \-\-logtostderr=true \-\-v=0 \-\-etcd\_servers=http://127.0.0.1:4001 \-\-insecure\_bind\_address=127.0.0.1 \-\-insecure\_port=8080 \-\-kubelet\_port=10250 \-\-service\-cluster\-ip\-range=10.1.1.0/24 \-\-allow\_privileged=false .fi diff --git a/docs/man/man1/kubectl-get.1 b/docs/man/man1/kubectl-get.1 index 8414924bad5..109a313c146 100644 --- a/docs/man/man1/kubectl-get.1 +++ b/docs/man/man1/kubectl-get.1 @@ -17,7 +17,9 @@ Display one or many resources. .PP Possible resources include pods (po), replication controllers (rc), services -(svc), nodes, events (ev), or component statuses (cs). +(svc), nodes, events (ev), component statuses (cs), limit ranges (limits), +minions (mi), persistent volumes (pv), persistent volume claims (pvc) +or resource quotas (quota). .PP By specifying the output as 'template' and providing a Go template as the value diff --git a/docs/man/man1/kubectl-label.1 b/docs/man/man1/kubectl-label.1 index fe7037eee90..9fddbcd4a07 100644 --- a/docs/man/man1/kubectl-label.1 +++ b/docs/man/man1/kubectl-label.1 @@ -16,6 +16,7 @@ kubectl label \- Update the labels on a resource Update the labels on a resource. .PP +A label must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to 63 characters. If \-\-overwrite is true, then existing labels can be overwritten, otherwise attempting to overwrite a label will result in an error. If \-\-resource\-version is specified, then updates will use this resource version, otherwise the existing resource\-version will be used. diff --git a/docs/man/man1/kubectl-proxy.1 b/docs/man/man1/kubectl-proxy.1 index 8d977ee583e..0875897c694 100644 --- a/docs/man/man1/kubectl-proxy.1 +++ b/docs/man/man1/kubectl-proxy.1 @@ -145,8 +145,8 @@ Run a proxy to the Kubernetes API server. $ kubectl proxy \-\-port=8011 \-\-www=./local/www/ // Run a proxy to kubernetes apiserver, changing the api prefix to k8s\-api -// This makes e.g. the pods api available at localhost:8011/k8s\-api/v1beta1/pods/ -$ kubectl proxy \-\-api\-prefix=k8s\-api +// This makes e.g. the pods api available at localhost:8011/k8s\-api/v1beta3/pods/ +$ kubectl proxy \-\-api\-prefix=/k8s\-api .fi .RE diff --git a/docs/proposals/autoscaling.md b/docs/proposals/autoscaling.md index 29d20c8236b..31374448708 100644 --- a/docs/proposals/autoscaling.md +++ b/docs/proposals/autoscaling.md @@ -42,7 +42,7 @@ applications will expose one or more network endpoints for clients to connect to balanced or situated behind a proxy - the data from those proxies and load balancers can be used to estimate client to server traffic for applications. This is the primary, but not sole, source of data for making decisions. -Within Kubernetes a [kube proxy](http://docs.k8s.io/services.md#ips-and-portals) +Within Kubernetes a [kube proxy](http://docs.k8s.io/services.md#ips-and-vips) running on each node directs service requests to the underlying implementation. 
While the proxy provides internal inter-pod connections, there will be L3 and L7 proxies and load balancers that manage diff --git a/docs/roadmap.md b/docs/roadmap.md index 43734f87a51..05104c8bac9 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -1,13 +1,15 @@ -# Kubernetes Roadmap +# Kubernetes v1 -Updated April 20, 2015 +Updated May 28, 2015 This document is intended to capture the set of supported use cases, features, docs, and patterns that we feel are required to call Kubernetes “feature -complete” for a 1.0 release candidate. This list does not emphasize the bug -fixes and stabilization that will be required to take it all the way to -production ready. This is a living document, and is certainly open for -discussion. +complete” for a 1.0 release candidate. + +This list does not emphasize the bug fixes and stabilization that will be required to take it all the way to +production ready. Please see the [Github issues](https://github.com/GoogleCloudPlatform/kubernetes/issues) for a more detailed view. + +This is a living document, where suggested changes can be made via a pull request. ## Target workloads @@ -16,6 +18,9 @@ frontend exposed to the public Internet, with a stateful backend, such as a clustered database or key-value store. We will target such workloads for our 1.0 release. +## v1 APIs +For existing and future workloads, we want to provide a consistent, stable set of APIs, over which developers can build and extend Kubernetes. This includes input validation, a consistent API structure, clean semantics, and improved diagnosability of the system. @@ -68,38 +73,22 @@ clustered database or key-value store. We will target such workloads for our 8. API test coverage more than 85% in e2e tests - Status: -## Project -1. Define a deprecation policy for expiring and removing features and interfaces, including the time non-beta APIs will be supported - - Status: -2. Define a version numbering policy regarding point upgrades, support, compat, and release frequency. - - Status: -3. Define an SLO that users can reasonable expect to hit in properly managed clusters - - Status: -4. Accurate and complete API documentation - - Status: -5. Accurate and complete getting-started-guides for supported platforms - - Status: +In addition, we will provide versioning and deprecation policies for the APIs. -## Platforms -1. Possible for cloud partners / vendors to self-qualify Kubernetes on their platform. - - Status: -2. Define the set of platforms that are supported by the core team. - - Status: +## Cluster Environment +Currently, a cluster is a set of nodes (VMs, machines), managed by a master, running a version of Kubernetes. This master is the cluster-level control-plane. For the purpose of running production workloads, members of the cluster must be serviceable and upgradeable. -## Beyond 1.0 +## Micro-services and Resources +For applications / micro-services that run on Kubernetes, we want deployments to be easy but powerful. An Operations user should be able to launch a micro-service, letting the scheduler find the right placement. That micro-service should be able to require “pet storage” resources, fulfilled by external storage and with help from the cluster.
We also want to improve the tools and experience for how users can roll out applications through patterns like canary deployments. -We acknowledge that there are a great many things that are not included in our 1.0 roadmap. We intend to document the plans past 1.0 soon, but some of the things that are clearly in scope include: +## Performance and Reliability +The system should be performant, especially from the perspective of micro-services running on top of the cluster and for Operations users. As part of being production grade, the system should have a measured availability and be resilient to failures, including fatal failures due to hardware. -1. Scalability - more nodes, more pods -2. HA masters -3. Monitoring -4. Authn and authz -5. Enhanced resource management and isolation -6. Better performance -7. Easier plugins and add-ons -8. More support for jobs that complete (compute, batch) -9. More platforms -10. Easier testing +In terms of performance, the objectives include: +- API call return times at 99%tile ([#4521](https://github.com/GoogleCloudPlatform/kubernetes/issues/4521)) +- scale to 100 nodes with 30-50 pods (1-2 containers) per node +- scheduling throughput at the 99%tile ([#3954](https://github.com/GoogleCloudPlatform/kubernetes/issues/3954)) +- startup time at the 99%tile ([#3552](https://github.com/GoogleCloudPlatform/kubernetes/issues/3952)) [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/roadmap.md?pixel)]() diff --git a/docs/services.md b/docs/services.md index 57da3634262..fe6ffa4f808 100644 --- a/docs/services.md +++ b/docs/services.md @@ -31,7 +31,7 @@ that is updated whenever the set of `Pods` in a `Service` changes. For non-native applications, Kubernetes offers a virtual-IP-based bridge to Services which redirects to the backend `Pods`. -## Defining a Service +## Defining a service A `Service` in Kubernetes is a REST object, similar to a `Pod`. Like all of the REST objects, a `Service` definition can be POSTed to the apiserver to create a @@ -43,7 +43,7 @@ port 9376 and carry a label "app=MyApp". "kind": "Service", "apiVersion": "v1beta3", "metadata": { - "name": "my-service", + "name": "my-service" }, "spec": { "selector": { @@ -138,7 +138,7 @@ Accessing a `Service` without a selector works the same as if it had selector. The traffic will be routed to endpoints defined by the user (`1.2.3.4:80` in this example). -## Portals and service proxies +## Virtual IPs and service proxies Every node in a Kubernetes cluster runs a `kube-proxy`. This application watches the Kubernetes master for the addition and removal of `Service` @@ -175,7 +175,7 @@ disambiguated. For example: "kind": "Service", "apiVersion": "v1beta3", "metadata": { - "name": "my-service", + "name": "my-service" }, "spec": { "selector": { @@ -199,20 +199,23 @@ disambiguated. For example: } ``` -## Choosing your own PortalIP address -A user can specify their own `PortalIP` address as part of a `Service` creation -request. For example, if they already have an existing DNS entry that they -wish to replace, or legacy systems that are configured for a specific IP -address and difficult to re-configure. The `PortalIP` address that a user -chooses must be a valid IP address and within the portal_net CIDR range that is -specified by flag to the API server. If the PortalIP value is invalid, the -apiserver returns a 422 HTTP status code to indicate that the value is invalid.
+A user can specify their own cluster IP address as part of a `Service` creation +request. To do this, set the `spec.clusterIP` field (called `portalIP` in +v1beta3 and earlier APIs). For example, if they already have an existing DNS +entry that they wish to replace, or legacy systems that are configured for a +specific IP address and difficult to re-configure. The IP address that a user +chooses must be a valid IP address and within the service_cluster_ip_range CIDR +range that is specified by flag to the API server. If the IP address value is +invalid, the apiserver returns a 422 HTTP status code to indicate that the +value is invalid. ### Why not use round-robin DNS? A question that pops up every now and then is why we do all this stuff with -portals rather than just use standard round-robin DNS. There are a few reasons: +virtual IPs rather than just use standard round-robin DNS. There are a few +reasons: * There is a long history of DNS libraries not respecting DNS TTLs and caching the results of name lookups. @@ -221,7 +224,7 @@ portals rather than just use standard round-robin DNS. There are a few reasons: client re-resolving DNS over and over would be difficult to manage. We try to discourage users from doing things that hurt themselves. That said, -if enough people ask for this, we may implement it as an alternative to portals. +if enough people ask for this, we may implement it as an alternative. ## Discovering services @@ -238,7 +241,7 @@ and simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables, where the Service name is upper-cased and dashes are converted to underscores. For example, the Service "redis-master" which exposes TCP port 6379 and has been -allocated portal IP address 10.0.0.11 produces the following environment +allocated cluster IP address 10.0.0.11 produces the following environment variables: ``` @@ -272,24 +275,25 @@ cluster IP. We will soon add DNS support for multi-port `Service`s in the form of SRV records. -## Headless Services +## Headless services -Sometimes you don't need or want a single service IP. In this case, you can -create "headless" services by specifying `"None"` for the `PortalIP`. For such -`Service`s, a cluster IP is not allocated and service-specific environment -variables for `Pod`s are not created. DNS is configured to return multiple A -records (addresses) for the `Service` name, which point directly to the `Pod`s -backing the `Service`. Additionally, the kube proxy does not handle these -services and there is no load balancing or proxying done by the platform for -them. The endpoints controller will still create `Endpoints` records in the -API. +Sometimes you don't need or want load-balancing and a single service IP. In +this case, you can create "headless" services by specifying `"None"` for the +cluster IP (`spec.clusterIP` or `spec.portalIP` in v1beta3 and earlier APIs). +For such `Service`s, a cluster IP is not allocated and service-specific +environment variables for `Pod`s are not created. DNS is configured to return +multiple A records (addresses) for the `Service` name, which point directly to +the `Pod`s backing the `Service`. Additionally, the kube proxy does not handle +these services and there is no load balancing or proxying done by the platform +for them. The endpoints controller will still create `Endpoints` records in +the API. This option allows developers to reduce coupling to the Kubernetes system, if they desire, but leaves them freedom to do discovery in their own way. 
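The environment-variable convention described above (Service name upper-cased, dashes converted to underscores) is mechanical enough to sketch in a few lines of Go; the helper below is ours, not Kubernetes code, and only illustrates the naming rule:

```go
package main

import (
	"fmt"
	"strings"
)

// serviceEnvVars derives the simpler {SVCNAME}_SERVICE_HOST and
// {SVCNAME}_SERVICE_PORT variable names for a Service, following the
// documented convention: upper-case the Service name and convert dashes
// to underscores.
func serviceEnvVars(serviceName, clusterIP string, port int) map[string]string {
	prefix := strings.ToUpper(strings.Replace(serviceName, "-", "_", -1))
	return map[string]string{
		prefix + "_SERVICE_HOST": clusterIP,
		prefix + "_SERVICE_PORT": fmt.Sprintf("%d", port),
	}
}

func main() {
	// Matches the docs' example: "redis-master" exposing TCP port 6379 on
	// cluster IP 10.0.0.11 yields REDIS_MASTER_SERVICE_HOST=10.0.0.11 and
	// REDIS_MASTER_SERVICE_PORT=6379.
	for k, v := range serviceEnvVars("redis-master", "10.0.0.11", 6379) {
		fmt.Printf("%s=%s\n", k, v)
	}
}
```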
Applications can still use a self-registration pattern and adapters for other discovery systems could easily be built upon this API. -## External Services +## External services For some parts of your application (e.g. frontends) you may want to expose a Service onto an external (outside of your cluster, maybe public internet) IP @@ -332,7 +336,7 @@ information about the provisioned balancer will be published in the `Service`'s "kind": "Service", "apiVersion": "v1beta3", "metadata": { - "name": "my-service", + "name": "my-service" }, "spec": { "selector": { @@ -366,7 +370,7 @@ though exactly how that works depends on the cloud provider. ## Shortcomings -We expect that using iptables and userspace proxies for portals will work at +We expect that using iptables and userspace proxies for VIPs will work at small to medium scale, but may not scale to very large clusters with thousands of Services. See [the original design proposal for portals](https://github.com/GoogleCloudPlatform/kubernetes/issues/1107) for more @@ -387,7 +391,7 @@ but the current API requires it. In the future we envision that the proxy policy can become more nuanced than simple round robin balancing, for example master elected or sharded. We also envision that some `Services` will have "real" load balancers, in which case the -portal will simply transport the packets there. +VIP will simply transport the packets there. There's a [proposal](https://github.com/GoogleCloudPlatform/kubernetes/issues/3760) to @@ -400,7 +404,7 @@ We intend to have first-class support for L7 (HTTP) `Service`s. We intend to have more flexible ingress modes for `Service`s which encompass the current `ClusterIP`, `NodePort`, and `LoadBalancer` modes and more. -## The gory details of portals +## The gory details of virtual IPs The previous information should be sufficient for many people who just want to use `Services`. However, there is a lot going on behind the scenes that may be @@ -427,26 +431,25 @@ of Kubernetes that used in memory locking) as well as checking for invalid assignments due to administrator intervention and cleaning up any any IPs that were allocated but which no service currently uses. -### IPs and Portals +### IPs and VIPs Unlike `Pod` IP addresses, which actually route to a fixed destination, `Service` IPs are not actually answered by a single host. Instead, we use `iptables` (packet processing logic in Linux) to define virtual IP addresses -which are transparently redirected as needed. We call the tuple of the -`Service` IP and the `Service` port the `portal`. When clients connect to the -`portal`, their traffic is automatically transported to an appropriate -endpoint. The environment variables and DNS for `Services` are actually -populated in terms of the portal IP and port. +which are transparently redirected as needed. When clients connect to the +VIP, their traffic is automatically transported to an appropriate endpoint. +The environment variables and DNS for `Services` are actually populated in +terms of the `Service`'s VIP and port. As an example, consider the image processing application described above. -When the backend `Service` is created, the Kubernetes master assigns a portal +When the backend `Service` is created, the Kubernetes master assigns a virtual IP address, for example 10.0.0.1. Assuming the `Service` port is 1234, the -portal is 10.0.0.1:1234. The master stores that information, which is then -observed by all of the `kube-proxy` instances in the cluster. 
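To make the collision-avoidance discussion above concrete, here is a toy in-memory allocator over the service IP range that refuses out-of-range and duplicate assignments. It is only a sketch: the real master persists its allocation map in etcd and layers extra checks on top, none of which are shown here:

```go
package main

import (
	"errors"
	"fmt"
	"net"
)

// ipAllocator is a toy stand-in for the master's service IP allocator.
type ipAllocator struct {
	cidr *net.IPNet
	used map[string]bool
}

func newIPAllocator(serviceIPRange string) (*ipAllocator, error) {
	_, cidr, err := net.ParseCIDR(serviceIPRange)
	if err != nil {
		return nil, err
	}
	return &ipAllocator{cidr: cidr, used: map[string]bool{}}, nil
}

// Allocate reserves a specific IP, failing on out-of-range or duplicate
// requests so that no two Services ever share a virtual IP.
func (a *ipAllocator) Allocate(s string) error {
	ip := net.ParseIP(s)
	if ip == nil || !a.cidr.Contains(ip) {
		return fmt.Errorf("%q is not an IP within %s", s, a.cidr)
	}
	if a.used[ip.String()] {
		return errors.New("IP already allocated: " + s)
	}
	a.used[ip.String()] = true
	return nil
}

func main() {
	alloc, _ := newIPAllocator("10.0.0.0/24")
	fmt.Println(alloc.Allocate("10.0.0.1")) // <nil>
	fmt.Println(alloc.Allocate("10.0.0.1")) // IP already allocated
}
```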
When a proxy -sees a new portal, it opens a new random port, establishes an iptables redirect -from the portal to this new port, and starts accepting connections on it. +`Service` is observed by all of the `kube-proxy` instances in the cluster. +When a proxy sees a new `Service`, it opens a new random port, establishes an +iptables redirect from the VIP to this new port, and starts accepting +connections on it. -When a client connects to the portal the iptables rule kicks in, and redirects +When a client connects to the VIP the iptables rule kicks in, and redirects the packets to the `Service proxy`'s own port. The `Service proxy` chooses a backend, and starts proxying traffic from the client to the backend. diff --git a/examples/k8petstore/k8petstore.sh b/examples/k8petstore/k8petstore.sh index 612640f21fd..16c1be24a9f 100755 --- a/examples/k8petstore/k8petstore.sh +++ b/examples/k8petstore/k8petstore.sh @@ -16,16 +16,32 @@ echo "WRITING KUBE FILES , will overwrite the jsons, then testing pods. is kube clean ready to go?" + +#Args below can be overridden when calling from cmd line. #Just send all the args in order. #for dev/test you can use: #kubectl=$GOPATH/src/github.com/GoogleCloudPlatform/kubernetes/cluster/kubectl.sh" kubectl="kubectl" VERSION="r.2.8.19" PUBLIC_IP="10.1.4.89" # ip which we use to access the Web server. -SECONDS=1000 # number of seconds to measure throughput. +_SECONDS=1000 # number of seconds to measure throughput. FE="1" # amount of Web server LG="1" # amount of load generators SLAVE="1" # amount of redis slaves +TEST="1" # 0 = Don't run tests, 1 = Do run tests. +NS="k8petstore" # namespace +kubectl="${1:-$kubectl}" +VERSION="${2:-$VERSION}" +PUBLIC_IP="${3:-$PUBLIC_IP}" # ip which we use to access the Web server. +_SECONDS="${4:-$_SECONDS}" # number of seconds to measure throughput. +FE="${5:-$FE}" # amount of Web server +LG="${6:-$LG}" # amount of load generators +SLAVE="${7:-$SLAVE}" # amount of redis slaves +TEST="${8:-$TEST}" # 0 = Don't run tests, 1 = Do run tests. +NS="${9:-$NS}" # namespace + +echo "Running w/ args: kubectl $kubectl version $VERSION ip $PUBLIC_IP sec $_SECONDS fe $FE lg $LG slave $SLAVE test $TEST NAMESPACE $NS" function create { cat << EOF > fe-rc.json @@ -188,53 +204,70 @@ cat << EOF > slave-rc.json "labels": {"name": "redisslave"} } EOF -$kubectl create -f rm.json --api-version=v1beta1 -$kubectl create -f rm-s.json --api-version=v1beta1 +$kubectl create -f rm.json --api-version=v1beta1 --namespace=$NS +$kubectl create -f rm-s.json --api-version=v1beta1 --namespace=$NS sleep 3 # precaution to prevent fe from spinning up too soon. -$kubectl create -f slave-rc.json --api-version=v1beta1 -$kubectl create -f rs-s.json --api-version=v1beta1 +$kubectl create -f slave-rc.json --api-version=v1beta1 --namespace=$NS +$kubectl create -f rs-s.json --api-version=v1beta1 --namespace=$NS sleep 3 # see above comment. -$kubectl create -f fe-rc.json --api-version=v1beta1 -$kubectl create -f fe-s.json --api-version=v1beta1 -$kubectl create -f bps-load-gen-rc.json --api-version=v1beta1 +$kubectl create -f fe-rc.json --api-version=v1beta1 --namespace=$NS +$kubectl create -f fe-s.json --api-version=v1beta1 --namespace=$NS +$kubectl create -f bps-load-gen-rc.json --api-version=v1beta1 --namespace=$NS } -function test { - pass_http=0 + +function pollfor { + pass_http=0 - ### Test HTTP Server comes up. - for i in `seq 1 150`; - do - ### Just testing that the front end comes up. Not sure how to test total entries etc... (yet) - echo "Trying curl ... $i .
expect a few failures while pulling images... " - curl "$PUBLIC_IP:3000" > result - cat result - cat result | grep -q "k8-bps" - if [ $? -eq 0 ]; then - echo "TEST PASSED after $i tries !" - i=1000 - break - else - echo "the above RESULT didn't contain target string for trial $i" - fi - sleep 5 - done + ### Test HTTP Server comes up. + for i in `seq 1 150`; + do + ### Just testing that the front end comes up. Not sure how to test total entries etc... (yet) + echo "Trying curl ... $PUBLIC_IP:3000 , attempt $i . expect a few failures while pulling images... " + curl "$PUBLIC_IP:3000" > result + cat result + cat result | grep -q "k8-bps" + if [ $? -eq 0 ]; then + echo "TEST PASSED after $i tries !" + i=1000 + break + else + echo "the above RESULT didn't contain target string for trial $i" + fi + sleep 3 + done - if [ $i -eq 1000 ]; then - pass_http=-1 - fi + if [ $i -eq 1000 ]; then + pass_http=1 + fi + +} - pass_load=0 +function tests { + pass_load=0 - ### Print statistics of db size, every second, until $SECONDS are up. - for i in `seq 1 $SECONDS`; - do - echo "curl : $i" - curl "$PUBLIC_IP:3000/llen" >> result - sleep 1 - done + ### Print statistics of db size, every second, until $_SECONDS are up. + for i in `seq 1 $_SECONDS`; + do + echo "curl : $PUBLIC_IP:3000 , $i of $_SECONDS" + curr_cnt="`curl "$PUBLIC_IP:3000/llen"`" + ### Write CSV File of # of trials / total transactions. + echo "$i $curr_cnt" >> result + echo "total transactions so far : $curr_cnt" + sleep 1 + done } create -test +pollfor + +if [[ $pass_http -eq 1 ]]; then + echo "Passed..." +else + exit 1 +fi + +if [[ $TEST -eq 1 ]]; then + echo "running polling tests now" + tests +fi diff --git a/examples/meteor/README.md b/examples/meteor/README.md index 7c9375fdf94..ad08de308a9 100644 --- a/examples/meteor/README.md +++ b/examples/meteor/README.md @@ -136,7 +136,7 @@ _sticky sessions_. With Kubernetes you can scale out your app easily with session affinity. The [`meteor-service.json`](meteor-service.json) file contains `"sessionAffinity": "ClientIP"`, which provides this for us. See the [service -documentation](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/services.md#portals-and-service-proxies) +documentation](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/services.md#virtual-ips-and-service-proxies) for more information. As mentioned above, the mongo container uses a volume which is mapped diff --git a/examples/mysql-wordpress-pd/README.md b/examples/mysql-wordpress-pd/README.md index 68bf487b15f..23c60983fae 100644 --- a/examples/mysql-wordpress-pd/README.md +++ b/examples/mysql-wordpress-pd/README.md @@ -1,17 +1,14 @@ # Persistent Installation of MySQL and WordPress on Kubernetes -This example describes how to run a persistent installation of [Wordpress](https://wordpress.org/). +This example describes how to run a persistent installation of [Wordpress](https://wordpress.org/) using the [volumes](/docs/volumes.md) feature of Kubernetes, and [Google Compute Engine](https://cloud.google.com/compute/docs/disks) [persistent disks](/docs/volumes.md#gcepersistentdisk). We'll use the [mysql](https://registry.hub.docker.com/_/mysql/) and [wordpress](https://registry.hub.docker.com/_/wordpress/) official [Docker](https://www.docker.com/) images for this installation. (The wordpress image includes an Apache server).
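The k8petstore changes above boil down to a classic readiness poll: hit the frontend until the page contains the marker string, then fail or proceed. A rough Go equivalent of that wait loop, for readers who prefer it to shell (the URL and marker come from the script; the helper name is ours):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
	"time"
)

// waitForMarker polls url until the response body contains marker, mirroring
// the pollfor loop in k8petstore.sh: up to `attempts` tries with a short
// sleep in between, succeeding as soon as the marker shows up.
func waitForMarker(url, marker string, attempts int) bool {
	for i := 1; i <= attempts; i++ {
		resp, err := http.Get(url)
		if err == nil {
			body, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			if strings.Contains(string(body), marker) {
				fmt.Printf("TEST PASSED after %d tries!\n", i)
				return true
			}
		}
		time.Sleep(3 * time.Second)
	}
	return false
}

func main() {
	// The script curls $PUBLIC_IP:3000 and greps for "k8-bps".
	if !waitForMarker("http://10.1.4.89:3000", "k8-bps", 150) {
		fmt.Println("frontend never came up")
	}
}
```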
-We'll create two Kubernetes [pods](http://docs.k8s.io/pods.md) to run mysql and wordpress, both with associated [persistent disks](https://cloud.google.com/compute/docs/disks), then set up a Kubernetes [service](http://docs.k8s.io/services.md) to front each pod. +We'll create two Kubernetes [pods](http://docs.k8s.io/pods.md) to run mysql and wordpress, both with associated persistent disks, then set up a Kubernetes [service](http://docs.k8s.io/services.md) to front each pod. This example demonstrates several useful things, including: how to set up and use persistent disks with Kubernetes pods; how to define Kubernetes services to leverage docker-links-compatible service environment variables; and use of an external load balancer to expose the wordpress service externally and make it transparent to the user if the wordpress pod moves to a different cluster node. -Some of the example's details, such as the Persistent Disk setup, require that Kubernetes is running on [Google Compute Engine](https://cloud.google.com/compute/). - - ## Install gcloud and start up a Kubernetes cluster First, if you have not already done so, [create](https://cloud.google.com/compute/docs/quickstart) a [Google Cloud Platform](https://cloud.google.com/) project, and install the [gcloud SDK](https://cloud.google.com/sdk/). @@ -41,8 +38,10 @@ curl -sS https://get.k8s.io | bash For this WordPress installation, we're going to configure our Kubernetes [pods](http://docs.k8s.io/pods.md) to use [persistent disks](https://cloud.google.com/compute/docs/disks). This means that we can preserve installation state across pod shutdown and re-startup. +You will need to create the disks in the same [GCE zone](https://cloud.google.com/compute/docs/zones) as the Kubernetes cluster. The `cluster/kube-up.sh` script will create the cluster in the `us-central1-b` zone by default, as seen in the [config-default.sh](/cluster/gce/config-default.sh) file. Replace `$ZONE` below with the appropriate zone. + Before doing anything else, we'll create the persistent disks that we'll use for the installation: one for the mysql pod, and one for the wordpress pod. -The general series of steps required is as described [here](http://docs.k8s.io/volumes.md), where $ZONE is the zone where your cluster is running, and $DISK_SIZE is specified as, e.g. '500GB'. In future, this process will be more streamlined. +The general series of steps required is as described [here](http://docs.k8s.io/volumes.md), where $DISK_SIZE is specified as, e.g. '500GB'. In future, this process will be more streamlined. So for the two disks used in this example, do the following. 
First create the mysql disk, setting the disk size to meet your needs: diff --git a/examples/openshift-origin/create.sh b/examples/openshift-origin/create.sh index 52c02395deb..6be3ea3a72b 100755 --- a/examples/openshift-origin/create.sh +++ b/examples/openshift-origin/create.sh @@ -23,11 +23,11 @@ cluster/kubectl.sh create -f $OPENSHIFT_EXAMPLE/openshift-service.yaml sleep 30 export PUBLIC_IP=$(cluster/kubectl.sh get services openshift --template="{{ index .spec.publicIPs 0 }}") echo $PUBLIC_IP -export PORTAL_IP=$(cluster/kubectl.sh get services openshift --template="{{ .spec.portalIP }}") -echo $PORTAL_IP +export SVC_IP=$(cluster/kubectl.sh get services openshift --template="{{ .spec.portalIP }}") +echo $SVC_IP docker run --privileged -v ${OPENSHIFT_CONFIG}:/config openshift/origin start master --write-config=/config --kubeconfig=/config/kubeconfig --master=https://localhost:8443 --public-master=https://${PUBLIC_IP}:8443 sudo -E chown ${USER} -R ${OPENSHIFT_CONFIG} docker run -i -t --privileged -e="OPENSHIFTCONFIG=/config/admin.kubeconfig" -v ${OPENSHIFT_CONFIG}:/config openshift/origin ex bundle-secret openshift-config -f /config &> ${OPENSHIFT_EXAMPLE}/secret.json cluster/kubectl.sh create -f ${OPENSHIFT_EXAMPLE}/secret.json cluster/kubectl.sh create -f ${OPENSHIFT_EXAMPLE}/openshift-controller.yaml -cluster/kubectl.sh get pods | grep openshift \ No newline at end of file +cluster/kubectl.sh get pods | grep openshift diff --git a/examples/walkthrough/k8s201.md b/examples/walkthrough/k8s201.md index aa0538f50ff..93f48c653e8 100644 --- a/examples/walkthrough/k8s201.md +++ b/examples/walkthrough/k8s201.md @@ -6,7 +6,7 @@ We'll now cover some slightly more advanced topics in Kubernetes, related to app scaling. ### Labels -Having already learned about Pods and how to create them, you may be struck by an urge to create many, many pods. Please do! But eventually you will need a system to organize these pods into groups. The system for achieving this in Kubernetes is Labels. Labels are key-value pairs that are attached to each API object in Kubernetes. Label selectors can be passed along with a RESTful ```list``` request to the apiserver to retrieve a list of objects which match that label selector. For example: +Having already learned about Pods and how to create them, you may be struck by an urge to create many, many pods. Please do! But eventually you will need a system to organize these pods into groups. The system for achieving this in Kubernetes is Labels. Labels are key-value pairs that are attached to each object in Kubernetes. Label selectors can be passed along with a RESTful ```list``` request to the apiserver to retrieve a list of objects which match that label selector. For example: ```sh cluster/kubectl.sh get pods -l name=nginx @@ -18,7 +18,7 @@ Lists all pods who name label matches 'nginx'. Labels are discussed in detail [ OK, now you have an awesome, multi-container, labelled pod and you want to use it to build an application, you might be tempted to just start building a whole bunch of individual pods, but if you do that, a whole host of operational concerns pop up. For example: how will you scale the number of pods up or down and how will you ensure that all pods are homogenous? -Replication controllers are the objects to answer these questions. A replication controller combines a template for pod creation (a "cookie-cutter" if you will) and a number of desired replicas, into a single API object. 
The replication controller also contains a label selector that identifies the set of objects managed by the replication controller. The replication controller constantly measures the size of this set relative to the desired size, and takes action by creating or deleting pods. The design of replication controllers is discussed in detail [elsewhere](http://docs.k8s.io/replication-controller.md). +Replication controllers are the objects to answer these questions. A replication controller combines a template for pod creation (a "cookie-cutter" if you will) and a number of desired replicas, into a single Kubernetes object. The replication controller also contains a label selector that identifies the set of objects managed by the replication controller. The replication controller constantly measures the size of this set relative to the desired size, and takes action by creating or deleting pods. The design of replication controllers is discussed in detail [elsewhere](http://docs.k8s.io/replication-controller.md). An example replication controller that instantiates two pods running nginx looks like: ```yaml @@ -49,7 +49,7 @@ spec: ``` ### Services -Once you have a replicated set of pods, you need an abstraction that enables connectivity between the layers of your application. For example, if you have a replication controller managing your backend jobs, you don't want to have to reconfigure your front-ends whenever you re-scale your backends. Likewise, if the pods in your backends are scheduled (or rescheduled) onto different machines, you can't be required to re-configure your front-ends. In Kubernetes the Service API object achieves these goals. A Service basically combines an IP address and a label selector together to form a simple, static rallying point for connecting to a micro-service in your application. +Once you have a replicated set of pods, you need an abstraction that enables connectivity between the layers of your application. For example, if you have a replication controller managing your backend jobs, you don't want to have to reconfigure your front-ends whenever you re-scale your backends. Likewise, if the pods in your backends are scheduled (or rescheduled) onto different machines, you can't be required to re-configure your front-ends. In Kubernetes, the Service object achieves these goals. A Service basically combines an IP address and a label selector together to form a simple, static rallying point for connecting to a micro-service in your application. For example, here is a service that balances across the pods created in the previous nginx replication controller example: ```yaml diff --git a/hack/e2e-suite/services.sh b/hack/e2e-suite/services.sh index 10617f268de..ebe644a2ce0 100755 --- a/hack/e2e-suite/services.sh +++ b/hack/e2e-suite/services.sh @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Verifies that services and portals work. +# Verifies that services and virtual IPs work. 
set -o errexit set -o nounset @@ -285,10 +285,10 @@ function verify_from_container() { fi done '")) \ - || error "testing $1 portal from container failed" + || error "testing $1 VIP from container failed" found_pods=$(sort_args "${results[@]}") if [[ "${found_pods}" != "$5" ]]; then - error -e "$1 portal failed from container, expected:\n + error -e "$1 VIP failed from container, expected:\n $(printf '\t%s\n' $5)\n got:\n $(printf '\t%s\n' ${found_pods}) @@ -323,20 +323,20 @@ wait_for_pods "${svc2_name}" "${svc2_count}" svc1_pods=$(query_pods "${svc1_name}" "${svc1_count}") svc2_pods=$(query_pods "${svc2_name}" "${svc2_count}") -# Get the portal IPs. +# Get the VIP IPs. svc1_ip=$(${KUBECTL} get services -o template '--template={{.spec.portalIP}}' "${svc1_name}" --api-version=v1beta3) test -n "${svc1_ip}" || error "Service1 IP is blank" svc2_ip=$(${KUBECTL} get services -o template '--template={{.spec.portalIP}}' "${svc2_name}" --api-version=v1beta3) test -n "${svc2_ip}" || error "Service2 IP is blank" if [[ "${svc1_ip}" == "${svc2_ip}" ]]; then - error "Portal IPs conflict: ${svc1_ip}" + error "VIPs conflict: ${svc1_ip}" fi # -# Test 1: Prove that the service portal is alive. +# Test 1: Prove that the service VIP is alive. # -echo "Test 1: Prove that the service portal is alive." -echo "Verifying the portals from the host" +echo "Test 1: Prove that the service VIP is alive." +echo "Verifying the VIP from the host" wait_for_service_up "${svc1_name}" "${svc1_ip}" "${svc1_port}" \ "${svc1_count}" "${svc1_pods}" for ip in ${svc1_publics}; do @@ -345,7 +345,7 @@ for ip in ${svc1_publics}; do done wait_for_service_up "${svc2_name}" "${svc2_ip}" "${svc2_port}" \ "${svc2_count}" "${svc2_pods}" -echo "Verifying the portals from a container" +echo "Verifying the VIP from a container" verify_from_container "${svc1_name}" "${svc1_ip}" "${svc1_port}" \ "${svc1_count}" "${svc1_pods}" for ip in ${svc1_publics}; do @@ -356,17 +356,17 @@ verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \ "${svc2_count}" "${svc2_pods}" # -# Test 2: Bounce the proxy and make sure the portal comes back. +# Test 2: Bounce the proxy and make sure the VIP comes back. # -echo "Test 2: Bounce the proxy and make sure the portal comes back." +echo "Test 2: Bounce the proxy and make sure the VIP comes back." echo "Restarting kube-proxy" restart-kube-proxy "${test_node}" -echo "Verifying the portals from the host" +echo "Verifying the VIP from the host" wait_for_service_up "${svc1_name}" "${svc1_ip}" "${svc1_port}" \ "${svc1_count}" "${svc1_pods}" wait_for_service_up "${svc2_name}" "${svc2_ip}" "${svc2_port}" \ "${svc2_count}" "${svc2_pods}" -echo "Verifying the portals from a container" +echo "Verifying the VIP from a container" verify_from_container "${svc1_name}" "${svc1_ip}" "${svc1_port}" \ "${svc1_count}" "${svc1_pods}" verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \ @@ -395,14 +395,14 @@ wait_for_pods "${svc3_name}" "${svc3_count}" # Get the sorted lists of pods. svc3_pods=$(query_pods "${svc3_name}" "${svc3_count}") -# Get the portal IP. +# Get the VIP. 
svc3_ip=$(${KUBECTL} get services -o template '--template={{.spec.portalIP}}' "${svc3_name}" --api-version=v1beta3) test -n "${svc3_ip}" || error "Service3 IP is blank" -echo "Verifying the portals from the host" +echo "Verifying the VIPs from the host" wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \ "${svc3_count}" "${svc3_pods}" -echo "Verifying the portals from a container" +echo "Verifying the VIPs from a container" verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \ "${svc3_count}" "${svc3_pods}" @@ -415,31 +415,31 @@ echo "Manually removing iptables rules" ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PORTALS-HOST || true" ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true" ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PROXY || true" -echo "Verifying the portals from the host" +echo "Verifying the VIPs from the host" wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \ "${svc3_count}" "${svc3_pods}" -echo "Verifying the portals from a container" +echo "Verifying the VIPs from a container" verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \ "${svc3_count}" "${svc3_pods}" # -# Test 6: Restart the master, make sure portals come back. +# Test 6: Restart the master, make sure VIPs come back. # -echo "Test 6: Restart the master, make sure portals come back." +echo "Test 6: Restart the master, make sure VIPs come back." echo "Restarting the master" restart-apiserver "${master}" sleep 5 -echo "Verifying the portals from the host" +echo "Verifying the VIPs from the host" wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \ "${svc3_count}" "${svc3_pods}" -echo "Verifying the portals from a container" +echo "Verifying the VIPs from a container" verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \ "${svc3_count}" "${svc3_pods}" # -# Test 7: Bring up another service, make sure it does not re-use Portal IPs. +# Test 7: Bring up another service, make sure it does not re-use IPs. # -echo "Test 7: Bring up another service, make sure it does not re-use Portal IPs." +echo "Test 7: Bring up another service, make sure it does not re-use IPs." svc4_name="service4" svc4_port=80 svc4_count=3 @@ -451,17 +451,17 @@ wait_for_pods "${svc4_name}" "${svc4_count}" # Get the sorted lists of pods. svc4_pods=$(query_pods "${svc4_name}" "${svc4_count}") -# Get the portal IP. +# Get the VIP. 
svc4_ip=$(${KUBECTL} get services -o template '--template={{.spec.portalIP}}' "${svc4_name}" --api-version=v1beta3) test -n "${svc4_ip}" || error "Service4 IP is blank" if [[ "${svc4_ip}" == "${svc2_ip}" || "${svc4_ip}" == "${svc3_ip}" ]]; then - error "Portal IPs conflict: ${svc4_ip}" + error "VIPs conflict: ${svc4_ip}" fi -echo "Verifying the portals from the host" +echo "Verifying the VIPs from the host" wait_for_service_up "${svc4_name}" "${svc4_ip}" "${svc4_port}" \ "${svc4_count}" "${svc4_pods}" -echo "Verifying the portals from a container" +echo "Verifying the VIPs from a container" verify_from_container "${svc4_name}" "${svc4_ip}" "${svc4_port}" \ "${svc4_count}" "${svc4_pods}" diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index f642a328cce..ad9367cf798 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -204,7 +204,9 @@ fi cd kubernetes # Have cmd/e2e run by goe2e.sh generate JUnit report in ${WORKSPACE}/junit*.xml -export E2E_REPORT_DIR=${WORKSPACE} +ARTIFACTS=${WORKSPACE}/_artifacts +mkdir -p ${ARTIFACTS} +export E2E_REPORT_DIR=${ARTIFACTS} ### Set up ### if [[ "${E2E_UP,,}" == "true" ]]; then @@ -220,6 +222,13 @@ if [[ "${E2E_TEST,,}" == "true" ]]; then go run ./hack/e2e.go ${E2E_OPT} -v --test --test_args="${GINKGO_TEST_ARGS}--ginkgo.noColor" || true fi +# TODO(zml): We have a bunch of legacy Jenkins configs that are +# expecting junit*.xml to be in ${WORKSPACE} root and it's Friday +# afternoon, so just put the junit report where it's expected. +for junit in ${ARTIFACTS}/junit*.xml; do + ln -s ${junit} ${WORKSPACE} +done + ### Clean up ### if [[ "${E2E_DOWN,,}" == "true" ]]; then # Sleep before deleting the cluster to give the controller manager time to diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index ff86bb0ca53..48b024bbe79 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -166,7 +166,7 @@ sudo -E "${GO_OUT}/kube-apiserver" \ --port="${API_PORT}" \ --runtime_config=api/v1beta3 \ --etcd_servers="http://127.0.0.1:4001" \ - --portal_net="10.0.0.0/24" \ + --service-cluster-ip-range="10.0.0.0/24" \ --cors_allowed_origins="${API_CORS_ALLOWED_ORIGINS}" >"${APISERVER_LOG}" 2>&1 & APISERVER_PID=$! diff --git a/hack/test-cmd.sh b/hack/test-cmd.sh index 4ed8822289e..6a81355a00e 100755 --- a/hack/test-cmd.sh +++ b/hack/test-cmd.sh @@ -94,7 +94,7 @@ kube::log::status "Starting kube-apiserver" --kubelet_port=${KUBELET_PORT} \ --runtime_config=api/v1beta3 \ --cert_dir="${TMPDIR:-/tmp/}" \ - --portal_net="10.0.0.0/24" 1>&2 & + --service-cluster-ip-range="10.0.0.0/24" 1>&2 & APISERVER_PID=$! 
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/healthz" "apiserver: " @@ -171,7 +171,7 @@ for version in "${kube_api_versions[@]}"; do kube::test::get_object_assert 'pod/valid-pod' "{{$id_field}}" 'valid-pod' kube::test::get_object_assert 'pods/valid-pod' "{{$id_field}}" 'valid-pod' # Describe command should print detailed information - kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image(s):" "Host:" "Labels:" "Status:" "Replication Controllers" + kube::test::describe_object_assert pods 'valid-pod' "Name:" "Image(s):" "Node:" "Labels:" "Status:" "Replication Controllers" ### Dump current valid-pod POD output_pod=$(kubectl get pod valid-pod -o yaml --output-version=v1beta3 "${kube_flags[@]}") diff --git a/hack/test-go.sh b/hack/test-go.sh index 507ce1ce07f..5571ea619b3 100755 --- a/hack/test-go.sh +++ b/hack/test-go.sh @@ -32,7 +32,6 @@ kube::test::find_dirs() { -o -wholename './_output' \ -o -wholename './release' \ -o -wholename './target' \ - -o -wholename '*/third_party/*' \ -o -wholename '*/Godeps/*' \ -o -wholename '*/contrib/podex/*' \ -o -wholename '*/test/e2e/*' \ diff --git a/hack/test-update-storage-objects.sh b/hack/test-update-storage-objects.sh index a595c69816d..90f21820960 100755 --- a/hack/test-update-storage-objects.sh +++ b/hack/test-update-storage-objects.sh @@ -53,7 +53,7 @@ function startApiServer() { --kubelet_port=${KUBELET_PORT} \ --runtime_config="${RUNTIME_CONFIG}" \ --cert_dir="${TMPDIR:-/tmp/}" \ - --portal_net="10.0.0.0/24" 1>&2 & + --service-cluster-ip-range="10.0.0.0/24" 1>&2 & APISERVER_PID=$! kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/healthz" "apiserver: " diff --git a/hack/update-generated-deep-copies.sh b/hack/update-generated-deep-copies.sh index 051f481444b..3b2e9a2415f 100755 --- a/hack/update-generated-deep-copies.sh +++ b/hack/update-generated-deep-copies.sh @@ -18,6 +18,15 @@ set -o errexit set -o nounset set -o pipefail +function result_file_name() { + local version=$1 + if [ "${version}" == "api" ]; then + echo "pkg/api/deep_copy_generated.go" + else + echo "pkg/api/${version}/deep_copy_generated.go" + fi +} + function generate_version() { local version=$1 local TMPFILE="/tmp/deep_copy_generated.$(date +%s).go" @@ -38,14 +47,14 @@ EOF EOF gofmt -w -s $TMPFILE - if [ "${version}" == "api" ]; then - mv $TMPFILE pkg/api/deep_copy_generated.go - else - mv $TMPFILE pkg/api/${version}/deep_copy_generated.go - fi + mv $TMPFILE `result_file_name ${version}` } VERSIONS="api v1beta3 v1" +# To avoid compile errors, remove the currently existing files. +for ver in $VERSIONS; do + rm -f `result_file_name ${ver}` +done for ver in $VERSIONS; do generate_version "${ver}" done diff --git a/hack/update-swagger-spec.sh b/hack/update-swagger-spec.sh index 1fc4e5abc11..d1699accf9a 100755 --- a/hack/update-swagger-spec.sh +++ b/hack/update-swagger-spec.sh @@ -54,7 +54,7 @@ kube::log::status "Starting kube-apiserver" --public_address_override="127.0.0.1" \ --kubelet_port=${KUBELET_PORT} \ --runtime_config=api/v1beta3 \ - --portal_net="10.0.0.0/24" 1>&2 & + --service-cluster-ip-range="10.0.0.0/24" 1>&2 & APISERVER_PID=$! 
kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/healthz" "apiserver: " diff --git a/pkg/api/deep_copy_generated.go b/pkg/api/deep_copy_generated.go index 97bad4c3165..cc3202a19d3 100644 --- a/pkg/api/deep_copy_generated.go +++ b/pkg/api/deep_copy_generated.go @@ -305,13 +305,13 @@ func deepCopy_api_ContainerState(in ContainerState, out *ContainerState, c *conv } else { out.Running = nil } - if in.Termination != nil { - out.Termination = new(ContainerStateTerminated) - if err := deepCopy_api_ContainerStateTerminated(*in.Termination, out.Termination, c); err != nil { + if in.Terminated != nil { + out.Terminated = new(ContainerStateTerminated) + if err := deepCopy_api_ContainerStateTerminated(*in.Terminated, out.Terminated, c); err != nil { return err } } else { - out.Termination = nil + out.Terminated = nil } return nil } @@ -921,6 +921,7 @@ func deepCopy_api_NodeList(in NodeList, out *NodeList, c *conversion.Cloner) err func deepCopy_api_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) error { out.PodCIDR = in.PodCIDR out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID out.Unschedulable = in.Unschedulable return nil } @@ -1374,7 +1375,7 @@ func deepCopy_api_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error out.NodeSelector = nil } out.ServiceAccount = in.ServiceAccount - out.Host = in.Host + out.NodeName = in.NodeName out.HostNetwork = in.HostNetwork if in.ImagePullSecrets != nil { out.ImagePullSecrets = make([]LocalObjectReference, len(in.ImagePullSecrets)) @@ -1581,14 +1582,6 @@ func deepCopy_api_ReplicationControllerSpec(in ReplicationControllerSpec, out *R } else { out.Selector = nil } - if in.TemplateRef != nil { - out.TemplateRef = new(ObjectReference) - if err := deepCopy_api_ObjectReference(*in.TemplateRef, out.TemplateRef, c); err != nil { - return err - } - } else { - out.TemplateRef = nil - } if in.Template != nil { out.Template = new(PodTemplateSpec) if err := deepCopy_api_PodTemplateSpec(*in.Template, out.Template, c); err != nil { @@ -1927,7 +1920,7 @@ func deepCopy_api_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Cl } else { out.Selector = nil } - out.PortalIP = in.PortalIP + out.ClusterIP = in.ClusterIP out.Type = in.Type if in.DeprecatedPublicIPs != nil { out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs)) diff --git a/pkg/api/helpers.go b/pkg/api/helpers.go index c85ac6b400c..26affb3c51e 100644 --- a/pkg/api/helpers.go +++ b/pkg/api/helpers.go @@ -99,15 +99,15 @@ func NewDeleteOptions(grace int64) *DeleteOptions { return &DeleteOptions{GracePeriodSeconds: &grace} } -// this function aims to check if the service portal IP is set or not +// this function aims to check if the service's ClusterIP is set or not // the objective is not to perform validation here func IsServiceIPSet(service *Service) bool { - return service.Spec.PortalIP != PortalIPNone && service.Spec.PortalIP != "" + return service.Spec.ClusterIP != ClusterIPNone && service.Spec.ClusterIP != "" } -// this function aims to check if the service portal IP is requested or not +// this function aims to check if the service's cluster IP is requested or not func IsServiceIPRequested(service *Service) bool { - return service.Spec.PortalIP == "" + return service.Spec.ClusterIP == "" } var standardFinalizers = util.NewStringSet( diff --git a/pkg/api/node_example.json b/pkg/api/node_example.json index 8574c731a7b..81250f28cd3 100644 --- a/pkg/api/node_example.json +++ b/pkg/api/node_example.json @@ -3,7 +3,7 @@ "apiVersion": "v1beta3", "metadata": { 
"name": "e2e-test-wojtekt-minion-etd6", - "selfLink": "/api/v1beta1/nodes/e2e-test-wojtekt-minion-etd6", + "selfLink": "/api/v1beta3/nodes/e2e-test-wojtekt-minion-etd6", "uid": "a7e89222-e8e5-11e4-8fde-42010af09327", "resourceVersion": "379", "creationTimestamp": "2015-04-22T11:49:39Z" diff --git a/pkg/api/pod_example.json b/pkg/api/pod_example.json index 0bfa6adab9e..4acd9c5da14 100644 --- a/pkg/api/pod_example.json +++ b/pkg/api/pod_example.json @@ -4,7 +4,7 @@ "metadata": { "name": "etcd-server-e2e-test-wojtekt-master", "namespace": "default", - "selfLink": "/api/v1beta1/pods/etcd-server-e2e-test-wojtekt-master?namespace=default", + "selfLink": "/api/v1beta3/namespaces/default/pods/etcd-server-e2e-test-wojtekt-master", "uid": "a671734a-e8e5-11e4-8fde-42010af09327", "resourceVersion": "22", "creationTimestamp": "2015-04-22T11:49:36Z", diff --git a/pkg/api/replication_controller_example.json b/pkg/api/replication_controller_example.json index 8d2b3594ba9..9ddc8f21e3b 100644 --- a/pkg/api/replication_controller_example.json +++ b/pkg/api/replication_controller_example.json @@ -4,7 +4,7 @@ "metadata": { "name": "elasticsearch-logging-controller", "namespace": "default", - "selfLink": "/api/v1beta1/replicationControllers/elasticsearch-logging-controller?namespace=default", + "selfLink": "/api/v1beta3/namespaces/default/replicationcontrollers/elasticsearch-logging-controller", "uid": "aa76f162-e8e5-11e4-8fde-42010af09327", "resourceVersion": "98", "creationTimestamp": "2015-04-22T11:49:43Z", diff --git a/pkg/api/rest/update_test.go b/pkg/api/rest/update_test.go index 049a2a250a4..6c85eb87f80 100644 --- a/pkg/api/rest/update_test.go +++ b/pkg/api/rest/update_test.go @@ -77,10 +77,10 @@ func TestBeforeUpdate(t *testing.T) { expectErr: true, }, { - name: "change portal IP", + name: "change ClusterIP", tweakSvc: func(oldSvc, newSvc *api.Service) { - oldSvc.Spec.PortalIP = "1.2.3.4" - newSvc.Spec.PortalIP = "4.3.2.1" + oldSvc.Spec.ClusterIP = "1.2.3.4" + newSvc.Spec.ClusterIP = "4.3.2.1" }, expectErr: true, }, diff --git a/pkg/api/testing/fuzzer.go b/pkg/api/testing/fuzzer.go index 6e076a2d727..f60d68c4cbf 100644 --- a/pkg/api/testing/fuzzer.go +++ b/pkg/api/testing/fuzzer.go @@ -106,8 +106,8 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer { j.Target.Name = c.RandString() }, func(j *api.ReplicationControllerSpec, c fuzz.Continue) { - c.FuzzNoCustom(j) // fuzz self without calling this function again - j.TemplateRef = nil // this is required for round trip + c.FuzzNoCustom(j) // fuzz self without calling this function again + //j.TemplateRef = nil // this is required for round trip }, func(j *api.ReplicationControllerStatus, c fuzz.Continue) { // only replicas round trips diff --git a/pkg/api/types.go b/pkg/api/types.go index 5504ba2f213..a016ff25a94 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -740,9 +740,9 @@ type ContainerStateTerminated struct { // Only one of its members may be specified. // If none of them is specified, the default one is ContainerStateWaiting. 
type ContainerState struct { - Waiting *ContainerStateWaiting `json:"waiting,omitempty"` - Running *ContainerStateRunning `json:"running,omitempty"` - Termination *ContainerStateTerminated `json:"termination,omitempty"` + Waiting *ContainerStateWaiting `json:"waiting,omitempty"` + Running *ContainerStateRunning `json:"running,omitempty"` + Terminated *ContainerStateTerminated `json:"terminated,omitempty"` } type ContainerStatus struct { @@ -868,10 +868,10 @@ type PodSpec struct { // The pod will be allowed to use secrets referenced by the ServiceAccount ServiceAccount string `json:"serviceAccount"` - // Host is a request to schedule this pod onto a specific host. If it is non-empty, - // the the scheduler simply schedules this pod onto that host, assuming that it fits - // resource requirements. - Host string `json:"host,omitempty"` + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + NodeName string `json:"nodeName,omitempty"` // Uses the host's network namespace. If this option is set, the ports that will be // used must be specified. // Optional: Default to false. @@ -966,7 +966,7 @@ type ReplicationControllerSpec struct { // TemplateRef is a reference to an object that describes the pod that will be created if // insufficient replicas are detected. This reference is ignored if a Template is set. // Must be set before converting to a v1beta3 API object - TemplateRef *ObjectReference `json:"templateRef,omitempty"` + //TemplateRef *ObjectReference `json:"templateRef,omitempty"` // Template is the object that describes the pod that will be created if // insufficient replicas are detected. Internally, this takes precedence over a @@ -1004,9 +1004,9 @@ type ReplicationControllerList struct { } const ( - // PortalIPNone - do not assign a portal IP + // ClusterIPNone - do not assign a cluster IP // no proxying required and no environment variables should be created for pods - PortalIPNone = "None" + ClusterIPNone = "None" ) // ServiceList holds a list of services. @@ -1033,7 +1033,7 @@ type ServiceType string const ( // ServiceTypeClusterIP means a service will only be accessible inside the - // cluster, via the portal IP. + // cluster, via the ClusterIP. ServiceTypeClusterIP ServiceType = "ClusterIP" // ServiceTypeNodePort means a service will be exposed on one port of @@ -1082,12 +1082,12 @@ type ServiceSpec struct { // those endpoints. Selector map[string]string `json:"selector"` - // PortalIP is usually assigned by the master. If specified by the user + // ClusterIP is usually assigned by the master. If specified by the user // we will try to respect it or else fail the request. This field can // not be changed by updates. // Valid values are None, empty string (""), or a valid IP address // None can be specified for headless services when proxying is not required - PortalIP string `json:"portalIP,omitempty"` + ClusterIP string `json:"clusterIP,omitempty"` // Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer Type ServiceType `json:"type,omitempty"` @@ -1241,6 +1241,10 @@ type NodeSpec struct { // External ID of the node assigned by some machine database (e.g. 
a cloud provider) ExternalID string `json:"externalID,omitempty"` + // ID of the node assigned by the cloud provider + // Note: format is "://" + ProviderID string `json:"providerID,omitempty"` + // Unschedulable controls node schedulability of new pods. By default node is schedulable. Unschedulable bool `json:"unschedulable,omitempty"` } diff --git a/pkg/api/v1/conversion.go b/pkg/api/v1/conversion.go index bde80f7790e..5240dde5456 100644 --- a/pkg/api/v1/conversion.go +++ b/pkg/api/v1/conversion.go @@ -193,14 +193,14 @@ func convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *a } else { out.Selector = nil } - if in.TemplateRef != nil { - out.TemplateRef = new(ObjectReference) - if err := convert_api_ObjectReference_To_v1_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil { - return err - } - } else { - out.TemplateRef = nil - } + //if in.TemplateRef != nil { + // out.TemplateRef = new(ObjectReference) + // if err := convert_api_ObjectReference_To_v1_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil { + // return err + // } + //} else { + // out.TemplateRef = nil + //} if in.Template != nil { out.Template = new(PodTemplateSpec) if err := convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { @@ -225,14 +225,14 @@ func convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *R } else { out.Selector = nil } - if in.TemplateRef != nil { - out.TemplateRef = new(api.ObjectReference) - if err := convert_v1_ObjectReference_To_api_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil { - return err - } - } else { - out.TemplateRef = nil - } + //if in.TemplateRef != nil { + // out.TemplateRef = new(api.ObjectReference) + // if err := convert_v1_ObjectReference_To_api_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil { + // return err + // } + //} else { + // out.TemplateRef = nil + //} if in.Template != nil { out.Template = new(api.PodTemplateSpec) if err := convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil { diff --git a/pkg/api/v1/conversion_generated.go b/pkg/api/v1/conversion_generated.go index da1de85a2c0..2ada8393a58 100644 --- a/pkg/api/v1/conversion_generated.go +++ b/pkg/api/v1/conversion_generated.go @@ -257,13 +257,13 @@ func convert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out } else { out.Running = nil } - if in.Termination != nil { - out.Termination = new(ContainerStateTerminated) - if err := convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in.Termination, out.Termination, s); err != nil { + if in.Terminated != nil { + out.Terminated = new(ContainerStateTerminated) + if err := convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in.Terminated, out.Terminated, s); err != nil { return err } } else { - out.Termination = nil + out.Terminated = nil } return nil } @@ -995,6 +995,7 @@ func convert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *NodeSpec, s conv } out.PodCIDR = in.PodCIDR out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID out.Unschedulable = in.Unschedulable return nil } @@ -1513,7 +1514,7 @@ func convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversi out.NodeSelector = nil } out.ServiceAccount = in.ServiceAccount - out.Host = in.Host + out.NodeName = in.NodeName out.HostNetwork = in.HostNetwork if in.ImagePullSecrets != nil { out.ImagePullSecrets = make([]LocalObjectReference, len(in.ImagePullSecrets)) @@ -2115,7 
+2116,7 @@ func convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *Service } else { out.Selector = nil } - out.PortalIP = in.PortalIP + out.ClusterIP = in.ClusterIP out.Type = ServiceType(in.Type) if in.DeprecatedPublicIPs != nil { out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs)) @@ -2531,13 +2532,13 @@ func convert_v1_ContainerState_To_api_ContainerState(in *ContainerState, out *ap } else { out.Running = nil } - if in.Termination != nil { - out.Termination = new(api.ContainerStateTerminated) - if err := convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in.Termination, out.Termination, s); err != nil { + if in.Terminated != nil { + out.Terminated = new(api.ContainerStateTerminated) + if err := convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in.Terminated, out.Terminated, s); err != nil { return err } } else { - out.Termination = nil + out.Terminated = nil } return nil } @@ -3269,6 +3270,7 @@ func convert_v1_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s conv } out.PodCIDR = in.PodCIDR out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID out.Unschedulable = in.Unschedulable return nil } @@ -3787,7 +3789,7 @@ func convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversi out.NodeSelector = nil } out.ServiceAccount = in.ServiceAccount - out.Host = in.Host + out.NodeName = in.NodeName out.HostNetwork = in.HostNetwork if in.ImagePullSecrets != nil { out.ImagePullSecrets = make([]api.LocalObjectReference, len(in.ImagePullSecrets)) @@ -4389,7 +4391,7 @@ func convert_v1_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.Service } else { out.Selector = nil } - out.PortalIP = in.PortalIP + out.ClusterIP = in.ClusterIP out.Type = api.ServiceType(in.Type) if in.DeprecatedPublicIPs != nil { out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs)) diff --git a/pkg/api/v1/deep_copy_generated.go b/pkg/api/v1/deep_copy_generated.go index c365033e4ba..d724895bf8c 100644 --- a/pkg/api/v1/deep_copy_generated.go +++ b/pkg/api/v1/deep_copy_generated.go @@ -246,13 +246,13 @@ func deepCopy_v1_ContainerState(in ContainerState, out *ContainerState, c *conve } else { out.Running = nil } - if in.Termination != nil { - out.Termination = new(ContainerStateTerminated) - if err := deepCopy_v1_ContainerStateTerminated(*in.Termination, out.Termination, c); err != nil { + if in.Terminated != nil { + out.Terminated = new(ContainerStateTerminated) + if err := deepCopy_v1_ContainerStateTerminated(*in.Terminated, out.Terminated, c); err != nil { return err } } else { - out.Termination = nil + out.Terminated = nil } return nil } @@ -852,6 +852,7 @@ func deepCopy_v1_NodeList(in NodeList, out *NodeList, c *conversion.Cloner) erro func deepCopy_v1_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) error { out.PodCIDR = in.PodCIDR out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID out.Unschedulable = in.Unschedulable return nil } @@ -1305,7 +1306,7 @@ func deepCopy_v1_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error { out.NodeSelector = nil } out.ServiceAccount = in.ServiceAccount - out.Host = in.Host + out.NodeName = in.NodeName out.HostNetwork = in.HostNetwork if in.ImagePullSecrets != nil { out.ImagePullSecrets = make([]LocalObjectReference, len(in.ImagePullSecrets)) @@ -1517,14 +1518,6 @@ func deepCopy_v1_ReplicationControllerSpec(in ReplicationControllerSpec, out *Re } else { out.Selector = nil } - if in.TemplateRef != nil { - out.TemplateRef = 
new(ObjectReference) - if err := deepCopy_v1_ObjectReference(*in.TemplateRef, out.TemplateRef, c); err != nil { - return err - } - } else { - out.TemplateRef = nil - } if in.Template != nil { out.Template = new(PodTemplateSpec) if err := deepCopy_v1_PodTemplateSpec(*in.Template, out.Template, c); err != nil { @@ -1863,7 +1856,7 @@ func deepCopy_v1_ServiceSpec(in ServiceSpec, out *ServiceSpec, c *conversion.Clo } else { out.Selector = nil } - out.PortalIP = in.PortalIP + out.ClusterIP = in.ClusterIP out.Type = in.Type if in.DeprecatedPublicIPs != nil { out.DeprecatedPublicIPs = make([]string, len(in.DeprecatedPublicIPs)) diff --git a/pkg/api/v1/defaults_test.go b/pkg/api/v1/defaults_test.go index c13c2f7fc05..a359ee0de10 100644 --- a/pkg/api/v1/defaults_test.go +++ b/pkg/api/v1/defaults_test.go @@ -397,6 +397,9 @@ func TestSetDefaultNodeExternalID(t *testing.T) { if n2.Spec.ExternalID != name { t.Errorf("Expected default External ID: %s, got: %s", name, n2.Spec.ExternalID) } + if n2.Spec.ProviderID != "" { + t.Errorf("Expected empty default Cloud Provider ID, got: %s", n2.Spec.ProviderID) + } } func TestSetDefaultObjectFieldSelectorAPIVersion(t *testing.T) { diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index 18855dd86e5..40e56941c7a 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -750,9 +750,9 @@ type ContainerStateTerminated struct { // Only one of its members may be specified. // If none of them is specified, the default one is ContainerStateWaiting. type ContainerState struct { - Waiting *ContainerStateWaiting `json:"waiting,omitempty" description:"details about a waiting container"` - Running *ContainerStateRunning `json:"running,omitempty" description:"details about a running container"` - Termination *ContainerStateTerminated `json:"termination,omitempty" description:"details about a terminated container"` + Waiting *ContainerStateWaiting `json:"waiting,omitempty" description:"details about a waiting container"` + Running *ContainerStateRunning `json:"running,omitempty" description:"details about a running container"` + Terminated *ContainerStateTerminated `json:"terminated,omitempty" description:"details about a terminated container"` } type ContainerStatus struct { @@ -862,10 +862,10 @@ type PodSpec struct { // ServiceAccount is the name of the ServiceAccount to use to run this pod ServiceAccount string `json:"serviceAccount,omitempty" description:"name of the ServiceAccount to use to run this pod"` - // Host is a request to schedule this pod onto a specific host. If it is non-empty, - // the the scheduler simply schedules this pod onto that host, assuming that it fits - // resource requirements. - Host string `json:"host,omitempty" description:"host requested for this pod"` + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + NodeName string `json:"nodeName,omitempty" description:"node requested for this pod"` // Uses the host's network namespace. If this option is set, the ports that will be // used must be specified. // Optional: Default to false. @@ -962,7 +962,7 @@ type ReplicationControllerSpec struct { // TemplateRef is a reference to an object that describes the pod that will be created if // insufficient replicas are detected. 
- TemplateRef *ObjectReference `json:"templateRef,omitempty" description:"reference to an object that describes the pod that will be created if insufficient replicas are detected"` + //TemplateRef *ObjectReference `json:"templateRef,omitempty" description:"reference to an object that describes the pod that will be created if insufficient replicas are detected"` // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a @@ -1015,7 +1015,7 @@ type ServiceType string const ( // ServiceTypeClusterIP means a service will only be accessible inside the - // cluster, via the portal IP. + // cluster, via the cluster IP. ServiceTypeClusterIP ServiceType = "ClusterIP" // ServiceTypeNodePort means a service will be exposed on one port of @@ -1062,12 +1062,12 @@ type ServiceSpec struct { // This service will route traffic to pods having labels matching this selector. If null, no endpoints will be automatically created. If empty, all pods will be selected. Selector map[string]string `json:"selector,omitempty" description:"label keys and values that must match in order to receive traffic for this service; if empty, all pods are selected, if not specified, endpoints must be manually specified"` - // PortalIP is usually assigned by the master. If specified by the user + // ClusterIP is usually assigned by the master. If specified by the user // we will try to respect it or else fail the request. This field can // not be changed by updates. // Valid values are None, empty string (""), or a valid IP address // None can be specified for headless services when proxying is not required - PortalIP string `json:"portalIP,omitempty description: IP address of the service; usually assigned by the system; if specified, it will be allocated to the service if unused, and creation of the service will fail otherwise; cannot be updated; 'None' can be specified for a headless service when proxying is not required"` + ClusterIP string `json:"clusterIP,omitempty" description:"IP address of the service; usually assigned by the system; if specified, it will be allocated to the service if unused, and creation of the service will fail otherwise; cannot be updated; 'None' can be specified for a headless service when proxying is not required"` // Type determines how the service will be exposed. Valid options: ClusterIP, NodePort, LoadBalancer Type ServiceType `json:"type,omitempty" description:"type of this service; must be ClusterIP, NodePort, or LoadBalancer; defaults to ClusterIP"` @@ -1120,9 +1120,9 @@ type Service struct { } const ( - // PortalIPNone - do not assign a portal IP + // ClusterIPNone - do not assign a cluster IP // no proxying required and no environment variables should be created for pods - PortalIPNone = "None" + ClusterIPNone = "None" ) // ServiceList holds a list of services. @@ -1229,7 +1229,9 @@ type NodeSpec struct { // PodCIDR represents the pod IP range assigned to the node PodCIDR string `json:"podCIDR,omitempty" description:"pod IP range assigned to the node"` // External ID of the node assigned by some machine database (e.g. a cloud provider) - ExternalID string `json:"externalID,omitempty" description:"external ID assigned to the node by some machine database (e.g. a cloud provider). Defaults to node name when empty."` + ExternalID string `json:"externalID,omitempty" description:"deprecated. External ID assigned to the node by some machine database (e.g. a cloud provider).
Defaults to node name when empty."` + // ID of the node assigned by the cloud provider + ProviderID string `json:"providerID,omitempty" description:"ID of the node assigned by the cloud provider in the format: ://"` // Unschedulable controls node schedulability of new pods. By default node is schedulable. Unschedulable bool `json:"unschedulable,omitempty" description:"disable pod scheduling on the node"` } diff --git a/pkg/api/v1beta1/conversion.go b/pkg/api/v1beta1/conversion.go index c1186c42b51..f8b38c8cf52 100644 --- a/pkg/api/v1beta1/conversion.go +++ b/pkg/api/v1beta1/conversion.go @@ -226,14 +226,14 @@ func addConversionFuncs() { if err := s.Convert(&in, &out.Manifest, 0); err != nil { return err } - out.Host = in.Host + out.Host = in.NodeName return nil }, func(in *PodState, out *api.PodSpec, s conversion.Scope) error { if err := s.Convert(&in.Manifest, &out, 0); err != nil { return err } - out.Host = in.Host + out.NodeName = in.Host return nil }, @@ -375,8 +375,8 @@ func addConversionFuncs() { if err := s.Convert(&in.Spec, &out.DesiredState.Manifest, 0); err != nil { return err } - out.DesiredState.Host = in.Spec.Host - out.CurrentState.Host = in.Spec.Host + out.DesiredState.Host = in.Spec.NodeName + out.CurrentState.Host = in.Spec.NodeName out.ServiceAccount = in.Spec.ServiceAccount if err := s.Convert(&in.Status, &out.CurrentState, 0); err != nil { return err @@ -399,7 +399,7 @@ func addConversionFuncs() { if err := s.Convert(&in.DesiredState.Manifest, &out.Spec, 0); err != nil { return err } - out.Spec.Host = in.DesiredState.Host + out.Spec.NodeName = in.DesiredState.Host out.Spec.ServiceAccount = in.ServiceAccount if err := s.Convert(&in.CurrentState, &out.Status, 0); err != nil { return err @@ -474,13 +474,13 @@ func addConversionFuncs() { if err := s.Convert(&in.Selector, &out.ReplicaSelector, 0); err != nil { return err } - if in.TemplateRef != nil && in.Template == nil { - return &api.ConversionError{ - In: in, - Out: out, - Message: "objects with a template ref cannot be converted to older objects, must populate template", - } - } + //if in.TemplateRef != nil && in.Template == nil { + // return &api.ConversionError{ + // In: in, + // Out: out, + // Message: "objects with a template ref cannot be converted to older objects, must populate template", + // } + //} if in.Template != nil { if err := s.Convert(in.Template, &out.PodTemplate, 0); err != nil { return err @@ -504,7 +504,7 @@ func addConversionFuncs() { if err := s.Convert(&in.Spec, &out.DesiredState.Manifest, 0); err != nil { return err } - out.DesiredState.Host = in.Spec.Host + out.DesiredState.Host = in.Spec.NodeName out.ServiceAccount = in.Spec.ServiceAccount if err := s.Convert(&in.Spec.NodeSelector, &out.NodeSelector, 0); err != nil { return err @@ -521,7 +521,7 @@ func addConversionFuncs() { if err := s.Convert(&in.DesiredState.Manifest, &out.Spec, 0); err != nil { return err } - out.Spec.Host = in.DesiredState.Host + out.Spec.NodeName = in.DesiredState.Host out.Spec.ServiceAccount = in.ServiceAccount if err := s.Convert(&in.NodeSelector, &out.Spec.NodeSelector, 0); err != nil { return err @@ -782,7 +782,7 @@ func addConversionFuncs() { return err } out.PublicIPs = in.Spec.DeprecatedPublicIPs - out.PortalIP = in.Spec.PortalIP + out.PortalIP = in.Spec.ClusterIP if err := s.Convert(&in.Spec.SessionAffinity, &out.SessionAffinity, 0); err != nil { return err } @@ -834,7 +834,7 @@ func addConversionFuncs() { return err } out.Spec.DeprecatedPublicIPs = in.PublicIPs - out.Spec.PortalIP = in.PortalIP + 
out.Spec.ClusterIP = in.PortalIP if err := s.Convert(&in.SessionAffinity, &out.Spec.SessionAffinity, 0); err != nil { return err } @@ -888,6 +888,7 @@ func addConversionFuncs() { } out.PodCIDR = in.Spec.PodCIDR out.ExternalID = in.Spec.ExternalID + out.ProviderID = in.Spec.ProviderID out.Unschedulable = in.Spec.Unschedulable return s.Convert(&in.Status.Capacity, &out.NodeResources.Capacity, 0) }, @@ -920,6 +921,7 @@ func addConversionFuncs() { } out.Spec.PodCIDR = in.PodCIDR out.Spec.ExternalID = in.ExternalID + out.Spec.ProviderID = in.ProviderID out.Spec.Unschedulable = in.Unschedulable return s.Convert(&in.NodeResources.Capacity, &out.Status.Capacity, 0) }, @@ -1630,6 +1632,30 @@ func addConversionFuncs() { out.SecretName = in.Target.ID return nil }, + func(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { + if err := s.Convert(&in.Waiting, &out.Waiting, 0); err != nil { + return err + } + if err := s.Convert(&in.Running, &out.Running, 0); err != nil { + return err + } + if err := s.Convert(&in.Terminated, &out.Termination, 0); err != nil { + return err + } + return nil + }, + func(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { + if err := s.Convert(&in.Waiting, &out.Waiting, 0); err != nil { + return err + } + if err := s.Convert(&in.Running, &out.Running, 0); err != nil { + return err + } + if err := s.Convert(&in.Termination, &out.Terminated, 0); err != nil { + return err + } + return nil + }, ) if err != nil { // If one of the conversion functions is malformed, detect it immediately. diff --git a/pkg/api/v1beta1/defaults.go b/pkg/api/v1beta1/defaults.go index e8405597a99..0b2ea9d2253 100644 --- a/pkg/api/v1beta1/defaults.go +++ b/pkg/api/v1beta1/defaults.go @@ -208,12 +208,11 @@ func defaultHostNetworkPorts(containers *[]Container) { // defaultSecurityContext performs the downward and upward merges of a pod definition func defaultSecurityContext(container *Container) { if container.SecurityContext == nil { - glog.V(4).Infof("creating security context for container %s", container.Name) + glog.V(5).Infof("creating security context for container %s", container.Name) container.SecurityContext = &SecurityContext{} } // if there are no capabilities defined on the SecurityContext then copy the container settings if container.SecurityContext.Capabilities == nil { - glog.V(4).Infof("downward merge of container.Capabilities for container %s", container.Name) container.SecurityContext.Capabilities = &container.Capabilities } else { // if there are capabilities defined on the security context and the container setting is @@ -222,17 +221,14 @@ func defaultSecurityContext(container *Container) { // there are settings in both then don't touch it, the converter will error if they don't // match if len(container.Capabilities.Add) == 0 { - glog.V(4).Infof("upward merge of container.Capabilities.Add for container %s", container.Name) container.Capabilities.Add = container.SecurityContext.Capabilities.Add } if len(container.Capabilities.Drop) == 0 { - glog.V(4).Infof("upward merge of container.Capabilities.Drop for container %s", container.Name) container.Capabilities.Drop = container.SecurityContext.Capabilities.Drop } } // if there are no privileged settings on the security context then copy the container settings if container.SecurityContext.Privileged == nil { - glog.V(4).Infof("downward merge of container.Privileged for container %s", container.Name) container.SecurityContext.Privileged = &container.Privileged } else { // we don't have a good way to 
know if container.Privileged was set or just defaulted to false @@ -240,7 +236,6 @@ func defaultSecurityContext(container *Container) { // container is set to false and assume that the Privileged field was left off the container // definition and not an intentional mismatch if *container.SecurityContext.Privileged && !container.Privileged { - glog.V(4).Infof("upward merge of container.Privileged for container %s", container.Name) container.Privileged = *container.SecurityContext.Privileged } } diff --git a/pkg/api/v1beta1/defaults_test.go b/pkg/api/v1beta1/defaults_test.go index d254396b634..e1e62d5dc1f 100644 --- a/pkg/api/v1beta1/defaults_test.go +++ b/pkg/api/v1beta1/defaults_test.go @@ -326,6 +326,9 @@ func TestSetDefaultMinionExternalID(t *testing.T) { if m2.ExternalID != name { t.Errorf("Expected default External ID: %s, got: %s", name, m2.ExternalID) } + if m2.ProviderID != "" { + t.Errorf("Expected empty default Cloud Provider ID, got: %s", m2.ProviderID) + } } func TestSetDefaultObjectFieldSelectorAPIVersion(t *testing.T) { diff --git a/pkg/api/v1beta1/types.go b/pkg/api/v1beta1/types.go index eb456722443..a4b5bd6ba18 100644 --- a/pkg/api/v1beta1/types.go +++ b/pkg/api/v1beta1/types.go @@ -1210,7 +1210,9 @@ type Minion struct { // Labels for the node Labels map[string]string `json:"labels,omitempty" description:"map of string keys and values that can be used to organize and categorize minions; labels of a minion assigned by the scheduler must match the scheduled pod's nodeSelector"` // External ID of the node - ExternalID string `json:"externalID,omitempty" description:"external id of the node assigned by some machine database (e.g. a cloud provider). Defaults to node name when empty."` + ExternalID string `json:"externalID,omitempty" description:"deprecated. External id of the node assigned by some machine database (e.g. a cloud provider). Defaults to node name when empty."` + // ID of the node assigned by the cloud provider + ProviderID string `json:"providerID,omitempty" description:"ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>"` } // MinionList is a list of minions. 
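For reference, the providerID values added above follow the <ProviderName>://<ProviderSpecificNodeID> convention, while externalID keeps its old default-to-node-name behavior (and ProviderID, as the new tests assert, stays empty by default). A minimal sketch of building and splitting such an ID; buildProviderID is a hypothetical helper, not part of this patch:

package main

import (
	"fmt"
	"strings"
)

// buildProviderID joins a provider name and a provider-specific node ID
// in the documented <ProviderName>://<ProviderSpecificNodeID> form.
func buildProviderID(providerName, nodeID string) string {
	return providerName + "://" + nodeID
}

func main() {
	// An AWS-style node ID: "/" + availability zone + "/" + instance ID.
	id := buildProviderID("aws", "/us-west-2a/i-0123456789abcdef0")
	fmt.Println(id) // aws:///us-west-2a/i-0123456789abcdef0

	// Splitting the ID back into provider name and node ID.
	parts := strings.SplitN(id, "://", 2)
	fmt.Println(parts[0], parts[1])
}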
diff --git a/pkg/api/v1beta2/conversion.go b/pkg/api/v1beta2/conversion.go index be9800dc416..626fd061f9b 100644 --- a/pkg/api/v1beta2/conversion.go +++ b/pkg/api/v1beta2/conversion.go @@ -178,8 +178,8 @@ func addConversionFuncs() { if err := s.Convert(&in.Spec, &out.DesiredState.Manifest, 0); err != nil { return err } - out.DesiredState.Host = in.Spec.Host - out.CurrentState.Host = in.Spec.Host + out.DesiredState.Host = in.Spec.NodeName + out.CurrentState.Host = in.Spec.NodeName out.ServiceAccount = in.Spec.ServiceAccount if err := s.Convert(&in.Status, &out.CurrentState, 0); err != nil { return err @@ -203,7 +203,7 @@ func addConversionFuncs() { return err } out.Spec.ServiceAccount = in.ServiceAccount - out.Spec.Host = in.DesiredState.Host + out.Spec.NodeName = in.DesiredState.Host if err := s.Convert(&in.CurrentState, &out.Status, 0); err != nil { return err } @@ -253,13 +253,13 @@ func addConversionFuncs() { if err := s.Convert(&in.Selector, &out.ReplicaSelector, 0); err != nil { return err } - if in.TemplateRef != nil && in.Template == nil { - return &api.ConversionError{ - In: in, - Out: out, - Message: "objects with a template ref cannot be converted to older objects, must populate template", - } - } + //if in.TemplateRef != nil && in.Template == nil { + // return &api.ConversionError{ + // In: in, + // Out: out, + // Message: "objects with a template ref cannot be converted to older objects, must populate template", + // } + //} if in.Template != nil { if err := s.Convert(in.Template, &out.PodTemplate, 0); err != nil { return err @@ -283,7 +283,7 @@ func addConversionFuncs() { if err := s.Convert(&in.Spec, &out.DesiredState.Manifest, 0); err != nil { return err } - out.DesiredState.Host = in.Spec.Host + out.DesiredState.Host = in.Spec.NodeName out.ServiceAccount = in.Spec.ServiceAccount if err := s.Convert(&in.Spec.NodeSelector, &out.NodeSelector, 0); err != nil { return err @@ -300,7 +300,7 @@ func addConversionFuncs() { if err := s.Convert(&in.DesiredState.Manifest, &out.Spec, 0); err != nil { return err } - out.Spec.Host = in.DesiredState.Host + out.Spec.NodeName = in.DesiredState.Host out.Spec.ServiceAccount = in.ServiceAccount if err := s.Convert(&in.NodeSelector, &out.Spec.NodeSelector, 0); err != nil { return err @@ -660,14 +660,14 @@ func addConversionFuncs() { if err := s.Convert(&in, &out.Manifest, 0); err != nil { return err } - out.Host = in.Host + out.Host = in.NodeName return nil }, func(in *PodState, out *api.PodSpec, s conversion.Scope) error { if err := s.Convert(&in.Manifest, &out, 0); err != nil { return err } - out.Host = in.Host + out.NodeName = in.Host return nil }, func(in *api.Service, out *Service, s conversion.Scope) error { @@ -704,7 +704,7 @@ func addConversionFuncs() { return err } out.PublicIPs = in.Spec.DeprecatedPublicIPs - out.PortalIP = in.Spec.PortalIP + out.PortalIP = in.Spec.ClusterIP if err := s.Convert(&in.Spec.SessionAffinity, &out.SessionAffinity, 0); err != nil { return err } @@ -756,7 +756,7 @@ func addConversionFuncs() { return err } out.Spec.DeprecatedPublicIPs = in.PublicIPs - out.Spec.PortalIP = in.PortalIP + out.Spec.ClusterIP = in.PortalIP if err := s.Convert(&in.SessionAffinity, &out.Spec.SessionAffinity, 0); err != nil { return err } @@ -810,6 +810,7 @@ func addConversionFuncs() { } out.PodCIDR = in.Spec.PodCIDR out.ExternalID = in.Spec.ExternalID + out.ProviderID = in.Spec.ProviderID out.Unschedulable = in.Spec.Unschedulable return s.Convert(&in.Status.Capacity, &out.NodeResources.Capacity, 0) }, @@ -842,6 +843,7 @@ func 
addConversionFuncs() { } out.Spec.PodCIDR = in.PodCIDR out.Spec.ExternalID = in.ExternalID + out.Spec.ProviderID = in.ProviderID out.Spec.Unschedulable = in.Unschedulable return s.Convert(&in.NodeResources.Capacity, &out.Status.Capacity, 0) }, @@ -1545,6 +1547,30 @@ func addConversionFuncs() { out.SecretName = in.Target.ID return nil }, + func(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { + if err := s.Convert(&in.Waiting, &out.Waiting, 0); err != nil { + return err + } + if err := s.Convert(&in.Running, &out.Running, 0); err != nil { + return err + } + if err := s.Convert(&in.Terminated, &out.Termination, 0); err != nil { + return err + } + return nil + }, + func(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { + if err := s.Convert(&in.Waiting, &out.Waiting, 0); err != nil { + return err + } + if err := s.Convert(&in.Running, &out.Running, 0); err != nil { + return err + } + if err := s.Convert(&in.Termination, &out.Terminated, 0); err != nil { + return err + } + return nil + }, ) if err != nil { // If one of the conversion functions is malformed, detect it immediately. diff --git a/pkg/api/v1beta2/defaults.go b/pkg/api/v1beta2/defaults.go index d0e7bffc661..461eb05b09e 100644 --- a/pkg/api/v1beta2/defaults.go +++ b/pkg/api/v1beta2/defaults.go @@ -209,12 +209,11 @@ func defaultHostNetworkPorts(containers *[]Container) { // defaultSecurityContext performs the downward and upward merges of a pod definition func defaultSecurityContext(container *Container) { if container.SecurityContext == nil { - glog.V(4).Infof("creating security context for container %s", container.Name) + glog.V(5).Infof("creating security context for container %s", container.Name) container.SecurityContext = &SecurityContext{} } // if there are no capabilities defined on the SecurityContext then copy the container settings if container.SecurityContext.Capabilities == nil { - glog.V(4).Infof("downward merge of container.Capabilities for container %s", container.Name) container.SecurityContext.Capabilities = &container.Capabilities } else { // if there are capabilities defined on the security context and the container setting is @@ -223,17 +222,14 @@ func defaultSecurityContext(container *Container) { // there are settings in both then don't touch it, the converter will error if they don't // match if len(container.Capabilities.Add) == 0 { - glog.V(4).Infof("upward merge of container.Capabilities.Add for container %s", container.Name) container.Capabilities.Add = container.SecurityContext.Capabilities.Add } if len(container.Capabilities.Drop) == 0 { - glog.V(4).Infof("upward merge of container.Capabilities.Drop for container %s", container.Name) container.Capabilities.Drop = container.SecurityContext.Capabilities.Drop } } // if there are no privileged settings on the security context then copy the container settings if container.SecurityContext.Privileged == nil { - glog.V(4).Infof("downward merge of container.Privileged for container %s", container.Name) container.SecurityContext.Privileged = &container.Privileged } else { // we don't have a good way to know if container.Privileged was set or just defaulted to false @@ -241,7 +237,6 @@ func defaultSecurityContext(container *Container) { // container is set to false and assume that the Privileged field was left off the container // definition and not an intentional mismatch if *container.SecurityContext.Privileged && !container.Privileged { - glog.V(4).Infof("upward merge of container.Privileged for container %s", 
container.Name) container.Privileged = *container.SecurityContext.Privileged } } diff --git a/pkg/api/v1beta2/defaults_test.go b/pkg/api/v1beta2/defaults_test.go index 0a9224b5f8f..aaf70963e54 100644 --- a/pkg/api/v1beta2/defaults_test.go +++ b/pkg/api/v1beta2/defaults_test.go @@ -325,6 +325,9 @@ func TestSetDefaultMinionExternalID(t *testing.T) { if m2.ExternalID != name { t.Errorf("Expected default External ID: %s, got: %s", name, m2.ExternalID) } + if m2.ProviderID != "" { + t.Errorf("Expected empty default Cloud Provider ID, got: %s", m2.ProviderID) + } } func TestSetDefaultObjectFieldSelectorAPIVersion(t *testing.T) { diff --git a/pkg/api/v1beta2/types.go b/pkg/api/v1beta2/types.go index ae654016fdc..1bef33e39ab 100644 --- a/pkg/api/v1beta2/types.go +++ b/pkg/api/v1beta2/types.go @@ -1226,7 +1226,9 @@ type Minion struct { // Labels for the node Labels map[string]string `json:"labels,omitempty" description:"map of string keys and values that can be used to organize and categorize minions; labels of a minion assigned by the scheduler must match the scheduled pod's nodeSelector"` // External ID of the node - ExternalID string `json:"externalID,omitempty" description:"external id of the node assigned by some machine database (e.g. a cloud provider). Defaults to node name when empty."` + ExternalID string `json:"externalID,omitempty" description:"deprecated. External id of the node assigned by some machine database (e.g. a cloud provider). Defaults to node name when empty."` + // ID of the node assigned by the cloud provider + ProviderID string `json:"providerID,omitempty" description:"ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>"` } // MinionList is a list of minions. diff --git a/pkg/api/v1beta3/conversion.go b/pkg/api/v1beta3/conversion.go index a4de8508a28..490d5434690 100644 --- a/pkg/api/v1beta3/conversion.go +++ b/pkg/api/v1beta3/conversion.go @@ -31,6 +31,12 @@ func addConversionFuncs() { convert_api_Container_To_v1beta3_Container, convert_v1beta3_ServiceSpec_To_api_ServiceSpec, convert_api_ServiceSpec_To_v1beta3_ServiceSpec, + convert_v1beta3_PodSpec_To_api_PodSpec, + convert_api_PodSpec_To_v1beta3_PodSpec, + convert_v1beta3_ContainerState_To_api_ContainerState, + convert_api_ContainerState_To_v1beta3_ContainerState, + convert_api_ContainerStateTerminated_To_v1beta3_ContainerStateTerminated, + convert_v1beta3_ContainerStateTerminated_To_api_ContainerStateTerminated, ) if err != nil { // If one of the conversion functions is malformed, detect it immediately. 
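The hand-written conversions registered above replace generated ones because the internal field names (NodeName, Terminated) no longer line up with the v1beta3 names (Host, Termination), so a mechanical field-for-field copy is no longer possible. A simplified sketch of the pattern with hypothetical stand-in types (the real functions also route nested types through conversion.Scope):

package main

import "fmt"

// Hypothetical stand-ins for the internal and versioned structs.
type internalContainerState struct{ Terminated *string }
type versionedContainerState struct{ Termination *string }

// Hand-written conversions copy field by field, renaming where the
// internal and versioned names diverge.
func internalToVersioned(in *internalContainerState, out *versionedContainerState) {
	out.Termination = in.Terminated
}

func versionedToInternal(in *versionedContainerState, out *internalContainerState) {
	out.Terminated = in.Termination
}

func main() {
	reason := "Completed"
	var v versionedContainerState
	internalToVersioned(&internalContainerState{Terminated: &reason}, &v)
	fmt.Println(*v.Termination) // Completed
}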
@@ -354,7 +360,7 @@ func convert_v1beta3_ServiceSpec_To_api_ServiceSpec(in *ServiceSpec, out *api.Se } else { out.Selector = nil } - out.PortalIP = in.PortalIP + out.ClusterIP = in.PortalIP typeIn := in.Type if typeIn == "" { @@ -402,7 +408,7 @@ func convert_api_ServiceSpec_To_v1beta3_ServiceSpec(in *api.ServiceSpec, out *Se } else { out.Selector = nil } - out.PortalIP = in.PortalIP + out.PortalIP = in.ClusterIP if err := s.Convert(&in.Type, &out.Type, 0); err != nil { return err @@ -420,3 +426,225 @@ func convert_api_ServiceSpec_To_v1beta3_ServiceSpec(in *api.ServiceSpec, out *Se out.SessionAffinity = ServiceAffinity(in.SessionAffinity) return nil } + +func convert_v1beta3_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*PodSpec))(in) + } + if in.Volumes != nil { + out.Volumes = make([]api.Volume, len(in.Volumes)) + for i := range in.Volumes { + if err := convert_v1beta3_Volume_To_api_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + if in.Containers != nil { + out.Containers = make([]api.Container, len(in.Containers)) + for i := range in.Containers { + if err := convert_v1beta3_Container_To_api_Container(&in.Containers[i], &out.Containers[i], s); err != nil { + return err + } + } + } else { + out.Containers = nil + } + out.RestartPolicy = api.RestartPolicy(in.RestartPolicy) + if in.TerminationGracePeriodSeconds != nil { + out.TerminationGracePeriodSeconds = new(int64) + *out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds + } else { + out.TerminationGracePeriodSeconds = nil + } + if in.ActiveDeadlineSeconds != nil { + out.ActiveDeadlineSeconds = new(int64) + *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds + } else { + out.ActiveDeadlineSeconds = nil + } + out.DNSPolicy = api.DNSPolicy(in.DNSPolicy) + if in.NodeSelector != nil { + out.NodeSelector = make(map[string]string) + for key, val := range in.NodeSelector { + out.NodeSelector[key] = val + } + } else { + out.NodeSelector = nil + } + out.ServiceAccount = in.ServiceAccount + out.NodeName = in.Host + out.HostNetwork = in.HostNetwork + if in.ImagePullSecrets != nil { + out.ImagePullSecrets = make([]api.LocalObjectReference, len(in.ImagePullSecrets)) + for i := range in.ImagePullSecrets { + if err := convert_v1beta3_LocalObjectReference_To_api_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil { + return err + } + } + } else { + out.ImagePullSecrets = nil + } + return nil +} + +func convert_api_PodSpec_To_v1beta3_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*api.PodSpec))(in) + } + if in.Volumes != nil { + out.Volumes = make([]Volume, len(in.Volumes)) + for i := range in.Volumes { + if err := convert_api_Volume_To_v1beta3_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + if in.Containers != nil { + out.Containers = make([]Container, len(in.Containers)) + for i := range in.Containers { + if err := convert_api_Container_To_v1beta3_Container(&in.Containers[i], &out.Containers[i], s); err != nil { + return err + } + } + } else { + out.Containers = nil + } + out.RestartPolicy = RestartPolicy(in.RestartPolicy) + if in.TerminationGracePeriodSeconds != nil { + out.TerminationGracePeriodSeconds = new(int64) + 
*out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds + } else { + out.TerminationGracePeriodSeconds = nil + } + if in.ActiveDeadlineSeconds != nil { + out.ActiveDeadlineSeconds = new(int64) + *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds + } else { + out.ActiveDeadlineSeconds = nil + } + out.DNSPolicy = DNSPolicy(in.DNSPolicy) + if in.NodeSelector != nil { + out.NodeSelector = make(map[string]string) + for key, val := range in.NodeSelector { + out.NodeSelector[key] = val + } + } else { + out.NodeSelector = nil + } + out.ServiceAccount = in.ServiceAccount + out.Host = in.NodeName + out.HostNetwork = in.HostNetwork + if in.ImagePullSecrets != nil { + out.ImagePullSecrets = make([]LocalObjectReference, len(in.ImagePullSecrets)) + for i := range in.ImagePullSecrets { + if err := convert_api_LocalObjectReference_To_v1beta3_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil { + return err + } + } + } else { + out.ImagePullSecrets = nil + } + return nil +} + +func convert_api_ContainerState_To_v1beta3_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*api.ContainerState))(in) + } + if in.Waiting != nil { + out.Waiting = new(ContainerStateWaiting) + if err := convert_api_ContainerStateWaiting_To_v1beta3_ContainerStateWaiting(in.Waiting, out.Waiting, s); err != nil { + return err + } + } else { + out.Waiting = nil + } + if in.Running != nil { + out.Running = new(ContainerStateRunning) + if err := convert_api_ContainerStateRunning_To_v1beta3_ContainerStateRunning(in.Running, out.Running, s); err != nil { + return err + } + } else { + out.Running = nil + } + if in.Terminated != nil { + out.Termination = new(ContainerStateTerminated) + if err := convert_api_ContainerStateTerminated_To_v1beta3_ContainerStateTerminated(in.Terminated, out.Termination, s); err != nil { + return err + } + } else { + out.Termination = nil + } + return nil +} + +func convert_v1beta3_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*ContainerState))(in) + } + if in.Waiting != nil { + out.Waiting = new(api.ContainerStateWaiting) + if err := convert_v1beta3_ContainerStateWaiting_To_api_ContainerStateWaiting(in.Waiting, out.Waiting, s); err != nil { + return err + } + } else { + out.Waiting = nil + } + if in.Running != nil { + out.Running = new(api.ContainerStateRunning) + if err := convert_v1beta3_ContainerStateRunning_To_api_ContainerStateRunning(in.Running, out.Running, s); err != nil { + return err + } + } else { + out.Running = nil + } + if in.Termination != nil { + out.Terminated = new(api.ContainerStateTerminated) + if err := convert_v1beta3_ContainerStateTerminated_To_api_ContainerStateTerminated(in.Termination, out.Terminated, s); err != nil { + return err + } + } else { + out.Terminated = nil + } + return nil +} + +func convert_api_ContainerStateTerminated_To_v1beta3_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*api.ContainerStateTerminated))(in) + } + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + if err := s.Convert(&in.StartedAt, 
&out.StartedAt, 0); err != nil { + return err + } + if err := s.Convert(&in.FinishedAt, &out.FinishedAt, 0); err != nil { + return err + } + out.ContainerID = in.ContainerID + return nil +} + +func convert_v1beta3_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*ContainerStateTerminated))(in) + } + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + if err := s.Convert(&in.StartedAt, &out.StartedAt, 0); err != nil { + return err + } + if err := s.Convert(&in.FinishedAt, &out.FinishedAt, 0); err != nil { + return err + } + out.ContainerID = in.ContainerID + return nil +} diff --git a/pkg/api/v1beta3/conversion_generated.go b/pkg/api/v1beta3/conversion_generated.go index 486744c3e30..4799a52ac35 100644 --- a/pkg/api/v1beta3/conversion_generated.go +++ b/pkg/api/v1beta3/conversion_generated.go @@ -144,37 +144,6 @@ func convert_api_ContainerPort_To_v1beta3_ContainerPort(in *api.ContainerPort, o return nil } -func convert_api_ContainerState_To_v1beta3_ContainerState(in *api.ContainerState, out *ContainerState, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ContainerState))(in) - } - if in.Waiting != nil { - out.Waiting = new(ContainerStateWaiting) - if err := convert_api_ContainerStateWaiting_To_v1beta3_ContainerStateWaiting(in.Waiting, out.Waiting, s); err != nil { - return err - } - } else { - out.Waiting = nil - } - if in.Running != nil { - out.Running = new(ContainerStateRunning) - if err := convert_api_ContainerStateRunning_To_v1beta3_ContainerStateRunning(in.Running, out.Running, s); err != nil { - return err - } - } else { - out.Running = nil - } - if in.Termination != nil { - out.Termination = new(ContainerStateTerminated) - if err := convert_api_ContainerStateTerminated_To_v1beta3_ContainerStateTerminated(in.Termination, out.Termination, s); err != nil { - return err - } - } else { - out.Termination = nil - } - return nil -} - func convert_api_ContainerStateRunning_To_v1beta3_ContainerStateRunning(in *api.ContainerStateRunning, out *ContainerStateRunning, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ContainerStateRunning))(in) @@ -185,24 +154,6 @@ func convert_api_ContainerStateRunning_To_v1beta3_ContainerStateRunning(in *api. 
return nil } -func convert_api_ContainerStateTerminated_To_v1beta3_ContainerStateTerminated(in *api.ContainerStateTerminated, out *ContainerStateTerminated, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.ContainerStateTerminated))(in) - } - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - if err := s.Convert(&in.StartedAt, &out.StartedAt, 0); err != nil { - return err - } - if err := s.Convert(&in.FinishedAt, &out.FinishedAt, 0); err != nil { - return err - } - out.ContainerID = in.ContainerID - return nil -} - func convert_api_ContainerStateWaiting_To_v1beta3_ContainerStateWaiting(in *api.ContainerStateWaiting, out *ContainerStateWaiting, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.ContainerStateWaiting))(in) @@ -902,6 +853,7 @@ func convert_api_NodeSpec_To_v1beta3_NodeSpec(in *api.NodeSpec, out *NodeSpec, s } out.PodCIDR = in.PodCIDR out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID out.Unschedulable = in.Unschedulable return nil } @@ -1373,68 +1325,6 @@ func convert_api_PodProxyOptions_To_v1beta3_PodProxyOptions(in *api.PodProxyOpti return nil } -func convert_api_PodSpec_To_v1beta3_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*api.PodSpec))(in) - } - if in.Volumes != nil { - out.Volumes = make([]Volume, len(in.Volumes)) - for i := range in.Volumes { - if err := convert_api_Volume_To_v1beta3_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - if in.Containers != nil { - out.Containers = make([]Container, len(in.Containers)) - for i := range in.Containers { - if err := convert_api_Container_To_v1beta3_Container(&in.Containers[i], &out.Containers[i], s); err != nil { - return err - } - } - } else { - out.Containers = nil - } - out.RestartPolicy = RestartPolicy(in.RestartPolicy) - if in.TerminationGracePeriodSeconds != nil { - out.TerminationGracePeriodSeconds = new(int64) - *out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds - } else { - out.TerminationGracePeriodSeconds = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } - out.DNSPolicy = DNSPolicy(in.DNSPolicy) - if in.NodeSelector != nil { - out.NodeSelector = make(map[string]string) - for key, val := range in.NodeSelector { - out.NodeSelector[key] = val - } - } else { - out.NodeSelector = nil - } - out.ServiceAccount = in.ServiceAccount - out.Host = in.Host - out.HostNetwork = in.HostNetwork - if in.ImagePullSecrets != nil { - out.ImagePullSecrets = make([]LocalObjectReference, len(in.ImagePullSecrets)) - for i := range in.ImagePullSecrets { - if err := convert_api_LocalObjectReference_To_v1beta3_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - return nil -} - func convert_api_PodStatus_To_v1beta3_PodStatus(in *api.PodStatus, out *PodStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PodStatus))(in) @@ -1654,14 +1544,6 @@ func 
convert_api_ReplicationControllerSpec_To_v1beta3_ReplicationControllerSpec( } else { out.Selector = nil } - if in.TemplateRef != nil { - out.TemplateRef = new(ObjectReference) - if err := convert_api_ObjectReference_To_v1beta3_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil { - return err - } - } else { - out.TemplateRef = nil - } if in.Template != nil { out.Template = new(PodTemplateSpec) if err := convert_api_PodTemplateSpec_To_v1beta3_PodTemplateSpec(in.Template, out.Template, s); err != nil { @@ -2351,37 +2233,6 @@ func convert_v1beta3_ContainerPort_To_api_ContainerPort(in *ContainerPort, out * return nil } -func convert_v1beta3_ContainerState_To_api_ContainerState(in *ContainerState, out *api.ContainerState, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ContainerState))(in) - } - if in.Waiting != nil { - out.Waiting = new(api.ContainerStateWaiting) - if err := convert_v1beta3_ContainerStateWaiting_To_api_ContainerStateWaiting(in.Waiting, out.Waiting, s); err != nil { - return err - } - } else { - out.Waiting = nil - } - if in.Running != nil { - out.Running = new(api.ContainerStateRunning) - if err := convert_v1beta3_ContainerStateRunning_To_api_ContainerStateRunning(in.Running, out.Running, s); err != nil { - return err - } - } else { - out.Running = nil - } - if in.Termination != nil { - out.Termination = new(api.ContainerStateTerminated) - if err := convert_v1beta3_ContainerStateTerminated_To_api_ContainerStateTerminated(in.Termination, out.Termination, s); err != nil { - return err - } - } else { - out.Termination = nil - } - return nil -} - func convert_v1beta3_ContainerStateRunning_To_api_ContainerStateRunning(in *ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ContainerStateRunning))(in) @@ -2392,24 +2243,6 @@ func convert_v1beta3_ContainerStateRunning_To_api_ContainerStateRunning(in *Cont return nil } -func convert_v1beta3_ContainerStateTerminated_To_api_ContainerStateTerminated(in *ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*ContainerStateTerminated))(in) - } - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - if err := s.Convert(&in.StartedAt, &out.StartedAt, 0); err != nil { - return err - } - if err := s.Convert(&in.FinishedAt, &out.FinishedAt, 0); err != nil { - return err - } - out.ContainerID = in.ContainerID - return nil -} - func convert_v1beta3_ContainerStateWaiting_To_api_ContainerStateWaiting(in *ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*ContainerStateWaiting))(in) @@ -3109,6 +2942,7 @@ func convert_v1beta3_NodeSpec_To_api_NodeSpec(in *NodeSpec, out *api.NodeSpec, s } out.PodCIDR = in.PodCIDR out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID out.Unschedulable = in.Unschedulable return nil } @@ -3580,68 +3414,6 @@ func convert_v1beta3_PodProxyOptions_To_api_PodProxyOptions(in *PodProxyOptions, return nil } -func convert_v1beta3_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversion.Scope) error { - if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - 
defaulting.(func(*PodSpec))(in) - } - if in.Volumes != nil { - out.Volumes = make([]api.Volume, len(in.Volumes)) - for i := range in.Volumes { - if err := convert_v1beta3_Volume_To_api_Volume(&in.Volumes[i], &out.Volumes[i], s); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - if in.Containers != nil { - out.Containers = make([]api.Container, len(in.Containers)) - for i := range in.Containers { - if err := convert_v1beta3_Container_To_api_Container(&in.Containers[i], &out.Containers[i], s); err != nil { - return err - } - } - } else { - out.Containers = nil - } - out.RestartPolicy = api.RestartPolicy(in.RestartPolicy) - if in.TerminationGracePeriodSeconds != nil { - out.TerminationGracePeriodSeconds = new(int64) - *out.TerminationGracePeriodSeconds = *in.TerminationGracePeriodSeconds - } else { - out.TerminationGracePeriodSeconds = nil - } - if in.ActiveDeadlineSeconds != nil { - out.ActiveDeadlineSeconds = new(int64) - *out.ActiveDeadlineSeconds = *in.ActiveDeadlineSeconds - } else { - out.ActiveDeadlineSeconds = nil - } - out.DNSPolicy = api.DNSPolicy(in.DNSPolicy) - if in.NodeSelector != nil { - out.NodeSelector = make(map[string]string) - for key, val := range in.NodeSelector { - out.NodeSelector[key] = val - } - } else { - out.NodeSelector = nil - } - out.ServiceAccount = in.ServiceAccount - out.Host = in.Host - out.HostNetwork = in.HostNetwork - if in.ImagePullSecrets != nil { - out.ImagePullSecrets = make([]api.LocalObjectReference, len(in.ImagePullSecrets)) - for i := range in.ImagePullSecrets { - if err := convert_v1beta3_LocalObjectReference_To_api_LocalObjectReference(&in.ImagePullSecrets[i], &out.ImagePullSecrets[i], s); err != nil { - return err - } - } - } else { - out.ImagePullSecrets = nil - } - return nil -} - func convert_v1beta3_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PodStatus))(in) @@ -3861,14 +3633,6 @@ func convert_v1beta3_ReplicationControllerSpec_To_api_ReplicationControllerSpec( } else { out.Selector = nil } - if in.TemplateRef != nil { - out.TemplateRef = new(api.ObjectReference) - if err := convert_v1beta3_ObjectReference_To_api_ObjectReference(in.TemplateRef, out.TemplateRef, s); err != nil { - return err - } - } else { - out.TemplateRef = nil - } if in.Template != nil { out.Template = new(api.PodTemplateSpec) if err := convert_v1beta3_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil { @@ -4449,9 +4213,7 @@ func init() { convert_api_ComponentStatus_To_v1beta3_ComponentStatus, convert_api_ContainerPort_To_v1beta3_ContainerPort, convert_api_ContainerStateRunning_To_v1beta3_ContainerStateRunning, - convert_api_ContainerStateTerminated_To_v1beta3_ContainerStateTerminated, convert_api_ContainerStateWaiting_To_v1beta3_ContainerStateWaiting, - convert_api_ContainerState_To_v1beta3_ContainerState, convert_api_ContainerStatus_To_v1beta3_ContainerStatus, convert_api_DeleteOptions_To_v1beta3_DeleteOptions, convert_api_EmptyDirVolumeSource_To_v1beta3_EmptyDirVolumeSource, @@ -4514,7 +4276,6 @@ func init() { convert_api_PodList_To_v1beta3_PodList, convert_api_PodLogOptions_To_v1beta3_PodLogOptions, convert_api_PodProxyOptions_To_v1beta3_PodProxyOptions, - convert_api_PodSpec_To_v1beta3_PodSpec, convert_api_PodStatusResult_To_v1beta3_PodStatusResult, convert_api_PodStatus_To_v1beta3_PodStatus, convert_api_PodTemplateList_To_v1beta3_PodTemplateList, @@ -4561,9 +4322,7 @@ func 
init() { convert_v1beta3_ComponentStatus_To_api_ComponentStatus, convert_v1beta3_ContainerPort_To_api_ContainerPort, convert_v1beta3_ContainerStateRunning_To_api_ContainerStateRunning, - convert_v1beta3_ContainerStateTerminated_To_api_ContainerStateTerminated, convert_v1beta3_ContainerStateWaiting_To_api_ContainerStateWaiting, - convert_v1beta3_ContainerState_To_api_ContainerState, convert_v1beta3_ContainerStatus_To_api_ContainerStatus, convert_v1beta3_DeleteOptions_To_api_DeleteOptions, convert_v1beta3_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, @@ -4626,7 +4385,6 @@ func init() { convert_v1beta3_PodList_To_api_PodList, convert_v1beta3_PodLogOptions_To_api_PodLogOptions, convert_v1beta3_PodProxyOptions_To_api_PodProxyOptions, - convert_v1beta3_PodSpec_To_api_PodSpec, convert_v1beta3_PodStatusResult_To_api_PodStatusResult, convert_v1beta3_PodStatus_To_api_PodStatus, convert_v1beta3_PodTemplateList_To_api_PodTemplateList, diff --git a/pkg/api/v1beta3/deep_copy_generated.go b/pkg/api/v1beta3/deep_copy_generated.go index 15ed66e26b0..19f5f118c38 100644 --- a/pkg/api/v1beta3/deep_copy_generated.go +++ b/pkg/api/v1beta3/deep_copy_generated.go @@ -856,6 +856,7 @@ func deepCopy_v1beta3_NodeList(in NodeList, out *NodeList, c *conversion.Cloner) func deepCopy_v1beta3_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) error { out.PodCIDR = in.PodCIDR out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID out.Unschedulable = in.Unschedulable return nil } @@ -1516,14 +1517,6 @@ func deepCopy_v1beta3_ReplicationControllerSpec(in ReplicationControllerSpec, ou } else { out.Selector = nil } - if in.TemplateRef != nil { - out.TemplateRef = new(ObjectReference) - if err := deepCopy_v1beta3_ObjectReference(*in.TemplateRef, out.TemplateRef, c); err != nil { - return err - } - } else { - out.TemplateRef = nil - } if in.Template != nil { out.Template = new(PodTemplateSpec) if err := deepCopy_v1beta3_PodTemplateSpec(*in.Template, out.Template, c); err != nil { diff --git a/pkg/api/v1beta3/defaults.go b/pkg/api/v1beta3/defaults.go index f387a45f946..d542e5f3047 100644 --- a/pkg/api/v1beta3/defaults.go +++ b/pkg/api/v1beta3/defaults.go @@ -171,12 +171,11 @@ func defaultHostNetworkPorts(containers *[]Container) { // defaultSecurityContext performs the downward and upward merges of a pod definition func defaultSecurityContext(container *Container) { if container.SecurityContext == nil { - glog.V(4).Infof("creating security context for container %s", container.Name) + glog.V(5).Infof("creating security context for container %s", container.Name) container.SecurityContext = &SecurityContext{} } // if there are no capabilities defined on the SecurityContext then copy the container settings if container.SecurityContext.Capabilities == nil { - glog.V(4).Infof("downward merge of container.Capabilities for container %s", container.Name) container.SecurityContext.Capabilities = &container.Capabilities } else { // if there are capabilities defined on the security context and the container setting is @@ -185,17 +184,14 @@ func defaultSecurityContext(container *Container) { // there are settings in both then don't touch it, the converter will error if they don't // match if len(container.Capabilities.Add) == 0 { - glog.V(4).Infof("upward merge of container.Capabilities.Add for container %s", container.Name) container.Capabilities.Add = container.SecurityContext.Capabilities.Add } if len(container.Capabilities.Drop) == 0 { - glog.V(4).Infof("upward merge of container.Capabilities.Drop for container %s", 
container.Name) container.Capabilities.Drop = container.SecurityContext.Capabilities.Drop } } // if there are no privileged settings on the security context then copy the container settings if container.SecurityContext.Privileged == nil { - glog.V(4).Infof("downward merge of container.Privileged for container %s", container.Name) container.SecurityContext.Privileged = &container.Privileged } else { // we don't have a good way to know if container.Privileged was set or just defaulted to false @@ -203,7 +199,6 @@ func defaultSecurityContext(container *Container) { // container is set to false and assume that the Privileged field was left off the container // definition and not an intentional mismatch if *container.SecurityContext.Privileged && !container.Privileged { - glog.V(4).Infof("upward merge of container.Privileged for container %s", container.Name) container.Privileged = *container.SecurityContext.Privileged } } diff --git a/pkg/api/v1beta3/defaults_test.go b/pkg/api/v1beta3/defaults_test.go index 6d5a411979b..745be4c4f9f 100644 --- a/pkg/api/v1beta3/defaults_test.go +++ b/pkg/api/v1beta3/defaults_test.go @@ -334,6 +334,9 @@ func TestSetDefaultNodeExternalID(t *testing.T) { if n2.Spec.ExternalID != name { t.Errorf("Expected default External ID: %s, got: %s", name, n2.Spec.ExternalID) } + if n2.Spec.ProviderID != "" { + t.Errorf("Expected empty default Cloud Provider ID, got: %s", n2.Spec.ProviderID) + } } func TestSetDefaultObjectFieldSelectorAPIVersion(t *testing.T) { diff --git a/pkg/api/v1beta3/types.go b/pkg/api/v1beta3/types.go index c5fdb8180bd..559f5cc858f 100644 --- a/pkg/api/v1beta3/types.go +++ b/pkg/api/v1beta3/types.go @@ -966,7 +966,7 @@ type ReplicationControllerSpec struct { // TemplateRef is a reference to an object that describes the pod that will be created if // insufficient replicas are detected. - TemplateRef *ObjectReference `json:"templateRef,omitempty" description:"reference to an object that describes the pod that will be created if insufficient replicas are detected"` + //TemplateRef *ObjectReference `json:"templateRef,omitempty" description:"reference to an object that describes the pod that will be created if insufficient replicas are detected"` // Template is the object that describes the pod that will be created if // insufficient replicas are detected. This takes precedence over a @@ -1236,7 +1236,9 @@ type NodeSpec struct { // PodCIDR represents the pod IP range assigned to the node PodCIDR string `json:"podCIDR,omitempty" description:"pod IP range assigned to the node"` // External ID of the node assigned by some machine database (e.g. a cloud provider) - ExternalID string `json:"externalID,omitempty" description:"external ID assigned to the node by some machine database (e.g. a cloud provider). Defaults to node name when empty."` + ExternalID string `json:"externalID,omitempty" description:"deprecated. External ID assigned to the node by some machine database (e.g. a cloud provider). Defaults to node name when empty."` + // ID of the node assigned by the cloud provider + ProviderID string `json:"providerID,omitempty" description:"ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>"` // Unschedulable controls node schedulability of new pods. By default node is schedulable. 
Unschedulable bool `json:"unschedulable,omitempty" description:"disable pod scheduling on the node"` } diff --git a/pkg/api/validation/schema.go b/pkg/api/validation/schema.go index 0bfaa1c9c2a..005991d1896 100644 --- a/pkg/api/validation/schema.go +++ b/pkg/api/validation/schema.go @@ -85,12 +85,12 @@ func (s *SwaggerSchema) ValidateBytes(data []byte) error { func (s *SwaggerSchema) ValidateObject(obj interface{}, apiVersion, fieldName, typeName string) error { models := s.api.Models // TODO: handle required fields here too. - model, ok := models[typeName] + model, ok := models.At(typeName) if !ok { return fmt.Errorf("couldn't find type: %s", typeName) } properties := model.Properties - if len(properties) == 0 { + if len(properties.List) == 0 { // The object does not have any sub-fields. return nil } @@ -102,7 +102,7 @@ func (s *SwaggerSchema) ValidateObject(obj interface{}, apiVersion, fieldName, t fieldName = fieldName + "." } for key, value := range fields { - details, ok := properties[key] + details, ok := properties.At(key) if !ok { glog.Infof("unknown field: %s", key) // Some properties can be missing because of diff --git a/pkg/api/validation/validation.go b/pkg/api/validation/validation.go index 9e18919623e..10065db79d8 100644 --- a/pkg/api/validation/validation.go +++ b/pkg/api/validation/validation.go @@ -1007,8 +1007,8 @@ func ValidatePodStatusUpdate(newPod, oldPod *api.Pod) errs.ValidationErrorList { allErrs = append(allErrs, ValidateObjectMetaUpdate(&oldPod.ObjectMeta, &newPod.ObjectMeta).Prefix("metadata")...) // TODO: allow change when bindings are properly decoupled from pods - if newPod.Spec.Host != oldPod.Spec.Host { - allErrs = append(allErrs, errs.NewFieldInvalid("status.host", newPod.Spec.Host, "pod host cannot be changed directly")) + if newPod.Spec.NodeName != oldPod.Spec.NodeName { + allErrs = append(allErrs, errs.NewFieldInvalid("status.nodeName", newPod.Spec.NodeName, "pod nodename cannot be changed directly")) } // For status update we ignore changes to pod spec. @@ -1063,8 +1063,8 @@ func ValidateService(service *api.Service) errs.ValidationErrorList { } if api.IsServiceIPSet(service) { - if ip := net.ParseIP(service.Spec.PortalIP); ip == nil { - allErrs = append(allErrs, errs.NewFieldInvalid("spec.portalIP", service.Spec.PortalIP, "portalIP should be empty, 'None', or a valid IP address")) + if ip := net.ParseIP(service.Spec.ClusterIP); ip == nil { + allErrs = append(allErrs, errs.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, "clusterIP should be empty, 'None', or a valid IP address")) } } @@ -1157,10 +1157,8 @@ func ValidateServiceUpdate(oldService, service *api.Service) errs.ValidationErro allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, ValidateObjectMetaUpdate(&oldService.ObjectMeta, &service.ObjectMeta).Prefix("metadata")...) - // TODO: PortalIP should be a Status field, since the system can set a value != to the user's value - // once PortalIP is set, it cannot be unset. - if api.IsServiceIPSet(oldService) && service.Spec.PortalIP != oldService.Spec.PortalIP { - allErrs = append(allErrs, errs.NewFieldInvalid("spec.portalIP", service.Spec.PortalIP, "field is immutable")) + if api.IsServiceIPSet(oldService) && service.Spec.ClusterIP != oldService.Spec.ClusterIP { + allErrs = append(allErrs, errs.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, "field is immutable")) } allErrs = append(allErrs, ValidateService(service)...) 
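The clusterIP validation above accepts exactly three shapes: empty (the system allocates an IP), the literal "None" (headless service), or a parseable IP address; and once set, the value is immutable on update. A standalone sketch of the accept check, using a plain string instead of the real api.Service:

package main

import (
	"fmt"
	"net"
)

// validClusterIP mirrors the rule enforced in ValidateService: the field
// may be empty, the literal "None", or a valid IP address.
func validClusterIP(ip string) bool {
	if ip == "" || ip == "None" {
		return true
	}
	return net.ParseIP(ip) != nil
}

func main() {
	for _, ip := range []string{"", "None", "10.0.0.1", "invalid"} {
		fmt.Printf("%q -> %v\n", ip, validClusterIP(ip))
	}
}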
diff --git a/pkg/api/validation/validation_test.go b/pkg/api/validation/validation_test.go index a2922b48280..0367f7f3797 100644 --- a/pkg/api/validation/validation_test.go +++ b/pkg/api/validation/validation_test.go @@ -1049,7 +1049,7 @@ func TestValidatePodSpec(t *testing.T) { NodeSelector: map[string]string{ "key": "value", }, - Host: "foobar", + NodeName: "foobar", DNSPolicy: api.DNSClusterFirst, ActiveDeadlineSeconds: &activeDeadlineSeconds, }, @@ -1116,7 +1116,7 @@ func TestValidatePodSpec(t *testing.T) { NodeSelector: map[string]string{ "key": "value", }, - Host: "foobar", + NodeName: "foobar", DNSPolicy: api.DNSClusterFirst, ActiveDeadlineSeconds: &activeDeadlineSeconds, }, @@ -1151,7 +1151,7 @@ func TestValidatePod(t *testing.T) { NodeSelector: map[string]string{ "key": "value", }, - Host: "foobar", + NodeName: "foobar", }, }, } @@ -1581,9 +1581,9 @@ func TestValidateService(t *testing.T) { numErrs: 1, }, { - name: "invalid portal ip", + name: "invalid cluster ip", tweakSvc: func(s *api.Service) { - s.Spec.PortalIP = "invalid" + s.Spec.ClusterIP = "invalid" }, numErrs: 1, }, @@ -1676,16 +1676,16 @@ func TestValidateService(t *testing.T) { numErrs: 0, }, { - name: "valid portal ip - none ", + name: "valid cluster ip - none ", tweakSvc: func(s *api.Service) { - s.Spec.PortalIP = "None" + s.Spec.ClusterIP = "None" }, numErrs: 0, }, { - name: "valid portal ip - empty", + name: "valid cluster ip - empty", tweakSvc: func(s *api.Service) { - s.Spec.PortalIP = "" + s.Spec.ClusterIP = "" s.Spec.Ports[0].TargetPort = util.NewIntOrStringFromString("http") }, numErrs: 0, @@ -2556,18 +2556,18 @@ func TestValidateServiceUpdate(t *testing.T) { numErrs: 0, }, { - name: "change portal IP", + name: "change cluster IP", tweakSvc: func(oldSvc, newSvc *api.Service) { - oldSvc.Spec.PortalIP = "1.2.3.4" - newSvc.Spec.PortalIP = "8.6.7.5" + oldSvc.Spec.ClusterIP = "1.2.3.4" + newSvc.Spec.ClusterIP = "8.6.7.5" }, numErrs: 1, }, { - name: "remove portal IP", + name: "remove cluster IP", tweakSvc: func(oldSvc, newSvc *api.Service) { - oldSvc.Spec.PortalIP = "1.2.3.4" - newSvc.Spec.PortalIP = "" + oldSvc.Spec.ClusterIP = "1.2.3.4" + newSvc.Spec.ClusterIP = "" }, numErrs: 1, }, diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index e8f5748f1b5..0d1637010de 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -63,7 +63,7 @@ var ( // Use buckets ranging from 125 ms to 8 seconds. Buckets: prometheus.ExponentialBuckets(125000, 2.0, 7), }, - []string{"verb", "resource", "client"}, + []string{"verb", "resource"}, ) requestLatenciesSummary = prometheus.NewSummaryVec( prometheus.SummaryOpts{ @@ -84,7 +84,7 @@ func init() { // instrumenting basic request counter and latency metrics. 
func monitor(verb, resource *string, client string, httpCode *int, reqStart time.Time) { requestCounter.WithLabelValues(*verb, *resource, client, strconv.Itoa(*httpCode)).Inc() - requestLatencies.WithLabelValues(*verb, *resource, client).Observe(float64((time.Since(reqStart)) / time.Microsecond)) + requestLatencies.WithLabelValues(*verb, *resource).Observe(float64((time.Since(reqStart)) / time.Microsecond)) requestLatenciesSummary.WithLabelValues(*verb, *resource).Observe(float64((time.Since(reqStart)) / time.Microsecond)) } diff --git a/pkg/cloudprovider/aws/aws.go b/pkg/cloudprovider/aws/aws.go index e116cf6bc66..3e905e81608 100644 --- a/pkg/cloudprovider/aws/aws.go +++ b/pkg/cloudprovider/aws/aws.go @@ -41,6 +41,8 @@ import ( "github.com/golang/glog" ) +const ProviderName = "aws" + // Abstraction over EC2, to allow mocking/other implementations type EC2 interface { // Query EC2 for instances matching the filter @@ -48,9 +50,8 @@ type EC2 interface { // Attach a volume to an instance AttachVolume(volumeID, instanceId, mountDevice string) (resp *ec2.VolumeAttachment, err error) - // Detach a volume from whatever instance it is attached to - // TODO: We should specify the InstanceID and the Device, for safety - DetachVolume(volumeID, instanceId, mountDevice string) (resp *ec2.VolumeAttachment, err error) + // Detach a volume from an instance it is attached to + DetachVolume(request *ec2.DetachVolumeInput) (resp *ec2.VolumeAttachment, err error) // Lists volumes Volumes(volumeIDs []string, filter *ec2.Filter) (resp *ec2.DescribeVolumesOutput, err error) // Create an EBS volume @@ -224,13 +225,8 @@ func (s *awsSdkEC2) AttachVolume(volumeID, instanceId, device string) (resp *ec2 return s.ec2.AttachVolume(&request) } -func (s *awsSdkEC2) DetachVolume(volumeID, instanceId, device string) (resp *ec2.VolumeAttachment, err error) { - request := ec2.DetachVolumeInput{ - Device: &device, - InstanceID: &instanceId, - VolumeID: &volumeID, - } - return s.ec2.DetachVolume(&request) +func (s *awsSdkEC2) DetachVolume(request *ec2.DetachVolumeInput) (*ec2.VolumeAttachment, error) { + return s.ec2.DetachVolume(request) } func (s *awsSdkEC2) Volumes(volumeIDs []string, filter *ec2.Filter) (resp *ec2.DescribeVolumesOutput, err error) { @@ -250,7 +246,7 @@ func (s *awsSdkEC2) DeleteVolume(volumeID string) (resp *ec2.DeleteVolumeOutput, } func init() { - cloudprovider.RegisterCloudProvider("aws", func(config io.Reader) (cloudprovider.Interface, error) { + cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { metadata := &awsSdkMetadata{} return newAWSCloud(config, getAuth, metadata) }) @@ -366,6 +362,11 @@ func (aws *AWSCloud) Clusters() (cloudprovider.Clusters, bool) { return nil, false } +// ProviderName returns the cloud provider ID. +func (aws *AWSCloud) ProviderName() string { + return ProviderName +} + // TCPLoadBalancer returns an implementation of TCPLoadBalancer for Amazon Web Services. func (aws *AWSCloud) TCPLoadBalancer() (cloudprovider.TCPLoadBalancer, bool) { return nil, false @@ -420,7 +421,7 @@ func (aws *AWSCloud) NodeAddresses(name string) ([]api.NodeAddress, error) { return addresses, nil } -// ExternalID returns the cloud provider ID of the specified instance. +// ExternalID returns the cloud provider ID of the specified instance (deprecated). 
func (aws *AWSCloud) ExternalID(name string) (string, error) { inst, err := aws.getInstancesByDnsName(name) if err != nil { @@ -429,6 +430,17 @@ func (aws *AWSCloud) ExternalID(name string) (string, error) { return *inst.InstanceID, nil } +// InstanceID returns the cloud provider ID of the specified instance. +func (aws *AWSCloud) InstanceID(name string) (string, error) { + inst, err := aws.getInstancesByDnsName(name) + if err != nil { + return "", err + } + // In the future it is possible to also return an endpoint as: + // <endpoint>/<zone>/<instanceid> + return "/" + *inst.Placement.AvailabilityZone + "/" + *inst.InstanceID, nil +} + // Return the instances matching the relevant private dns name. func (aws *AWSCloud) getInstancesByDnsName(name string) (*ec2.Instance, error) { f := &ec2InstanceFilter{} @@ -963,6 +975,27 @@ func (aws *AWSCloud) getSelfAWSInstance() (*awsInstance, error) { return i, nil } +// Gets the awsInstance named instanceName, or the 'self' instance if instanceName == "" +func (aws *AWSCloud) getAwsInstance(instanceName string) (*awsInstance, error) { + var awsInstance *awsInstance + var err error + if instanceName == "" { + awsInstance, err = aws.getSelfAWSInstance() + if err != nil { + return nil, fmt.Errorf("error getting self-instance: %v", err) + } + } else { + instance, err := aws.getInstancesByDnsName(instanceName) + if err != nil { + return nil, fmt.Errorf("error finding instance: %v", err) + } + + awsInstance = newAWSInstance(aws.ec2, *instance.InstanceID) + } + + return awsInstance, nil +} + // Implements Volumes.AttachDisk func (aws *AWSCloud) AttachDisk(instanceName string, diskName string, readOnly bool) (string, error) { disk, err := newAWSDisk(aws.ec2, diskName) @@ -970,19 +1003,9 @@ func (aws *AWSCloud) AttachDisk(instanceName string, diskName string, readOnly b return "", err } - var awsInstance *awsInstance - if instanceName == "" { - awsInstance, err = aws.getSelfAWSInstance() - if err != nil { - return "", fmt.Errorf("Error getting self-instance: %v", err) - } - } else { - instance, err := aws.getInstancesByDnsName(instanceName) - if err != nil { - return "", fmt.Errorf("Error finding instance: %v", err) - } - - awsInstance = newAWSInstance(aws.ec2, *instance.InstanceID) + awsInstance, err := aws.getAwsInstance(instanceName) + if err != nil { + return "", err } if readOnly { @@ -1035,8 +1058,17 @@ func (aws *AWSCloud) DetachDisk(instanceName string, diskName string) error { return err } - // TODO: We should specify the InstanceID and the Device, for safety - response, err := aws.ec2.DetachVolume(disk.awsID, instanceName, diskName) + awsInstance, err := aws.getAwsInstance(instanceName) + if err != nil { + return err + } + + request := ec2.DetachVolumeInput{ + InstanceID: &awsInstance.awsID, + VolumeID: &disk.awsID, + } + + response, err := aws.ec2.DetachVolume(&request) if err != nil { return fmt.Errorf("error detaching EBS volume: %v", err) } diff --git a/pkg/cloudprovider/aws/aws_test.go b/pkg/cloudprovider/aws/aws_test.go index e5eeb929ee5..40e62f21edf 100644 --- a/pkg/cloudprovider/aws/aws_test.go +++ b/pkg/cloudprovider/aws/aws_test.go @@ -208,7 +208,7 @@ func (ec2 *FakeEC2) AttachVolume(volumeID, instanceId, mountDevice string) (resp panic("Not implemented") } -func (ec2 *FakeEC2) DetachVolume(volumeID, instanceId, mountDevice string) (resp *ec2.VolumeAttachment, err error) { +func (ec2 *FakeEC2) DetachVolume(request *ec2.DetachVolumeInput) (resp *ec2.VolumeAttachment, err error) { panic("Not implemented") } diff --git a/pkg/cloudprovider/cloud.go 
b/pkg/cloudprovider/cloud.go index 31054f1e3bd..f0835821703 100644 --- a/pkg/cloudprovider/cloud.go +++ b/pkg/cloudprovider/cloud.go @@ -18,6 +18,7 @@ package cloudprovider import ( "errors" + "fmt" "net" "strings" @@ -36,6 +37,8 @@ type Interface interface { Clusters() (Clusters, bool) // Routes returns a routes interface along with whether the interface is supported. Routes() (Routes, bool) + // ProviderName returns the cloud provider ID. + ProviderName() string } // Clusters is an abstract, pluggable interface for clusters of containers. @@ -59,6 +62,18 @@ func GetLoadBalancerName(service *api.Service) string { return ret } +func GetInstanceProviderID(cloud Interface, nodeName string) (string, error) { + instances, ok := cloud.Instances() + if !ok { + return "", fmt.Errorf("failed to get instances from cloud provider") + } + instanceID, err := instances.InstanceID(nodeName) + if err != nil { + return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err) + } + return cloud.ProviderName() + "://" + instanceID, nil +} + // TCPLoadBalancer is an abstract, pluggable interface for TCP load balancers. type TCPLoadBalancer interface { // TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service @@ -81,9 +96,14 @@ type TCPLoadBalancer interface { // Instances is an abstract, pluggable interface for sets of instances. type Instances interface { // NodeAddresses returns the addresses of the specified instance. + // TODO(roberthbailey): This currently is only used in such a way that it + // returns the address of the calling instance. We should do a rename to + // make this clearer. NodeAddresses(name string) ([]api.NodeAddress, error) - // ExternalID returns the cloud provider ID of the specified instance. + // ExternalID returns the cloud provider ID of the specified instance (deprecated). ExternalID(name string) (string, error) + // InstanceID returns the cloud provider ID of the specified instance. + InstanceID(name string) (string, error) // List lists instances that match 'filter' which is a regular expression which must match the entire instance name (fqdn) List(filter string) ([]string, error) // GetNodeResources gets the resources for a particular node diff --git a/pkg/cloudprovider/fake/fake.go b/pkg/cloudprovider/fake/fake.go index d9db3b6c187..15ef13c1522 100644 --- a/pkg/cloudprovider/fake/fake.go +++ b/pkg/cloudprovider/fake/fake.go @@ -26,6 +26,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider" ) +const ProviderName = "fake" + // FakeBalancer is a fake storage of balancer information type FakeBalancer struct { Name string @@ -81,6 +83,11 @@ func (f *FakeCloud) Clusters() (cloudprovider.Clusters, bool) { return f, true } +// ProviderName returns the cloud provider ID. +func (f *FakeCloud) ProviderName() string { + return ProviderName +} + // TCPLoadBalancer returns a fake implementation of TCPLoadBalancer. // Actually it just returns f itself. func (f *FakeCloud) TCPLoadBalancer() (cloudprovider.TCPLoadBalancer, bool) { @@ -152,6 +159,12 @@ func (f *FakeCloud) ExternalID(instance string) (string, error) { return f.ExtID[instance], f.Err } +// InstanceID returns the cloud provider ID of the specified instance. +func (f *FakeCloud) InstanceID(instance string) (string, error) { + f.addCall("instance-id") + return f.ExtID[instance], nil +} + // List is a test-spy implementation of Instances.List. // It adds an entry "list" into the internal method call record. 
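
GetInstanceProviderID above is the single place where the <provider>://<instance-id> form is assembled, so every consumer sees a uniform node identifier regardless of provider. A short sketch of how a caller might use it with the fake provider from the next hunk, assuming (as its other accessors suggest) that the fake cloud's Instances() returns itself; the import alias and the ExtID mapping are hypothetical:

    var cloud cloudprovider.Interface = &fake.FakeCloud{ // "fake" aliases the fake provider package
        ExtID: map[string]string{"node-1": "i-abc123"}, // hypothetical node -> instance mapping
    }
    providerID, err := cloudprovider.GetInstanceProviderID(cloud, "node-1")
    if err == nil {
        fmt.Println(providerID) // "fake://i-abc123"
    }
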
func (f *FakeCloud) List(filter string) ([]string, error) { diff --git a/pkg/cloudprovider/gce/gce.go b/pkg/cloudprovider/gce/gce.go index b7451b97bb3..38544416060 100644 --- a/pkg/cloudprovider/gce/gce.go +++ b/pkg/cloudprovider/gce/gce.go @@ -42,6 +42,10 @@ import ( "google.golang.org/cloud/compute/metadata" ) +const ProviderName = "gce" + +const EXTERNAL_IP_METADATA_URL = "http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip" + // GCECloud is an implementation of Interface, TCPLoadBalancer and Instances for Google Compute Engine. type GCECloud struct { service *compute.Service @@ -65,7 +69,7 @@ type Config struct { } func init() { - cloudprovider.RegisterCloudProvider("gce", func(config io.Reader) (cloudprovider.Interface, error) { return newGCECloud(config) }) + cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { return newGCECloud(config) }) } func getMetadata(url string) (string, error) { @@ -180,6 +184,11 @@ func (gce *GCECloud) Clusters() (cloudprovider.Clusters, bool) { return gce, true } +// ProviderName returns the cloud provider ID. +func (gce *GCECloud) ProviderName() string { + return ProviderName +} + // TCPLoadBalancer returns an implementation of TCPLoadBalancer for Google Compute Engine. func (gce *GCECloud) TCPLoadBalancer() (cloudprovider.TCPLoadBalancer, bool) { return gce, true @@ -466,10 +475,10 @@ func (gce *GCECloud) getInstanceByName(name string) (*compute.Instance, error) { } // NodeAddresses is an implementation of Instances.NodeAddresses. -func (gce *GCECloud) NodeAddresses(instance string) ([]api.NodeAddress, error) { - externalIP, err := gce.getExternalIP(instance) +func (gce *GCECloud) NodeAddresses(_ string) ([]api.NodeAddress, error) { + externalIP, err := gce.metadataAccess(EXTERNAL_IP_METADATA_URL) if err != nil { - return nil, fmt.Errorf("couldn't get external IP for instance %s: %v", instance, err) + return nil, fmt.Errorf("couldn't get external IP: %v", err) } return []api.NodeAddress{ @@ -479,19 +488,7 @@ func (gce *GCECloud) NodeAddresses(instance string) ([]api.NodeAddress, error) { }, nil } -func (gce *GCECloud) getExternalIP(instance string) (string, error) { - inst, err := gce.getInstanceByName(instance) - if err != nil { - return "", err - } - ip := net.ParseIP(inst.NetworkInterfaces[0].AccessConfigs[0].NatIP) - if ip == nil { - return "", fmt.Errorf("invalid network IP: %s", inst.NetworkInterfaces[0].AccessConfigs[0].NatIP) - } - return ip.String(), nil -} - -// ExternalID returns the cloud provider ID of the specified instance. +// ExternalID returns the cloud provider ID of the specified instance (deprecated). func (gce *GCECloud) ExternalID(instance string) (string, error) { inst, err := gce.getInstanceByName(instance) if err != nil { @@ -500,6 +497,11 @@ func (gce *GCECloud) ExternalID(instance string) (string, error) { return strconv.FormatUint(inst.Id, 10), nil } +// InstanceID returns the cloud provider ID of the specified instance. +func (gce *GCECloud) InstanceID(instance string) (string, error) { + return gce.projectID + "/" + gce.zone + "/" + canonicalizeInstanceName(instance), nil +} + // List is an implementation of Instances.List. 
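
On GCE the new InstanceID is a path rather than the bare numeric ID that the deprecated ExternalID returns, so the composed provider ID carries the project and zone. A worked example with hypothetical values:

    // gce.projectID == "my-project", gce.zone == "us-central1-a"
    // InstanceID("kubernetes-minion-1")          -> "my-project/us-central1-a/kubernetes-minion-1"
    // GetInstanceProviderID(gceCloud, nodeName)  -> "gce://my-project/us-central1-a/kubernetes-minion-1"
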
func (gce *GCECloud) List(filter string) ([]string, error) { listCall := gce.service.Instances.List(gce.projectID, gce.zone) diff --git a/pkg/cloudprovider/mesos/mesos.go b/pkg/cloudprovider/mesos/mesos.go index 97cd5407d1c..cacc3b40b17 100644 --- a/pkg/cloudprovider/mesos/mesos.go +++ b/pkg/cloudprovider/mesos/mesos.go @@ -31,7 +31,7 @@ import ( ) var ( - PluginName = "mesos" + ProviderName = "mesos" CloudProvider *MesosCloud noHostNameSpecified = errors.New("No hostname specified") @@ -39,7 +39,7 @@ var ( func init() { cloudprovider.RegisterCloudProvider( - PluginName, + ProviderName, func(configReader io.Reader) (cloudprovider.Interface, error) { provider, err := newMesosCloud(configReader) if err == nil { @@ -110,6 +110,11 @@ func (c *MesosCloud) Routes() (cloudprovider.Routes, bool) { return nil, false } +// ProviderName returns the cloud provider ID. +func (c *MesosCloud) ProviderName() string { + return ProviderName +} + // ListClusters lists the names of the available Mesos clusters. func (c *MesosCloud) ListClusters() ([]string, error) { // Always returns a single cluster (this one!) @@ -161,7 +166,7 @@ func ipAddress(name string) (net.IP, error) { return ipaddr, nil } -// ExternalID returns the cloud provider ID of the specified instance. +// ExternalID returns the cloud provider ID of the specified instance (deprecated). func (c *MesosCloud) ExternalID(instance string) (string, error) { ip, err := ipAddress(instance) if err != nil { @@ -170,6 +175,11 @@ func (c *MesosCloud) ExternalID(instance string) (string, error) { return ip.String(), nil } +// InstanceID returns the cloud provider ID of the specified instance. +func (c *MesosCloud) InstanceID(name string) (string, error) { + return "", nil +} + // List lists instances that match 'filter' which is a regular expression // which must match the entire instance name (fqdn). func (c *MesosCloud) List(filter string) ([]string, error) { diff --git a/pkg/cloudprovider/nodecontroller/nodecontroller.go b/pkg/cloudprovider/nodecontroller/nodecontroller.go index 6a0bb4f6d53..1bf7f6a015a 100644 --- a/pkg/cloudprovider/nodecontroller/nodecontroller.go +++ b/pkg/cloudprovider/nodecontroller/nodecontroller.go @@ -159,13 +159,13 @@ func (nc *NodeController) reconcileNodeCIDRs(nodes *api.NodeList) { if node.Spec.PodCIDR == "" { podCIDR, found := availableCIDRs.PopAny() if !found { - glog.Errorf("No available CIDR for node %s", node.Name) + nc.recordNodeEvent(&node, "No available CIDR") continue } glog.V(4).Infof("Assigning node %s CIDR %s", node.Name, podCIDR) node.Spec.PodCIDR = podCIDR if _, err := nc.kubeClient.Nodes().Update(&node); err != nil { - glog.Errorf("Unable to assign node %s CIDR %s: %v", node.Name, podCIDR, err) + nc.recordNodeEvent(&node, "CIDR assignment failed") } } } @@ -425,7 +425,7 @@ func (nc *NodeController) deletePods(nodeID string) error { } for _, pod := range pods.Items { // Defensive check, also needed for tests. 
- if pod.Spec.Host != nodeID { + if pod.Spec.NodeName != nodeID { continue } glog.V(2).Infof("Delete pod %v", pod.Name) diff --git a/pkg/cloudprovider/nodecontroller/nodecontroller_test.go b/pkg/cloudprovider/nodecontroller/nodecontroller_test.go index b1aea3f9bd6..d6e03d33cb1 100644 --- a/pkg/cloudprovider/nodecontroller/nodecontroller_test.go +++ b/pkg/cloudprovider/nodecontroller/nodecontroller_test.go @@ -566,7 +566,7 @@ func newNode(name string) *api.Node { } func newPod(name, host string) *api.Pod { - return &api.Pod{ObjectMeta: api.ObjectMeta{Name: name}, Spec: api.PodSpec{Host: host}} + return &api.Pod{ObjectMeta: api.ObjectMeta{Name: name}, Spec: api.PodSpec{NodeName: host}} } func sortedNodeNames(nodes []*api.Node) []string { diff --git a/pkg/cloudprovider/openstack/openstack.go b/pkg/cloudprovider/openstack/openstack.go index fb855bec152..53db708f1a1 100644 --- a/pkg/cloudprovider/openstack/openstack.go +++ b/pkg/cloudprovider/openstack/openstack.go @@ -42,6 +42,8 @@ import ( "github.com/golang/glog" ) +const ProviderName = "openstack" + var ErrNotFound = errors.New("Failed to find object") var ErrMultipleResults = errors.New("Multiple results where only one expected") var ErrNoAddressFound = errors.New("No address found for host") @@ -99,7 +101,7 @@ type Config struct { } func init() { - cloudprovider.RegisterCloudProvider("openstack", func(config io.Reader) (cloudprovider.Interface, error) { + cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { cfg, err := readConfig(config) if err != nil { return nil, err @@ -355,7 +357,7 @@ func (i *Instances) NodeAddresses(name string) ([]api.NodeAddress, error) { return addrs, nil } -// ExternalID returns the cloud provider ID of the specified instance. +// ExternalID returns the cloud provider ID of the specified instance (deprecated). func (i *Instances) ExternalID(name string) (string, error) { srv, err := getServerByName(i.compute, name) if err != nil { @@ -364,6 +366,17 @@ func (i *Instances) ExternalID(name string) (string, error) { return srv.ID, nil } +// InstanceID returns the cloud provider ID of the specified instance. +func (i *Instances) InstanceID(name string) (string, error) { + srv, err := getServerByName(i.compute, name) + if err != nil { + return "", err + } + // In the future it is possible to also return an endpoint as: + // <endpoint>/<instanceid> + return "/" + srv.ID, nil +} + func (i *Instances) GetNodeResources(name string) (*api.NodeResources, error) { glog.V(4).Infof("GetNodeResources(%v) called", name) @@ -394,6 +407,11 @@ func (os *OpenStack) Clusters() (cloudprovider.Clusters, bool) { return nil, false } +// ProviderName returns the cloud provider ID.
+func (os *OpenStack) ProviderName() string { + return ProviderName +} + type LoadBalancer struct { network *gophercloud.ServiceClient compute *gophercloud.ServiceClient diff --git a/pkg/cloudprovider/ovirt/ovirt.go b/pkg/cloudprovider/ovirt/ovirt.go index f4325a4f7d8..6c78202df1d 100644 --- a/pkg/cloudprovider/ovirt/ovirt.go +++ b/pkg/cloudprovider/ovirt/ovirt.go @@ -33,6 +33,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider" ) +const ProviderName = "ovirt" + type OVirtInstance struct { UUID string Name string @@ -75,7 +77,7 @@ type XmlVmsList struct { } func init() { - cloudprovider.RegisterCloudProvider("ovirt", + cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { return newOVirtCloud(config) }) @@ -115,6 +117,11 @@ func (aws *OVirtCloud) Clusters() (cloudprovider.Clusters, bool) { return nil, false } +// ProviderName returns the cloud provider ID. +func (v *OVirtCloud) ProviderName() string { + return ProviderName +} + // TCPLoadBalancer returns an implementation of TCPLoadBalancer for oVirt cloud func (v *OVirtCloud) TCPLoadBalancer() (cloudprovider.TCPLoadBalancer, bool) { return nil, false @@ -160,7 +167,7 @@ func (v *OVirtCloud) NodeAddresses(name string) ([]api.NodeAddress, error) { return []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: address.String()}}, nil } -// ExternalID returns the cloud provider ID of the specified instance. +// ExternalID returns the cloud provider ID of the specified instance (deprecated). func (v *OVirtCloud) ExternalID(name string) (string, error) { instance, err := v.fetchInstance(name) if err != nil { @@ -169,6 +176,17 @@ func (v *OVirtCloud) ExternalID(name string) (string, error) { return instance.UUID, nil } +// InstanceID returns the cloud provider ID of the specified instance. +func (v *OVirtCloud) InstanceID(name string) (string, error) { + instance, err := v.fetchInstance(name) + if err != nil { + return "", err + } + // TODO: define a way to identify the provider instance to complete + // the format <provider>/<instanceid>. + return "/" + instance.UUID, err +} + func getInstancesFromXml(body io.Reader) (OVirtInstanceMap, error) { if body == nil { return nil, fmt.Errorf("ovirt rest-api response body is missing") } diff --git a/pkg/cloudprovider/rackspace/rackspace.go b/pkg/cloudprovider/rackspace/rackspace.go index 72fce7bcc00..8df8230da65 100644 --- a/pkg/cloudprovider/rackspace/rackspace.go +++ b/pkg/cloudprovider/rackspace/rackspace.go @@ -38,6 +38,8 @@ import ( "github.com/golang/glog" ) +const ProviderName = "rackspace" + var ErrNotFound = errors.New("Failed to find object") var ErrMultipleResults = errors.New("Multiple results where only one expected") var ErrNoAddressFound = errors.New("No address found for host") @@ -89,7 +91,7 @@ type Config struct { } func init() { - cloudprovider.RegisterCloudProvider("rackspace", func(config io.Reader) (cloudprovider.Interface, error) { + cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { cfg, err := readConfig(config) if err != nil { return nil, err @@ -364,11 +366,16 @@ func (i *Instances) NodeAddresses(name string) ([]api.NodeAddress, error) { return []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: net.ParseIP(ip).String()}}, nil } -// ExternalID returns the cloud provider ID of the specified instance. +// ExternalID returns the cloud provider ID of the specified instance (deprecated).
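
OpenStack and oVirt both return their instance IDs with a leading slash because the provider-side prefix before it is still undefined, so the composed provider IDs end up with three slashes (UUIDs hypothetical):

    // openstack: "openstack:///b0ed9cf2-d816-4407-b019-d5f977a7cd14"
    // ovirt:     "ovirt:///8e9f3d0b-2f8c-4f9f-9a5e-1c1d2e3f4a5b"
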
func (i *Instances) ExternalID(name string) (string, error) { return "", fmt.Errorf("unimplemented") } +// InstanceID returns the cloud provider ID of the specified instance. +func (i *Instances) InstanceID(name string) (string, error) { + return "", nil +} + func (i *Instances) GetNodeResources(name string) (*api.NodeResources, error) { glog.V(2).Infof("GetNodeResources(%v) called", name) @@ -399,6 +406,11 @@ func (os *Rackspace) Clusters() (cloudprovider.Clusters, bool) { return nil, false } +// ProviderName returns the cloud provider ID. +func (os *Rackspace) ProviderName() string { + return ProviderName +} + func (os *Rackspace) TCPLoadBalancer() (cloudprovider.TCPLoadBalancer, bool) { return nil, false } diff --git a/pkg/cloudprovider/servicecontroller/servicecontroller.go b/pkg/cloudprovider/servicecontroller/servicecontroller.go index d85ec368b04..0acdd93b136 100644 --- a/pkg/cloudprovider/servicecontroller/servicecontroller.go +++ b/pkg/cloudprovider/servicecontroller/servicecontroller.go @@ -27,6 +27,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider" "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" @@ -62,22 +63,30 @@ type serviceCache struct { } type ServiceController struct { - cloud cloudprovider.Interface - kubeClient client.Interface - clusterName string - balancer cloudprovider.TCPLoadBalancer - zone cloudprovider.Zone - cache *serviceCache + cloud cloudprovider.Interface + kubeClient client.Interface + clusterName string + balancer cloudprovider.TCPLoadBalancer + zone cloudprovider.Zone + cache *serviceCache + eventBroadcaster record.EventBroadcaster + eventRecorder record.EventRecorder } // New returns a new service controller to keep cloud provider service resources // (like external load balancers) in sync with the registry. func New(cloud cloudprovider.Interface, kubeClient client.Interface, clusterName string) *ServiceController { + broadcaster := record.NewBroadcaster() + broadcaster.StartRecordingToSink(kubeClient.Events("")) + recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"}) + return &ServiceController{ - cloud: cloud, - kubeClient: kubeClient, - clusterName: clusterName, - cache: &serviceCache{serviceMap: make(map[string]*cachedService)}, + cloud: cloud, + kubeClient: kubeClient, + clusterName: clusterName, + cache: &serviceCache{serviceMap: make(map[string]*cachedService)}, + eventBroadcaster: broadcaster, + eventRecorder: recorder, } } @@ -206,6 +215,7 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) { case cache.Sync: err, retry := s.createLoadBalancerIfNeeded(namespacedName, service, cachedService.service) if err != nil { + s.eventRecorder.Event(service, "creating loadbalancer failed", err.Error()) return err, retry } // Always update the cache upon success. 
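
The broadcaster/recorder pair wired into New above is the standard pattern for publishing events to the API server. A sketch of the same wiring in isolation, reusing only calls that appear in this diff (the component name is hypothetical):

    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(kubeClient.Events(""))
    recorder := broadcaster.NewRecorder(api.EventSource{Component: "example-controller"})
    // later, on a failure path:
    recorder.Event(service, "creating loadbalancer failed", err.Error())
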
@@ -217,6 +227,7 @@ func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) { case cache.Deleted: err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region) if err != nil { + s.eventRecorder.Event(service, "deleting loadbalancer failed", err.Error()) return err, retryable } s.cache.delete(namespacedName.String()) diff --git a/pkg/cloudprovider/vagrant/vagrant.go b/pkg/cloudprovider/vagrant/vagrant.go index fcbb1f04ca2..2bd18150d17 100644 --- a/pkg/cloudprovider/vagrant/vagrant.go +++ b/pkg/cloudprovider/vagrant/vagrant.go @@ -31,6 +31,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider" ) +const ProviderName = "vagrant" + // VagrantCloud is an implementation of Interface, TCPLoadBalancer and Instances for developer managed Vagrant cluster. type VagrantCloud struct { saltURL string @@ -40,7 +42,7 @@ type VagrantCloud struct { } func init() { - cloudprovider.RegisterCloudProvider("vagrant", func(config io.Reader) (cloudprovider.Interface, error) { return newVagrantCloud() }) + cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { return newVagrantCloud() }) } // SaltToken is an authorization token required by Salt REST API. @@ -84,6 +86,11 @@ func (v *VagrantCloud) Clusters() (cloudprovider.Clusters, bool) { return nil, false } +// ProviderName returns the cloud provider ID. +func (v *VagrantCloud) ProviderName() string { + return ProviderName +} + // TCPLoadBalancer returns an implementation of TCPLoadBalancer for Vagrant cloud. func (v *VagrantCloud) TCPLoadBalancer() (cloudprovider.TCPLoadBalancer, bool) { return nil, false @@ -135,7 +142,7 @@ func (v *VagrantCloud) NodeAddresses(instance string) ([]api.NodeAddress, error) return []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: ip.String()}}, nil } -// ExternalID returns the cloud provider ID of the specified instance. +// ExternalID returns the cloud provider ID of the specified instance (deprecated). func (v *VagrantCloud) ExternalID(instance string) (string, error) { // Due to vagrant not running with a dedicated DNS setup, we return the IP address of a minion as its hostname at this time minion, err := v.getInstanceByAddress(instance) @@ -145,6 +152,15 @@ func (v *VagrantCloud) ExternalID(instance string) (string, error) { return minion.IP, nil } +// InstanceID returns the cloud provider ID of the specified instance. +func (v *VagrantCloud) InstanceID(instance string) (string, error) { + minion, err := v.getInstanceByAddress(instance) + if err != nil { + return "", err + } + return minion.IP, nil +} + // saltMinionsByRole filters a list of minions that have a matching role. 
func (v *VagrantCloud) saltMinionsByRole(minions []SaltMinion, role string) []SaltMinion { var filteredMinions []SaltMinion diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index 5d24e25dc6e..53954232dcd 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -274,7 +274,7 @@ func (s activePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s activePods) Less(i, j int) bool { // Unassigned < assigned - if s[i].Spec.Host == "" && s[j].Spec.Host != "" { + if s[i].Spec.NodeName == "" && s[j].Spec.NodeName != "" { return true } // PodPending < PodUnknown < PodRunning diff --git a/pkg/controller/replication_controller_test.go b/pkg/controller/replication_controller_test.go index c71e52fe1e3..a2e960941d0 100644 --- a/pkg/controller/replication_controller_test.go +++ b/pkg/controller/replication_controller_test.go @@ -436,19 +436,19 @@ func TestSortingActivePods(t *testing.T) { pods[i] = &podList.Items[i] } // pods[0] is not scheduled yet. - pods[0].Spec.Host = "" + pods[0].Spec.NodeName = "" pods[0].Status.Phase = api.PodPending // pods[1] is scheduled but pending. - pods[1].Spec.Host = "bar" + pods[1].Spec.NodeName = "bar" pods[1].Status.Phase = api.PodPending // pods[2] is unknown. - pods[2].Spec.Host = "foo" + pods[2].Spec.NodeName = "foo" pods[2].Status.Phase = api.PodUnknown // pods[3] is running but not ready. - pods[3].Spec.Host = "foo" + pods[3].Spec.NodeName = "foo" pods[3].Status.Phase = api.PodRunning // pods[4] is running and ready. - pods[4].Spec.Host = "foo" + pods[4].Spec.NodeName = "foo" pods[4].Status.Phase = api.PodRunning pods[4].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}} diff --git a/pkg/credentialprovider/gcp/metadata.go b/pkg/credentialprovider/gcp/metadata.go index 3960688ba0b..ad24921dd95 100644 --- a/pkg/credentialprovider/gcp/metadata.go +++ b/pkg/credentialprovider/gcp/metadata.go @@ -37,7 +37,9 @@ const ( storageScopePrefix = "https://www.googleapis.com/auth/devstorage" ) -var containerRegistryUrls = []string{"container.cloud.google.com", "gcr.io"} +// For these URLs, the parts of the host name can be globs; for example, '*.gcr.io' will match +// "foo.gcr.io" and "bar.gcr.io". +var containerRegistryUrls = []string{"container.cloud.google.com", "gcr.io", "*.gcr.io"} var metadataHeader = &http.Header{ "Metadata-Flavor": []string{"Google"}, diff --git a/pkg/credentialprovider/keyring.go b/pkg/credentialprovider/keyring.go index 334cfadb1a9..bab19a62cfa 100644 --- a/pkg/credentialprovider/keyring.go +++ b/pkg/credentialprovider/keyring.go @@ -18,7 +18,9 @@ package credentialprovider import ( "encoding/json" + "net" "net/url" + "path/filepath" "sort" "strings" @@ -118,6 +120,79 @@ func isDefaultRegistryMatch(image string) bool { return !strings.ContainsAny(parts[0], ".:") } +// url.Parse requires a scheme, but ours don't have one. Add a +// scheme to make url.Parse happy, then clear out the resulting scheme.
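
The glob matching introduced below compares each dot-separated host label with path/filepath.Match, so a * can never swallow a dot and "*.gcr.io" cannot match "a.b.gcr.io". A self-contained illustration of the per-label comparison:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        // "*" matches one whole label, so "*.gcr.io" matches "foo.gcr.io".
        ok, _ := filepath.Match("*", "foo")
        fmt.Println(ok) // true
        // "k*" does not match "quay", so "k*.io" does not match "quay.io".
        ok, _ = filepath.Match("k*", "quay")
        fmt.Println(ok) // false
    }
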
+func parseSchemelessUrl(schemelessUrl string) (*url.URL, error) { + parsed, err := url.Parse("https://" + schemelessUrl) + if err != nil { + return nil, err + } + // clear out the resulting scheme + parsed.Scheme = "" + return parsed, nil +} + +// split the host name into parts, as well as the port +func splitUrl(url *url.URL) (parts []string, port string) { + host, port, err := net.SplitHostPort(url.Host) + if err != nil { + // could not parse port + host, port = url.Host, "" + } + return strings.Split(host, "."), port +} + +// overloaded version of urlsMatch, operating on strings instead of URLs. +func urlsMatchStr(glob string, target string) (bool, error) { + globUrl, err := parseSchemelessUrl(glob) + if err != nil { + return false, err + } + targetUrl, err := parseSchemelessUrl(target) + if err != nil { + return false, err + } + return urlsMatch(globUrl, targetUrl) +} + +// check whether the given target url matches the glob url, which may have +// glob wild cards in the host name. +// +// Examples: +// globUrl=*.docker.io, targetUrl=blah.docker.io => match +// globUrl=*.docker.io, targetUrl=not.right.io => no match +// +// Note that we don't support wildcards in ports and paths yet. +func urlsMatch(globUrl *url.URL, targetUrl *url.URL) (bool, error) { + globUrlParts, globPort := splitUrl(globUrl) + targetUrlParts, targetPort := splitUrl(targetUrl) + if globPort != targetPort { + // port doesn't match + return false, nil + } + if len(globUrlParts) != len(targetUrlParts) { + // host name does not have the same number of parts + return false, nil + } + if !strings.HasPrefix(targetUrl.Path, globUrl.Path) { + // the path of the credential must be a prefix + return false, nil + } + for k, globUrlPart := range globUrlParts { + targetUrlPart := targetUrlParts[k] + matched, err := filepath.Match(globUrlPart, targetUrlPart) + if err != nil { + return false, err + } + if !matched { + // glob mismatch for some part + return false, nil + } + } + // everything matches + return true, nil +} + // Lookup implements the DockerKeyring method for fetching credentials based on image name. // Multiple credentials may be returned if there are multiple potentially valid credentials // available. This allows for rotation. @@ -125,9 +200,9 @@ func (dk *BasicDockerKeyring) Lookup(image string) ([]docker.AuthConfiguration, // range over the index as iterating over a map does not provide a predictable ordering ret := []docker.AuthConfiguration{} for _, k := range dk.index { - // NOTE: prefix is a sufficient check because while scheme is allowed, - // it is stripped as part of 'Add' - if !strings.HasPrefix(image, k) { + // both k and image are schemeless URLs because even though schemes are allowed + // in the credential configurations, we remove them in Add. 
+ if matched, _ := urlsMatchStr(k, image); !matched { continue } diff --git a/pkg/credentialprovider/keyring_test.go b/pkg/credentialprovider/keyring_test.go index 77bb78b4d69..412d283c91a 100644 --- a/pkg/credentialprovider/keyring_test.go +++ b/pkg/credentialprovider/keyring_test.go @@ -22,71 +22,246 @@ import ( "testing" ) -func TestDockerKeyringFromBytes(t *testing.T) { - url := "hello.kubernetes.io" - email := "foo@bar.baz" - username := "foo" - password := "bar" - auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) - sampleDockerConfig := fmt.Sprintf(`{ +func TestUrlsMatch(t *testing.T) { + tests := []struct { + globUrl string + targetUrl string + matchExpected bool + }{ + // match when there is no path component + { + globUrl: "*.kubernetes.io", + targetUrl: "prefix.kubernetes.io", + matchExpected: true, + }, + { + globUrl: "prefix.*.io", + targetUrl: "prefix.kubernetes.io", + matchExpected: true, + }, + { + globUrl: "prefix.kubernetes.*", + targetUrl: "prefix.kubernetes.io", + matchExpected: true, + }, + { + globUrl: "*-good.kubernetes.io", + targetUrl: "prefix-good.kubernetes.io", + matchExpected: true, + }, + // match with path components + { + globUrl: "*.kubernetes.io/blah", + targetUrl: "prefix.kubernetes.io/blah", + matchExpected: true, + }, + { + globUrl: "prefix.*.io/foo", + targetUrl: "prefix.kubernetes.io/foo/bar", + matchExpected: true, + }, + // match with path components and ports + { + globUrl: "*.kubernetes.io:1111/blah", + targetUrl: "prefix.kubernetes.io:1111/blah", + matchExpected: true, + }, + { + globUrl: "prefix.*.io:1111/foo", + targetUrl: "prefix.kubernetes.io:1111/foo/bar", + matchExpected: true, + }, + // no match when number of parts mismatch + { + globUrl: "*.kubernetes.io", + targetUrl: "kubernetes.io", + matchExpected: false, + }, + { + globUrl: "*.*.kubernetes.io", + targetUrl: "prefix.kubernetes.io", + matchExpected: false, + }, + { + globUrl: "*.*.kubernetes.io", + targetUrl: "kubernetes.io", + matchExpected: false, + }, + // no match when some parts mismatch + { + globUrl: "kubernetes.io", + targetUrl: "kubernetes.com", + matchExpected: false, + }, + { + globUrl: "k*.io", + targetUrl: "quay.io", + matchExpected: false, + }, + // no match when ports mismatch + { + globUrl: "*.kubernetes.io:1234/blah", + targetUrl: "prefix.kubernetes.io:1111/blah", + matchExpected: false, + }, + { + globUrl: "prefix.*.io/foo", + targetUrl: "prefix.kubernetes.io:1111/foo/bar", + matchExpected: false, + }, + } + for _, test := range tests { + matched, _ := urlsMatchStr(test.globUrl, test.targetUrl) + if matched != test.matchExpected { + t.Errorf("Expected match result of %s and %s to be %t, but was %t", + test.globUrl, test.targetUrl, test.matchExpected, matched) + } + } +} + +func TestDockerKeyringForGlob(t *testing.T) { + tests := []struct { + globUrl string + targetUrl string + }{ + { + globUrl: "hello.kubernetes.io", + targetUrl: "hello.kubernetes.io", + }, + { + globUrl: "*.docker.io", + targetUrl: "prefix.docker.io", + }, + { + globUrl: "prefix.*.io", + targetUrl: "prefix.docker.io", + }, + { + globUrl: "prefix.docker.*", + targetUrl: "prefix.docker.io", + }, + { + globUrl: "*.docker.io/path", + targetUrl: "prefix.docker.io/path", + }, + { + globUrl: "prefix.*.io/path", + targetUrl: "prefix.docker.io/path/subpath", + }, + { + globUrl: "prefix.docker.*/path", + targetUrl: "prefix.docker.io/path", + }, + { + globUrl: "*.docker.io:8888", + targetUrl: "prefix.docker.io:8888", + }, + { + globUrl: "prefix.*.io:8888", + targetUrl: 
"prefix.docker.io:8888", + }, + { + globUrl: "prefix.docker.*:8888", + targetUrl: "prefix.docker.io:8888", + }, + { + globUrl: "*.docker.io/path:1111", + targetUrl: "prefix.docker.io/path:1111", + }, + { + globUrl: "prefix.*.io/path:1111", + targetUrl: "prefix.docker.io/path/subpath:1111", + }, + { + globUrl: "prefix.docker.*/path:1111", + targetUrl: "prefix.docker.io/path:1111", + }, + } + for _, test := range tests { + email := "foo@bar.baz" + username := "foo" + password := "bar" + auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) + sampleDockerConfig := fmt.Sprintf(`{ "https://%s": { "email": %q, "auth": %q } -}`, url, email, auth) +}`, test.globUrl, email, auth) - keyring := &BasicDockerKeyring{} - if cfg, err := readDockerConfigFileFromBytes([]byte(sampleDockerConfig)); err != nil { - t.Errorf("Error processing json blob %q, %v", sampleDockerConfig, err) - } else { - keyring.Add(cfg) - } + keyring := &BasicDockerKeyring{} + if cfg, err := readDockerConfigFileFromBytes([]byte(sampleDockerConfig)); err != nil { + t.Errorf("Error processing json blob %q, %v", sampleDockerConfig, err) + } else { + keyring.Add(cfg) + } - creds, ok := keyring.Lookup(url + "/foo/bar") - if !ok { - t.Errorf("Didn't find expected URL: %s", url) - return - } - if len(creds) > 1 { - t.Errorf("Got more hits than expected: %s", creds) - } - val := creds[0] + creds, ok := keyring.Lookup(test.targetUrl + "/foo/bar") + if !ok { + t.Errorf("Didn't find expected URL: %s", test.targetUrl) + return + } + val := creds[0] - if username != val.Username { - t.Errorf("Unexpected username value, want: %s, got: %s", username, val.Username) - } - if password != val.Password { - t.Errorf("Unexpected password value, want: %s, got: %s", password, val.Password) - } - if email != val.Email { - t.Errorf("Unexpected email value, want: %s, got: %s", email, val.Email) + if username != val.Username { + t.Errorf("Unexpected username value, want: %s, got: %s", username, val.Username) + } + if password != val.Password { + t.Errorf("Unexpected password value, want: %s, got: %s", password, val.Password) + } + if email != val.Email { + t.Errorf("Unexpected email value, want: %s, got: %s", email, val.Email) + } } } func TestKeyringMiss(t *testing.T) { - url := "hello.kubernetes.io" - email := "foo@bar.baz" - username := "foo" - password := "bar" - auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) - sampleDockerConfig := fmt.Sprintf(`{ + tests := []struct { + globUrl string + lookupUrl string + }{ + { + globUrl: "hello.kubernetes.io", + lookupUrl: "world.mesos.org/foo/bar", + }, + { + globUrl: "*.docker.com", + lookupUrl: "prefix.docker.io", + }, + { + globUrl: "suffix.*.io", + lookupUrl: "prefix.docker.io", + }, + { + globUrl: "prefix.docker.c*", + lookupUrl: "prefix.docker.io", + }, + } + for _, test := range tests { + email := "foo@bar.baz" + username := "foo" + password := "bar" + auth := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", username, password))) + sampleDockerConfig := fmt.Sprintf(`{ "https://%s": { "email": %q, "auth": %q } -}`, url, email, auth) +}`, test.globUrl, email, auth) - keyring := &BasicDockerKeyring{} - if cfg, err := readDockerConfigFileFromBytes([]byte(sampleDockerConfig)); err != nil { - t.Errorf("Error processing json blob %q, %v", sampleDockerConfig, err) - } else { - keyring.Add(cfg) + keyring := &BasicDockerKeyring{} + if cfg, err := readDockerConfigFileFromBytes([]byte(sampleDockerConfig)); err != nil { + 
t.Errorf("Error processing json blob %q, %v", sampleDockerConfig, err) + } else { + keyring.Add(cfg) + } + + _, ok := keyring.Lookup(test.lookupUrl + "/foo/bar") + if ok { + t.Errorf("Expected not to find URL %s, but it was found", test.lookupUrl) + } } - val, ok := keyring.Lookup("world.mesos.org/foo/bar") - if ok { - t.Errorf("Found unexpected credential: %+v", val) - } } func TestKeyringMissWithDockerHubCredentials(t *testing.T) { diff --git a/pkg/kubectl/cmd/expose.go b/pkg/kubectl/cmd/expose.go index fe9ead8550f..50e9e544112 100644 --- a/pkg/kubectl/cmd/expose.go +++ b/pkg/kubectl/cmd/expose.go @@ -98,6 +98,9 @@ func RunExpose(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str if err != nil { return err } + if len(infos) > 1 { + return fmt.Errorf("multiple resources provided: %v", args) + } info := infos[0] // Get the input object @@ -118,10 +121,7 @@ func RunExpose(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str } names := generator.ParamNames() params := kubectl.MakeParams(cmd, names) - params["name"] = cmdutil.GetFlagString(cmd, "name") - if len(params["name"]) == 0 { - params["name"] = info.Name - } + params["default-name"] = info.Name if s, found := params["selector"]; !found || len(s) == 0 || cmdutil.GetFlagInt(cmd, "port") < 1 { if len(s) == 0 { s, err := f.PodSelectorForObject(inputObject) diff --git a/pkg/kubectl/cmd/expose_test.go b/pkg/kubectl/cmd/expose_test.go index 94669880769..f1189fc6127 100644 --- a/pkg/kubectl/cmd/expose_test.go +++ b/pkg/kubectl/cmd/expose_test.go @@ -29,16 +29,24 @@ import ( func TestRunExposeService(t *testing.T) { tests := []struct { - name string - args []string - input runtime.Object - flags map[string]string - output runtime.Object - status int + name string + args []string + ns string + calls map[string]string + input runtime.Object + flags map[string]string + output runtime.Object + expected string + status int }{ { name: "expose-service-from-service", args: []string{"service", "baz"}, + ns: "test", + calls: map[string]string{ + "GET": "/namespaces/test/services/baz", + "POST": "/namespaces/test/services", + }, input: &api.Service{ ObjectMeta: api.ObjectMeta{Name: "baz", Namespace: "test", ResourceVersion: "12"}, TypeMeta: api.TypeMeta{Kind: "Service", APIVersion: "v1beta3"}, @@ -61,7 +69,42 @@ func TestRunExposeService(t *testing.T) { Selector: map[string]string{"func": "stream"}, }, }, - status: 200, + expected: "services/foo", + status: 200, + }, + { + name: "no-name-passed-from-the-cli", + args: []string{"service", "mayor"}, + ns: "default", + calls: map[string]string{ + "GET": "/namespaces/default/services/mayor", + "POST": "/namespaces/default/services", + }, + input: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "mayor", Namespace: "default", ResourceVersion: "12"}, + TypeMeta: api.TypeMeta{Kind: "Service", APIVersion: "v1beta3"}, + Spec: api.ServiceSpec{ + Selector: map[string]string{"run": "this"}, + }, + }, + // No --name flag specified below. Service will use the resource's name passed via the 'default-name' parameter + flags: map[string]string{"selector": "run=this", "port": "80", "labels": "runas=amayor"}, + output: &api.Service{ + ObjectMeta: api.ObjectMeta{Name: "mayor", Namespace: "default", ResourceVersion: "12", Labels: map[string]string{"runas": "amayor"}}, + TypeMeta: api.TypeMeta{Kind: "Service", APIVersion: "v1beta3"}, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{ + { + Name: "default", + Protocol: api.Protocol("TCP"), + Port: 80, + }, + }, + Selector: map[string]string{"run": "this"}, + }, + }, + expected: "services/mayor", + status: 200, }, } @@ -72,9 +115,9 @@ func TestRunExposeService(t *testing.T) { Codec: codec, Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { switch p, m := req.URL.Path, req.Method; { - case p == "/namespaces/test/services/baz" && m == "GET": + case p == test.calls[m] && m == "GET": return &http.Response{StatusCode: test.status, Body: objBody(codec, test.input)}, nil - case p == "/namespaces/test/services" && m == "POST": + case p == test.calls[m] && m == "POST": return &http.Response{StatusCode: test.status, Body: objBody(codec, test.output)}, nil default: t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) @@ -82,7 +125,7 @@ func TestRunExposeService(t *testing.T) { } }), } - tf.Namespace = "test" + tf.Namespace = test.ns buf := bytes.NewBuffer([]byte{}) cmd := NewCmdExposeService(f, buf) @@ -93,7 +136,7 @@ func TestRunExposeService(t *testing.T) { cmd.Run(cmd, test.args) out := buf.String() - if strings.Contains(out, "services/foo") { - t.Errorf("%s: unexpected output: %s", test.name, out) + if !strings.Contains(out, test.expected) { + t.Errorf("%s: expected output containing %s, got: %s", test.name, test.expected, out) } } diff --git a/pkg/kubectl/cmd/get.go b/pkg/kubectl/cmd/get.go index d09a90c6abd..4ef1d034446 100644 --- a/pkg/kubectl/cmd/get.go +++ b/pkg/kubectl/cmd/get.go @@ -32,7 +32,9 @@ const ( get_long = `Display one or many resources. Possible resources include pods (po), replication controllers (rc), services -(svc), nodes, events (ev), or component statuses (cs). +(svc), nodes, events (ev), component statuses (cs), limit ranges (limits), +minions (mi), persistent volumes (pv), persistent volume claims (pvc) +or resource quotas (quota). By specifying the output as 'template' and providing a Go template as the value of the --template flag, you can filter the attributes of the fetched resource(s).` diff --git a/pkg/kubectl/cmd/label.go b/pkg/kubectl/cmd/label.go index 44648c39354..495607a10e2 100644 --- a/pkg/kubectl/cmd/label.go +++ b/pkg/kubectl/cmd/label.go @@ -25,12 +25,14 @@ import ( cmdutil "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/spf13/cobra" ) const ( label_long = `Update the labels on a resource. +A label must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to %[1]d characters. If --overwrite is true, then existing labels can be overwritten, otherwise attempting to overwrite a label will result in an error. If --resource-version is specified, then updates will use this resource version, otherwise the existing resource-version will be used.` label_example = `// Update pod 'foo' with the label 'unhealthy' and the value 'true'.
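
With the stricter parseLabels validation in the hunks below, a label spec must be a single key=value pair whose value is non-empty and passes util.IsValidLabelValue. Examples drawn from the new test cases:

    // Accepted: "env=prod", "unhealthy=true"
    // Rejected: "a="     (empty value)
    //           "a=%^$"  (invalid characters in value)
    //           "a=b=c"  (splits into more than two parts)
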
@@ -54,7 +56,7 @@ func NewCmdLabel(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd := &cobra.Command{ Use: "label [--overwrite] RESOURCE NAME KEY_1=VAL_1 ... KEY_N=VAL_N [--resource-version=version]", Short: "Update the labels on a resource", - Long: label_long, + Long: fmt.Sprintf(label_long, util.LabelValueMaxLength), Example: label_example, Run: func(cmd *cobra.Command, args []string) { err := RunLabel(f, out, cmd, args) @@ -103,7 +105,7 @@ func parseLabels(spec []string) (map[string]string, []string, error) { for _, labelSpec := range spec { if strings.Index(labelSpec, "=") != -1 { parts := strings.Split(labelSpec, "=") - if len(parts) != 2 { + if len(parts) != 2 || len(parts[1]) == 0 || !util.IsValidLabelValue(parts[1]) { return nil, nil, fmt.Errorf("invalid label spec: %v", labelSpec) } labels[parts[0]] = parts[1] @@ -185,7 +187,7 @@ func RunLabel(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri labels, remove, err := parseLabels(labelArgs) if err != nil { - return err + return cmdutil.UsageError(cmd, err.Error()) } mapper, typer := f.Object() diff --git a/pkg/kubectl/cmd/label_test.go b/pkg/kubectl/cmd/label_test.go index b59d91a8e9c..eb72e169237 100644 --- a/pkg/kubectl/cmd/label_test.go +++ b/pkg/kubectl/cmd/label_test.go @@ -125,6 +125,14 @@ func TestParseLabels(t *testing.T) { labels: []string{"a=b", "c=d", "a-"}, expectErr: true, }, + { + labels: []string{"a="}, + expectErr: true, + }, + { + labels: []string{"a=%^$"}, + expectErr: true, + }, } for _, test := range tests { labels, remove, err := parseLabels(test.labels) diff --git a/pkg/kubectl/cmd/proxy.go b/pkg/kubectl/cmd/proxy.go index 65badee7da7..234e0f5332b 100644 --- a/pkg/kubectl/cmd/proxy.go +++ b/pkg/kubectl/cmd/proxy.go @@ -32,8 +32,8 @@ const ( $ kubectl proxy --port=8011 --www=./local/www/ // Run a proxy to kubernetes apiserver, changing the api prefix to k8s-api -// This makes e.g. the pods api available at localhost:8011/k8s-api/v1beta1/pods/ -$ kubectl proxy --api-prefix=k8s-api` +// This makes e.g. 
the pods api available at localhost:8011/k8s-api/v1beta3/pods/ +$ kubectl proxy --api-prefix=/k8s-api` ) func NewCmdProxy(f *cmdutil.Factory, out io.Writer) *cobra.Command { diff --git a/pkg/kubectl/cmd/util/helpers_test.go b/pkg/kubectl/cmd/util/helpers_test.go index d9359eace52..5f3f14b6e63 100644 --- a/pkg/kubectl/cmd/util/helpers_test.go +++ b/pkg/kubectl/cmd/util/helpers_test.go @@ -67,7 +67,7 @@ func TestMerge(t *testing.T) { Name: "baz", }, Spec: api.PodSpec{ - Host: "bar", + NodeName: "bar", RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, }, diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index 30c54e1a378..a3947047e59 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -269,7 +269,7 @@ func describePod(pod *api.Pod, rcs []api.ReplicationController, events *api.Even return tabbedString(func(out io.Writer) error { fmt.Fprintf(out, "Name:\t%s\n", pod.Name) fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&pod.Spec)) - fmt.Fprintf(out, "Host:\t%s\n", pod.Spec.Host+"/"+pod.Status.HostIP) + fmt.Fprintf(out, "Node:\t%s\n", pod.Spec.NodeName+"/"+pod.Status.HostIP) fmt.Fprintf(out, "Labels:\t%s\n", formatLabels(pod.Labels)) fmt.Fprintf(out, "Status:\t%s\n", string(pod.Status.Phase)) fmt.Fprintf(out, "Replication Controllers:\t%s\n", printReplicationControllersByLabels(rcs)) @@ -349,20 +349,20 @@ func describeContainers(containers []api.ContainerStatus, out io.Writer) { if container.State.Waiting.Reason != "" { fmt.Fprintf(out, " Reason:\t%s\n", container.State.Waiting.Reason) } - case container.State.Termination != nil: + case container.State.Terminated != nil: fmt.Fprintf(out, " State:\tTerminated\n") - if container.State.Termination.Reason != "" { - fmt.Fprintf(out, " Reason:\t%s\n", container.State.Termination.Reason) + if container.State.Terminated.Reason != "" { + fmt.Fprintf(out, " Reason:\t%s\n", container.State.Terminated.Reason) } - if container.State.Termination.Message != "" { - fmt.Fprintf(out, " Message:\t%s\n", container.State.Termination.Message) + if container.State.Terminated.Message != "" { + fmt.Fprintf(out, " Message:\t%s\n", container.State.Terminated.Message) } - fmt.Fprintf(out, " Exit Code:\t%d\n", container.State.Termination.ExitCode) - if container.State.Termination.Signal > 0 { - fmt.Fprintf(out, " Signal:\t%d\n", container.State.Termination.Signal) + fmt.Fprintf(out, " Exit Code:\t%d\n", container.State.Terminated.ExitCode) + if container.State.Terminated.Signal > 0 { + fmt.Fprintf(out, " Signal:\t%d\n", container.State.Terminated.Signal) } - fmt.Fprintf(out, " Started:\t%s\n", container.State.Termination.StartedAt.Time.Format(time.RFC1123Z)) - fmt.Fprintf(out, " Finished:\t%s\n", container.State.Termination.FinishedAt.Time.Format(time.RFC1123Z)) + fmt.Fprintf(out, " Started:\t%s\n", container.State.Terminated.StartedAt.Time.Format(time.RFC1123Z)) + fmt.Fprintf(out, " Finished:\t%s\n", container.State.Terminated.FinishedAt.Time.Format(time.RFC1123Z)) default: fmt.Fprintf(out, " State:\tWaiting\n") } @@ -506,7 +506,7 @@ func describeService(service *api.Service, endpoints *api.Endpoints, events *api fmt.Fprintf(out, "Labels:\t%s\n", formatLabels(service.Labels)) fmt.Fprintf(out, "Selector:\t%s\n", formatLabels(service.Spec.Selector)) fmt.Fprintf(out, "Type:\t%s\n", service.Spec.Type) - fmt.Fprintf(out, "IP:\t%s\n", service.Spec.PortalIP) + fmt.Fprintf(out, "IP:\t%s\n", service.Spec.ClusterIP) if len(service.Status.LoadBalancer.Ingress) > 0 { list := buildIngressString(service.Status.LoadBalancer.Ingress) 
fmt.Fprintf(out, "LoadBalancer Ingress:\t%s\n", list) @@ -520,9 +520,9 @@ func describeService(service *api.Service, endpoints *api.Endpoints, events *api } fmt.Fprintf(out, "Port:\t%s\t%d/%s\n", name, sp.Port, sp.Protocol) if sp.NodePort != 0 { - fmt.Fprintf(out, "NodePort:\t%s\t%d/%s\n", name, sp.Port, sp.Protocol) + fmt.Fprintf(out, "NodePort:\t%s\t%d/%s\n", name, sp.NodePort, sp.Protocol) } - fmt.Fprintf(out, "Endpoints:\t%s\t%s\n", name, formatEndpoints(endpoints, util.NewStringSet(sp.Name))) + fmt.Fprintf(out, "Endpoints:\t%s\n", formatEndpoints(endpoints, util.NewStringSet(sp.Name))) } fmt.Fprintf(out, "Session Affinity:\t%s\n", service.Spec.SessionAffinity) if events != nil { @@ -612,7 +612,7 @@ func (d *NodeDescriber) Describe(namespace, name string) (string, error) { } for i := range allPods.Items { pod := &allPods.Items[i] - if pod.Spec.Host != name { + if pod.Spec.NodeName != name { continue } pods = append(pods, pod) diff --git a/pkg/kubectl/describe_test.go b/pkg/kubectl/describe_test.go index a36b4bb9dfc..d7481d6a0b1 100644 --- a/pkg/kubectl/describe_test.go +++ b/pkg/kubectl/describe_test.go @@ -157,7 +157,7 @@ func TestDescribeContainers(t *testing.T) { input: api.ContainerStatus{ Name: "test", State: api.ContainerState{ - Termination: &api.ContainerStateTerminated{ + Terminated: &api.ContainerStateTerminated{ StartedAt: util.NewTime(time.Now()), FinishedAt: util.NewTime(time.Now()), Reason: "potato", diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index 30da9dab6a1..90ad20f3ef8 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -317,7 +317,7 @@ func formatEndpoints(endpoints *api.Endpoints, ports util.StringSet) string { list := []string{} max := 3 more := false -Loop: + count := 0 for i := range endpoints.Subsets { ss := &endpoints.Subsets[i] for i := range ss.Ports { @@ -326,17 +326,19 @@ Loop: for i := range ss.Addresses { if len(list) == max { more = true - break Loop } addr := &ss.Addresses[i] - list = append(list, fmt.Sprintf("%s:%d", addr.IP, port.Port)) + if !more { + list = append(list, fmt.Sprintf("%s:%d", addr.IP, port.Port)) + } + count++ } } } } ret := strings.Join(list, ",") if more { - ret += "..." + return fmt.Sprintf("%s + %d more...", ret, count-max) } return ret } @@ -379,7 +381,7 @@ func interpretContainerStatus(status *api.ContainerStatus) (string, string, stri } else if state.Running != nil { // Get the information of the last termination state. This is useful if // a container is stuck in a crash loop. 
- message := getTermMsg(status.LastTerminationState.Termination) + message := getTermMsg(status.LastTerminationState.Terminated) if message != "" { message = "last termination: " + message } @@ -388,8 +390,8 @@ func interpretContainerStatus(status *api.ContainerStatus) (string, string, stri stateMsg = stateMsg + " *not ready*" } return stateMsg, translateTimestamp(state.Running.StartedAt), message, nil - } else if state.Termination != nil { - return "Terminated", translateTimestamp(state.Termination.StartedAt), getTermMsg(state.Termination), nil + } else if state.Terminated != nil { + return "Terminated", translateTimestamp(state.Terminated.StartedAt), getTermMsg(state.Terminated), nil } return "", "", "", fmt.Errorf("unknown container state %#v", *state) } @@ -406,7 +408,7 @@ func printPod(pod *api.Pod, w io.Writer, withNamespace bool) error { name, pod.Status.PodIP, "", "", - podHostString(pod.Spec.Host, pod.Status.HostIP), + podHostString(pod.Spec.NodeName, pod.Status.HostIP), formatLabels(pod.Labels), pod.Status.Phase, translateTimestamp(pod.CreationTimestamp), @@ -551,7 +553,7 @@ func printService(svc *api.Service, w io.Writer, withNamespace bool) error { name = svc.Name } - ips := []string{svc.Spec.PortalIP} + ips := []string{svc.Spec.ClusterIP} ingress := svc.Status.LoadBalancer.Ingress for i := range ingress { diff --git a/pkg/kubectl/resource_printer_test.go b/pkg/kubectl/resource_printer_test.go index 8e518a58e42..df6e41d3e27 100644 --- a/pkg/kubectl/resource_printer_test.go +++ b/pkg/kubectl/resource_printer_test.go @@ -645,7 +645,7 @@ func TestPrintHumanReadableService(t *testing.T) { tests := []api.Service{ { Spec: api.ServiceSpec{ - PortalIP: "1.2.3.4", + ClusterIP: "1.2.3.4", Ports: []api.ServicePort{ { Port: 80, @@ -668,7 +668,7 @@ func TestPrintHumanReadableService(t *testing.T) { }, { Spec: api.ServiceSpec{ - PortalIP: "1.2.3.4", + ClusterIP: "1.2.3.4", Ports: []api.ServicePort{ { Port: 80, @@ -687,7 +687,7 @@ func TestPrintHumanReadableService(t *testing.T) { }, { Spec: api.ServiceSpec{ - PortalIP: "1.2.3.4", + ClusterIP: "1.2.3.4", Ports: []api.ServicePort{ { Port: 80, @@ -715,7 +715,7 @@ func TestPrintHumanReadableService(t *testing.T) { }, { Spec: api.ServiceSpec{ - PortalIP: "1.2.3.4", + ClusterIP: "1.2.3.4", Ports: []api.ServicePort{ { Port: 80, @@ -754,9 +754,9 @@ func TestPrintHumanReadableService(t *testing.T) { buff := bytes.Buffer{} printService(&svc, &buff, false) output := string(buff.Bytes()) - ip := svc.Spec.PortalIP + ip := svc.Spec.ClusterIP if !strings.Contains(output, ip) { - t.Errorf("expected to contain portal ip %s, but doesn't: %s", ip, output) + t.Errorf("expected to contain ClusterIP %s, but doesn't: %s", ip, output) } for _, ingress := range svc.Status.LoadBalancer.Ingress { @@ -772,7 +772,7 @@ func TestPrintHumanReadableService(t *testing.T) { t.Errorf("expected to contain port: %s, but doesn't: %s", portSpec, output) } } - // Max of # ports and (# public ip + portal ip) + // Max of # ports and (# public ip + cluster ip) count := len(svc.Spec.Ports) if len(svc.Status.LoadBalancer.Ingress)+1 > count { count = len(svc.Status.LoadBalancer.Ingress) + 1 @@ -838,7 +838,7 @@ func TestInterpretContainerStatus(t *testing.T) { { status: &api.ContainerStatus{ State: api.ContainerState{ - Termination: &api.ContainerStateTerminated{ + Terminated: &api.ContainerStateTerminated{ ExitCode: 3, }, }, @@ -851,7 +851,7 @@ func TestInterpretContainerStatus(t *testing.T) { { status: &api.ContainerStatus{ State: api.ContainerState{ - Termination: 
&api.ContainerStateTerminated{ + Terminated: &api.ContainerStateTerminated{ ExitCode: 5, Reason: "test reason", }, @@ -932,7 +932,7 @@ func TestPrintHumanReadableWithNamespace(t *testing.T) { obj: &api.Service{ ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespaceName}, Spec: api.ServiceSpec{ - PortalIP: "1.2.3.4", + ClusterIP: "1.2.3.4", Ports: []api.ServicePort{ { Port: 80, diff --git a/pkg/kubectl/run.go b/pkg/kubectl/run.go index 3d4906e82fd..e02058221bf 100644 --- a/pkg/kubectl/run.go +++ b/pkg/kubectl/run.go @@ -29,7 +29,8 @@ type BasicReplicationController struct{} func (BasicReplicationController) ParamNames() []GeneratorParam { return []GeneratorParam{ {"labels", false}, - {"name", true}, + {"default-name", true}, + {"name", false}, {"replicas", true}, {"image", true}, {"port", false}, @@ -38,6 +39,13 @@ func (BasicReplicationController) ParamNames() []GeneratorParam { } func (BasicReplicationController) Generate(params map[string]string) (runtime.Object, error) { + name, found := params["name"] + if !found || len(name) == 0 { + name, found = params["default-name"] + if !found || len(name) == 0 { + return nil, fmt.Errorf("'name' is a required parameter.") + } + } // TODO: extract this flag to a central location. labelString, found := params["labels"] var labels map[string]string @@ -49,7 +57,7 @@ func (BasicReplicationController) Generate(params map[string]string) (runtime.Ob } } else { labels = map[string]string{ - "run": params["name"], + "run": name, } } count, err := strconv.Atoi(params["replicas"]) @@ -58,7 +66,7 @@ func (BasicReplicationController) Generate(params map[string]string) (runtime.Ob } controller := api.ReplicationController{ ObjectMeta: api.ObjectMeta{ - Name: params["name"], + Name: name, Labels: labels, }, Spec: api.ReplicationControllerSpec{ @@ -71,7 +79,7 @@ func (BasicReplicationController) Generate(params map[string]string) (runtime.Ob Spec: api.PodSpec{ Containers: []api.Container{ { - Name: params["name"], + Name: name, Image: params["image"], }, }, diff --git a/pkg/kubectl/service.go b/pkg/kubectl/service.go index 8c631b2751a..5f41a0da013 100644 --- a/pkg/kubectl/service.go +++ b/pkg/kubectl/service.go @@ -29,7 +29,8 @@ type ServiceGenerator struct{} func (ServiceGenerator) ParamNames() []GeneratorParam { return []GeneratorParam{ - {"name", true}, + {"default-name", true}, + {"name", false}, {"selector", true}, {"port", true}, {"labels", false}, @@ -62,8 +63,11 @@ func (ServiceGenerator) Generate(params map[string]string) (runtime.Object, erro } name, found := params["name"] - if !found { - return nil, fmt.Errorf("'name' is a required parameter.") + if !found || len(name) == 0 { + name, found = params["default-name"] + if !found || len(name) == 0 { + return nil, fmt.Errorf("'name' is a required parameter.") + } } portString, found := params["port"] if !found { diff --git a/pkg/kubelet/config/common.go b/pkg/kubelet/config/common.go index cbfc85207a7..936944af44c 100644 --- a/pkg/kubelet/config/common.go +++ b/pkg/kubelet/config/common.go @@ -68,7 +68,7 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, hostname string) er glog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source) // Set the Host field to indicate this pod is scheduled on the current node. 
- pod.Spec.Host = hostname + pod.Spec.NodeName = hostname pod.ObjectMeta.SelfLink = getSelfLink(pod.Name, pod.Namespace) return nil diff --git a/pkg/kubelet/config/file_test.go b/pkg/kubelet/config/file_test.go index f649fdcacbc..901866e1c57 100644 --- a/pkg/kubelet/config/file_test.go +++ b/pkg/kubelet/config/file_test.go @@ -99,7 +99,7 @@ func TestReadContainerManifestFromFile(t *testing.T) { SelfLink: getSelfLink("test-"+hostname, kubelet.NamespaceDefault), }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{ @@ -126,7 +126,7 @@ func TestReadContainerManifestFromFile(t *testing.T) { SelfLink: getSelfLink("12345-"+hostname, kubelet.NamespaceDefault), }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{ @@ -196,7 +196,7 @@ func TestReadPodsFromFile(t *testing.T) { SelfLink: getSelfLink("test-"+hostname, "mynamespace"), }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{ @@ -231,7 +231,7 @@ func TestReadPodsFromFile(t *testing.T) { SelfLink: getSelfLink("12345-"+hostname, kubelet.NamespaceDefault), }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{ @@ -371,7 +371,7 @@ func exampleManifestAndPod(id string) (v1beta1.ContainerManifest, *api.Pod) { SelfLink: getSelfLink(id+"-"+hostname, kubelet.NamespaceDefault), }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, Containers: []api.Container{ { Name: "c" + id, diff --git a/pkg/kubelet/config/http_test.go b/pkg/kubelet/config/http_test.go index 789dd17b9dc..a724167f1ea 100644 --- a/pkg/kubelet/config/http_test.go +++ b/pkg/kubelet/config/http_test.go @@ -145,7 +145,7 @@ func TestExtractManifestFromHTTP(t *testing.T) { SelfLink: getSelfLink("foo-"+hostname, kubelet.NamespaceDefault), }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{ @@ -172,7 +172,7 @@ func TestExtractManifestFromHTTP(t *testing.T) { SelfLink: getSelfLink("111-"+hostname, kubelet.NamespaceDefault), }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{ @@ -199,7 +199,7 @@ func TestExtractManifestFromHTTP(t *testing.T) { SelfLink: getSelfLink("foo-"+hostname, kubelet.NamespaceDefault), }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{ @@ -230,7 +230,7 @@ func TestExtractManifestFromHTTP(t *testing.T) { SelfLink: getSelfLink("foo-"+hostname, kubelet.NamespaceDefault), }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{ @@ -250,7 +250,7 @@ func TestExtractManifestFromHTTP(t *testing.T) { SelfLink: getSelfLink("bar-"+hostname, kubelet.NamespaceDefault), }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{ @@ -328,7 +328,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { Namespace: 
"mynamespace", }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}}, }, }, @@ -343,7 +343,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { SelfLink: getSelfLink("foo-"+hostname, "mynamespace"), }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{ @@ -369,7 +369,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { UID: "111", }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}}, }, }, @@ -379,7 +379,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { UID: "222", }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, Containers: []api.Container{{Name: "2", Image: "bar", ImagePullPolicy: ""}}, }, }, @@ -396,7 +396,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { SelfLink: getSelfLink("foo-"+hostname, kubelet.NamespaceDefault), }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{ @@ -416,7 +416,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { SelfLink: getSelfLink("bar-"+hostname, kubelet.NamespaceDefault), }, Spec: api.PodSpec{ - Host: hostname, + NodeName: hostname, RestartPolicy: api.RestartPolicyAlways, DNSPolicy: api.DNSClusterFirst, Containers: []api.Container{{ diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index d6272e039f8..647f7898160 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -57,7 +57,7 @@ func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatu // Get all dead container status. var resultStatus []*api.ContainerStatus for i, containerStatus := range podStatus.ContainerStatuses { - if containerStatus.Name == container.Name && containerStatus.State.Termination != nil { + if containerStatus.Name == container.Name && containerStatus.State.Terminated != nil { resultStatus = append(resultStatus, &podStatus.ContainerStatuses[i]) } } @@ -76,7 +76,7 @@ func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatu if pod.Spec.RestartPolicy == api.RestartPolicyOnFailure { // Check the exit code of last run. Note: This assumes the result is sorted // by the created time in reverse order. - if resultStatus[0].State.Termination.ExitCode == 0 { + if resultStatus[0].State.Terminated.ExitCode == 0 { glog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, podFullName) return false } diff --git a/pkg/kubelet/container_manager.go b/pkg/kubelet/container_manager.go index 84b20de18da..188646c45d3 100644 --- a/pkg/kubelet/container_manager.go +++ b/pkg/kubelet/container_manager.go @@ -20,5 +20,6 @@ package kubelet type containerManager interface { // Runs the container manager's housekeeping. // - Ensures that the Docker daemon is in a container. + // - Creates the system container where all non-containerized processes run. Start() error } diff --git a/pkg/kubelet/container_manager_linux.go b/pkg/kubelet/container_manager_linux.go index 330e4bf43f9..af1d53a5007 100644 --- a/pkg/kubelet/container_manager_linux.go +++ b/pkg/kubelet/container_manager_linux.go @@ -35,33 +35,60 @@ import ( ) type containerManagerImpl struct { - // Absolute name of the desired container that Docker should be in. 
- dockerContainerName string + // Whether to create and use the specified containers. + useDockerContainer bool + useSystemContainer bool - // The manager of the resource-only container Docker should be in. - manager fs.Manager + // OOM score for the Docker container. dockerOomScoreAdj int + + // Managers for containers. + dockerContainer fs.Manager + systemContainer fs.Manager + rootContainer fs.Manager } var _ containerManager = &containerManagerImpl{} -// Takes the absolute name that the Docker daemon should be in. -// Empty container name disables moving the Docker daemon. -func newContainerManager(dockerDaemonContainer string) (containerManager, error) { +// Takes the absolute names of the specified containers. +// An empty container name disables use of that container. +func newContainerManager(dockerDaemonContainer, systemContainer string) (containerManager, error) { + if systemContainer == "/" { + return nil, fmt.Errorf("system container cannot be root (\"/\")") + } + return &containerManagerImpl{ - dockerContainerName: dockerDaemonContainer, - manager: fs.Manager{ + useDockerContainer: dockerDaemonContainer != "", + useSystemContainer: systemContainer != "", + dockerOomScoreAdj: -900, + dockerContainer: fs.Manager{ Cgroups: &configs.Cgroup{ Name: dockerDaemonContainer, AllowAllDevices: true, }, }, - dockerOomScoreAdj: -900, + systemContainer: fs.Manager{ + Cgroups: &configs.Cgroup{ + Name: systemContainer, + AllowAllDevices: true, + }, + }, + rootContainer: fs.Manager{ + Cgroups: &configs.Cgroup{ + Name: "/", + }, + }, }, nil } func (cm *containerManagerImpl) Start() error { - if cm.dockerContainerName != "" { + if cm.useSystemContainer { + err := cm.ensureSystemContainer() + if err != nil { + return err + } + } + if cm.useDockerContainer { go util.Until(func() { err := cm.ensureDockerInContainer() if err != nil { @@ -99,10 +126,10 @@ func (cm *containerManagerImpl) ensureDockerInContainer() error { errs = append(errs, fmt.Errorf("failed to find container of PID %q: %v", pid, err)) } - if cont != cm.dockerContainerName { - err = cm.manager.Apply(pid) + if cont != cm.dockerContainer.Cgroups.Name { + err = cm.dockerContainer.Apply(pid) if err != nil { - errs = append(errs, fmt.Errorf("failed to move PID %q (in %q) to %q", pid, cont, cm.dockerContainerName)) + errs = append(errs, fmt.Errorf("failed to move PID %q (in %q) to %q", pid, cont, cm.dockerContainer.Cgroups.Name)) } } @@ -125,3 +152,60 @@ func getContainer(pid int) (string, error) { return cgroups.ParseCgroupFile("cpu", f) } + +// Ensures the system container is created and all non-kernel processes without +// a container are moved to it. +func (cm *containerManagerImpl) ensureSystemContainer() error { + // Move non-kernel PIDs to the system container. + attemptsRemaining := 10 + var errs []error + for attemptsRemaining >= 0 { + // Only keep errors on latest attempt. + errs = []error{} + attemptsRemaining-- + + allPids, err := cm.rootContainer.GetPids() + if err != nil { + errs = append(errs, fmt.Errorf("failed to list PIDs for root: %v", err)) + continue + } + + // Remove kernel PIDs + pids := make([]int, 0, len(allPids)) + for _, pid := range allPids { + if isKernelPid(pid) { + continue + } + + pids = append(pids, pid) + } + glog.Infof("Found %d PIDs in root, %d of them are kernel related", len(allPids), len(allPids)-len(pids)) + + // Check if we moved all the non-kernel PIDs.
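ensureSystemContainer (continued just below) separates kernel threads from movable processes with a single heuristic: a kernel thread has no backing executable, so its /proc/<pid>/exe link cannot be resolved. A minimal standalone illustration of that check (Linux-only; the demo main is an assumption for illustration, not part of the patch):

package main

import (
	"fmt"
	"os"
)

// isKernelPid mirrors the heuristic used below: kernel threads have no
// associated executable, so reading the /proc/<pid>/exe symlink fails.
func isKernelPid(pid int) bool {
	_, err := os.Readlink(fmt.Sprintf("/proc/%d/exe", pid))
	return err != nil
}

func main() {
	// On a typical Linux system PID 1 (init) is a user-space process and
	// PID 2 (kthreadd) is a kernel thread, so this prints "false true".
	fmt.Println(isKernelPid(1), isKernelPid(2))
}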
+ if len(pids) == 0 { + break + } + + glog.Infof("Moving non-kernel threads: %v", pids) + for _, pid := range pids { + err := cm.systemContainer.Apply(pid) + if err != nil { + errs = append(errs, fmt.Errorf("failed to move PID %d into the system container %q: %v", pid, cm.systemContainer.Cgroups.Name, err)) + continue + } + } + + } + if attemptsRemaining < 0 { + errs = append(errs, fmt.Errorf("ran out of attempts to create system container %q", cm.systemContainer.Cgroups.Name)) + } + + return errors.NewAggregate(errs) +} + +// Determines whether the specified PID is a kernel PID. +func isKernelPid(pid int) bool { + // Kernel threads have no associated executable. + _, err := os.Readlink(fmt.Sprintf("/proc/%d/exe", pid)) + return err != nil +} diff --git a/pkg/kubelet/container_manager_unsupported.go b/pkg/kubelet/container_manager_unsupported.go index 6c543e1e617..77246f174a4 100644 --- a/pkg/kubelet/container_manager_unsupported.go +++ b/pkg/kubelet/container_manager_unsupported.go @@ -31,6 +31,6 @@ func (unsupportedContainerManager) Start() error { return fmt.Errorf("Container Manager is unsupported in this build") } -func newContainerManager(dockerDaemonContainer string) (containerManager, error) { +func newContainerManager(dockerDaemonContainer, systemContainer string) (containerManager, error) { return &unsupportedContainerManager{}, nil } diff --git a/pkg/kubelet/dockertools/manager.go b/pkg/kubelet/dockertools/manager.go index 5ce43aded11..d1bbf459493 100644 --- a/pkg/kubelet/dockertools/manager.go +++ b/pkg/kubelet/dockertools/manager.go @@ -290,7 +290,7 @@ func (dm *DockerManager) inspectContainer(dockerID, containerName, tPath string) } else { reason = inspectResult.State.Error } - result.status.State.Termination = &api.ContainerStateTerminated{ + result.status.State.Terminated = &api.ContainerStateTerminated{ ExitCode: inspectResult.State.ExitCode, Reason: reason, StartedAt: util.NewTime(inspectResult.State.StartedAt), @@ -304,7 +304,7 @@ func (dm *DockerManager) inspectContainer(dockerID, containerName, tPath string) if err != nil { glog.Errorf("Error on reading termination-log %s: %v", path, err) } else { - result.status.State.Termination.Message = string(data) + result.status.State.Terminated.Message = string(data) } } } @@ -329,8 +329,8 @@ func (dm *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) { lastObservedTime := make(map[string]util.Time, len(pod.Spec.Containers)) for _, status := range pod.Status.ContainerStatuses { oldStatuses[status.Name] = status - if status.LastTerminationState.Termination != nil { - lastObservedTime[status.Name] = status.LastTerminationState.Termination.FinishedAt + if status.LastTerminationState.Terminated != nil { + lastObservedTime[status.Name] = status.LastTerminationState.Terminated.FinishedAt } } @@ -381,19 +381,19 @@ func (dm *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) { result := dm.inspectContainer(value.ID, dockerContainerName, terminationMessagePath) if result.err != nil { return nil, result.err - } else if result.status.State.Termination != nil { + } else if result.status.State.Terminated != nil { terminationState = &result.status.State } if containerStatus, found := statuses[dockerContainerName]; found { - if containerStatus.LastTerminationState.Termination == nil && terminationState != nil { + if containerStatus.LastTerminationState.Terminated == nil && terminationState != nil { // Populate the last termination state.
containerStatus.LastTerminationState = *terminationState } count := true // Only count dead containers terminated after last time we observed, if lastObservedTime, ok := lastObservedTime[dockerContainerName]; ok { - if terminationState != nil && terminationState.Termination.FinishedAt.After(lastObservedTime.Time) { + if terminationState != nil && terminationState.Terminated.FinishedAt.After(lastObservedTime.Time) { count = false } else { // The container finished before the last observation. No diff --git a/pkg/kubelet/dockertools/manager_test.go b/pkg/kubelet/dockertools/manager_test.go index 311982e84b2..38b89301915 100644 --- a/pkg/kubelet/dockertools/manager_test.go +++ b/pkg/kubelet/dockertools/manager_test.go @@ -859,8 +859,12 @@ func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, p t.Fatalf("unexpected error: %v", err) } runningPod := kubecontainer.Pods(runningPods).FindPodByID(pod.UID) + podStatus, err := dm.GetPodStatus(pod) + if err != nil { + t.Errorf("unexpected error: %v", err) + } fakeDocker.ClearCalls() - err = dm.SyncPod(pod, runningPod, api.PodStatus{}, []api.Secret{}) + err = dm.SyncPod(pod, runningPod, *podStatus, []api.Secret{}) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -1271,3 +1275,507 @@ func TestSyncPodsDoesNothing(t *testing.T) { "inspect_container", }) } + +func TestSyncPodWithPullPolicy(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + puller := dm.Puller.(*FakeDockerPuller) + puller.HasImages = []string{"existing_one", "want:latest"} + dm.PodInfraContainerImage = "custom_image_name" + fakeDocker.ContainerList = []docker.APIContainers{} + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + {Name: "bar", Image: "pull_always_image", ImagePullPolicy: api.PullAlways}, + {Name: "bar1", Image: "pull_never_image", ImagePullPolicy: api.PullNever}, + {Name: "bar2", Image: "pull_if_not_present_image", ImagePullPolicy: api.PullIfNotPresent}, + {Name: "bar3", Image: "existing_one", ImagePullPolicy: api.PullIfNotPresent}, + {Name: "bar4", Image: "want:latest", ImagePullPolicy: api.PullIfNotPresent}, + }, + }, + } + + runSyncPod(t, dm, fakeDocker, pod) + + fakeDocker.Lock() + + pulledImageSet := make(map[string]empty) + for v := range puller.ImagesPulled { + pulledImageSet[puller.ImagesPulled[v]] = empty{} + } + + if !reflect.DeepEqual(pulledImageSet, map[string]empty{ + "custom_image_name": {}, + "pull_always_image": {}, + "pull_if_not_present_image": {}, + }) { + t.Errorf("Unexpected pulled containers: %v", puller.ImagesPulled) + } + + if len(fakeDocker.Created) != 6 { + t.Errorf("Unexpected containers created %v", fakeDocker.Created) + } + fakeDocker.Unlock() +} + +func TestSyncPodWithRestartPolicy(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + containers := []api.Container{ + {Name: "succeeded"}, + {Name: "failed"}, + } + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: containers, + }, + } + + runningAPIContainers := []docker.APIContainers{ + { + // pod infra container + Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0"}, + ID: "9876", + }, + } + exitedAPIContainers := []docker.APIContainers{ + { + // format is // k8s_<container-id>_<pod-fullname>_<pod-uid> + Names: []string{"/k8s_succeeded."
+ strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_foo_new_12345678_0"}, + ID: "1234", + }, + { + // format is // k8s_<container-id>_<pod-fullname>_<pod-uid> + Names: []string{"/k8s_failed." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_foo_new_12345678_0"}, + ID: "5678", + }, + } + + containerMap := map[string]*docker.Container{ + "9876": { + ID: "9876", + Name: "POD", + Config: &docker.Config{}, + State: docker.State{ + StartedAt: time.Now(), + Running: true, + }, + }, + "1234": { + ID: "1234", + Name: "succeeded", + Config: &docker.Config{}, + State: docker.State{ + ExitCode: 0, + StartedAt: time.Now(), + FinishedAt: time.Now(), + }, + }, + "5678": { + ID: "5678", + Name: "failed", + Config: &docker.Config{}, + State: docker.State{ + ExitCode: 42, + StartedAt: time.Now(), + FinishedAt: time.Now(), + }, + }, + } + + tests := []struct { + policy api.RestartPolicy + calls []string + created []string + stopped []string + }{ + { + api.RestartPolicyAlways, + []string{ + // Check the pod infra container. + "inspect_container", + // Restart both containers. + "create", "start", "create", "start", + }, + []string{"succeeded", "failed"}, + []string{}, + }, + { + api.RestartPolicyOnFailure, + []string{ + // Check the pod infra container. + "inspect_container", + // Restart the failed container. + "create", "start", + }, + []string{"failed"}, + []string{}, + }, + { + api.RestartPolicyNever, + []string{ + // Check the pod infra container. + "inspect_container", + // Stop the last pod infra container. + "inspect_container", "stop", + }, + []string{}, + []string{"9876"}, + }, + } + + for i, tt := range tests { + fakeDocker.ContainerList = runningAPIContainers + fakeDocker.ExitedContainerList = exitedAPIContainers + fakeDocker.ContainerMap = containerMap + pod.Spec.RestartPolicy = tt.policy + + runSyncPod(t, dm, fakeDocker, pod) + + // 'stop' is because the pod infra container is killed when no container is running. + verifyCalls(t, fakeDocker, tt.calls) + + if err := fakeDocker.AssertCreated(tt.created); err != nil { + t.Errorf("%d: %v", i, err) + } + if err := fakeDocker.AssertStopped(tt.stopped); err != nil { + t.Errorf("%d: %v", i, err) + } + } +} + +func TestGetPodStatusWithLastTermination(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + containers := []api.Container{ + {Name: "succeeded"}, + {Name: "failed"}, + } + + exitedAPIContainers := []docker.APIContainers{ + { + // format is // k8s_<container-id>_<pod-fullname>_<pod-uid> + Names: []string{"/k8s_succeeded." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_foo_new_12345678_0"}, + ID: "1234", + }, + { + // format is // k8s_<container-id>_<pod-fullname>_<pod-uid> + Names: []string{"/k8s_failed."
+ strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_foo_new_12345678_0"}, + ID: "5678", + }, + } + + containerMap := map[string]*docker.Container{ + "9876": { + ID: "9876", + Name: "POD", + Config: &docker.Config{}, + HostConfig: &docker.HostConfig{}, + State: docker.State{ + StartedAt: time.Now(), + FinishedAt: time.Now(), + Running: true, + }, + }, + "1234": { + ID: "1234", + Name: "succeeded", + Config: &docker.Config{}, + HostConfig: &docker.HostConfig{}, + State: docker.State{ + ExitCode: 0, + StartedAt: time.Now(), + FinishedAt: time.Now(), + }, + }, + "5678": { + ID: "5678", + Name: "failed", + Config: &docker.Config{}, + HostConfig: &docker.HostConfig{}, + State: docker.State{ + ExitCode: 42, + StartedAt: time.Now(), + FinishedAt: time.Now(), + }, + }, + } + + tests := []struct { + policy api.RestartPolicy + created []string + stopped []string + lastTerminations []string + }{ + { + api.RestartPolicyAlways, + []string{"succeeded", "failed"}, + []string{}, + []string{"docker://1234", "docker://5678"}, + }, + { + api.RestartPolicyOnFailure, + []string{"failed"}, + []string{}, + []string{"docker://5678"}, + }, + { + api.RestartPolicyNever, + []string{}, + []string{"9876"}, + []string{}, + }, + } + + for i, tt := range tests { + fakeDocker.ExitedContainerList = exitedAPIContainers + fakeDocker.ContainerMap = containerMap + fakeDocker.ClearCalls() + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: containers, + RestartPolicy: tt.policy, + }, + } + fakeDocker.ContainerList = []docker.APIContainers{ + { + // pod infra container + Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0"}, + ID: "9876", + }, + } + + runSyncPod(t, dm, fakeDocker, pod) + + // Check if we can retrieve the pod status. + status, err := dm.GetPodStatus(pod) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + terminatedContainers := []string{} + for _, cs := range status.ContainerStatuses { + if cs.LastTerminationState.Terminated != nil { + terminatedContainers = append(terminatedContainers, cs.LastTerminationState.Terminated.ContainerID) + } + } + sort.StringSlice(terminatedContainers).Sort() + sort.StringSlice(tt.lastTerminations).Sort() + if !reflect.DeepEqual(terminatedContainers, tt.lastTerminations) { + t.Errorf("Expected(sorted): %#v, Actual(sorted): %#v", tt.lastTerminations, terminatedContainers) + } + + if err := fakeDocker.AssertCreated(tt.created); err != nil { + t.Errorf("%d: %v", i, err) + } + if err := fakeDocker.AssertStopped(tt.stopped); err != nil { + t.Errorf("%d: %v", i, err) + } + } +} + +func TestGetPodCreationFailureReason(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + + // Inject the creation failure error to docker. + failureReason := "creation failure" + fakeDocker.Errors = map[string]error{ + "create": fmt.Errorf("%s", failureReason), + } + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "bar"}}, + }, + } + + // Pretend that the pod infra container has already been created, so that + // we can run the user containers. + fakeDocker.ContainerList = []docker.APIContainers{ + { + Names: []string{"/k8s_POD." 
+ strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0"}, + ID: "9876", + }, + } + fakeDocker.ContainerMap = map[string]*docker.Container{ + "9876": { + ID: "9876", + HostConfig: &docker.HostConfig{}, + Config: &docker.Config{}, + }, + } + + runSyncPod(t, dm, fakeDocker, pod) + // Check if we can retrieve the pod status. + status, err := dm.GetPodStatus(pod) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + if len(status.ContainerStatuses) < 1 { + t.Errorf("expected 1 container status, got %d", len(status.ContainerStatuses)) + } else { + state := status.ContainerStatuses[0].State + if state.Waiting == nil { + t.Errorf("expected waiting state, got %#v", state) + } else if state.Waiting.Reason != failureReason { + t.Errorf("expected reason %q, got %q", failureReason, state.Waiting.Reason) + } + } +} + +func TestGetPodPullImageFailureReason(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + // Initialize the FakeDockerPuller so that it'd try to pull non-existent + // images. + puller := dm.Puller.(*FakeDockerPuller) + puller.HasImages = []string{} + // Inject the pull image failure error. + failureReason := "pull image failure" + puller.ErrorsToInject = []error{fmt.Errorf("%s", failureReason)} + + pod := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: []api.Container{{Name: "bar", Image: "realImage", ImagePullPolicy: api.PullAlways}}, + }, + } + + // Pretend that the pod infra container has already been created, so that + // we can run the user containers. + fakeDocker.ContainerList = []docker.APIContainers{ + { + Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0"}, + ID: "9876", + }, + } + fakeDocker.ContainerMap = map[string]*docker.Container{ + "9876": { + ID: "9876", + HostConfig: &docker.HostConfig{}, + Config: &docker.Config{}, + }, + } + + runSyncPod(t, dm, fakeDocker, pod) + // Check if we can retrieve the pod status. + status, err := dm.GetPodStatus(pod) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + if len(status.ContainerStatuses) < 1 { + t.Errorf("expected 1 container status, got %d", len(status.ContainerStatuses)) + } else { + state := status.ContainerStatuses[0].State + if state.Waiting == nil { + t.Errorf("expected waiting state, got %#v", state) + } else if state.Waiting.Reason != failureReason { + t.Errorf("expected reason %q, got %q", failureReason, state.Waiting.Reason) + } + } +} + +func TestGetRestartCount(t *testing.T) { + dm, fakeDocker := newTestDockerManager() + containers := []api.Container{ + {Name: "bar"}, + } + pod := api.Pod{ + ObjectMeta: api.ObjectMeta{ + UID: "12345678", + Name: "foo", + Namespace: "new", + }, + Spec: api.PodSpec{ + Containers: containers, + }, + } + + // format is // k8s_<container-id>_<pod-fullname>_<pod-uid> + names := []string{"/k8s_bar."
+ strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_foo_new_12345678_0"} + currTime := time.Now() + containerMap := map[string]*docker.Container{ + "1234": { + ID: "1234", + Name: "bar", + Config: &docker.Config{}, + State: docker.State{ + ExitCode: 42, + StartedAt: currTime.Add(-60 * time.Second), + FinishedAt: currTime.Add(-60 * time.Second), + }, + }, + "5678": { + ID: "5678", + Name: "bar", + Config: &docker.Config{}, + State: docker.State{ + ExitCode: 42, + StartedAt: currTime.Add(-30 * time.Second), + FinishedAt: currTime.Add(-30 * time.Second), + }, + }, + "9101": { + ID: "9101", + Name: "bar", + Config: &docker.Config{}, + State: docker.State{ + ExitCode: 42, + StartedAt: currTime.Add(30 * time.Minute), + FinishedAt: currTime.Add(30 * time.Minute), + }, + }, + } + fakeDocker.ContainerMap = containerMap + + // Helper function for verifying the restart count. + verifyRestartCount := func(pod *api.Pod, expectedCount int) api.PodStatus { + status, err := dm.GetPodStatus(pod) + if err != nil { + t.Fatalf("unexpected error %v", err) + } + restartCount := status.ContainerStatuses[0].RestartCount + if restartCount != expectedCount { + t.Errorf("expected %d restart count, got %d", expectedCount, restartCount) + } + return *status + } + + // Container "bar" has failed twice; create two dead docker containers. + // TODO: container lists are expected to be sorted reversely by time. + // We should fix FakeDockerClient to sort the list before returning. + fakeDocker.ExitedContainerList = []docker.APIContainers{{Names: names, ID: "5678"}, {Names: names, ID: "1234"}} + pod.Status = verifyRestartCount(&pod, 1) + + // Found a new dead container. The restart count should be incremented. + fakeDocker.ExitedContainerList = []docker.APIContainers{ + {Names: names, ID: "9101"}, {Names: names, ID: "5678"}, {Names: names, ID: "1234"}} + pod.Status = verifyRestartCount(&pod, 2) + + // All dead containers have been GC'd. The restart count should persist + // (i.e., remain the same). + fakeDocker.ExitedContainerList = []docker.APIContainers{} + verifyRestartCount(&pod, 2) +} diff --git a/pkg/kubelet/envvars/envvars.go b/pkg/kubelet/envvars/envvars.go index 6bb05484469..ac918aac213 100644 --- a/pkg/kubelet/envvars/envvars.go +++ b/pkg/kubelet/envvars/envvars.go @@ -32,16 +32,16 @@ func FromServices(services *api.ServiceList) []api.EnvVar { for i := range services.Items { service := &services.Items[i] - // ignore services where PortalIP is "None" or empty + // ignore services where ClusterIP is "None" or empty // the services passed to this method should be pre-filtered - // only services that have the portal IP set should be included here + // only services that have the cluster IP set should be included here if !api.IsServiceIPSet(service) { continue } // Host name := makeEnvVariableName(service.Name) + "_SERVICE_HOST" - result = append(result, api.EnvVar{Name: name, Value: service.Spec.PortalIP}) + result = append(result, api.EnvVar{Name: name, Value: service.Spec.ClusterIP}) // First port - give it the backwards-compatible name name = makeEnvVariableName(service.Name) + "_SERVICE_PORT" result = append(result, api.EnvVar{Name: name, Value: strconv.Itoa(service.Spec.Ports[0].Port)}) @@ -81,14 +81,14 @@ func makeLinkVariables(service *api.Service) []api.EnvVar { // Docker special-cases the first port. 
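The pkg/kubelet/envvars hunks in this area only change which field supplies the service address (ClusterIP instead of the removed PortalIP); the generated variable names are untouched. A simplified sketch of the docker-link-style variables built for one service port (linkVars is illustrative, not the actual envvars package):

package main

import (
	"fmt"
	"strings"
)

// linkVars builds the docker-link-style variables for a single service
// port, reading the address from ClusterIP as the patched code does.
func linkVars(serviceName, clusterIP string, port int, protocol string) []string {
	prefix := strings.ToUpper(strings.Replace(serviceName, "-", "_", -1))
	proto := strings.ToLower(protocol)
	portPrefix := fmt.Sprintf("%s_PORT_%d_%s", prefix, port, strings.ToUpper(protocol))
	return []string{
		fmt.Sprintf("%s_SERVICE_HOST=%s", prefix, clusterIP),
		fmt.Sprintf("%s_PORT=%s://%s:%d", prefix, proto, clusterIP, port),
		fmt.Sprintf("%s=%s://%s:%d", portPrefix, proto, clusterIP, port),
		fmt.Sprintf("%s_PROTO=%s", portPrefix, proto),
		fmt.Sprintf("%s_ADDR=%s", portPrefix, clusterIP),
	}
}

func main() {
	// For service "foo-bar" with ClusterIP 1.2.3.4 and TCP port 8080 this
	// prints FOO_BAR_SERVICE_HOST=1.2.3.4, FOO_BAR_PORT=tcp://1.2.3.4:8080,
	// FOO_BAR_PORT_8080_TCP=tcp://1.2.3.4:8080, and so on.
	for _, v := range linkVars("foo-bar", "1.2.3.4", 8080, "TCP") {
		fmt.Println(v)
	}
}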
all = append(all, api.EnvVar{ Name: prefix + "_PORT", - Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.PortalIP, sp.Port), + Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.ClusterIP, sp.Port), }) } portPrefix := fmt.Sprintf("%s_PORT_%d_%s", prefix, sp.Port, strings.ToUpper(protocol)) all = append(all, []api.EnvVar{ { Name: portPrefix, - Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.PortalIP, sp.Port), + Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.ClusterIP, sp.Port), }, { Name: portPrefix + "_PROTO", @@ -100,7 +100,7 @@ func makeLinkVariables(service *api.Service) []api.EnvVar { }, { Name: portPrefix + "_ADDR", - Value: service.Spec.PortalIP, + Value: service.Spec.ClusterIP, }, }...) } diff --git a/pkg/kubelet/envvars/envvars_test.go b/pkg/kubelet/envvars/envvars_test.go index c7be883d915..0c9eba3045d 100644 --- a/pkg/kubelet/envvars/envvars_test.go +++ b/pkg/kubelet/envvars/envvars_test.go @@ -30,8 +30,8 @@ func TestFromServices(t *testing.T) { { ObjectMeta: api.ObjectMeta{Name: "foo-bar"}, Spec: api.ServiceSpec{ - Selector: map[string]string{"bar": "baz"}, - PortalIP: "1.2.3.4", + Selector: map[string]string{"bar": "baz"}, + ClusterIP: "1.2.3.4", Ports: []api.ServicePort{ {Port: 8080, Protocol: "TCP"}, }, @@ -40,8 +40,8 @@ func TestFromServices(t *testing.T) { { ObjectMeta: api.ObjectMeta{Name: "abc-123"}, Spec: api.ServiceSpec{ - Selector: map[string]string{"bar": "baz"}, - PortalIP: "5.6.7.8", + Selector: map[string]string{"bar": "baz"}, + ClusterIP: "5.6.7.8", Ports: []api.ServicePort{ {Name: "u-d-p", Port: 8081, Protocol: "UDP"}, {Name: "t-c-p", Port: 8081, Protocol: "TCP"}, @@ -51,8 +51,8 @@ func TestFromServices(t *testing.T) { { ObjectMeta: api.ObjectMeta{Name: "q-u-u-x"}, Spec: api.ServiceSpec{ - Selector: map[string]string{"bar": "baz"}, - PortalIP: "9.8.7.6", + Selector: map[string]string{"bar": "baz"}, + ClusterIP: "9.8.7.6", Ports: []api.ServicePort{ {Port: 8082, Protocol: "TCP"}, {Name: "8083", Port: 8083, Protocol: "TCP"}, @@ -60,20 +60,20 @@ func TestFromServices(t *testing.T) { }, }, { - ObjectMeta: api.ObjectMeta{Name: "svrc-portalip-none"}, + ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-none"}, Spec: api.ServiceSpec{ - Selector: map[string]string{"bar": "baz"}, - PortalIP: "None", + Selector: map[string]string{"bar": "baz"}, + ClusterIP: "None", Ports: []api.ServicePort{ {Port: 8082, Protocol: "TCP"}, }, }, }, { - ObjectMeta: api.ObjectMeta{Name: "svrc-portalip-empty"}, + ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-empty"}, Spec: api.ServiceSpec{ - Selector: map[string]string{"bar": "baz"}, - PortalIP: "", + Selector: map[string]string{"bar": "baz"}, + ClusterIP: "", Ports: []api.ServicePort{ {Port: 8082, Protocol: "TCP"}, }, diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index db752463a5e..2b218c190da 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -139,6 +139,7 @@ func NewMainKubelet( containerRuntime string, mounter mount.Interface, dockerDaemonContainer string, + systemContainer string, configureCBR0 bool, pods int) (*Kubelet, error) { if rootDirectory == "" { @@ -147,6 +148,9 @@ func NewMainKubelet( if resyncInterval <= 0 { return nil, fmt.Errorf("invalid sync frequency %d", resyncInterval) } + if systemContainer != "" && cgroupRoot == "" { + return nil, fmt.Errorf("invalid configuration: system container was specified and cgroup root was not specified") + } dockerClient = 
dockertools.NewInstrumentedDockerInterface(dockerClient) serviceStore := cache.NewStore(cache.MetaNamespaceKeyFunc) @@ -295,7 +299,9 @@ func NewMainKubelet( return nil, fmt.Errorf("unsupported container runtime %q specified", containerRuntime) } - containerManager, err := newContainerManager(dockerDaemonContainer) + // Setup container manager, can fail if the devices hierarchy is not mounted + // (it is required by Docker however). + containerManager, err := newContainerManager(dockerDaemonContainer, systemContainer) if err != nil { return nil, fmt.Errorf("failed to create the Container Manager: %v", err) } @@ -700,11 +706,19 @@ func (kl *Kubelet) initialNodeStatus() (*api.Node, error) { } // TODO(roberthbailey): Can we do this without having credentials to talk // to the cloud provider? - instanceID, err := instances.ExternalID(kl.hostname) + // TODO: ExternalID is deprecated, we'll have to drop this code + externalID, err := instances.ExternalID(kl.hostname) if err != nil { - return nil, fmt.Errorf("failed to get instance ID from cloud provider: %v", err) + return nil, fmt.Errorf("failed to get external ID from cloud provider: %v", err) + } + node.Spec.ExternalID = externalID + // TODO: We can't assume that the node has credentials to talk to the + // cloudprovider from arbitrary nodes. At most, we should talk to a + // local metadata server here. + node.Spec.ProviderID, err = cloudprovider.GetInstanceProviderID(kl.cloud, kl.hostname) + if err != nil { + return nil, err } - node.Spec.ExternalID = instanceID } else { node.Spec.ExternalID = kl.hostname } @@ -877,7 +891,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) { // project the services in namespace ns onto the master services for _, service := range services.Items { - // ignore services where PortalIP is "None" or empty + // ignore services where ClusterIP is "None" or empty if !api.IsServiceIPSet(&service) { continue } @@ -1654,10 +1668,10 @@ func (kl *Kubelet) validateContainerStatus(podStatus *api.PodStatus, containerNa return "", fmt.Errorf("container %q not found in pod", containerName) } if previous { - if cStatus.LastTerminationState.Termination == nil { + if cStatus.LastTerminationState.Terminated == nil { return "", fmt.Errorf("previous terminated container %q not found in pod", containerName) } - cID = cStatus.LastTerminationState.Termination.ContainerID + cID = cStatus.LastTerminationState.Terminated.ContainerID } else { if cStatus.State.Waiting != nil { return "", fmt.Errorf("container %q is in waiting state.", containerName) @@ -1839,6 +1853,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error { node.Status.Capacity = api.ResourceList{ api.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI), api.ResourceMemory: resource.MustParse("0Gi"), + api.ResourcePods: *resource.NewQuantity(int64(kl.pods), resource.DecimalSI), } glog.Errorf("Error getting machine info: %v", err) } else { @@ -1968,9 +1983,9 @@ func getPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase { if containerStatus, ok := api.GetContainerStatus(info, container.Name); ok { if containerStatus.State.Running != nil { running++ - } else if containerStatus.State.Termination != nil { + } else if containerStatus.State.Terminated != nil { stopped++ - if containerStatus.State.Termination.ExitCode == 0 { + if containerStatus.State.Terminated.ExitCode == 0 { succeeded++ } else { failed++ diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 1e08cc539dc..3b8bd06495e 100644 --- 
a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -651,49 +651,6 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) { fakeRuntime.AssertKilledPods([]string{"12345678"}) } -func TestSyncPodsDeletes(t *testing.T) { - testKubelet := newTestKubelet(t) - testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) - testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil) - testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil) - kubelet := testKubelet.kubelet - fakeDocker := testKubelet.fakeDocker - fakeDocker.ContainerList = []docker.APIContainers{ - { - // the k8s prefix is required for the kubelet to manage the container - Names: []string{"/k8s_foo_bar_new_12345678_42"}, - ID: "1234", - }, - { - // pod infra container - Names: []string{"/k8s_POD_foo_new_12345678_42"}, - ID: "9876", - }, - { - Names: []string{"foo"}, - ID: "4567", - }, - } - err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - verifyCalls(t, fakeDocker, []string{"list", "inspect_container", "stop", "inspect_container", "stop", "list"}) - - // A map iteration is used to delete containers, so must not depend on - // order here. - expectedToStop := map[string]bool{ - "1234": true, - "9876": true, - } - if len(fakeDocker.Stopped) != 2 || - !expectedToStop[fakeDocker.Stopped[0]] || - !expectedToStop[fakeDocker.Stopped[1]] { - t.Errorf("Wrong containers were stopped: %v", fakeDocker.Stopped) - } -} - func TestMountExternalVolumes(t *testing.T) { testKubelet := newTestKubelet(t) kubelet := testKubelet.kubelet @@ -851,22 +808,15 @@ func TestGetContainerInfo(t *testing.T) { }, } - testKubelet := newTestKubelet(t) + testKubelet := newTestKubeletWithFakeRuntime(t) + fakeRuntime := testKubelet.fakeRuntime kubelet := testKubelet.kubelet - fakeDocker := testKubelet.fakeDocker - mockCadvisor := testKubelet.fakeCadvisor cadvisorReq := &cadvisorApi.ContainerInfoRequest{} + mockCadvisor := testKubelet.fakeCadvisor mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil) - - fakeDocker.ContainerList = []docker.APIContainers{ - { - ID: containerID, - // pod id: qux - // container id: foo - Names: []string{"/k8s_foo_qux_ns_1234_42"}, - }, + fakeRuntime.PodList = []*kubecontainer.Pod{ + {ID: "12345678", Name: "qux", Namespace: "ns", Containers: []*kubecontainer.Container{{Name: "foo", ID: types.UID(containerID)}}}, } - stats, err := kubelet.GetContainerInfo("qux_ns", "", "foo", cadvisorReq) if err != nil { t.Errorf("unexpected error: %v", err) @@ -884,17 +834,12 @@ func TestGetRawContainerInfoRoot(t *testing.T) { Name: containerPath, }, } - fakeDocker := dockertools.FakeDockerClient{} - - mockCadvisor := &cadvisor.Mock{} + testKubelet := newTestKubeletWithFakeRuntime(t) + kubelet := testKubelet.kubelet + mockCadvisor := testKubelet.fakeCadvisor cadvisorReq := &cadvisorApi.ContainerInfoRequest{} mockCadvisor.On("ContainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil) - kubelet := Kubelet{ - dockerClient: &fakeDocker, - cadvisor: mockCadvisor, - } - _, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, false) if err != nil { t.Errorf("unexpected error: %v", err) @@ -916,17 +861,12 @@ func TestGetRawContainerInfoSubcontainers(t *testing.T) { }, }, } - fakeDocker := dockertools.FakeDockerClient{} - - mockCadvisor := &cadvisor.Mock{} + testKubelet := newTestKubeletWithFakeRuntime(t) + kubelet := 
testKubelet.kubelet + mockCadvisor := testKubelet.fakeCadvisor cadvisorReq := &cadvisorApi.ContainerInfoRequest{} mockCadvisor.On("SubcontainerInfo", containerPath, cadvisorReq).Return(containerInfo, nil) - kubelet := Kubelet{ - dockerClient: &fakeDocker, - cadvisor: mockCadvisor, - } - result, err := kubelet.GetRawContainerInfo(containerPath, cadvisorReq, true) if err != nil { t.Errorf("unexpected error: %v", err) @@ -939,24 +879,17 @@ func TestGetRawContainerInfoSubcontainers(t *testing.T) { func TestGetContainerInfoWhenCadvisorFailed(t *testing.T) { containerID := "ab2cdf" - - testKubelet := newTestKubelet(t) + testKubelet := newTestKubeletWithFakeRuntime(t) kubelet := testKubelet.kubelet - fakeDocker := testKubelet.fakeDocker mockCadvisor := testKubelet.fakeCadvisor + fakeRuntime := testKubelet.fakeRuntime cadvisorApiFailure := fmt.Errorf("cAdvisor failure") containerInfo := cadvisorApi.ContainerInfo{} cadvisorReq := &cadvisorApi.ContainerInfoRequest{} mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, cadvisorApiFailure) - fakeDocker.ContainerList = []docker.APIContainers{ - { - ID: containerID, - // pod id: qux - // container id: foo - Names: []string{"/k8s_foo_qux_ns_uuid_1234"}, - }, + fakeRuntime.PodList = []*kubecontainer.Pod{ + {ID: "uuid", Name: "qux", Namespace: "ns", Containers: []*kubecontainer.Container{{Name: "foo", ID: types.UID(containerID)}}}, } - stats, err := kubelet.GetContainerInfo("qux_ns", "uuid", "foo", cadvisorReq) if stats != nil { t.Errorf("non-nil stats on error") @@ -972,11 +905,11 @@ func TestGetContainerInfoWhenCadvisorFailed(t *testing.T) { } func TestGetContainerInfoOnNonExistContainer(t *testing.T) { - testKubelet := newTestKubelet(t) + testKubelet := newTestKubeletWithFakeRuntime(t) kubelet := testKubelet.kubelet - fakeDocker := testKubelet.fakeDocker mockCadvisor := testKubelet.fakeCadvisor - fakeDocker.ContainerList = []docker.APIContainers{} + fakeRuntime := testKubelet.fakeRuntime + fakeRuntime.PodList = []*kubecontainer.Pod{} stats, _ := kubelet.GetContainerInfo("qux", "", "foo", nil) if stats != nil { @@ -985,13 +918,13 @@ func TestGetContainerInfoOnNonExistContainer(t *testing.T) { mockCadvisor.AssertExpectations(t) } -func TestGetContainerInfoWhenDockerToolsFailed(t *testing.T) { - testKubelet := newTestKubelet(t) +func TestGetContainerInfoWhenContainerRuntimeFailed(t *testing.T) { + testKubelet := newTestKubeletWithFakeRuntime(t) kubelet := testKubelet.kubelet mockCadvisor := testKubelet.fakeCadvisor - fakeDocker := testKubelet.fakeDocker + fakeRuntime := testKubelet.fakeRuntime expectedErr := fmt.Errorf("List containers error") - fakeDocker.Errors["list"] = expectedErr + fakeRuntime.Err = expectedErr stats, err := kubelet.GetContainerInfo("qux", "", "foo", nil) if err == nil { @@ -1007,7 +940,7 @@ func TestGetContainerInfoWhenDockerToolsFailed(t *testing.T) { } func TestGetContainerInfoWithNoContainers(t *testing.T) { - testKubelet := newTestKubelet(t) + testKubelet := newTestKubeletWithFakeRuntime(t) kubelet := testKubelet.kubelet mockCadvisor := testKubelet.fakeCadvisor @@ -1025,15 +958,12 @@ func TestGetContainerInfoWithNoContainers(t *testing.T) { } func TestGetContainerInfoWithNoMatchingContainers(t *testing.T) { - testKubelet := newTestKubelet(t) + testKubelet := newTestKubeletWithFakeRuntime(t) + fakeRuntime := testKubelet.fakeRuntime kubelet := testKubelet.kubelet mockCadvisor := testKubelet.fakeCadvisor - fakeDocker := testKubelet.fakeDocker - fakeDocker.ContainerList = []docker.APIContainers{ - { - 
ID: "fakeId", - Names: []string{"/k8s_bar_qux_ns_1234_42"}, - }, + fakeRuntime.PodList = []*kubecontainer.Pod{ + {ID: "12345678", Name: "qux", Namespace: "ns", Containers: []*kubecontainer.Container{{Name: "bar", ID: types.UID("fakeID")}}}, } stats, err := kubelet.GetContainerInfo("qux_ns", "", "foo", nil) @@ -1243,66 +1173,6 @@ func TestSyncPodEventHandlerFails(t *testing.T) { } } -func TestSyncPodsWithPullPolicy(t *testing.T) { - testKubelet := newTestKubelet(t) - testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) - testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil) - testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil) - kubelet := testKubelet.kubelet - fakeDocker := testKubelet.fakeDocker - // TODO: Move this test to dockertools so that we don't have to do the hacky - // type assertion here. - dm := kubelet.containerRuntime.(*dockertools.DockerManager) - puller := dm.Puller.(*dockertools.FakeDockerPuller) - puller.HasImages = []string{"existing_one", "want:latest"} - dm.PodInfraContainerImage = "custom_image_name" - fakeDocker.ContainerList = []docker.APIContainers{} - - pods := []*api.Pod{ - { - ObjectMeta: api.ObjectMeta{ - UID: "12345678", - Name: "foo", - Namespace: "new", - }, - Spec: api.PodSpec{ - Containers: []api.Container{ - {Name: "bar", Image: "pull_always_image", ImagePullPolicy: api.PullAlways}, - {Name: "bar1", Image: "pull_never_image", ImagePullPolicy: api.PullNever}, - {Name: "bar2", Image: "pull_if_not_present_image", ImagePullPolicy: api.PullIfNotPresent}, - {Name: "bar3", Image: "existing_one", ImagePullPolicy: api.PullIfNotPresent}, - {Name: "bar4", Image: "want:latest", ImagePullPolicy: api.PullIfNotPresent}, - }, - }, - }, - } - kubelet.podManager.SetPods(pods) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - fakeDocker.Lock() - - pulledImageSet := make(map[string]empty) - for v := range puller.ImagesPulled { - pulledImageSet[puller.ImagesPulled[v]] = empty{} - } - - if !reflect.DeepEqual(pulledImageSet, map[string]empty{ - "custom_image_name": {}, - "pull_always_image": {}, - "pull_if_not_present_image": {}, - }) { - t.Errorf("Unexpected pulled containers: %v", puller.ImagesPulled) - } - - if len(fakeDocker.Created) != 6 { - t.Errorf("Unexpected containers created %v", fakeDocker.Created) - } - fakeDocker.Unlock() -} - func TestParseResolvConf(t *testing.T) { testCases := []struct { data string @@ -1390,7 +1260,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { Protocol: "TCP", Port: 8081, }}, - PortalIP: "1.2.3.1", + ClusterIP: "1.2.3.1", }, }, { @@ -1400,7 +1270,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { Protocol: "TCP", Port: 8083, }}, - PortalIP: "1.2.3.3", + ClusterIP: "1.2.3.3", }, }, { @@ -1410,7 +1280,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { Protocol: "TCP", Port: 8084, }}, - PortalIP: "1.2.3.4", + ClusterIP: "1.2.3.4", }, }, { @@ -1420,7 +1290,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { Protocol: "TCP", Port: 8085, }}, - PortalIP: "1.2.3.5", + ClusterIP: "1.2.3.5", }, }, { @@ -1430,7 +1300,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { Protocol: "TCP", Port: 8085, }}, - PortalIP: "None", + ClusterIP: "None", }, }, { @@ -1449,7 +1319,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { Protocol: "TCP", Port: 8086, }}, - PortalIP: "1.2.3.6", + ClusterIP: "1.2.3.6", }, }, { @@ -1459,7 +1329,7 @@ func 
TestMakeEnvironmentVariables(t *testing.T) { Protocol: "TCP", Port: 8088, }}, - PortalIP: "1.2.3.8", + ClusterIP: "1.2.3.8", }, }, { @@ -1469,7 +1339,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { Protocol: "TCP", Port: 8088, }}, - PortalIP: "None", + ClusterIP: "None", }, }, { @@ -1479,7 +1349,7 @@ func TestMakeEnvironmentVariables(t *testing.T) { Protocol: "TCP", Port: 8088, }}, - PortalIP: "", + ClusterIP: "", }, }, } @@ -1842,7 +1712,7 @@ func stoppedState(cName string) api.ContainerStatus { return api.ContainerStatus{ Name: cName, State: api.ContainerState{ - Termination: &api.ContainerStateTerminated{}, + Terminated: &api.ContainerStateTerminated{}, }, } } @@ -1850,7 +1720,7 @@ func succeededState(cName string) api.ContainerStatus { return api.ContainerStatus{ Name: cName, State: api.ContainerState{ - Termination: &api.ContainerStateTerminated{ + Terminated: &api.ContainerStateTerminated{ ExitCode: 0, }, }, @@ -1860,7 +1730,7 @@ func failedState(cName string) api.ContainerStatus { return api.ContainerStatus{ Name: cName, State: api.ContainerState{ - Termination: &api.ContainerStateTerminated{ + Terminated: &api.ContainerStateTerminated{ ExitCode: -1, }, }, @@ -1869,7 +1739,7 @@ func failedState(cName string) api.ContainerStatus { func TestPodPhaseWithRestartAlways(t *testing.T) { desiredState := api.PodSpec{ - Host: "machine", + NodeName: "machine", Containers: []api.Container{ {Name: "containerA"}, {Name: "containerB"}, @@ -1944,7 +1814,7 @@ func TestPodPhaseWithRestartAlways(t *testing.T) { func TestPodPhaseWithRestartNever(t *testing.T) { desiredState := api.PodSpec{ - Host: "machine", + NodeName: "machine", Containers: []api.Container{ {Name: "containerA"}, {Name: "containerB"}, @@ -2032,7 +1902,7 @@ func TestPodPhaseWithRestartNever(t *testing.T) { func TestPodPhaseWithRestartOnFailure(t *testing.T) { desiredState := api.PodSpec{ - Host: "machine", + NodeName: "machine", Containers: []api.Container{ {Name: "containerA"}, {Name: "containerB"}, @@ -2707,7 +2577,7 @@ func TestValidateContainerStatus(t *testing.T) { Running: &api.ContainerStateRunning{}, }, LastTerminationState: api.ContainerState{ - Termination: &api.ContainerStateTerminated{}, + Terminated: &api.ContainerStateTerminated{}, }, }, }, @@ -2718,7 +2588,7 @@ func TestValidateContainerStatus(t *testing.T) { { Name: containerName, State: api.ContainerState{ - Termination: &api.ContainerStateTerminated{}, + Terminated: &api.ContainerStateTerminated{}, }, }, }, @@ -3221,18 +3091,16 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) { }, } - testKubelet := newTestKubelet(t) - kubelet := testKubelet.kubelet - fakeDocker := testKubelet.fakeDocker + testKubelet := newTestKubeletWithFakeRuntime(t) + fakeRuntime := testKubelet.fakeRuntime mockCadvisor := testKubelet.fakeCadvisor cadvisorReq := &cadvisorApi.ContainerInfoRequest{} mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil) + kubelet := testKubelet.kubelet - fakeDocker.ContainerList = []docker.APIContainers{ - { - ID: containerID, - Names: []string{"/k8s_foo_qux_ns_1234_42"}, - }, + fakeRuntime.PodList = []*kubecontainer.Pod{ + {ID: "1234", Name: "qux", Namespace: "ns", Containers: []*kubecontainer.Container{ + {Name: "foo", ID: types.UID(containerID)}}}, } kubelet.podManager.SetPods(pods) @@ -3395,531 +3263,6 @@ func TestPrivilegeContainerDisallowed(t *testing.T) { } } -func TestSyncPodsWithRestartPolicy(t *testing.T) { - testKubelet := newTestKubelet(t) - 
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) - testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil) - testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil) - kubelet := testKubelet.kubelet - fakeDocker := testKubelet.fakeDocker - - containers := []api.Container{ - {Name: "succeeded"}, - {Name: "failed"}, - } - pods := []*api.Pod{ - { - ObjectMeta: api.ObjectMeta{ - UID: "12345678", - Name: "foo", - Namespace: "new", - }, - Spec: api.PodSpec{ - Containers: containers, - }, - }, - } - - runningAPIContainers := []docker.APIContainers{ - { - // pod infra container - Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"}, - ID: "9876", - }, - } - exitedAPIContainers := []docker.APIContainers{ - { - // format is // k8s_<container-id>_<pod-fullname>_<pod-uid> - Names: []string{"/k8s_succeeded." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_foo_new_12345678_0"}, - ID: "1234", - }, - { - // format is // k8s_<container-id>_<pod-fullname>_<pod-uid> - Names: []string{"/k8s_failed." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_foo_new_12345678_0"}, - ID: "5678", - }, - } - - containerMap := map[string]*docker.Container{ - "9876": { - ID: "9876", - Name: "POD", - Config: &docker.Config{}, - State: docker.State{ - StartedAt: time.Now(), - Running: true, - }, - }, - "1234": { - ID: "1234", - Name: "succeeded", - Config: &docker.Config{}, - State: docker.State{ - ExitCode: 0, - StartedAt: time.Now(), - FinishedAt: time.Now(), - }, - }, - "5678": { - ID: "5678", - Name: "failed", - Config: &docker.Config{}, - State: docker.State{ - ExitCode: 42, - StartedAt: time.Now(), - FinishedAt: time.Now(), - }, - }, - } - - tests := []struct { - policy api.RestartPolicy - calls []string - created []string - stopped []string - }{ - { - api.RestartPolicyAlways, - []string{"list", "list", - // Get pod status. - "list", "inspect_container", "inspect_container", "inspect_container", - // Check the pod infra container. - "inspect_container", - // Restart both containers. - "create", "start", "create", "start", - // Get pod status. - "list", "inspect_container", "inspect_container", "inspect_container", "inspect_container", "inspect_container", - // Get pods for deleting orphaned volumes. - "list", - }, - []string{"succeeded", "failed"}, - []string{}, - }, - { - api.RestartPolicyOnFailure, - []string{"list", "list", - // Get pod status. - "list", "inspect_container", "inspect_container", "inspect_container", - // Check the pod infra container. - "inspect_container", - // Restart the failed container. - "create", "start", - // Get pod status. - "list", "inspect_container", "inspect_container", "inspect_container", "inspect_container", - // Get pods for deleting orphaned volumes. - "list", - }, - []string{"failed"}, - []string{}, - }, - { - api.RestartPolicyNever, - []string{"list", "list", - // Get pod status. - "list", "inspect_container", "inspect_container", "inspect_container", - // Check the pod infra container. - "inspect_container", - // Stop the last pod infra container. - "inspect_container", "stop", - // Get pod status. - "list", "inspect_container", "inspect_container", "inspect_container", - // Get pods for deleting orphaned volumes.
- "list", - }, - []string{}, - []string{"9876"}, - }, - } - - for i, tt := range tests { - fakeDocker.ContainerList = runningAPIContainers - fakeDocker.ExitedContainerList = exitedAPIContainers - fakeDocker.ContainerMap = containerMap - fakeDocker.ClearCalls() - pods[0].Spec.RestartPolicy = tt.policy - - kubelet.podManager.SetPods(pods) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) - if err != nil { - t.Errorf("%d: unexpected error: %v", i, err) - } - - // 'stop' is because the pod infra container is killed when no container is running. - verifyCalls(t, fakeDocker, tt.calls) - - if err := fakeDocker.AssertCreated(tt.created); err != nil { - t.Errorf("%d: %v", i, err) - } - if err := fakeDocker.AssertStopped(tt.stopped); err != nil { - t.Errorf("%d: %v", i, err) - } - } -} - -func TestGetPodStatusWithLastTermination(t *testing.T) { - testKubelet := newTestKubelet(t) - testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) - testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil) - testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil) - kubelet := testKubelet.kubelet - fakeDocker := testKubelet.fakeDocker - - containers := []api.Container{ - {Name: "succeeded"}, - {Name: "failed"}, - } - - exitedAPIContainers := []docker.APIContainers{ - { - // format is // k8s___ - Names: []string{"/k8s_succeeded." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_foo_new_12345678_0"}, - ID: "1234", - }, - { - // format is // k8s___ - Names: []string{"/k8s_failed." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_foo_new_12345678_0"}, - ID: "5678", - }, - } - - containerMap := map[string]*docker.Container{ - "9876": { - ID: "9876", - Name: "POD", - Config: &docker.Config{}, - HostConfig: &docker.HostConfig{}, - State: docker.State{ - StartedAt: time.Now(), - FinishedAt: time.Now(), - Running: true, - }, - }, - "1234": { - ID: "1234", - Name: "succeeded", - Config: &docker.Config{}, - HostConfig: &docker.HostConfig{}, - State: docker.State{ - ExitCode: 0, - StartedAt: time.Now(), - FinishedAt: time.Now(), - }, - }, - "5678": { - ID: "5678", - Name: "failed", - Config: &docker.Config{}, - HostConfig: &docker.HostConfig{}, - State: docker.State{ - ExitCode: 42, - StartedAt: time.Now(), - FinishedAt: time.Now(), - }, - }, - } - - tests := []struct { - policy api.RestartPolicy - created []string - stopped []string - lastTerminations []string - }{ - { - api.RestartPolicyAlways, - []string{"succeeded", "failed"}, - []string{}, - []string{"docker://1234", "docker://5678"}, - }, - { - api.RestartPolicyOnFailure, - []string{"failed"}, - []string{}, - []string{"docker://5678"}, - }, - { - api.RestartPolicyNever, - []string{}, - []string{"9876"}, - []string{}, - }, - } - - for i, tt := range tests { - fakeDocker.ExitedContainerList = exitedAPIContainers - fakeDocker.ContainerMap = containerMap - fakeDocker.ClearCalls() - pods := []*api.Pod{ - { - ObjectMeta: api.ObjectMeta{ - UID: "12345678", - Name: "foo", - Namespace: "new", - }, - Spec: api.PodSpec{ - Containers: containers, - RestartPolicy: tt.policy, - }, - }, - } - fakeDocker.ContainerList = []docker.APIContainers{ - { - // pod infra container - Names: []string{"/k8s_POD." 
+ strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"}, - ID: "9876", - }, - } - kubelet.podManager.SetPods(pods) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) - if err != nil { - t.Errorf("%d: unexpected error: %v", i, err) - } - - // Check if we can retrieve the pod status. - podName := kubecontainer.GetPodFullName(pods[0]) - status, found := kubelet.statusManager.GetPodStatus(podName) - if !found { - t.Fatalf("unable to retrieve pod status for pod %q.", podName) - } else { - terminatedContainers := []string{} - for _, cs := range status.ContainerStatuses { - if cs.LastTerminationState.Termination != nil { - terminatedContainers = append(terminatedContainers, cs.LastTerminationState.Termination.ContainerID) - } - } - sort.StringSlice(terminatedContainers).Sort() - sort.StringSlice(tt.lastTerminations).Sort() - if !reflect.DeepEqual(terminatedContainers, tt.lastTerminations) { - t.Errorf("Expected(sorted): %#v, Actual(sorted): %#v", tt.lastTerminations, terminatedContainers) - } - } - - if err := fakeDocker.AssertCreated(tt.created); err != nil { - t.Errorf("%d: %v", i, err) - } - if err := fakeDocker.AssertStopped(tt.stopped); err != nil { - t.Errorf("%d: %v", i, err) - } - } -} - -func TestGetPodCreationFailureReason(t *testing.T) { - testKubelet := newTestKubelet(t) - testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) - kubelet := testKubelet.kubelet - fakeDocker := testKubelet.fakeDocker - - // Inject the creation failure error to docker. - failureReason := "creation failure" - fakeDocker.Errors = map[string]error{ - "create": fmt.Errorf("%s", failureReason), - } - - pod := &api.Pod{ - ObjectMeta: api.ObjectMeta{ - UID: "12345678", - Name: "foo", - Namespace: "new", - }, - Spec: api.PodSpec{ - Containers: []api.Container{{Name: "bar"}}, - }, - } - - // Pretend that the pod infra container has already been created, so that - // we can run the user containers. - fakeDocker.ContainerList = []docker.APIContainers{ - { - Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0"}, - ID: "9876", - }, - } - fakeDocker.ContainerMap = map[string]*docker.Container{ - "9876": { - ID: "9876", - HostConfig: &docker.HostConfig{}, - Config: &docker.Config{}, - }, - } - - pods := []*api.Pod{pod} - kubelet.podManager.SetPods(pods) - kubelet.volumeManager.SetVolumes(pod.UID, kubecontainer.VolumeMap{}) - err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - status, found := kubelet.statusManager.GetPodStatus(kubecontainer.GetPodFullName(pod)) - if !found { - t.Fatalf("unexpected error %v", err) - } - if len(status.ContainerStatuses) < 1 { - t.Errorf("expected 1 container status, got %d", len(status.ContainerStatuses)) - } else { - state := status.ContainerStatuses[0].State - if state.Waiting == nil { - t.Errorf("expected waiting state, got %#v", state) - } else if state.Waiting.Reason != failureReason { - t.Errorf("expected reason %q, got %q", failureReason, state.Waiting.Reason) - } - } -} - -func TestGetPodPullImageFailureReason(t *testing.T) { - testKubelet := newTestKubelet(t) - testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) - kubelet := testKubelet.kubelet - fakeDocker := testKubelet.fakeDocker - - // Initialize the FakeDockerPuller so that it'd try to pull non-existent - // images. 
-	dm := kubelet.containerRuntime.(*dockertools.DockerManager)
-	puller := dm.Puller.(*dockertools.FakeDockerPuller)
-	puller.HasImages = []string{}
-	// Inject the pull image failure error.
-	failureReason := "pull image failure"
-	puller.ErrorsToInject = []error{fmt.Errorf("%s", failureReason)}
-
-	pod := &api.Pod{
-		ObjectMeta: api.ObjectMeta{
-			UID:       "12345678",
-			Name:      "foo",
-			Namespace: "new",
-		},
-		Spec: api.PodSpec{
-			Containers: []api.Container{{Name: "bar", Image: "realImage", ImagePullPolicy: api.PullAlways}},
-		},
-	}
-
-	// Pretend that the pod infra container has already been created, so that
-	// we can run the user containers.
-	fakeDocker.ContainerList = []docker.APIContainers{
-		{
-			Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0"},
-			ID:    "9876",
-		},
-	}
-	fakeDocker.ContainerMap = map[string]*docker.Container{
-		"9876": {
-			ID:         "9876",
-			HostConfig: &docker.HostConfig{},
-			Config:     &docker.Config{},
-		},
-	}
-
-	pods := []*api.Pod{pod}
-	kubelet.podManager.SetPods(pods)
-	kubelet.volumeManager.SetVolumes(pod.UID, kubecontainer.VolumeMap{})
-	err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
-	if err != nil {
-		t.Errorf("unexpected error: %v", err)
-	}
-
-	status, found := kubelet.statusManager.GetPodStatus(kubecontainer.GetPodFullName(pod))
-	if !found {
-		t.Errorf("expected status of pod %q to be found", kubecontainer.GetPodFullName(pod))
-	}
-	if len(status.ContainerStatuses) < 1 {
-		t.Errorf("expected 1 container status, got %d", len(status.ContainerStatuses))
-	} else {
-		state := status.ContainerStatuses[0].State
-		if state.Waiting == nil {
-			t.Errorf("expected waiting state, got %#v", state)
-		} else if state.Waiting.Reason != failureReason {
-			t.Errorf("expected reason %q, got %q", failureReason, state.Waiting.Reason)
-		}
-	}
-}
-
-func TestGetRestartCount(t *testing.T) {
-	testKubelet := newTestKubelet(t)
-	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
-	testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
-	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
-	kubelet := testKubelet.kubelet
-	fakeDocker := testKubelet.fakeDocker
-
-	containers := []api.Container{
-		{Name: "bar"},
-	}
-	pod := api.Pod{
-		ObjectMeta: api.ObjectMeta{
-			UID:       "12345678",
-			Name:      "foo",
-			Namespace: "new",
-		},
-		Spec: api.PodSpec{
-			Containers: containers,
-		},
-	}
-
-	// name format is // k8s_<container-name>.<hash>_<pod-name>_<namespace>_<pod-uid>_<attempt>
-	names := []string{"/k8s_bar." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_foo_new_12345678_0"}
-	currTime := time.Now()
-	containerMap := map[string]*docker.Container{
-		"1234": {
-			ID:     "1234",
-			Name:   "bar",
-			Config: &docker.Config{},
-			State: docker.State{
-				ExitCode:   42,
-				StartedAt:  currTime.Add(-60 * time.Second),
-				FinishedAt: currTime.Add(-60 * time.Second),
-			},
-		},
-		"5678": {
-			ID:     "5678",
-			Name:   "bar",
-			Config: &docker.Config{},
-			State: docker.State{
-				ExitCode:   42,
-				StartedAt:  currTime.Add(-30 * time.Second),
-				FinishedAt: currTime.Add(-30 * time.Second),
-			},
-		},
-		"9101": {
-			ID:     "9101",
-			Name:   "bar",
-			Config: &docker.Config{},
-			State: docker.State{
-				ExitCode:   42,
-				StartedAt:  currTime.Add(30 * time.Minute),
-				FinishedAt: currTime.Add(30 * time.Minute),
-			},
-		},
-	}
-	fakeDocker.ContainerMap = containerMap
-
-	// Helper function for verifying the restart count.
-	verifyRestartCount := func(pod *api.Pod, expectedCount int) api.PodStatus {
-		status, err := kubelet.generatePodStatus(pod)
-		if err != nil {
-			t.Errorf("unexpected error %v", err)
-		}
-		restartCount := status.ContainerStatuses[0].RestartCount
-		if restartCount != expectedCount {
-			t.Errorf("expected %d restart count, got %d", expectedCount, restartCount)
-		}
-		return status
-	}
-
-	// Container "bar" has failed twice; create two dead docker containers.
-	// TODO: container lists are expected to be sorted in reverse chronological order (newest first).
-	// We should fix FakeDockerClient to sort the list before returning.
-	fakeDocker.ExitedContainerList = []docker.APIContainers{{Names: names, ID: "5678"}, {Names: names, ID: "1234"}}
-	pod.Status = verifyRestartCount(&pod, 1)
-
-	// Found a new dead container. The restart count should be incremented.
-	fakeDocker.ExitedContainerList = []docker.APIContainers{
-		{Names: names, ID: "9101"}, {Names: names, ID: "5678"}, {Names: names, ID: "1234"}}
-	pod.Status = verifyRestartCount(&pod, 2)
-
-	// All dead containers have been GC'd. The restart count should persist
-	// (i.e., remain the same).
-	fakeDocker.ExitedContainerList = []docker.APIContainers{}
-	verifyRestartCount(&pod, 2)
-}
-
 func TestFilterOutTerminatedPods(t *testing.T) {
 	testKubelet := newTestKubelet(t)
 	kubelet := testKubelet.kubelet
@@ -4094,10 +3437,10 @@ func TestIsPodPastActiveDeadline(t *testing.T) {
 }
 
 func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
-	testKubelet := newTestKubelet(t)
+	testKubelet := newTestKubeletWithFakeRuntime(t)
+	fakeRuntime := testKubelet.fakeRuntime
 	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
 	kubelet := testKubelet.kubelet
-	fakeDocker := testKubelet.fakeDocker
 
 	now := util.Now()
 	startTime := util.NewTime(now.Time.Add(-1 * time.Minute))
@@ -4121,34 +3464,9 @@ func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) {
 			},
 		},
 	}
-	fakeDocker.ContainerList = []docker.APIContainers{
-		{
-			// the k8s prefix is required for the kubelet to manage the container
-			Names: []string{"/k8s_foo_bar_new_12345678_1111"},
-			ID:    "1234",
-		},
-		{
-			// pod infra container
-			Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_bar_new_12345678_2222"},
-			ID:    "9876",
-		},
-	}
-	fakeDocker.ContainerMap = map[string]*docker.Container{
-		"1234": {
-			ID:         "1234",
-			Config:     &docker.Config{},
-			HostConfig: &docker.HostConfig{},
-		},
-		"9876": {
-			ID:         "9876",
-			Config:     &docker.Config{},
-			HostConfig: &docker.HostConfig{},
-		},
-		"9999": {
-			ID:         "9999",
-			Config:     &docker.Config{},
-			HostConfig: &docker.HostConfig{},
-		},
+
+	fakeRuntime.PodList = []*kubecontainer.Pod{
+		{ID: "12345678", Name: "bar", Namespace: "new", Containers: []*kubecontainer.Container{{Name: "foo"}}},
 	}
 
 	// Let the pod worker set the status to fail after this sync.
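A minimal sketch of the check this sync is setting up, using only helpers that appear elsewhere in this test file (SyncPods, statusManager.GetPodStatus, GetPodFullName); the exact expectation below is an assumption, not part of the diff:

	kubelet.podManager.SetPods(pods)
	if err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now()); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	// A pod whose ActiveDeadlineSeconds has elapsed should be reported as Failed.
	status, found := kubelet.statusManager.GetPodStatus(kubecontainer.GetPodFullName(pods[0]))
	if !found || status.Phase != api.PodFailed {
		t.Errorf("expected pod past its active deadline to be Failed, got %#v", status)
	}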
@@ -4167,10 +3485,10 @@ func TestSyncPodsSetStatusToFailedForPodsThatRunTooLong(t *testing.T) { } func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) { - testKubelet := newTestKubelet(t) + testKubelet := newTestKubeletWithFakeRuntime(t) + fakeRuntime := testKubelet.fakeRuntime testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil) kubelet := testKubelet.kubelet - fakeDocker := testKubelet.fakeDocker now := util.Now() startTime := util.NewTime(now.Time.Add(-1 * time.Minute)) @@ -4194,34 +3512,9 @@ func TestSyncPodsDoesNotSetPodsThatDidNotRunTooLongToFailed(t *testing.T) { }, }, } - fakeDocker.ContainerList = []docker.APIContainers{ - { - // the k8s prefix is required for the kubelet to manage the container - Names: []string{"/k8s_foo_bar_new_12345678_1111"}, - ID: "1234", - }, - { - // pod infra container - Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_bar_new_12345678_2222"}, - ID: "9876", - }, - } - fakeDocker.ContainerMap = map[string]*docker.Container{ - "1234": { - ID: "1234", - Config: &docker.Config{}, - HostConfig: &docker.HostConfig{}, - }, - "9876": { - ID: "9876", - Config: &docker.Config{}, - HostConfig: &docker.HostConfig{}, - }, - "9999": { - ID: "9999", - Config: &docker.Config{}, - HostConfig: &docker.HostConfig{}, - }, + + fakeRuntime.PodList = []*kubecontainer.Pod{ + {ID: "12345678", Name: "bar", Namespace: "new", Containers: []*kubecontainer.Container{{Name: "foo"}}}, } kubelet.podManager.SetPods(pods) diff --git a/pkg/kubelet/rkt/pod_info.go b/pkg/kubelet/rkt/pod_info.go index 559cfbb6629..207541734de 100644 --- a/pkg/kubelet/rkt/pod_info.go +++ b/pkg/kubelet/rkt/pod_info.go @@ -136,7 +136,7 @@ func (p *podInfo) getContainerStatus(container *kubecontainer.Container) api.Con } exitCode = -1 status.State = api.ContainerState{ - Termination: &api.ContainerStateTerminated{ + Terminated: &api.ContainerStateTerminated{ ExitCode: exitCode, StartedAt: util.Unix(container.Created, 0), }, diff --git a/pkg/master/controller.go b/pkg/master/controller.go index b1232bf2751..96099057a2d 100644 --- a/pkg/master/controller.go +++ b/pkg/master/controller.go @@ -36,21 +36,22 @@ import ( // Controller is the controller manager for the core bootstrap Kubernetes controller // loops, which manage creating the "kubernetes" and "kubernetes-ro" services, the "default" -// namespace, and provide the IP repair check on service PortalIPs +// namespace, and provide the IP repair check on service IPs type Controller struct { NamespaceRegistry namespace.Registry ServiceRegistry service.Registry - ServiceIPRegistry service.RangeRegistry - EndpointRegistry endpoint.Registry - PortalNet *net.IPNet // TODO: MasterCount is yucky MasterCount int + ServiceClusterIPRegistry service.RangeRegistry + ServiceClusterIPInterval time.Duration + ServiceClusterIPRange *net.IPNet + ServiceNodePortRegistry service.RangeRegistry ServiceNodePortInterval time.Duration - ServiceNodePorts util.PortRange + ServiceNodePortRange util.PortRange - PortalIPInterval time.Duration + EndpointRegistry endpoint.Registry EndpointInterval time.Duration PublicIP net.IP @@ -73,11 +74,11 @@ func (c *Controller) Start() { return } - repairPortals := servicecontroller.NewRepair(c.PortalIPInterval, c.ServiceRegistry, c.PortalNet, c.ServiceIPRegistry) - repairNodePorts := portallocatorcontroller.NewRepair(c.ServiceNodePortInterval, c.ServiceRegistry, c.ServiceNodePorts, c.ServiceNodePortRegistry) + repairClusterIPs := 
servicecontroller.NewRepair(c.ServiceClusterIPInterval, c.ServiceRegistry, c.ServiceClusterIPRange, c.ServiceClusterIPRegistry) + repairNodePorts := portallocatorcontroller.NewRepair(c.ServiceNodePortInterval, c.ServiceRegistry, c.ServiceNodePortRange, c.ServiceNodePortRegistry) // run all of the controllers once prior to returning from Start. - if err := repairPortals.RunOnce(); err != nil { + if err := repairClusterIPs.RunOnce(); err != nil { glog.Errorf("Unable to perform initial IP allocation check: %v", err) } if err := repairNodePorts.RunOnce(); err != nil { @@ -90,7 +91,7 @@ func (c *Controller) Start() { glog.Errorf("Unable to perform initial Kubernetes RO service initialization: %v", err) } - c.runner = util.NewRunner(c.RunKubernetesService, c.RunKubernetesROService, repairPortals.RunUntil, repairNodePorts.RunUntil) + c.runner = util.NewRunner(c.RunKubernetesService, c.RunKubernetesROService, repairClusterIPs.RunUntil, repairNodePorts.RunUntil) c.runner.Start() } @@ -189,7 +190,7 @@ func (c *Controller) CreateMasterServiceIfNeeded(serviceName string, serviceIP n Ports: []api.ServicePort{{Port: servicePort, Protocol: api.ProtocolTCP}}, // maintained by this code, not by the pod selector Selector: nil, - PortalIP: serviceIP.String(), + ClusterIP: serviceIP.String(), SessionAffinity: api.ServiceAffinityNone, }, } diff --git a/pkg/master/master.go b/pkg/master/master.go index ff6ced13f1c..6acaf36dd3b 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -85,7 +85,6 @@ type Config struct { EventTTL time.Duration MinionRegexp string KubeletClient client.KubeletClient - PortalNet *net.IPNet // allow downstream consumers to disable the core controller loops EnableCoreControllers bool EnableLogsSupport bool @@ -142,16 +141,19 @@ type Config struct { // The name of the cluster. ClusterName string + // The range of IPs to be assigned to services with type=ClusterIP or greater + ServiceClusterIPRange *net.IPNet + // The range of ports to be assigned to services with type=NodePort or greater - ServiceNodePorts util.PortRange + ServiceNodePortRange util.PortRange } // Master contains state for a Kubernetes cluster master/api server. type Master struct { // "Inputs", Copied from Config - portalNet *net.IPNet - serviceNodePorts util.PortRange - cacheTimeout time.Duration + serviceClusterIPRange *net.IPNet + serviceNodePortRange util.PortRange + cacheTimeout time.Duration mux apiserver.Mux muxHelper *apiserver.MuxHelper @@ -192,12 +194,12 @@ type Master struct { // registries are internal client APIs for accessing the storage layer // TODO: define the internal typed interface in a way that clients can // also be replaced - nodeRegistry minion.Registry - namespaceRegistry namespace.Registry - serviceRegistry service.Registry - endpointRegistry endpoint.Registry - portalAllocator service.RangeRegistry - serviceNodePortAllocator service.RangeRegistry + nodeRegistry minion.Registry + namespaceRegistry namespace.Registry + serviceRegistry service.Registry + endpointRegistry endpoint.Registry + serviceClusterIPAllocator service.RangeRegistry + serviceNodePortAllocator service.RangeRegistry // "Outputs" Handler http.Handler @@ -219,26 +221,26 @@ func NewEtcdHelper(client tools.EtcdGetSet, version string, prefix string) (help // setDefaults fills in any fields not set that are required to have valid data. func setDefaults(c *Config) { - if c.PortalNet == nil { + if c.ServiceClusterIPRange == nil { defaultNet := "10.0.0.0/24" - glog.Warningf("Portal net unspecified. 
Defaulting to %v.", defaultNet)
-		_, portalNet, err := net.ParseCIDR(defaultNet)
+		glog.Warningf("Network range for service cluster IPs is unspecified. Defaulting to %v.", defaultNet)
+		_, serviceClusterIPRange, err := net.ParseCIDR(defaultNet)
 		if err != nil {
 			glog.Fatalf("Unable to parse CIDR: %v", err)
 		}
-		if size := ipallocator.RangeSize(portalNet); size < 8 {
-			glog.Fatalf("The portal net range must be at least %d IP addresses", 8)
+		if size := ipallocator.RangeSize(serviceClusterIPRange); size < 8 {
+			glog.Fatalf("The service cluster IP range must be at least %d IP addresses", 8)
 		}
-		c.PortalNet = portalNet
+		c.ServiceClusterIPRange = serviceClusterIPRange
 	}
-	if c.ServiceNodePorts.Size == 0 {
+	if c.ServiceNodePortRange.Size == 0 {
 		// TODO: Currently no way to specify an empty range (do we need to allow this?)
 		// We should probably allow this for clouds that don't require NodePort to do load-balancing (GCE)
 		// but then that breaks the strict nestedness of ServiceType.
 		// Review post-v1
-		defaultServiceNodePorts := util.PortRange{Base: 30000, Size: 2767}
-		c.ServiceNodePorts = defaultServiceNodePorts
-		glog.Infof("Node port range unspecified. Defaulting to %v.", c.ServiceNodePorts)
+		defaultServiceNodePortRange := util.PortRange{Base: 30000, Size: 2767}
+		c.ServiceNodePortRange = defaultServiceNodePortRange
+		glog.Infof("Node port range unspecified. Defaulting to %v.", c.ServiceNodePortRange)
 	}
 	if c.MasterCount == 0 {
 		// Clearly, there will be at least one master.
@@ -273,8 +275,8 @@
 // New returns a new instance of Master from the given config.
 // Certain config fields will be set to a default value if unset,
 // including:
-//   PortalNet
-//   ServiceNodePorts
+//   ServiceClusterIPRange
+//   ServiceNodePortRange
 //   MasterCount
 //   ReadOnlyPort
 //   ReadWritePort
@@ -301,20 +303,20 @@ func New(c *Config) *Master {
 		glog.Fatalf("master.New() called with config.KubeletClient == nil")
 	}
 
-	// Select the first two valid IPs from portalNet to use as the master service portalIPs
-	serviceReadOnlyIP, err := ipallocator.GetIndexedIP(c.PortalNet, 1)
+	// Select the first two valid IPs from serviceClusterIPRange to use as the master service IPs
+	serviceReadOnlyIP, err := ipallocator.GetIndexedIP(c.ServiceClusterIPRange, 1)
 	if err != nil {
 		glog.Fatalf("Failed to generate service read-only IP for master service: %v", err)
 	}
-	serviceReadWriteIP, err := ipallocator.GetIndexedIP(c.PortalNet, 2)
+	serviceReadWriteIP, err := ipallocator.GetIndexedIP(c.ServiceClusterIPRange, 2)
 	if err != nil {
 		glog.Fatalf("Failed to generate service read-write IP for master service: %v", err)
 	}
-	glog.V(4).Infof("Setting master service IPs based on PortalNet subnet to %q (read-only) and %q (read-write).", serviceReadOnlyIP, serviceReadWriteIP)
+	glog.V(4).Infof("Setting master service IPs to %q (read-only) and %q (read-write).", serviceReadOnlyIP, serviceReadWriteIP)
 
 	m := &Master{
-		portalNet:        c.PortalNet,
-		serviceNodePorts: c.ServiceNodePorts,
+		serviceClusterIPRange: c.ServiceClusterIPRange,
+		serviceNodePortRange:  c.ServiceNodePortRange,
 		rootWebService:        new(restful.WebService),
 		enableCoreControllers: c.EnableCoreControllers,
 		enableLogsSupport:     c.EnableLogsSupport,
@@ -440,17 +442,17 @@ func (m *Master) init(c *Config) {
 	registry := etcd.NewRegistry(c.EtcdHelper, podRegistry, m.endpointRegistry)
 	m.serviceRegistry = registry
 
-	var portalRangeRegistry service.RangeRegistry
-	portalAllocator := ipallocator.NewAllocatorCIDRRange(m.portalNet, func(max int, rangeSpec string) allocator.Interface
{ + var serviceClusterIPRegistry service.RangeRegistry + serviceClusterIPAllocator := ipallocator.NewAllocatorCIDRRange(m.serviceClusterIPRange, func(max int, rangeSpec string) allocator.Interface { mem := allocator.NewAllocationMap(max, rangeSpec) etcd := etcdallocator.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", c.EtcdHelper) - portalRangeRegistry = etcd + serviceClusterIPRegistry = etcd return etcd }) - m.portalAllocator = portalRangeRegistry + m.serviceClusterIPAllocator = serviceClusterIPRegistry var serviceNodePortRegistry service.RangeRegistry - serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.serviceNodePorts, func(max int, rangeSpec string) allocator.Interface { + serviceNodePortAllocator := portallocator.NewPortAllocatorCustom(m.serviceNodePortRange, func(max int, rangeSpec string) allocator.Interface { mem := allocator.NewAllocationMap(max, rangeSpec) etcd := etcdallocator.NewEtcd(mem, "/ranges/servicenodeports", "servicenodeportallocation", c.EtcdHelper) serviceNodePortRegistry = etcd @@ -474,7 +476,7 @@ func (m *Master) init(c *Config) { "podTemplates": podTemplateStorage, "replicationControllers": controllerStorage, - "services": service.NewStorage(m.serviceRegistry, m.nodeRegistry, m.endpointRegistry, portalAllocator, serviceNodePortAllocator, c.ClusterName), + "services": service.NewStorage(m.serviceRegistry, m.nodeRegistry, m.endpointRegistry, serviceClusterIPAllocator, serviceNodePortAllocator, c.ClusterName), "endpoints": endpointsStorage, "minions": nodeStorage, "minions/status": nodeStatusStorage, @@ -612,17 +614,18 @@ func (m *Master) NewBootstrapController() *Controller { return &Controller{ NamespaceRegistry: m.namespaceRegistry, ServiceRegistry: m.serviceRegistry, - ServiceIPRegistry: m.portalAllocator, - EndpointRegistry: m.endpointRegistry, - PortalNet: m.portalNet, MasterCount: m.masterCount, - ServiceNodePortRegistry: m.serviceNodePortAllocator, - ServiceNodePorts: m.serviceNodePorts, + EndpointRegistry: m.endpointRegistry, + EndpointInterval: 10 * time.Second, + ServiceClusterIPRegistry: m.serviceClusterIPAllocator, + ServiceClusterIPRange: m.serviceClusterIPRange, + ServiceClusterIPInterval: 3 * time.Minute, + + ServiceNodePortRegistry: m.serviceNodePortAllocator, + ServiceNodePortRange: m.serviceNodePortRange, ServiceNodePortInterval: 3 * time.Minute, - PortalIPInterval: 3 * time.Minute, - EndpointInterval: 10 * time.Second, PublicIP: m.clusterIP, diff --git a/pkg/proxy/proxier.go b/pkg/proxy/proxier.go index 7f36e63b63d..c2e78747197 100644 --- a/pkg/proxy/proxier.go +++ b/pkg/proxy/proxier.go @@ -33,9 +33,13 @@ import ( "github.com/golang/glog" ) +type portal struct { + ip net.IP + port int +} + type serviceInfo struct { - portalIP net.IP - portalPort int + portal portal protocol api.Protocol proxyPort int socket proxySocket @@ -252,9 +256,9 @@ func (proxier *Proxier) OnUpdate(services []api.Service) { for i := range services { service := &services[i] - // if PortalIP is "None" or empty, skip proxying + // if ClusterIP is "None" or empty, skip proxying if !api.IsServiceIPSet(service) { - glog.V(3).Infof("Skipping service %s due to portal IP = %q", types.NamespacedName{service.Namespace, service.Name}, service.Spec.PortalIP) + glog.V(3).Infof("Skipping service %s due to clusterIP = %q", types.NamespacedName{service.Namespace, service.Name}, service.Spec.ClusterIP) continue } @@ -263,7 +267,7 @@ func (proxier *Proxier) OnUpdate(services []api.Service) { serviceName := ServicePortName{types.NamespacedName{service.Namespace, 
service.Name}, servicePort.Name} activeServices[serviceName] = true - serviceIP := net.ParseIP(service.Spec.PortalIP) + serviceIP := net.ParseIP(service.Spec.ClusterIP) info, exists := proxier.getServiceInfo(serviceName) // TODO: check health of the socket? What if ProxyLoop exited? if exists && sameConfig(info, service, servicePort) { @@ -287,8 +291,8 @@ func (proxier *Proxier) OnUpdate(services []api.Service) { glog.Errorf("Failed to start proxy for %q: %v", serviceName, err) continue } - info.portalIP = serviceIP - info.portalPort = servicePort.Port + info.portal.ip = serviceIP + info.portal.port = servicePort.Port info.deprecatedPublicIPs = service.Spec.DeprecatedPublicIPs // Deep-copy in case the service instance changes info.loadBalancerStatus = *api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer) @@ -321,10 +325,10 @@ func (proxier *Proxier) OnUpdate(services []api.Service) { } func sameConfig(info *serviceInfo, service *api.Service, port *api.ServicePort) bool { - if info.protocol != port.Protocol || info.portalPort != port.Port || info.nodePort != port.NodePort { + if info.protocol != port.Protocol || info.portal.port != port.Port || info.nodePort != port.NodePort { return false } - if !info.portalIP.Equal(net.ParseIP(service.Spec.PortalIP)) { + if !info.portal.ip.Equal(net.ParseIP(service.Spec.ClusterIP)) { return false } if !ipsEqual(info.deprecatedPublicIPs, service.Spec.DeprecatedPublicIPs) { @@ -352,19 +356,19 @@ func ipsEqual(lhs, rhs []string) bool { } func (proxier *Proxier) openPortal(service ServicePortName, info *serviceInfo) error { - err := proxier.openOnePortal(info.portalIP, info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service) + err := proxier.openOnePortal(info.portal, info.protocol, proxier.listenIP, info.proxyPort, service) if err != nil { return err } for _, publicIP := range info.deprecatedPublicIPs { - err = proxier.openOnePortal(net.ParseIP(publicIP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service) + err = proxier.openOnePortal(portal{net.ParseIP(publicIP), info.portal.port}, info.protocol, proxier.listenIP, info.proxyPort, service) if err != nil { return err } } for _, ingress := range info.loadBalancerStatus.Ingress { if ingress.IP != "" { - err = proxier.openOnePortal(net.ParseIP(ingress.IP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service) + err = proxier.openOnePortal(portal{net.ParseIP(ingress.IP), info.portal.port}, info.protocol, proxier.listenIP, info.proxyPort, service) if err != nil { return err } @@ -379,27 +383,27 @@ func (proxier *Proxier) openPortal(service ServicePortName, info *serviceInfo) e return nil } -func (proxier *Proxier) openOnePortal(portalIP net.IP, portalPort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) error { +func (proxier *Proxier) openOnePortal(portal portal, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) error { // Handle traffic from containers. - args := proxier.iptablesContainerPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name) + args := proxier.iptablesContainerPortalArgs(portal.ip, portal.port, protocol, proxyIP, proxyPort, name) existed, err := proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesContainerPortalChain, args...) 
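	// EnsureRule appends the rule only when it is not already present and
	// reports whether it already existed, so repeated OnUpdate syncs stay
	// idempotent and the "Opened ... portal" logs below fire only on the
	// first installation.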
if err != nil { glog.Errorf("Failed to install iptables %s rule for service %q", iptablesContainerPortalChain, name) return err } if !existed { - glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s:%d", name, protocol, portalIP, portalPort) + glog.V(3).Infof("Opened iptables from-containers portal for service %q on %s %s:%d", name, protocol, portal.ip, portal.port) } // Handle traffic from the host. - args = proxier.iptablesHostPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name) + args = proxier.iptablesHostPortalArgs(portal.ip, portal.port, protocol, proxyIP, proxyPort, name) existed, err = proxier.iptables.EnsureRule(iptables.Append, iptables.TableNAT, iptablesHostPortalChain, args...) if err != nil { glog.Errorf("Failed to install iptables %s rule for service %q", iptablesHostPortalChain, name) return err } if !existed { - glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s:%d", name, protocol, portalIP, portalPort) + glog.V(3).Infof("Opened iptables from-host portal for service %q on %s %s:%d", name, protocol, portal.ip, portal.port) } return nil } @@ -480,13 +484,13 @@ func (proxier *Proxier) openNodePort(nodePort int, protocol api.Protocol, proxyI func (proxier *Proxier) closePortal(service ServicePortName, info *serviceInfo) error { // Collect errors and report them all at the end. - el := proxier.closeOnePortal(info.portalIP, info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service) + el := proxier.closeOnePortal(info.portal, info.protocol, proxier.listenIP, info.proxyPort, service) for _, publicIP := range info.deprecatedPublicIPs { - el = append(el, proxier.closeOnePortal(net.ParseIP(publicIP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)...) + el = append(el, proxier.closeOnePortal(portal{net.ParseIP(publicIP), info.portal.port}, info.protocol, proxier.listenIP, info.proxyPort, service)...) } for _, ingress := range info.loadBalancerStatus.Ingress { if ingress.IP != "" { - el = append(el, proxier.closeOnePortal(net.ParseIP(ingress.IP), info.portalPort, info.protocol, proxier.listenIP, info.proxyPort, service)...) + el = append(el, proxier.closeOnePortal(portal{net.ParseIP(ingress.IP), info.portal.port}, info.protocol, proxier.listenIP, info.proxyPort, service)...) } } if info.nodePort != 0 { @@ -500,18 +504,18 @@ func (proxier *Proxier) closePortal(service ServicePortName, info *serviceInfo) return errors.NewAggregate(el) } -func (proxier *Proxier) closeOnePortal(portalIP net.IP, portalPort int, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) []error { +func (proxier *Proxier) closeOnePortal(portal portal, protocol api.Protocol, proxyIP net.IP, proxyPort int, name ServicePortName) []error { el := []error{} // Handle traffic from containers. - args := proxier.iptablesContainerPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name) + args := proxier.iptablesContainerPortalArgs(portal.ip, portal.port, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesContainerPortalChain, args...); err != nil { glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesContainerPortalChain, name) el = append(el, err) } // Handle traffic from the host. 
- args = proxier.iptablesHostPortalArgs(portalIP, portalPort, protocol, proxyIP, proxyPort, name) + args = proxier.iptablesHostPortalArgs(portal.ip, portal.port, protocol, proxyIP, proxyPort, name) if err := proxier.iptables.DeleteRule(iptables.TableNAT, iptablesHostPortalChain, args...); err != nil { glog.Errorf("Failed to delete iptables %s rule for service %q", iptablesHostPortalChain, name) el = append(el, err) @@ -556,7 +560,7 @@ var iptablesHostNodePortChain iptables.Chain = "KUBE-NODEPORT-HOST" // Ensure that the iptables infrastructure we use is set up. This can safely be called periodically. func iptablesInit(ipt iptables.Interface) error { // TODO: There is almost certainly room for optimization here. E.g. If - // we knew the portal_net CIDR we could fast-track outbound packets not + // we knew the service_cluster_ip_range CIDR we could fast-track outbound packets not // destined for a service. There's probably more, help wanted. // Danger - order of these rules matters here: @@ -576,8 +580,8 @@ func iptablesInit(ipt iptables.Interface) error { // the NodePort would take priority (incorrectly). // This is unlikely (and would only affect outgoing traffic from the cluster to the load balancer, which seems // doubly-unlikely), but we need to be careful to keep the rules in the right order. - args := []string{ /* portal_net matching could go here */ } - args = append(args, "-m", "comment", "--comment", "handle Portals; NOTE: this must be before the NodePort rules") + args := []string{ /* service_cluster_ip_range matching could go here */ } + args = append(args, "-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules") if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesContainerPortalChain); err != nil { return err } diff --git a/pkg/proxy/proxier_test.go b/pkg/proxy/proxier_test.go index ddf9c92e6c2..49c02e28873 100644 --- a/pkg/proxy/proxier_test.go +++ b/pkg/proxy/proxier_test.go @@ -300,7 +300,7 @@ func TestMultiPortOnUpdate(t *testing.T) { p.OnUpdate([]api.Service{{ ObjectMeta: api.ObjectMeta{Name: serviceP.Name, Namespace: serviceP.Namespace}, - Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{ + Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{ Name: "p", Port: 80, Protocol: "TCP", @@ -315,7 +315,7 @@ func TestMultiPortOnUpdate(t *testing.T) { if !exists { t.Fatalf("can't find serviceInfo for %s", serviceP) } - if svcInfo.portalIP.String() != "1.2.3.4" || svcInfo.portalPort != 80 || svcInfo.protocol != "TCP" { + if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 80 || svcInfo.protocol != "TCP" { t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo) } @@ -323,7 +323,7 @@ func TestMultiPortOnUpdate(t *testing.T) { if !exists { t.Fatalf("can't find serviceInfo for %s", serviceQ) } - if svcInfo.portalIP.String() != "1.2.3.4" || svcInfo.portalPort != 81 || svcInfo.protocol != "UDP" { + if svcInfo.portal.ip.String() != "1.2.3.4" || svcInfo.portal.port != 81 || svcInfo.protocol != "UDP" { t.Errorf("unexpected serviceInfo for %s: %#v", serviceQ, svcInfo) } @@ -530,7 +530,7 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) { p.OnUpdate([]api.Service{{ ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{ + Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{ Name: "p", Port: svcInfo.proxyPort, Protocol: "TCP", @@ -582,7 +582,7 @@ func 
TestUDPProxyUpdateDeleteUpdate(t *testing.T) { p.OnUpdate([]api.Service{{ ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{ + Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{ Name: "p", Port: svcInfo.proxyPort, Protocol: "UDP", @@ -624,7 +624,7 @@ func TestTCPProxyUpdatePort(t *testing.T) { p.OnUpdate([]api.Service{{ ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{ + Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{ Name: "p", Port: 99, Protocol: "TCP", @@ -671,7 +671,7 @@ func TestUDPProxyUpdatePort(t *testing.T) { p.OnUpdate([]api.Service{{ ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{ + Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{ Name: "p", Port: 99, Protocol: "UDP", @@ -720,10 +720,10 @@ func TestProxyUpdatePublicIPs(t *testing.T) { Spec: api.ServiceSpec{ Ports: []api.ServicePort{{ Name: "p", - Port: svcInfo.portalPort, + Port: svcInfo.portal.port, Protocol: "TCP", }}, - PortalIP: svcInfo.portalIP.String(), + ClusterIP: svcInfo.portal.ip.String(), DeprecatedPublicIPs: []string{"4.3.2.1"}, }, }}) @@ -769,7 +769,7 @@ func TestProxyUpdatePortal(t *testing.T) { p.OnUpdate([]api.Service{{ ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: api.ServiceSpec{PortalIP: "", Ports: []api.ServicePort{{ + Spec: api.ServiceSpec{ClusterIP: "", Ports: []api.ServicePort{{ Name: "p", Port: svcInfo.proxyPort, Protocol: "TCP", @@ -777,12 +777,12 @@ func TestProxyUpdatePortal(t *testing.T) { }}) _, exists := p.getServiceInfo(service) if exists { - t.Fatalf("service with empty portalIP should not be included in the proxy") + t.Fatalf("service with empty ClusterIP should not be included in the proxy") } p.OnUpdate([]api.Service{{ ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: api.ServiceSpec{PortalIP: "None", Ports: []api.ServicePort{{ + Spec: api.ServiceSpec{ClusterIP: "None", Ports: []api.ServicePort{{ Name: "p", Port: svcInfo.proxyPort, Protocol: "TCP", @@ -790,12 +790,12 @@ func TestProxyUpdatePortal(t *testing.T) { }}) _, exists = p.getServiceInfo(service) if exists { - t.Fatalf("service with 'None' as portalIP should not be included in the proxy") + t.Fatalf("service with 'None' as ClusterIP should not be included in the proxy") } p.OnUpdate([]api.Service{{ ObjectMeta: api.ObjectMeta{Name: service.Name, Namespace: service.Namespace}, - Spec: api.ServiceSpec{PortalIP: "1.2.3.4", Ports: []api.ServicePort{{ + Spec: api.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []api.ServicePort{{ Name: "p", Port: svcInfo.proxyPort, Protocol: "TCP", @@ -803,7 +803,7 @@ func TestProxyUpdatePortal(t *testing.T) { }}) svcInfo, exists = p.getServiceInfo(service) if !exists { - t.Fatalf("service with portalIP set not found in the proxy") + t.Fatalf("service with ClusterIP set not found in the proxy") } testEchoTCP(t, "127.0.0.1", svcInfo.proxyPort) waitForNumProxyLoops(t, p, 1) diff --git a/pkg/registry/event/registry.go b/pkg/registry/event/registry.go index a3fba17f6c4..fda5e32aa33 100644 --- a/pkg/registry/event/registry.go +++ b/pkg/registry/event/registry.go @@ -44,7 +44,7 @@ func NewEtcdRegistry(h tools.EtcdHelper, ttl uint64) generic.Registry { KeyFunc: func(ctx api.Context, id 
string) (string, error) { return etcdgeneric.NamespaceKeyFunc(ctx, prefix, id) }, - TTLFunc: func(runtime.Object, bool) (uint64, error) { + TTLFunc: func(runtime.Object, uint64, bool) (uint64, error) { return ttl, nil }, Helper: h, diff --git a/pkg/registry/generic/etcd/etcd.go b/pkg/registry/generic/etcd/etcd.go index 5c2a70812c8..ee148c7f43f 100644 --- a/pkg/registry/generic/etcd/etcd.go +++ b/pkg/registry/generic/etcd/etcd.go @@ -18,6 +18,7 @@ package etcd import ( "fmt" + "reflect" "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" @@ -72,8 +73,9 @@ type Etcd struct { ObjectNameFunc func(obj runtime.Object) (string, error) // Return the TTL objects should be persisted with. Update is true if this - // is an operation against an existing object. - TTLFunc func(obj runtime.Object, update bool) (uint64, error) + // is an operation against an existing object. Existing is the current TTL + // or the default for this operation. + TTLFunc func(obj runtime.Object, existing uint64, update bool) (uint64, error) // Returns a matcher corresponding to the provided labels and fields. PredicateFunc func(label labels.Selector, field fields.Selector) generic.Matcher @@ -146,9 +148,9 @@ func (e *Etcd) List(ctx api.Context, label labels.Selector, field fields.Selecto // ListPredicate returns a list of all the items matching m. func (e *Etcd) ListPredicate(ctx api.Context, m generic.Matcher) (runtime.Object, error) { - trace := util.NewTrace("List") - defer trace.LogIfLong(time.Second) list := e.NewListFunc() + trace := util.NewTrace("List " + reflect.TypeOf(list).String()) + defer trace.LogIfLong(600 * time.Millisecond) if name, ok := m.MatchesSingle(); ok { trace.Step("About to read single object") key, err := e.KeyFunc(ctx, name) @@ -184,12 +186,9 @@ func (e *Etcd) CreateWithName(ctx api.Context, name string, obj runtime.Object) return err } } - ttl := uint64(0) - if e.TTLFunc != nil { - ttl, err = e.TTLFunc(obj, false) - if err != nil { - return err - } + ttl, err := e.calculateTTL(obj, 0, false) + if err != nil { + return err } err = e.Helper.CreateObj(key, obj, nil, ttl) err = etcderr.InterpretCreateError(err, e.EndpointName, name) @@ -201,7 +200,7 @@ func (e *Etcd) CreateWithName(ctx api.Context, name string, obj runtime.Object) // Create inserts a new item according to the unique key from the object. 
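// It applies the registered BeforeCreate strategy checks, derives the storage
// key from the object's name, computes the TTL via calculateTTL (zero meaning
// no expiry; see the helper below), and only then persists the object.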
func (e *Etcd) Create(ctx api.Context, obj runtime.Object) (runtime.Object, error) { - trace := util.NewTrace("Create") + trace := util.NewTrace("Create " + reflect.TypeOf(obj).String()) defer trace.LogIfLong(time.Second) if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil { return nil, err @@ -214,12 +213,9 @@ func (e *Etcd) Create(ctx api.Context, obj runtime.Object) (runtime.Object, erro if err != nil { return nil, err } - ttl := uint64(0) - if e.TTLFunc != nil { - ttl, err = e.TTLFunc(obj, false) - if err != nil { - return nil, err - } + ttl, err := e.calculateTTL(obj, 0, false) + if err != nil { + return nil, err } trace.Step("About to create object") out := e.NewFunc() @@ -249,12 +245,9 @@ func (e *Etcd) UpdateWithName(ctx api.Context, name string, obj runtime.Object) if err != nil { return err } - ttl := uint64(0) - if e.TTLFunc != nil { - ttl, err = e.TTLFunc(obj, true) - if err != nil { - return err - } + ttl, err := e.calculateTTL(obj, 0, true) + if err != nil { + return err } err = e.Helper.SetObj(key, obj, nil, ttl) err = etcderr.InterpretUpdateError(err, e.EndpointName, name) @@ -268,7 +261,7 @@ func (e *Etcd) UpdateWithName(ctx api.Context, name string, obj runtime.Object) // or an error. If the registry allows create-on-update, the create flow will be executed. // A bool is returned along with the object and any errors, to indicate object creation. func (e *Etcd) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool, error) { - trace := util.NewTrace("Update") + trace := util.NewTrace("Update " + reflect.TypeOf(obj).String()) defer trace.LogIfLong(time.Second) name, err := e.ObjectNameFunc(obj) if err != nil { @@ -281,49 +274,46 @@ func (e *Etcd) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool // TODO: expose TTL creating := false out := e.NewFunc() - err = e.Helper.GuaranteedUpdate(key, out, true, func(existing runtime.Object) (runtime.Object, uint64, error) { + err = e.Helper.GuaranteedUpdate(key, out, true, func(existing runtime.Object, res tools.ResponseMeta) (runtime.Object, *uint64, error) { version, err := e.Helper.Versioner.ObjectResourceVersion(existing) if err != nil { - return nil, 0, err + return nil, nil, err } if version == 0 { if !e.UpdateStrategy.AllowCreateOnUpdate() { - return nil, 0, kubeerr.NewNotFound(e.EndpointName, name) + return nil, nil, kubeerr.NewNotFound(e.EndpointName, name) } creating = true if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil { - return nil, 0, err + return nil, nil, err } - ttl := uint64(0) - if e.TTLFunc != nil { - ttl, err = e.TTLFunc(obj, false) - if err != nil { - return nil, 0, err - } + ttl, err := e.calculateTTL(obj, 0, false) + if err != nil { + return nil, nil, err } - return obj, ttl, nil + return obj, &ttl, nil } creating = false newVersion, err := e.Helper.Versioner.ObjectResourceVersion(obj) if err != nil { - return nil, 0, err + return nil, nil, err } if newVersion != version { // TODO: return the most recent version to a client? 
-			return nil, 0, kubeerr.NewConflict(e.EndpointName, name, fmt.Errorf("the resource was updated to %d", version))
+			return nil, nil, kubeerr.NewConflict(e.EndpointName, name, fmt.Errorf("the resource was updated to %d", version))
 		}
 		if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {
-			return nil, 0, err
+			return nil, nil, err
 		}
-		ttl := uint64(0)
-		if e.TTLFunc != nil {
-			ttl, err = e.TTLFunc(obj, true)
-			if err != nil {
-				return nil, 0, err
-			}
+		ttl, err := e.calculateTTL(obj, res.TTL, true)
+		if err != nil {
+			return nil, nil, err
 		}
-		return obj, ttl, nil
+		if int64(ttl) != res.TTL {
+			return obj, &ttl, nil
+		}
+		return obj, nil, nil
 	})
 
 	if err != nil {
@@ -358,9 +348,9 @@ func (e *Etcd) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool
 
 // Get retrieves the item from etcd.
 func (e *Etcd) Get(ctx api.Context, name string) (runtime.Object, error) {
-	trace := util.NewTrace("Get")
-	defer trace.LogIfLong(time.Second)
 	obj := e.NewFunc()
+	trace := util.NewTrace("Get " + reflect.TypeOf(obj).String())
+	defer trace.LogIfLong(time.Second)
 	key, err := e.KeyFunc(ctx, name)
 	if err != nil {
 		return nil, err
@@ -380,14 +370,14 @@ func (e *Etcd) Get(ctx api.Context, name string) (runtime.Object, error) {
 
 // Delete removes the item from etcd.
 func (e *Etcd) Delete(ctx api.Context, name string, options *api.DeleteOptions) (runtime.Object, error) {
-	trace := util.NewTrace("Delete")
-	defer trace.LogIfLong(time.Second)
 	key, err := e.KeyFunc(ctx, name)
 	if err != nil {
 		return nil, err
 	}
 
 	obj := e.NewFunc()
+	trace := util.NewTrace("Delete " + reflect.TypeOf(obj).String())
+	defer trace.LogIfLong(time.Second)
 	trace.Step("About to read object")
 	if err := e.Helper.ExtractObj(key, obj, false); err != nil {
 		return nil, etcderr.InterpretDeleteError(err, e.EndpointName, name)
@@ -479,3 +469,19 @@ func (e *Etcd) WatchPredicate(ctx api.Context, m generic.Matcher, resourceVersio
 
 	return e.Helper.WatchList(e.KeyRootFunc(ctx), version, filterFunc)
 }
+
+// calculateTTL is a helper for retrieving the updated TTL for an object, or returning an error
+// if the TTL cannot be calculated. The defaultTTL is changed to 1 if less than zero. Zero means
+// no TTL, not "expire immediately".
+func (e *Etcd) calculateTTL(obj runtime.Object, defaultTTL int64, update bool) (ttl uint64, err error) {
+	// etcd may return a negative TTL for a node if the expiration has not occurred due
+	// to server lag - we will ensure that the value is at least set.
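+	// For example: a defaultTTL of -1 is clamped to 1 (expire almost
+	// immediately), while 0 passes through unchanged and means no expiry.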
+ if defaultTTL < 0 { + defaultTTL = 1 + } + ttl = uint64(defaultTTL) + if e.TTLFunc != nil { + ttl, err = e.TTLFunc(obj, ttl, update) + } + return ttl, err +} diff --git a/pkg/registry/generic/etcd/etcd_test.go b/pkg/registry/generic/etcd/etcd_test.go index 5041ffb3800..7431b813582 100644 --- a/pkg/registry/generic/etcd/etcd_test.go +++ b/pkg/registry/generic/etcd/etcd_test.go @@ -123,11 +123,11 @@ func (everythingMatcher) MatchesSingle() (string, bool) { func TestEtcdList(t *testing.T) { podA := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo"}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, } podB := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "bar"}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, } singleElemListResp := &etcd.Response{ @@ -230,11 +230,11 @@ func TestEtcdList(t *testing.T) { func TestEtcdCreate(t *testing.T) { podA := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, } podB := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, - Spec: api.PodSpec{Host: "machine2"}, + Spec: api.PodSpec{NodeName: "machine2"}, } nodeWithPodA := tools.EtcdResponseWithError{ @@ -308,11 +308,11 @@ func TestEtcdCreate(t *testing.T) { func TestEtcdCreateWithName(t *testing.T) { podA := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, } podB := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, - Spec: api.PodSpec{Host: "machine2"}, + Spec: api.PodSpec{NodeName: "machine2"}, } nodeWithPodA := tools.EtcdResponseWithError{ @@ -384,11 +384,11 @@ func TestEtcdCreateWithName(t *testing.T) { func TestEtcdUpdate(t *testing.T) { podA := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, } podB := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault, ResourceVersion: "1"}, - Spec: api.PodSpec{Host: "machine2"}, + Spec: api.PodSpec{NodeName: "machine2"}, } nodeWithPodA := tools.EtcdResponseWithError{ @@ -499,11 +499,11 @@ func TestEtcdUpdate(t *testing.T) { func TestEtcdUpdateWithName(t *testing.T) { podA := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo"}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, } podB := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, - Spec: api.PodSpec{Host: "machine2"}, + Spec: api.PodSpec{NodeName: "machine2"}, } nodeWithPodA := tools.EtcdResponseWithError{ @@ -574,7 +574,7 @@ func TestEtcdUpdateWithName(t *testing.T) { func TestEtcdGet(t *testing.T) { podA := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, } nodeWithPodA := tools.EtcdResponseWithError{ @@ -630,7 +630,7 @@ func TestEtcdGet(t *testing.T) { func TestEtcdDelete(t *testing.T) { podA := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, } nodeWithPodA := tools.EtcdResponseWithError{ @@ -699,7 +699,7 @@ func TestEtcdWatch(t *testing.T) { Namespace: api.NamespaceDefault, ResourceVersion: "1", }, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: 
"machine"}, } respWithPodA := &etcd.Response{ Node: &etcd.Node{ diff --git a/pkg/registry/pod/etcd/etcd.go b/pkg/registry/pod/etcd/etcd.go index 000075f08d7..05538b9c8b4 100644 --- a/pkg/registry/pod/etcd/etcd.go +++ b/pkg/registry/pod/etcd/etcd.go @@ -142,15 +142,18 @@ func (r *BindingREST) setPodHostAndAnnotations(ctx api.Context, podID, oldMachin if err != nil { return nil, err } - err = r.store.Helper.GuaranteedUpdate(podKey, &api.Pod{}, false, func(obj runtime.Object) (runtime.Object, uint64, error) { + err = r.store.Helper.GuaranteedUpdate(podKey, &api.Pod{}, false, tools.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { pod, ok := obj.(*api.Pod) if !ok { - return nil, 0, fmt.Errorf("unexpected object: %#v", obj) + return nil, fmt.Errorf("unexpected object: %#v", obj) } - if pod.Spec.Host != oldMachine { - return nil, 0, fmt.Errorf("pod %v is already assigned to host %q", pod.Name, pod.Spec.Host) + if pod.DeletionTimestamp != nil { + return nil, fmt.Errorf("pod %s is being deleted, cannot be assigned to a host", pod.Name) } - pod.Spec.Host = machine + if pod.Spec.NodeName != oldMachine { + return nil, fmt.Errorf("pod %v is already assigned to node %q", pod.Name, pod.Spec.NodeName) + } + pod.Spec.NodeName = machine if pod.Annotations == nil { pod.Annotations = make(map[string]string) } @@ -158,8 +161,8 @@ func (r *BindingREST) setPodHostAndAnnotations(ctx api.Context, podID, oldMachin pod.Annotations[k] = v } finalPod = pod - return pod, 0, nil - }) + return pod, nil + })) return finalPod, err } diff --git a/pkg/registry/pod/etcd/etcd_test.go b/pkg/registry/pod/etcd/etcd_test.go index 81ede48d041..9d1e00f9264 100644 --- a/pkg/registry/pod/etcd/etcd_test.go +++ b/pkg/registry/pod/etcd/etcd_test.go @@ -231,14 +231,14 @@ func TestListPodList(t *testing.T) { { Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo"}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, Status: api.PodStatus{Phase: api.PodRunning}, }), }, { Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "bar"}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, }), }, }, @@ -255,10 +255,10 @@ func TestListPodList(t *testing.T) { if len(pods.Items) != 2 { t.Errorf("Unexpected pod list: %#v", pods) } - if pods.Items[0].Name != "foo" || pods.Items[0].Status.Phase != api.PodRunning || pods.Items[0].Spec.Host != "machine" { + if pods.Items[0].Name != "foo" || pods.Items[0].Status.Phase != api.PodRunning || pods.Items[0].Spec.NodeName != "machine" { t.Errorf("Unexpected pod: %#v", pods.Items[0]) } - if pods.Items[1].Name != "bar" || pods.Items[1].Spec.Host != "machine" { + if pods.Items[1].Name != "bar" || pods.Items[1].Spec.NodeName != "machine" { t.Errorf("Unexpected pod: %#v", pods.Items[1]) } } @@ -278,7 +278,7 @@ func TestListPodListSelection(t *testing.T) { })}, {Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "bar"}, - Spec: api.PodSpec{Host: "barhost"}, + Spec: api.PodSpec{NodeName: "barhost"}, })}, {Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "baz"}, @@ -388,7 +388,7 @@ func TestPodDecode(t *testing.T) { func TestGet(t *testing.T) { expect := validNewPod() expect.Status.Phase = api.PodRunning - expect.Spec.Host = "machine" + expect.Spec.NodeName = "machine" fakeEtcdClient, helper := newHelper(t) key := etcdtest.AddPrefix("/pods/test/foo") @@ -485,7 +485,7 @@ func TestUpdateWithConflictingNamespace(t 
*testing.T) { Node: &etcd.Node{ Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, }), ModifiedIndex: 1, }, @@ -645,7 +645,7 @@ func TestDeletePod(t *testing.T) { Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, }), ModifiedIndex: 1, CreatedIndex: 1, @@ -1035,8 +1035,8 @@ func TestEtcdCreateBinding(t *testing.T) { pod, err := registry.Get(ctx, validNewPod().ObjectMeta.Name) if err != nil { t.Errorf("%s: unexpected error: %v", k, err) - } else if pod.(*api.Pod).Spec.Host != test.binding.Target.Name { - t.Errorf("%s: expected: %v, got: %v", k, pod.(*api.Pod).Spec.Host, test.binding.Target.Name) + } else if pod.(*api.Pod).Spec.NodeName != test.binding.Target.Name { + t.Errorf("%s: expected: %v, got: %v", k, pod.(*api.Pod).Spec.NodeName, test.binding.Target.Name) } } } @@ -1107,7 +1107,7 @@ func TestEtcdUpdateScheduled(t *testing.T) { Namespace: api.NamespaceDefault, }, Spec: api.PodSpec{ - Host: "machine", + NodeName: "machine", Containers: []api.Container{ { Name: "foobar", @@ -1127,7 +1127,7 @@ func TestEtcdUpdateScheduled(t *testing.T) { }, }, Spec: api.PodSpec{ - Host: "machine", + NodeName: "machine", Containers: []api.Container{ { Name: "foobar", @@ -1170,7 +1170,7 @@ func TestEtcdUpdateStatus(t *testing.T) { Namespace: api.NamespaceDefault, }, Spec: api.PodSpec{ - Host: "machine", + NodeName: "machine", Containers: []api.Container{ { Image: "foo:v1", @@ -1190,7 +1190,7 @@ func TestEtcdUpdateStatus(t *testing.T) { }, }, Spec: api.PodSpec{ - Host: "machine", + NodeName: "machine", Containers: []api.Container{ { Image: "foo:v2", @@ -1238,7 +1238,7 @@ func TestEtcdDeletePod(t *testing.T) { key = etcdtest.AddPrefix(key) fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo"}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, }), 0) _, err := registry.Delete(ctx, "foo", api.NewDeleteOptions(0)) if err != nil { @@ -1260,7 +1260,7 @@ func TestEtcdDeletePodMultipleContainers(t *testing.T) { key = etcdtest.AddPrefix(key) fakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo"}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, }), 0) _, err := registry.Delete(ctx, "foo", api.NewDeleteOptions(0)) if err != nil { @@ -1330,13 +1330,13 @@ func TestEtcdList(t *testing.T) { { Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "foo"}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, }), }, { Value: runtime.EncodeOrDie(latest.Codec, &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "bar"}, - Spec: api.PodSpec{Host: "machine"}, + Spec: api.PodSpec{NodeName: "machine"}, }), }, }, @@ -1353,8 +1353,8 @@ func TestEtcdList(t *testing.T) { if len(pods.Items) != 2 || pods.Items[0].Name != "foo" || pods.Items[1].Name != "bar" { t.Errorf("Unexpected pod list: %#v", pods) } - if pods.Items[0].Spec.Host != "machine" || - pods.Items[1].Spec.Host != "machine" { + if pods.Items[0].Spec.NodeName != "machine" || + pods.Items[1].Spec.NodeName != "machine" { t.Errorf("Failed to populate host name.") } } diff --git a/pkg/registry/pod/rest.go b/pkg/registry/pod/rest.go index ff54ff0d0b4..7c1ed33e3bc 100644 --- a/pkg/registry/pod/rest.go +++ b/pkg/registry/pod/rest.go @@ -123,7 +123,7 @@ func 
MatchPod(label labels.Selector, field fields.Selector) generic.Matcher { func PodToSelectableFields(pod *api.Pod) fields.Set { return fields.Set{ "metadata.name": pod.Name, - "spec.host": pod.Spec.Host, + "spec.host": pod.Spec.NodeName, "status.phase": string(pod.Status.Phase), } } @@ -199,7 +199,7 @@ func LogLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, ct return nil, nil, errors.NewBadRequest(fmt.Sprintf("a container name must be specified for pod %s", name)) } } - nodeHost := pod.Spec.Host + nodeHost := pod.Spec.NodeName if len(nodeHost) == 0 { // If pod has not been assigned a host, return an empty location return nil, nil, nil @@ -242,7 +242,7 @@ func ExecLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, c return nil, nil, errors.NewBadRequest(fmt.Sprintf("a container name must be specified for pod %s", name)) } } - nodeHost := pod.Spec.Host + nodeHost := pod.Spec.NodeName if len(nodeHost) == 0 { // If pod has not been assigned a host, return an empty location return nil, nil, fmt.Errorf("pod %s does not have a host assigned", name) @@ -284,7 +284,7 @@ func PortForwardLocation(getter ResourceGetter, connInfo client.ConnectionInfoGe return nil, nil, err } - nodeHost := pod.Spec.Host + nodeHost := pod.Spec.NodeName if len(nodeHost) == 0 { // If pod has not been assigned a host, return an empty location return nil, nil, errors.NewBadRequest(fmt.Sprintf("pod %s does not have a host assigned", name)) diff --git a/pkg/registry/service/allocator/etcd/etcd.go b/pkg/registry/service/allocator/etcd/etcd.go index c2460405d64..f1a4981b27c 100644 --- a/pkg/registry/service/allocator/etcd/etcd.go +++ b/pkg/registry/service/allocator/etcd/etcd.go @@ -141,25 +141,25 @@ func (e *Etcd) Release(item int) error { // tryUpdate performs a read-update to persist the latest snapshot state of allocation. 
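// If the stored ResourceVersion no longer matches the last snapshot this
// process wrote, the in-memory allocator is first restored from etcd and fn
// is re-applied, so another writer's allocations are never silently dropped.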
func (e *Etcd) tryUpdate(fn func() error) error { err := e.helper.GuaranteedUpdate(e.baseKey, &api.RangeAllocation{}, true, - func(input runtime.Object) (output runtime.Object, ttl uint64, err error) { + tools.SimpleUpdate(func(input runtime.Object) (output runtime.Object, err error) { existing := input.(*api.RangeAllocation) if len(existing.ResourceVersion) == 0 { - return nil, 0, fmt.Errorf("cannot allocate resources of type %s at this time", e.kind) + return nil, fmt.Errorf("cannot allocate resources of type %s at this time", e.kind) } if existing.ResourceVersion != e.last { if err := e.alloc.Restore(existing.Range, existing.Data); err != nil { - return nil, 0, err + return nil, err } if err := fn(); err != nil { - return nil, 0, err + return nil, err } } e.last = existing.ResourceVersion rangeSpec, data := e.alloc.Snapshot() existing.Range = rangeSpec existing.Data = data - return existing, 0, nil - }, + return existing, nil + }), ) return etcderr.InterpretUpdateError(err, e.kind, "") } @@ -198,19 +198,19 @@ func (e *Etcd) CreateOrUpdate(snapshot *api.RangeAllocation) error { last := "" err := e.helper.GuaranteedUpdate(e.baseKey, &api.RangeAllocation{}, true, - func(input runtime.Object) (output runtime.Object, ttl uint64, err error) { + tools.SimpleUpdate(func(input runtime.Object) (output runtime.Object, err error) { existing := input.(*api.RangeAllocation) switch { case len(snapshot.ResourceVersion) != 0 && len(existing.ResourceVersion) != 0: if snapshot.ResourceVersion != existing.ResourceVersion { - return nil, 0, k8serr.NewConflict(e.kind, "", fmt.Errorf("the provided resource version does not match")) + return nil, k8serr.NewConflict(e.kind, "", fmt.Errorf("the provided resource version does not match")) } case len(existing.ResourceVersion) != 0: - return nil, 0, k8serr.NewConflict(e.kind, "", fmt.Errorf("another caller has already initialized the resource")) + return nil, k8serr.NewConflict(e.kind, "", fmt.Errorf("another caller has already initialized the resource")) } last = snapshot.ResourceVersion - return snapshot, 0, nil - }, + return snapshot, nil + }), ) if err != nil { return etcderr.InterpretUpdateError(err, e.kind, "") diff --git a/pkg/registry/service/ipallocator/controller/repair.go b/pkg/registry/service/ipallocator/controller/repair.go index 451f3f32d69..2c4ce347e30 100644 --- a/pkg/registry/service/ipallocator/controller/repair.go +++ b/pkg/registry/service/ipallocator/controller/repair.go @@ -27,17 +27,17 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/util" ) -// Repair is a controller loop that periodically examines all service PortalIP allocations +// Repair is a controller loop that periodically examines all service ClusterIP allocations // and logs any errors, and then sets the compacted and accurate list of all allocated IPs. // // Handles: -// * Duplicate PortalIP assignments caused by operator action or undetected race conditions -// * PortalIPs that do not match the current portal network +// * Duplicate ClusterIP assignments caused by operator action or undetected race conditions +// * ClusterIPs that do not match the currently configured range // * Allocations to services that were not actually created due to a crash or powerloss // * Migrates old versions of Kubernetes services into the atomic ipallocator model automatically // // Can be run at infrequent intervals, and is best performed on startup of the master. 
-// Is level driven and idempotent - all valid PortalIPs will be updated into the ipallocator +// Is level driven and idempotent - all valid ClusterIPs will be updated into the ipallocator // map at the end of a single execution loop if no race is encountered. // // TODO: allocate new IPs if necessary @@ -49,7 +49,7 @@ type Repair struct { alloc service.RangeRegistry } -// NewRepair creates a controller that periodically ensures that all portalIPs are uniquely allocated across the cluster +// NewRepair creates a controller that periodically ensures that all clusterIPs are uniquely allocated across the cluster // and generates informational warnings for a cluster that is not in sync. func NewRepair(interval time.Duration, registry service.Registry, network *net.IPNet, alloc service.RangeRegistry) *Repair { return &Repair{ @@ -69,7 +69,7 @@ func (c *Repair) RunUntil(ch chan struct{}) { }, c.interval, ch) } -// RunOnce verifies the state of the portal IP allocations and returns an error if an unrecoverable problem occurs. +// RunOnce verifies the state of the cluster IP allocations and returns an error if an unrecoverable problem occurs. func (c *Repair) RunOnce() error { // TODO: (per smarterclayton) if Get() or ListServices() is a weak consistency read, // or if they are executed against different leaders, @@ -94,27 +94,27 @@ func (c *Repair) RunOnce() error { if !api.IsServiceIPSet(&svc) { continue } - ip := net.ParseIP(svc.Spec.PortalIP) + ip := net.ParseIP(svc.Spec.ClusterIP) if ip == nil { - // portal IP is broken, reallocate - util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s is not a valid IP; please recreate", svc.Spec.PortalIP, svc.Name, svc.Namespace)) + // cluster IP is broken, reallocate + util.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not a valid IP; please recreate", svc.Spec.ClusterIP, svc.Name, svc.Namespace)) continue } switch err := r.Allocate(ip); err { case nil: case ipallocator.ErrAllocated: // TODO: send event - // portal IP is broken, reallocate - util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s was assigned to multiple services; please recreate", ip, svc.Name, svc.Namespace)) + // cluster IP is broken, reallocate + util.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s was assigned to multiple services; please recreate", ip, svc.Name, svc.Namespace)) case ipallocator.ErrNotInRange: // TODO: send event - // portal IP is broken, reallocate - util.HandleError(fmt.Errorf("the portal IP %s for service %s/%s is not within the service CIDR %s; please recreate", ip, svc.Name, svc.Namespace, c.network)) + // cluster IP is broken, reallocate + util.HandleError(fmt.Errorf("the cluster IP %s for service %s/%s is not within the service CIDR %s; please recreate", ip, svc.Name, svc.Namespace, c.network)) case ipallocator.ErrFull: // TODO: send event return fmt.Errorf("the service CIDR %s is full; you must widen the CIDR in order to create new services") default: - return fmt.Errorf("unable to allocate portal IP %s for service %s/%s due to an unknown error, exiting: %v", ip, svc.Name, svc.Namespace, err) + return fmt.Errorf("unable to allocate cluster IP %s for service %s/%s due to an unknown error, exiting: %v", ip, svc.Name, svc.Namespace, err) } } diff --git a/pkg/registry/service/ipallocator/controller/repair_test.go b/pkg/registry/service/ipallocator/controller/repair_test.go index ff1ea660c57..d9448944704 100644 --- a/pkg/registry/service/ipallocator/controller/repair_test.go +++ 
b/pkg/registry/service/ipallocator/controller/repair_test.go @@ -121,22 +121,22 @@ func TestRepairWithExisting(t *testing.T) { registry.List = api.ServiceList{ Items: []api.Service{ { - Spec: api.ServiceSpec{PortalIP: "192.168.1.1"}, + Spec: api.ServiceSpec{ClusterIP: "192.168.1.1"}, }, { - Spec: api.ServiceSpec{PortalIP: "192.168.1.100"}, + Spec: api.ServiceSpec{ClusterIP: "192.168.1.100"}, }, { // outside CIDR, will be dropped - Spec: api.ServiceSpec{PortalIP: "192.168.0.1"}, + Spec: api.ServiceSpec{ClusterIP: "192.168.0.1"}, }, { // empty, ignored - Spec: api.ServiceSpec{PortalIP: ""}, + Spec: api.ServiceSpec{ClusterIP: ""}, }, { // duplicate, dropped - Spec: api.ServiceSpec{PortalIP: "192.168.1.1"}, + Spec: api.ServiceSpec{ClusterIP: "192.168.1.1"}, }, { // headless - Spec: api.ServiceSpec{PortalIP: "None"}, + Spec: api.ServiceSpec{ClusterIP: "None"}, }, }, } diff --git a/pkg/registry/service/rest.go b/pkg/registry/service/rest.go index 2be18a65903..add0e4c01df 100644 --- a/pkg/registry/service/rest.go +++ b/pkg/registry/service/rest.go @@ -46,19 +46,19 @@ type REST struct { registry Registry machines minion.Registry endpoints endpoint.Registry - portals ipallocator.Interface + serviceIPs ipallocator.Interface serviceNodePorts portallocator.Interface clusterName string } // NewStorage returns a new REST. -func NewStorage(registry Registry, machines minion.Registry, endpoints endpoint.Registry, portals ipallocator.Interface, +func NewStorage(registry Registry, machines minion.Registry, endpoints endpoint.Registry, serviceIPs ipallocator.Interface, serviceNodePorts portallocator.Interface, clusterName string) *REST { return &REST{ registry: registry, machines: machines, endpoints: endpoints, - portals: portals, + serviceIPs: serviceIPs, serviceNodePorts: serviceNodePorts, clusterName: clusterName, } @@ -75,7 +75,7 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, err defer func() { if releaseServiceIP { if api.IsServiceIPSet(service) { - rs.portals.Release(net.ParseIP(service.Spec.PortalIP)) + rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP)) } } }() @@ -85,17 +85,17 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (runtime.Object, err if api.IsServiceIPRequested(service) { // Allocate next available. - ip, err := rs.portals.AllocateNext() + ip, err := rs.serviceIPs.AllocateNext() if err != nil { - el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.portalIP", service.Spec.PortalIP, err.Error())} + el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, err.Error())} return nil, errors.NewInvalid("Service", service.Name, el) } - service.Spec.PortalIP = ip.String() + service.Spec.ClusterIP = ip.String() releaseServiceIP = true } else if api.IsServiceIPSet(service) { // Try to respect the requested IP. 
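An aside on the Create path above, whose requested-IP branch continues just below: any ClusterIP claimed during the request is released by a deferred function unless the create fully succeeds. A toy sketch of that allocate-then-roll-back pattern (the allocator and persist step are stand-ins):

```go
package main

import "fmt"

// toyAllocator stands in for ipallocator.Interface in this sketch.
type toyAllocator struct{ used map[string]bool }

func (a *toyAllocator) AllocateNext() (string, error) { a.used["10.0.0.1"] = true; return "10.0.0.1", nil }
func (a *toyAllocator) Release(ip string)             { delete(a.used, ip) }

func createService(alloc *toyAllocator, persist func() error) error {
	releaseServiceIP := false
	var ip string
	// Roll back the claimed IP if anything below fails.
	defer func() {
		if releaseServiceIP {
			alloc.Release(ip)
		}
	}()

	var err error
	if ip, err = alloc.AllocateNext(); err != nil {
		return err
	}
	releaseServiceIP = true

	if err := persist(); err != nil {
		return err // deferred release reclaims the IP
	}
	releaseServiceIP = false // success: the service now owns the IP
	return nil
}

func main() {
	alloc := &toyAllocator{used: map[string]bool{}}
	_ = createService(alloc, func() error { return fmt.Errorf("etcd down") })
	fmt.Println(alloc.used) // map[] — the IP was rolled back
}
```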
- if err := rs.portals.Allocate(net.ParseIP(service.Spec.PortalIP)); err != nil { - el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.portalIP", service.Spec.PortalIP, err.Error())} + if err := rs.serviceIPs.Allocate(net.ParseIP(service.Spec.ClusterIP)); err != nil { + el := fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("spec.clusterIP", service.Spec.ClusterIP, err.Error())} return nil, errors.NewInvalid("Service", service.Name, el) } releaseServiceIP = true @@ -150,7 +150,7 @@ func (rs *REST) Delete(ctx api.Context, id string) (runtime.Object, error) { } if api.IsServiceIPSet(service) { - rs.portals.Release(net.ParseIP(service.Spec.PortalIP)) + rs.serviceIPs.Release(net.ParseIP(service.Spec.ClusterIP)) } for _, nodePort := range CollectServiceNodePorts(service) { diff --git a/pkg/registry/service/rest_test.go b/pkg/registry/service/rest_test.go index ec658d349bb..5cf064be0b8 100644 --- a/pkg/registry/service/rest_test.go +++ b/pkg/registry/service/rest_test.go @@ -96,8 +96,8 @@ func TestServiceRegistryCreate(t *testing.T) { if created_service.CreationTimestamp.IsZero() { t.Errorf("Expected timestamp to be set, got: %v", created_service.CreationTimestamp) } - if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.PortalIP)) { - t.Errorf("Unexpected PortalIP: %s", created_service.Spec.PortalIP) + if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.ClusterIP)) { + t.Errorf("Unexpected ClusterIP: %s", created_service.Spec.ClusterIP) } srv, err := registry.GetService(ctx, svc.Name) if err != nil { @@ -517,8 +517,8 @@ func TestServiceRegistryIPAllocation(t *testing.T) { if created_service_1.Name != "foo" { t.Errorf("Expected foo, but got %v", created_service_1.Name) } - if !makeIPNet(t).Contains(net.ParseIP(created_service_1.Spec.PortalIP)) { - t.Errorf("Unexpected PortalIP: %s", created_service_1.Spec.PortalIP) + if !makeIPNet(t).Contains(net.ParseIP(created_service_1.Spec.ClusterIP)) { + t.Errorf("Unexpected ClusterIP: %s", created_service_1.Spec.ClusterIP) } svc2 := &api.Service{ @@ -538,14 +538,14 @@ func TestServiceRegistryIPAllocation(t *testing.T) { if created_service_2.Name != "bar" { t.Errorf("Expected bar, but got %v", created_service_2.Name) } - if !makeIPNet(t).Contains(net.ParseIP(created_service_2.Spec.PortalIP)) { - t.Errorf("Unexpected PortalIP: %s", created_service_2.Spec.PortalIP) + if !makeIPNet(t).Contains(net.ParseIP(created_service_2.Spec.ClusterIP)) { + t.Errorf("Unexpected ClusterIP: %s", created_service_2.Spec.ClusterIP) } testIPs := []string{"1.2.3.93", "1.2.3.94", "1.2.3.95", "1.2.3.96"} testIP := "" for _, ip := range testIPs { - if !rest.portals.(*ipallocator.Range).Has(net.ParseIP(ip)) { + if !rest.serviceIPs.(*ipallocator.Range).Has(net.ParseIP(ip)) { testIP = ip } } @@ -554,7 +554,7 @@ func TestServiceRegistryIPAllocation(t *testing.T) { ObjectMeta: api.ObjectMeta{Name: "quux"}, Spec: api.ServiceSpec{ Selector: map[string]string{"bar": "baz"}, - PortalIP: testIP, + ClusterIP: testIP, SessionAffinity: api.ServiceAffinityNone, Type: api.ServiceTypeClusterIP, Ports: []api.ServicePort{{ @@ -569,8 +569,8 @@ func TestServiceRegistryIPAllocation(t *testing.T) { t.Fatal(err) } created_service_3 := created_svc3.(*api.Service) - if created_service_3.Spec.PortalIP != testIP { // specific IP - t.Errorf("Unexpected PortalIP: %s", created_service_3.Spec.PortalIP) + if created_service_3.Spec.ClusterIP != testIP { // specific IP + t.Errorf("Unexpected ClusterIP: %s", created_service_3.Spec.ClusterIP) } } @@ -595,8 +595,8 @@ func 
TestServiceRegistryIPReallocation(t *testing.T) { if created_service_1.Name != "foo" { t.Errorf("Expected foo, but got %v", created_service_1.Name) } - if !makeIPNet(t).Contains(net.ParseIP(created_service_1.Spec.PortalIP)) { - t.Errorf("Unexpected PortalIP: %s", created_service_1.Spec.PortalIP) + if !makeIPNet(t).Contains(net.ParseIP(created_service_1.Spec.ClusterIP)) { + t.Errorf("Unexpected ClusterIP: %s", created_service_1.Spec.ClusterIP) } _, err := rest.Delete(ctx, created_service_1.Name) @@ -622,8 +622,8 @@ func TestServiceRegistryIPReallocation(t *testing.T) { if created_service_2.Name != "bar" { t.Errorf("Expected bar, but got %v", created_service_2.Name) } - if !makeIPNet(t).Contains(net.ParseIP(created_service_2.Spec.PortalIP)) { - t.Errorf("Unexpected PortalIP: %s", created_service_2.Spec.PortalIP) + if !makeIPNet(t).Contains(net.ParseIP(created_service_2.Spec.ClusterIP)) { + t.Errorf("Unexpected ClusterIP: %s", created_service_2.Spec.ClusterIP) } } @@ -648,8 +648,8 @@ func TestServiceRegistryIPUpdate(t *testing.T) { if created_service.Spec.Ports[0].Port != 6502 { t.Errorf("Expected port 6502, but got %v", created_service.Spec.Ports[0].Port) } - if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.PortalIP)) { - t.Errorf("Unexpected PortalIP: %s", created_service.Spec.PortalIP) + if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.ClusterIP)) { + t.Errorf("Unexpected ClusterIP: %s", created_service.Spec.ClusterIP) } update := deepCloneService(created_service) @@ -663,7 +663,7 @@ func TestServiceRegistryIPUpdate(t *testing.T) { update = deepCloneService(created_service) update.Spec.Ports[0].Port = 6503 - update.Spec.PortalIP = "1.2.3.76" // error + update.Spec.ClusterIP = "1.2.3.76" // error _, _, err := rest.Update(ctx, update) if err == nil || !errors.IsInvalid(err) { @@ -692,8 +692,8 @@ func TestServiceRegistryIPLoadBalancer(t *testing.T) { if created_service.Spec.Ports[0].Port != 6502 { t.Errorf("Expected port 6502, but got %v", created_service.Spec.Ports[0].Port) } - if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.PortalIP)) { - t.Errorf("Unexpected PortalIP: %s", created_service.Spec.PortalIP) + if !makeIPNet(t).Contains(net.ParseIP(created_service.Spec.ClusterIP)) { + t.Errorf("Unexpected ClusterIP: %s", created_service.Spec.ClusterIP) } update := deepCloneService(created_service) @@ -750,7 +750,7 @@ func TestCreate(t *testing.T) { &api.Service{ Spec: api.ServiceSpec{ Selector: map[string]string{"bar": "baz"}, - PortalIP: "None", + ClusterIP: "None", SessionAffinity: "None", Type: api.ServiceTypeClusterIP, Ports: []api.ServicePort{{ @@ -767,7 +767,7 @@ func TestCreate(t *testing.T) { &api.Service{ Spec: api.ServiceSpec{ Selector: map[string]string{"bar": "baz"}, - PortalIP: "invalid", + ClusterIP: "invalid", SessionAffinity: "None", Type: api.ServiceTypeClusterIP, Ports: []api.ServicePort{{ diff --git a/pkg/tools/etcd_helper.go b/pkg/tools/etcd_helper.go index 7446d889d20..d60cf68b43a 100644 --- a/pkg/tools/etcd_helper.go +++ b/pkg/tools/etcd_helper.go @@ -228,19 +228,15 @@ type etcdCache interface { } func (h *EtcdHelper) getFromCache(index uint64) (runtime.Object, bool) { - trace := util.NewTrace("getFromCache") - defer trace.LogIfLong(200 * time.Microsecond) startTime := time.Now() defer func() { cacheGetLatency.Observe(float64(time.Since(startTime) / time.Microsecond)) }() obj, found := h.cache.Get(index) - trace.Step("Raw get done") if found { // We should not return the object itself to avoid poluting the cache if someone // modifies returned 
values. objCopy, err := api.Scheme.DeepCopy(obj) - trace.Step("Deep copied") if err != nil { glog.Errorf("Error during DeepCopy of cached object: %q", err) return nil, false @@ -339,23 +335,25 @@ func (h *EtcdHelper) ExtractObjToList(key string, listObj runtime.Object) error // empty responses and nil response nodes exactly like a not found error. func (h *EtcdHelper) ExtractObj(key string, objPtr runtime.Object, ignoreNotFound bool) error { key = h.PrefixEtcdKey(key) - _, _, err := h.bodyAndExtractObj(key, objPtr, ignoreNotFound) + _, _, _, err := h.bodyAndExtractObj(key, objPtr, ignoreNotFound) return err } -func (h *EtcdHelper) bodyAndExtractObj(key string, objPtr runtime.Object, ignoreNotFound bool) (body string, modifiedIndex uint64, err error) { +// bodyAndExtractObj performs the normal Get path to etcd, returning the parsed node and the raw response +// so callers can read additional metadata, such as the current etcd index and the TTL. +func (h *EtcdHelper) bodyAndExtractObj(key string, objPtr runtime.Object, ignoreNotFound bool) (body string, node *etcd.Node, res *etcd.Response, err error) { startTime := time.Now() response, err := h.Client.Get(key, false, false) recordEtcdRequestLatency("get", getTypeName(objPtr), startTime) if err != nil && !IsEtcdNotFound(err) { - return "", 0, err + return "", nil, nil, err } - return h.extractObj(response, err, objPtr, ignoreNotFound, false) + body, node, err = h.extractObj(response, err, objPtr, ignoreNotFound, false) + return body, node, response, err } -func (h *EtcdHelper) extractObj(response *etcd.Response, inErr error, objPtr runtime.Object, ignoreNotFound, prevNode bool) (body string, modifiedIndex uint64, err error) { - var node *etcd.Node +func (h *EtcdHelper) extractObj(response *etcd.Response, inErr error, objPtr runtime.Object, ignoreNotFound, prevNode bool) (body string, node *etcd.Node, err error) { if response != nil { if prevNode { node = response.PrevNode @@ -367,14 +365,14 @@ func (h *EtcdHelper) extractObj(response *etcd.Response, inErr error, objPtr run if ignoreNotFound { v, err := conversion.EnforcePtr(objPtr) if err != nil { - return "", 0, err + return "", nil, err } v.Set(reflect.Zero(v.Type())) - return "", 0, nil + return "", nil, nil } else if inErr != nil { - return "", 0, inErr + return "", nil, inErr } - return "", 0, fmt.Errorf("unable to locate a value on the response: %#v", response) + return "", nil, fmt.Errorf("unable to locate a value on the response: %#v", response) } body = node.Value err = h.Codec.DecodeInto([]byte(body), objPtr) @@ -382,7 +380,7 @@ func (h *EtcdHelper) extractObj(response *etcd.Response, inErr error, objPtr run _ = h.Versioner.UpdateObject(objPtr, node) // being unable to set the version does not prevent the object from being extracted } - return body, node.ModifiedIndex, err + return body, node, err } // CreateObj adds a new object at a key unless it already exists. 'ttl' is time-to-live in seconds, @@ -486,9 +484,28 @@ func (h *EtcdHelper) SetObj(key string, obj, out runtime.Object, ttl uint64) err return err } +// ResponseMeta carries the etcd metadata that is associated with +// an object. It abstracts the actual underlying objects to prevent coupling with etcd +// and to improve testability. +type ResponseMeta struct { + // TTL is the time to live of the node that contained the returned object. It may be + // zero or negative in some cases (objects may be expired after the requested + // expiration time due to server lag).
+ TTL int64 +} + // Pass an EtcdUpdateFunc to EtcdHelper.GuaranteedUpdate to make an etcd update that is guaranteed to succeed. // See the comment for GuaranteedUpdate for more detail. -type EtcdUpdateFunc func(input runtime.Object) (output runtime.Object, ttl uint64, err error) +type EtcdUpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Object, ttl *uint64, err error) +type SimpleEtcdUpdateFunc func(runtime.Object) (runtime.Object, error) + +// SimpleUpdate converts a SimpleEtcdUpdateFunc into an EtcdUpdateFunc +func SimpleUpdate(fn SimpleEtcdUpdateFunc) EtcdUpdateFunc { + return func(input runtime.Object, _ ResponseMeta) (runtime.Object, *uint64, error) { + out, err := fn(input) + return out, nil, err + } +} // GuaranteedUpdate calls "tryUpdate()" to update key "key" that is of type "ptrToType". It keeps // calling tryUpdate() and retrying the update until success if there is etcd index conflict. Note that object @@ -499,7 +516,7 @@ type EtcdUpdateFunc func(input runtime.Object) (output runtime.Object, ttl uint6 // Example: // // h := &util.EtcdHelper{client, encoding, versioning} -// err := h.GuaranteedUpdate("myKey", &MyType{}, true, func(input runtime.Object) (runtime.Object, uint64, error) { +// err := h.GuaranteedUpdate("myKey", &MyType{}, true, func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { // // Before each invocation of the user-defined function, "input" is reset to etcd's current contents for "myKey". // // cur := input.(*MyType) // Guaranteed to succeed. @@ -507,9 +524,9 @@ type EtcdUpdateFunc func(input runtime.Object) (output runtime.Object, ttl uint6 // // Make a *modification*. // cur.Counter++ // -// // Return the modified object. Return an error to stop iterating. Return a non-zero uint64 to set -// // the TTL on the object. -// return cur, 0, nil +// // Return the modified object. Return an error to stop iterating. Return a uint64 to alter // // the TTL on the object, or nil to keep the current value.
+// return cur, nil, nil // }) // func (h *EtcdHelper) GuaranteedUpdate(key string, ptrToType runtime.Object, ignoreNotFound bool, tryUpdate EtcdUpdateFunc) error { @@ -521,14 +538,33 @@ func (h *EtcdHelper) GuaranteedUpdate(key string, ptrToType runtime.Object, igno key = h.PrefixEtcdKey(key) for { obj := reflect.New(v.Type()).Interface().(runtime.Object) - origBody, index, err := h.bodyAndExtractObj(key, obj, ignoreNotFound) + origBody, node, res, err := h.bodyAndExtractObj(key, obj, ignoreNotFound) + if err != nil { + return err + } + meta := ResponseMeta{} + if node != nil { + meta.TTL = node.TTL + } + + ret, newTTL, err := tryUpdate(obj, meta) if err != nil { return err } - ret, ttl, err := tryUpdate(obj) - if err != nil { - return err + index := uint64(0) + ttl := uint64(0) + if node != nil { + index = node.ModifiedIndex + if node.TTL > 0 { + ttl = uint64(node.TTL) + } + } else if res != nil { + index = res.EtcdIndex + } + + if newTTL != nil { + ttl = *newTTL } data, err := h.Codec.Encode(ret) diff --git a/pkg/tools/etcd_helper_test.go b/pkg/tools/etcd_helper_test.go index 89400d40619..5c39002d4cc 100644 --- a/pkg/tools/etcd_helper_test.go +++ b/pkg/tools/etcd_helper_test.go @@ -19,12 +19,16 @@ package tools import ( "errors" "fmt" + "math/rand" + "net" "net/http" "net/http/httptest" "path" "reflect" + "strconv" "sync" "testing" + "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" @@ -529,9 +533,9 @@ func TestGuaranteedUpdate(t *testing.T) { // Create a new node. fakeClient.ExpectNotFoundGet(key) obj := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1} - err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object) (runtime.Object, uint64, error) { - return obj, 0, nil - }) + err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { + return obj, nil + })) if err != nil { t.Errorf("Unexpected error %#v", err) } @@ -548,15 +552,15 @@ func TestGuaranteedUpdate(t *testing.T) { // Update an existing node. callbackCalled := false objUpdate := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 2} - err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object) (runtime.Object, uint64, error) { + err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { callbackCalled = true if in.(*TestResource).Value != 1 { t.Errorf("Callback input was not current set value") } - return objUpdate, 0, nil - }) + return objUpdate, nil + })) if err != nil { t.Errorf("Unexpected error %#v", err) } @@ -575,6 +579,107 @@ func TestGuaranteedUpdate(t *testing.T) { } } +func TestGuaranteedUpdateTTL(t *testing.T) { + fakeClient := NewFakeEtcdClient(t) + fakeClient.TestIndex = true + helper := NewEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) + key := etcdtest.AddPrefix("/some/key") + + // Create a new node. 
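Before the body of TestGuaranteedUpdateTTL continues below, a note on the SimpleUpdate adapter used throughout these test conversions: it lets TTL-unaware callers keep their two-value callbacks under the new three-value signature. A self-contained sketch of the adapter shape (runtime.Object is replaced by an empty interface so the sketch compiles on its own):

```go
package main

import "fmt"

// Object stands in for runtime.Object so the sketch is self-contained.
type Object interface{}

// ResponseMeta mirrors the struct added in this diff (TTL of the read node).
type ResponseMeta struct{ TTL int64 }

// EtcdUpdateFunc is the new callback shape; a nil *uint64 means
// "leave the node's TTL unchanged".
type EtcdUpdateFunc func(input Object, res ResponseMeta) (Object, *uint64, error)
type SimpleEtcdUpdateFunc func(Object) (Object, error)

// SimpleUpdate adapts a TTL-unaware callback to the new signature,
// following the helper in this diff.
func SimpleUpdate(fn SimpleEtcdUpdateFunc) EtcdUpdateFunc {
	return func(input Object, _ ResponseMeta) (Object, *uint64, error) {
		out, err := fn(input)
		return out, nil, err // nil TTL: keep whatever TTL the node has
	}
}

func main() {
	f := SimpleUpdate(func(in Object) (Object, error) { return in, nil })
	out, ttl, err := f("value", ResponseMeta{TTL: 10})
	fmt.Println(out, ttl, err) // value <nil> <nil>
}
```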
+ fakeClient.ExpectNotFoundGet(key) + obj := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1} + err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { + if res.TTL != 0 { + t.Fatalf("unexpected response meta: %#v", res) + } + ttl := uint64(10) + return obj, &ttl, nil + }) + if err != nil { + t.Errorf("Unexpected error %#v", err) + } + data, err := codec.Encode(obj) + if err != nil { + t.Errorf("Unexpected error %#v", err) + } + expect := string(data) + got := fakeClient.Data[key].R.Node.Value + if expect != got { + t.Errorf("Wanted %v, got %v", expect, got) + } + if fakeClient.Data[key].R.Node.TTL != 10 { + t.Errorf("expected TTL set: %d", fakeClient.Data[key].R.Node.TTL) + } + + // Update an existing node. + callbackCalled := false + objUpdate := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 2} + err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { + if res.TTL != 10 { + t.Fatalf("unexpected response meta: %#v", res) + } + callbackCalled = true + + if in.(*TestResource).Value != 1 { + t.Errorf("Callback input was not current set value") + } + + return objUpdate, nil, nil + }) + + if err != nil { + t.Errorf("Unexpected error %#v", err) + } + data, err = codec.Encode(objUpdate) + if err != nil { + t.Errorf("Unexpected error %#v", err) + } + expect = string(data) + got = fakeClient.Data[key].R.Node.Value + if expect != got { + t.Errorf("Wanted %v, got %v", expect, got) + } + if fakeClient.Data[key].R.Node.TTL != 10 { + t.Errorf("expected TTL remained set: %d", fakeClient.Data[key].R.Node.TTL) + } + + // Update an existing node and change ttl + callbackCalled = false + objUpdate = &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 3} + err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { + if res.TTL != 10 { + t.Fatalf("unexpected response meta: %#v", res) + } + callbackCalled = true + + if in.(*TestResource).Value != 2 { + t.Errorf("Callback input was not current set value") + } + + newTTL := uint64(20) + return objUpdate, &newTTL, nil + }) + if err != nil { + t.Errorf("Unexpected error %#v", err) + } + data, err = codec.Encode(objUpdate) + if err != nil { + t.Errorf("Unexpected error %#v", err) + } + expect = string(data) + got = fakeClient.Data[key].R.Node.Value + if expect != got { + t.Errorf("Wanted %v, got %v", expect, got) + } + if fakeClient.Data[key].R.Node.TTL != 20 { + t.Errorf("expected TTL changed: %d", fakeClient.Data[key].R.Node.TTL) + } + + if !callbackCalled { + t.Errorf("tryUpdate callback should have been called.") + } +} + func TestGuaranteedUpdateNoChange(t *testing.T) { fakeClient := NewFakeEtcdClient(t) fakeClient.TestIndex = true @@ -584,9 +689,9 @@ func TestGuaranteedUpdateNoChange(t *testing.T) { // Create a new node. 
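Before TestGuaranteedUpdateNoChange continues below: the three cases in TestGuaranteedUpdateTTL above (set the TTL to 10, keep 10 by returning nil, override to 20) exercise the TTL precedence GuaranteedUpdate now applies. In isolation, assuming this reading of the hunk:

```go
package main

import "fmt"

// resolveTTL sketches the precedence the updated GuaranteedUpdate applies:
// keep the node's remaining TTL unless the callback returned an explicit
// override (nil means "leave it alone").
func resolveTTL(nodeTTL int64, override *uint64) uint64 {
	ttl := uint64(0)
	if nodeTTL > 0 {
		ttl = uint64(nodeTTL)
	}
	if override != nil {
		ttl = *override
	}
	return ttl
}

func main() {
	twenty := uint64(20)
	fmt.Println(resolveTTL(10, nil))     // 10: preserved
	fmt.Println(resolveTTL(10, &twenty)) // 20: overridden
}
```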
fakeClient.ExpectNotFoundGet(key) obj := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1} - err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object) (runtime.Object, uint64, error) { - return obj, 0, nil - }) + err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { + return obj, nil + })) if err != nil { t.Errorf("Unexpected error %#v", err) } @@ -594,11 +699,11 @@ func TestGuaranteedUpdateNoChange(t *testing.T) { // Update an existing node with the same data callbackCalled := false objUpdate := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1} - err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object) (runtime.Object, uint64, error) { + err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { fakeClient.Err = errors.New("should not be called") callbackCalled = true - return objUpdate, 0, nil - }) + return objUpdate, nil + })) if err != nil { t.Fatalf("Unexpected error %#v", err) } @@ -617,9 +722,9 @@ func TestGuaranteedUpdateKeyNotFound(t *testing.T) { fakeClient.ExpectNotFoundGet(key) obj := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1} - f := func(in runtime.Object) (runtime.Object, uint64, error) { - return obj, 0, nil - } + f := SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { + return obj, nil + }) ignoreNotFound := false err := helper.GuaranteedUpdate("/some/key", &TestResource{}, ignoreNotFound, f) @@ -654,7 +759,7 @@ func TestGuaranteedUpdate_CreateCollision(t *testing.T) { defer wgDone.Done() firstCall := true - err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object) (runtime.Object, uint64, error) { + err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { defer func() { firstCall = false }() if firstCall { @@ -665,8 +770,8 @@ func TestGuaranteedUpdate_CreateCollision(t *testing.T) { currValue := in.(*TestResource).Value obj := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: currValue + 1} - return obj, 0, nil - }) + return obj, nil + })) if err != nil { t.Errorf("Unexpected error %#v", err) } @@ -711,7 +816,24 @@ func TestGetEtcdVersion_ErrorStatus(t *testing.T) { } func TestGetEtcdVersion_NotListening(t *testing.T) { - _, err := GetEtcdVersion("http://127.0.0.1:4001") + portIsOpen := func(port int) bool { + conn, err := net.DialTimeout("tcp", "127.0.0.1:"+strconv.Itoa(port), 1*time.Second) + if err == nil { + conn.Close() + return true + } + return false + } + + port := rand.Intn((1 << 16) - 1) + for tried := 0; portIsOpen(port); tried++ { + if tried >= 10 { + t.Fatal("Couldn't find a closed TCP port to continue testing") + } + port++ + } + + _, err := GetEtcdVersion("http://127.0.0.1:" + strconv.Itoa(port)) assert.NotNil(t, err) } diff --git a/pkg/ui/datafile.go b/pkg/ui/datafile.go index 4331bda123d..5ed950767bc 100644 --- a/pkg/ui/datafile.go +++ b/pkg/ui/datafile.go @@ -1592,7 +1592,7 @@ func www_app_assets_css_app_css() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/assets/css/app.css", size: 37661, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/assets/css/app.css", size: 37661, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ 
-1630,7 +1630,7 @@ func www_app_assets_img_ic_arrow_drop_down_24px_svg() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/assets/img/ic_arrow_drop_down_24px.svg", size: 166, mode: os.FileMode(420), modTime: time.Unix(1432748239, 0)} + info := bindata_file_info{name: "www/app/assets/img/ic_arrow_drop_down_24px.svg", size: 166, mode: os.FileMode(420), modTime: time.Unix(1432773208, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1672,7 +1672,7 @@ func www_app_assets_img_ic_arrow_drop_up_24px_svg() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/assets/img/ic_arrow_drop_up_24px.svg", size: 795, mode: os.FileMode(420), modTime: time.Unix(1432748239, 0)} + info := bindata_file_info{name: "www/app/assets/img/ic_arrow_drop_up_24px.svg", size: 795, mode: os.FileMode(420), modTime: time.Unix(1432773208, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1689,7 +1689,7 @@ func www_app_assets_img_ic_keyboard_arrow_left_24px_svg() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/assets/img/ic_keyboard_arrow_left_24px.svg", size: 151, mode: os.FileMode(420), modTime: time.Unix(1432748239, 0)} + info := bindata_file_info{name: "www/app/assets/img/ic_keyboard_arrow_left_24px.svg", size: 151, mode: os.FileMode(420), modTime: time.Unix(1432773208, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -1706,7 +1706,7 @@ func www_app_assets_img_ic_keyboard_arrow_right_24px_svg() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/assets/img/ic_keyboard_arrow_right_24px.svg", size: 149, mode: os.FileMode(420), modTime: time.Unix(1432748239, 0)} + info := bindata_file_info{name: "www/app/assets/img/ic_keyboard_arrow_right_24px.svg", size: 149, mode: os.FileMode(420), modTime: time.Unix(1432773208, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -3526,7 +3526,7 @@ app.controller('ListPodsCtrl', [ app.controller('ListReplicationControllersCtrl', [ '$scope', '$routeParams', - 'k8sApi', + 'k8sv1Beta3Api', '$location', function($scope, $routeParams, k8sApi, $location) { 'use strict'; @@ -3582,27 +3582,26 @@ app.controller('ListReplicationControllersCtrl', [ var _name = '', _image = ''; - if (replicationController.desiredState.podTemplate.desiredState.manifest.containers) { - Object.keys(replicationController.desiredState.podTemplate.desiredState.manifest.containers) + if (replicationController.spec.template.spec.containers) { + Object.keys(replicationController.spec.template.spec.containers) .forEach(function(key) { - _name += replicationController.desiredState.podTemplate.desiredState.manifest.containers[key].name; - _image += replicationController.desiredState.podTemplate.desiredState.manifest.containers[key].image; + _name += replicationController.spec.template.spec.containers[key].name; + _image += replicationController.spec.template.spec.containers[key].image; }); } - var _name_selector = ''; + var _selectors = ''; - if (replicationController.desiredState.replicaSelector) { - Object.keys(replicationController.desiredState.replicaSelector) - .forEach(function(key) { _name_selector += replicationController.desiredState.replicaSelector[key]; }); + if (replicationController.spec.selector) { + _selectors = _.map(replicationController.spec.selector, function(v, k) { return k + '=' + v }).join(', '); } $scope.content.push({ - controller: replicationController.id, + controller: replicationController.metadata.name, containers: _name, images: _image, - 
selector: _name_selector, - replicas: replicationController.currentState.replicas + selector: _selectors, + replicas: replicationController.status.replicas }); }); @@ -3624,7 +3623,7 @@ app.controller('ListServicesCtrl', [ '$scope', '$interval', '$routeParams', - 'k8sApi', + 'k8sv1Beta3Api', '$rootScope', '$location', function($scope, $interval, $routeParams, k8sApi, $rootScope, $location) { @@ -3636,7 +3635,7 @@ app.controller('ListServicesCtrl', [ {name: 'Labels', field: 'labels'}, {name: 'Selector', field: 'selector'}, {name: 'IP', field: 'ip'}, - {name: 'Port', field: 'port'} + {name: 'Ports', field: 'port'} ]; $scope.custom = { @@ -3684,41 +3683,36 @@ app.controller('ListServicesCtrl', [ if (data.items.constructor === Array) { data.items.forEach(function(service) { - var _name = '', _uses = '', _component = '', _provider = ''; + var _labels = ''; - if (service.labels !== null && typeof service.labels === 'object') { - Object.keys(service.labels) - .forEach(function(key) { - if (key == 'name') { - _name += ',' + service.labels[key]; - } - if (key == 'component') { - _component += ',' + service.labels[key]; - } - if (key == 'provider') { - _provider += ',' + service.labels[key]; - } - }); + if (service.metadata.labels) { + _labels = _.map(service.metadata.labels, function(v, k) { return k + '=' + v }).join(', '); } var _selectors = ''; - if (service.selector !== null && typeof service.selector === 'object') { - Object.keys(service.selector) - .forEach(function(key) { - if (key == 'name') { - _selectors += ',' + service.selector[key]; - } - }); + if (service.spec.selector) { + _selectors = _.map(service.spec.selector, function(v, k) { return k + '=' + v }).join(', '); + } + + var _ports = ''; + + if (service.spec.ports) { + _ports = _.map(service.spec.ports, function(p) { + var n = ''; + if(p.name) + n = p.name + ': '; + n = n + p.port; + return n; + }).join(', '); } $scope.content.push({ - name: service.id, - ip: service.portalIP, - port: service.port, - selector: addLabel(_fixComma(_selectors), 'name='), - labels: addLabel(_fixComma(_name), 'name=') + ' ' + addLabel(_fixComma(_component), 'component=') + ' ' + - addLabel(_fixComma(_provider), 'provider=') + name: service.metadata.name, + ip: service.spec.portalIP, + port: _ports, + selector: _selectors, + labels: _labels }); }); } @@ -3821,7 +3815,7 @@ ReplicationController.prototype.handleError = function(data, status, headers, co app.controller('ReplicationControllerCtrl', [ '$scope', '$routeParams', - 'k8sApi', + 'k8sv1Beta3Api', function($scope, $routeParams, k8sApi) { $scope.controller = new ReplicationController(); $scope.controller.k8sApi = k8sApi; @@ -3829,6 +3823,7 @@ app.controller('ReplicationControllerCtrl', [ $scope.controller.getData($routeParams.replicationControllerId); $scope.doTheBack = function() { window.history.back(); }; + $scope.getSelectorUrlFragment = function(sel){ return _.map(sel, function(v, k) { return k + '=' + v }).join(','); }; } ]); @@ -3857,7 +3852,7 @@ ServiceController.prototype.handleError = function(data, status, headers, config app.controller('ServiceCtrl', [ '$scope', '$routeParams', - 'k8sApi', + 'k8sv1Beta3Api', '$location', function($scope, $routeParams, k8sApi, $location) { $scope.controller = new ServiceController(); @@ -3866,6 +3861,8 @@ app.controller('ServiceCtrl', [ $scope.controller.getData($routeParams.serviceId); $scope.doTheBack = function() { window.history.back(); }; + $scope.go = function(d) { $location.path('/dashboard/services/' + d.metadata.name); } + 
$scope.getSelectorUrlFragment = function(sel){ return _.map(sel, function(v, k) { return k + '=' + v }).join(','); }; } ]); @@ -4681,7 +4678,7 @@ func www_app_assets_js_app_js() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/assets/js/app.js", size: 93554, mode: os.FileMode(420), modTime: time.Unix(1432768978, 0)} + info := bindata_file_info{name: "www/app/assets/js/app.js", size: 93220, mode: os.FileMode(420), modTime: time.Unix(1432833385, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -4723,7 +4720,7 @@ func www_app_assets_js_base_js() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/assets/js/base.js", size: 477048, mode: os.FileMode(420), modTime: time.Unix(1432768977, 0)} + info := bindata_file_info{name: "www/app/assets/js/base.js", size: 477048, mode: os.FileMode(420), modTime: time.Unix(1432833385, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -4740,7 +4737,7 @@ func www_app_components_dashboard_img_icons_ic_arrow_drop_down_18px_svg() (*asse return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/img/icons/ic_arrow_drop_down_18px.svg", size: 114, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/img/icons/ic_arrow_drop_down_18px.svg", size: 114, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -4761,7 +4758,7 @@ func www_app_components_dashboard_img_icons_ic_arrow_drop_down_24px_svg() (*asse return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/img/icons/ic_arrow_drop_down_24px.svg", size: 166, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/img/icons/ic_arrow_drop_down_24px.svg", size: 166, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -4778,7 +4775,7 @@ func www_app_components_dashboard_img_icons_ic_close_18px_svg() (*asset, error) return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/img/icons/ic_close_18px.svg", size: 215, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/img/icons/ic_close_18px.svg", size: 215, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -4795,7 +4792,7 @@ func www_app_components_dashboard_img_icons_ic_close_24px_svg() (*asset, error) return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/img/icons/ic_close_24px.svg", size: 202, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/img/icons/ic_close_24px.svg", size: 202, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -4877,7 +4874,7 @@ func www_app_components_dashboard_manifest_json() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/manifest.json", size: 1854, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/manifest.json", size: 1854, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -4894,7 +4891,7 @@ func www_app_components_dashboard_pages_footer_html() (*asset, 
error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/pages/footer.html", size: 7, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/pages/footer.html", size: 7, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -4937,7 +4934,7 @@ func www_app_components_dashboard_pages_header_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/pages/header.html", size: 1313, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/pages/header.html", size: 1313, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -4961,7 +4958,7 @@ func www_app_components_dashboard_pages_home_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/pages/home.html", size: 247, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/pages/home.html", size: 247, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -4978,7 +4975,7 @@ func www_app_components_dashboard_protractor_smoke_spec_js() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/protractor/smoke.spec.js", size: 2616, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/protractor/smoke.spec.js", size: 2616, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -4995,7 +4992,7 @@ func www_app_components_dashboard_test_controllers_header_spec_js() (*asset, err return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/test/controllers/header.spec.js", size: 1293, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/test/controllers/header.spec.js", size: 1293, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5046,7 +5043,7 @@ func www_app_components_dashboard_views_groups_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/groups.html", size: 1298, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/groups.html", size: 1298, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5070,7 +5067,7 @@ func www_app_components_dashboard_views_listevents_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/listEvents.html", size: 326, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/listEvents.html", size: 326, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5094,7 +5091,7 @@ func www_app_components_dashboard_views_listminions_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/listMinions.html", size: 348, mode: os.FileMode(420), modTime: 
time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/listMinions.html", size: 348, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5118,7 +5115,7 @@ func www_app_components_dashboard_views_listpods_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/listPods.html", size: 345, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/listPods.html", size: 345, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5194,7 +5191,7 @@ func www_app_components_dashboard_views_listpodscards_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/listPodsCards.html", size: 1967, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/listPodsCards.html", size: 1967, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5228,7 +5225,7 @@ func www_app_components_dashboard_views_listpodsvisualizer_html() (*asset, error return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/listPodsVisualizer.html", size: 841, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/listPodsVisualizer.html", size: 841, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5252,7 +5249,7 @@ func www_app_components_dashboard_views_listreplicationcontrollers_html() (*asse return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/listReplicationControllers.html", size: 363, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/listReplicationControllers.html", size: 363, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5276,7 +5273,7 @@ func www_app_components_dashboard_views_listservices_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/listServices.html", size: 349, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/listServices.html", size: 349, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5382,7 +5379,7 @@ func www_app_components_dashboard_views_node_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/node.html", size: 2307, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/node.html", size: 2307, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5408,7 +5405,7 @@ func www_app_components_dashboard_views_partials_cadvisor_html() (*asset, error) return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/partials/cadvisor.html", size: 443, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: 
"www/app/components/dashboard/views/partials/cadvisor.html", size: 443, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5445,7 +5442,7 @@ func www_app_components_dashboard_views_partials_groupbox_html() (*asset, error) return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/partials/groupBox.html", size: 769, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/partials/groupBox.html", size: 769, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5504,7 +5501,7 @@ func www_app_components_dashboard_views_partials_groupitem_html() (*asset, error return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/partials/groupItem.html", size: 2213, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/partials/groupItem.html", size: 2213, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5554,7 +5551,7 @@ func www_app_components_dashboard_views_partials_podtilesbyname_html() (*asset, return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/partials/podTilesByName.html", size: 1287, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/partials/podTilesByName.html", size: 1287, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5604,7 +5601,7 @@ func www_app_components_dashboard_views_partials_podtilesbyserver_html() (*asset return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/partials/podTilesByServer.html", size: 1281, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/partials/podTilesByServer.html", size: 1281, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5735,7 +5732,7 @@ func www_app_components_dashboard_views_pod_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/pod.html", size: 4149, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/pod.html", size: 4149, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5752,37 +5749,46 @@ var _www_app_components_dashboard_views_replication_html = []byte(`
Replication Controller: - {{replicationController.id}} + {{replicationController.metadata.name}}
- +
+ + + + + @@ -5791,9 +5797,13 @@ var _www_app_components_dashboard_views_replication_html = []byte(`
@@ -5801,8 +5811,13 @@ var _www_app_components_dashboard_views_replication_html = []byte(`
@@ -5834,7 +5849,7 @@ func www_app_components_dashboard_views_replication_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/replication.html", size: 2165, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/replication.html", size: 3207, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -5851,68 +5866,81 @@ var _www_app_components_dashboard_views_service_html = []byte(`
Service: - {{service.id}} + {{service.metadata.name}}
-
Created - {{replicationController.creationTimestamp | date:'medium'}} + {{replicationController.metadata.creationTimestamp | date:'medium'}}
Desired Replicas - {{replicationController.desiredState.replicas}} + {{replicationController.spec.replicas}}
Current Replicas - {{replicationController.currentState.replicas}} + {{replicationController.status.replicas}} +
Selector + + {{label}}={{value}}{{$last ? '' : ', '}} +
Labels -
+
{{label}}: {{value}}
Related Pods - + + +
Related Services -
+
- + + - + - - + + - - - - - + + + + + + @@ -5920,8 +5948,12 @@ var _www_app_components_dashboard_views_service_html = []byte(`
@@ -5948,7 +5980,7 @@ func www_app_components_dashboard_views_service_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/components/dashboard/views/service.html", size: 2477, mode: os.FileMode(420), modTime: time.Unix(1432768981, 0)} + info := bindata_file_info{name: "www/app/components/dashboard/views/service.html", size: 3539, mode: os.FileMode(420), modTime: time.Unix(1432833389, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -6025,7 +6057,7 @@ func www_app_index_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/index.html", size: 2315, mode: os.FileMode(420), modTime: time.Unix(1432754009, 0)} + info := bindata_file_info{name: "www/app/index.html", size: 2315, mode: os.FileMode(420), modTime: time.Unix(1432773208, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -6212,7 +6244,7 @@ func www_app_views_partials_md_table_tmpl_html() (*asset, error) { return nil, err } - info := bindata_file_info{name: "www/app/views/partials/md-table.tmpl.html", size: 2819, mode: os.FileMode(420), modTime: time.Unix(1432754009, 0)} + info := bindata_file_info{name: "www/app/views/partials/md-table.tmpl.html", size: 2819, mode: os.FileMode(420), modTime: time.Unix(1432773208, 0)} a := &asset{bytes: bytes, info: info} return a, nil } diff --git a/pkg/util/ssh.go b/pkg/util/ssh.go new file mode 100644 index 00000000000..7760761b339 --- /dev/null +++ b/pkg/util/ssh.go @@ -0,0 +1,174 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" + "os" + + "github.com/golang/glog" + "golang.org/x/crypto/ssh" +) + +// TODO: Unit tests for this code, we can spin up a test SSH server with instructions here: +// https://godoc.org/golang.org/x/crypto/ssh#ServerConn +type SSHTunnel struct { + Config *ssh.ClientConfig + Host string + SSHPort int + LocalPort int + RemoteHost string + RemotePort int + running bool + sock net.Listener + client *ssh.Client +} + +func (s *SSHTunnel) copyBytes(out io.Writer, in io.Reader) { + if _, err := io.Copy(out, in); err != nil { + glog.Errorf("Error in SSH tunnel: %v", err) + } +} + +func NewSSHTunnel(user, keyfile, host, remoteHost string, localPort, remotePort int) (*SSHTunnel, error) { + signer, err := MakePrivateKeySigner(keyfile) + if err != nil { + return nil, err + } + config := ssh.ClientConfig{ + User: user, + Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, + } + return &SSHTunnel{ + Config: &config, + Host: host, + SSHPort: 22, + LocalPort: localPort, + RemotePort: remotePort, + RemoteHost: remoteHost, + }, nil +} + +func (s *SSHTunnel) Open() error { + var err error + s.client, err = ssh.Dial("tcp", fmt.Sprintf("%s:%d", s.Host, s.SSHPort), s.Config) + if err != nil { + return err + } + s.sock, err = net.Listen("tcp", fmt.Sprintf("localhost:%d", s.LocalPort)) + if err != nil { + return err + } + s.running = true + return nil +} + +func (s *SSHTunnel) Listen() { + for s.running { + conn, err := s.sock.Accept() + if err != nil { + glog.Errorf("Error listening for ssh tunnel to %s (%v)", s.RemoteHost, err) + continue + } + if err := s.tunnel(conn); err != nil { + glog.Errorf("Error starting tunnel: %v", err) + } + } +} + +func (s *SSHTunnel) tunnel(conn net.Conn) error { + tunnel, err := s.client.Dial("tcp", fmt.Sprintf("%s:%d", s.RemoteHost, s.RemotePort)) + if err != nil { + return err + } + go s.copyBytes(tunnel, conn) + go s.copyBytes(conn, tunnel) + return nil +} + +func (s *SSHTunnel) Close() error { + // TODO: try to shutdown copying here? + s.running = false + // TODO: Aggregate errors and keep going? + if err := s.sock.Close(); err != nil { + return err + } + if err := s.client.Close(); err != nil { + return err + } + return nil +} + +func RunSSHCommand(cmd, host string, signer ssh.Signer) (string, string, int, error) { + // Setup the config, dial the server, and open a session. + config := &ssh.ClientConfig{ + User: os.Getenv("USER"), + Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, + } + client, err := ssh.Dial("tcp", host, config) + if err != nil { + return "", "", 0, fmt.Errorf("error getting SSH client to host %s: '%v'", host, err) + } + session, err := client.NewSession() + if err != nil { + return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", host, err) + } + defer session.Close() + + // Run the command. + code := 0 + var bout, berr bytes.Buffer + session.Stdout, session.Stderr = &bout, &berr + if err = session.Run(cmd); err != nil { + // Check whether the command failed to run or didn't complete. + if exiterr, ok := err.(*ssh.ExitError); ok { + // If we got an ExitError and the exit code is nonzero, we'll + // consider the SSH itself successful (just that the command run + // errored on the host). + if code = exiterr.ExitStatus(); code != 0 { + err = nil + } + } else { + // Some other kind of error happened (e.g. an IOError); consider the + // SSH unsuccessful. 
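Before RunSSHCommand's error handling concludes below, a hedged usage sketch for the SSHTunnel type defined above (the user, key path, hosts, and ports are placeholders, not values from this PR): forward localhost:8080 through a bastion host to a remote port.

```go
package main

import (
	"log"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)

func main() {
	// NewSSHTunnel(user, keyfile, host, remoteHost, localPort, remotePort);
	// every value here is a placeholder.
	t, err := util.NewSSHTunnel("core", "/home/core/.ssh/id_rsa", "bastion.example.com", "10.0.0.5", 8080, 80)
	if err != nil {
		log.Fatal(err)
	}
	if err := t.Open(); err != nil { // dials SSH, then binds localhost:8080
		log.Fatal(err)
	}
	defer t.Close()
	t.Listen() // blocks, proxying each accepted connection to 10.0.0.5:80
}
```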
+ err = fmt.Errorf("failed running `%s` on %s: '%v'", cmd, host, err) + } + } + return bout.String(), berr.String(), code, err +} + +func MakePrivateKeySigner(key string) (ssh.Signer, error) { + // Create an actual signer. + file, err := os.Open(key) + if err != nil { + return nil, fmt.Errorf("error opening SSH key %s: '%v'", key, err) + } + defer file.Close() + buffer, err := ioutil.ReadAll(file) + if err != nil { + return nil, fmt.Errorf("error reading SSH key %s: '%v'", key, err) + } + signer, err := ssh.ParsePrivateKey(buffer) + if err != nil { + return nil, fmt.Errorf("error parsing SSH key %s: '%v'", key, err) + } + return signer, nil +} diff --git a/pkg/util/util.go b/pkg/util/util.go index f85a8d15f45..639ac8f3cf3 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -428,7 +428,7 @@ func chooseHostInterfaceNativeGo() (net.IP, error) { if ip == nil { return nil, fmt.Errorf("no acceptable interface from host") } - glog.V(4).Infof("Choosing interface %s for from-host portals", intfs[i].Name) + glog.V(4).Infof("Choosing interface %s (IP %v) as default", intfs[i].Name, ip) return ip, nil } diff --git a/plugin/cmd/kube-scheduler/app/server.go b/plugin/cmd/kube-scheduler/app/server.go index e2cca460b57..c2693a1dc88 100644 --- a/plugin/cmd/kube-scheduler/app/server.go +++ b/plugin/cmd/kube-scheduler/app/server.go @@ -92,7 +92,7 @@ func (s *SchedulerServer) Run(_ []string) error { return err } kubeconfig.QPS = 20.0 - kubeconfig.Burst = 100 + kubeconfig.Burst = 30 kubeClient, err := client.New(kubeconfig) if err != nil { diff --git a/plugin/pkg/admission/limitranger/admission.go b/plugin/pkg/admission/limitranger/admission.go index ec6e83806b0..7ff6cbb42d8 100644 --- a/plugin/pkg/admission/limitranger/admission.go +++ b/plugin/pkg/admission/limitranger/admission.go @@ -145,10 +145,6 @@ func defaultContainerResourceRequirements(limitRange *api.LimitRange) api.Resour value := v.Copy() requirements.Limits[k] = *value } - for k, v := range limit.Min { - value := v.Copy() - requirements.Requests[k] = *value - } } } return requirements diff --git a/plugin/pkg/admission/limitranger/admission_test.go b/plugin/pkg/admission/limitranger/admission_test.go index 21591718343..9765509b19b 100644 --- a/plugin/pkg/admission/limitranger/admission_test.go +++ b/plugin/pkg/admission/limitranger/admission_test.go @@ -84,7 +84,7 @@ func TestDefaultContainerResourceRequirements(t *testing.T) { limitRange := validLimitRange() expected := api.ResourceRequirements{ Limits: getResourceList("50m", "5Mi"), - Requests: getResourceList("25m", "1Mi"), + Requests: api.ResourceList{}, } actual := defaultContainerResourceRequirements(&limitRange) @@ -118,10 +118,7 @@ func TestMergePodResourceRequirements(t *testing.T) { api.ResourceCPU: defaultRequirements.Limits[api.ResourceCPU], api.ResourceMemory: resource.MustParse("512Mi"), }, - Requests: api.ResourceList{ - api.ResourceCPU: defaultRequirements.Requests[api.ResourceCPU], - api.ResourceMemory: defaultRequirements.Requests[api.ResourceMemory], - }, + Requests: api.ResourceList{}, } mergePodResourceRequirements(&pod, &defaultRequirements) for i := range pod.Spec.Containers { diff --git a/plugin/pkg/admission/namespace/exists/admission.go b/plugin/pkg/admission/namespace/exists/admission.go index 04e97e374ce..12dfd48177f 100644 --- a/plugin/pkg/admission/namespace/exists/admission.go +++ b/plugin/pkg/admission/namespace/exists/admission.go @@ -19,6 +19,7 @@ package exists import ( "fmt" "io" + "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/admission" 
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" @@ -73,7 +74,14 @@ func (e *exists) Admit(a admission.Attributes) (err error) { if exists { return nil } - return admission.NewForbidden(a, fmt.Errorf("Namespace %s does not exist", a.GetNamespace())) + + // in case of latency in our caches, make a call direct to storage to verify that it truly exists or not + _, err = e.client.Namespaces().Get(a.GetNamespace()) + if err != nil { + return admission.NewForbidden(a, fmt.Errorf("Namespace %s does not exist", a.GetNamespace())) + } + + return nil } // NewExists creates a new namespace exists admission control handler @@ -90,7 +98,7 @@ func NewExists(c client.Interface) admission.Interface { }, &api.Namespace{}, store, - 0, + 5*time.Minute, ) reflector.Run() return &exists{ diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index 6f360a09521..c1a5d3cf99c 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -189,10 +189,10 @@ func (n *NodeSelector) PodSelectorMatches(pod *api.Pod, existingPods []*api.Pod, } func PodFitsHost(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { - if len(pod.Spec.Host) == 0 { + if len(pod.Spec.NodeName) == 0 { return true, nil } - return pod.Spec.Host == node, nil + return pod.Spec.NodeName == node, nil } type NodeLabelChecker struct { @@ -300,7 +300,7 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api } if len(nsServicePods) > 0 { // consider any service pod and fetch the minion its hosted on - otherMinion, err := s.nodeInfo.GetNodeInfo(nsServicePods[0].Spec.Host) + otherMinion, err := s.nodeInfo.GetNodeInfo(nsServicePods[0].Spec.NodeName) if err != nil { return false, err } @@ -369,7 +369,7 @@ func MapPodsToMachines(lister algorithm.PodLister) (map[string][]*api.Pod, error return map[string][]*api.Pod{}, err } for _, scheduledPod := range pods { - host := scheduledPod.Spec.Host + host := scheduledPod.Spec.NodeName machineToPods[host] = append(machineToPods[host], scheduledPod) } return machineToPods, nil diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go index 653e7cdaade..6c3a63b2179 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -197,7 +197,7 @@ func TestPodFitsHost(t *testing.T) { { pod: &api.Pod{ Spec: api.PodSpec{ - Host: "foo", + NodeName: "foo", }, }, node: "foo", @@ -207,7 +207,7 @@ func TestPodFitsHost(t *testing.T) { { pod: &api.Pod{ Spec: api.PodSpec{ - Host: "bar", + NodeName: "bar", }, }, node: "foo", @@ -234,7 +234,7 @@ func newPod(host string, hostPorts ...int) *api.Pod { } return &api.Pod{ Spec: api.PodSpec{ - Host: host, + NodeName: host, Containers: []api.Container{ { Ports: networkPorts, @@ -632,7 +632,7 @@ func TestServiceAffinity(t *testing.T) { }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, fits: true, @@ -641,7 +641,7 @@ func TestServiceAffinity(t *testing.T) { }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: 
[]*api.Pod{{Spec: api.PodSpec{Host: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, fits: true, @@ -650,7 +650,7 @@ func TestServiceAffinity(t *testing.T) { }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, fits: false, @@ -659,7 +659,7 @@ func TestServiceAffinity(t *testing.T) { }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}, - pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}}, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns2"}}}, fits: true, @@ -668,7 +668,7 @@ func TestServiceAffinity(t *testing.T) { }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}, - pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns2"}}}, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns2"}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}}, fits: true, @@ -677,7 +677,7 @@ func TestServiceAffinity(t *testing.T) { }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}, - pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}}, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}}, fits: false, @@ -686,7 +686,7 @@ func TestServiceAffinity(t *testing.T) { }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, node: "machine1", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, fits: false, @@ -695,7 +695,7 @@ func TestServiceAffinity(t *testing.T) { }, { pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []*api.Pod{{Spec: api.PodSpec{Host: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, node: "machine4", services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, fits: true, diff --git a/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go b/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go index 3a387606b7b..ab43cecfedb 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go +++ 
b/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go @@ -48,16 +48,16 @@ func TestLeastRequested(t *testing.T) { "baz": "blah", } machine1Spec := api.PodSpec{ - Host: "machine1", + NodeName: "machine1", } machine2Spec := api.PodSpec{ - Host: "machine2", + NodeName: "machine2", } noResources := api.PodSpec{ Containers: []api.Container{}, } cpuOnly := api.PodSpec{ - Host: "machine1", + NodeName: "machine1", Containers: []api.Container{ { Resources: api.ResourceRequirements{ @@ -76,9 +76,9 @@ func TestLeastRequested(t *testing.T) { }, } cpuOnly2 := cpuOnly - cpuOnly2.Host = "machine2" + cpuOnly2.NodeName = "machine2" cpuAndMemory := api.PodSpec{ - Host: "machine2", + NodeName: "machine2", Containers: []api.Container{ { Resources: api.ResourceRequirements{ @@ -378,16 +378,16 @@ func TestBalancedResourceAllocation(t *testing.T) { "baz": "blah", } machine1Spec := api.PodSpec{ - Host: "machine1", + NodeName: "machine1", } machine2Spec := api.PodSpec{ - Host: "machine2", + NodeName: "machine2", } noResources := api.PodSpec{ Containers: []api.Container{}, } cpuOnly := api.PodSpec{ - Host: "machine1", + NodeName: "machine1", Containers: []api.Container{ { Resources: api.ResourceRequirements{ @@ -406,9 +406,9 @@ func TestBalancedResourceAllocation(t *testing.T) { }, } cpuOnly2 := cpuOnly - cpuOnly2.Host = "machine2" + cpuOnly2.NodeName = "machine2" cpuAndMemory := api.PodSpec{ - Host: "machine2", + NodeName: "machine2", Containers: []api.Container{ { Resources: api.ResourceRequirements{ diff --git a/plugin/pkg/scheduler/algorithm/priorities/spreading.go b/plugin/pkg/scheduler/algorithm/priorities/spreading.go index 6f0cc2c5981..eaddad66d0d 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/spreading.go +++ b/plugin/pkg/scheduler/algorithm/priorities/spreading.go @@ -64,10 +64,10 @@ func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorith counts := map[string]int{} if len(nsServicePods) > 0 { for _, pod := range nsServicePods { - counts[pod.Spec.Host]++ + counts[pod.Spec.NodeName]++ // Compute the maximum number of pods hosted on any minion - if counts[pod.Spec.Host] > maxCount { - maxCount = counts[pod.Spec.Host] + if counts[pod.Spec.NodeName] > maxCount { + maxCount = counts[pod.Spec.NodeName] } } } @@ -141,7 +141,7 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLis podCounts := map[string]int{} for _, pod := range nsServicePods { - label, exists := labeledMinions[pod.Spec.Host] + label, exists := labeledMinions[pod.Spec.NodeName] if !exists { continue } diff --git a/plugin/pkg/scheduler/algorithm/priorities/spreading_test.go b/plugin/pkg/scheduler/algorithm/priorities/spreading_test.go index 0eddda7e509..1e5a2660b17 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/spreading_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/spreading_test.go @@ -35,10 +35,10 @@ func TestServiceSpreadPriority(t *testing.T) { "baz": "blah", } zone1Spec := api.PodSpec{ - Host: "machine1", + NodeName: "machine1", } zone2Spec := api.PodSpec{ - Host: "machine2", + NodeName: "machine2", } tests := []struct { pod *api.Pod @@ -191,13 +191,13 @@ func TestZoneSpreadPriority(t *testing.T) { "name": "value", } zone0Spec := api.PodSpec{ - Host: "machine01", + NodeName: "machine01", } zone1Spec := api.PodSpec{ - Host: "machine11", + NodeName: "machine11", } zone2Spec := api.PodSpec{ - Host: "machine21", + NodeName: "machine21", } labeledNodes := map[string]map[string]string{ "machine01": nozone, "machine02": nozone, diff --git 
a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go index 87515b45cdb..b5faa39c4d5 100644 --- a/plugin/pkg/scheduler/factory/factory.go +++ b/plugin/pkg/scheduler/factory/factory.go @@ -38,6 +38,13 @@ import ( "github.com/golang/glog" ) +// Rate limitations for binding pods to hosts. +// TODO: expose these as cmd line flags. +const ( + BindPodsQps = 15 + BindPodsBurst = 20 +) + // ConfigFactory knows how to fill out a scheduler config with its support functions. type ConfigFactory struct { Client *client.Client @@ -54,6 +61,8 @@ type ConfigFactory struct { // Close this to stop all reflectors StopEverything chan struct{} + // Rate limiter for binding pods + BindPodsRateLimiter util.RateLimiter scheduledPodPopulator *framework.Controller modeler scheduler.SystemModeler @@ -73,6 +82,7 @@ func NewConfigFactory(client *client.Client) *ConfigFactory { modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{c.PodQueue}, c.ScheduledPodLister) c.modeler = modeler c.PodLister = modeler.PodLister() + c.BindPodsRateLimiter = util.NewTokenBucketRateLimiter(BindPodsQps, BindPodsBurst) // On add/delete to the scheduled pods, remove from the assumed pods. // We construct this here instead of in CreateFromKeys because @@ -204,8 +214,9 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSe glog.V(2).Infof("About to try and schedule pod %v", pod.Name) return pod }, - Error: f.makeDefaultErrorFunc(&podBackoff, f.PodQueue), - StopEverything: f.StopEverything, + Error: f.makeDefaultErrorFunc(&podBackoff, f.PodQueue), + BindPodsRateLimiter: f.BindPodsRateLimiter, + StopEverything: f.StopEverything, }, nil } @@ -267,7 +278,7 @@ func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue } return } - if pod.Spec.Host == "" { + if pod.Spec.NodeName == "" { podQueue.Add(pod) } }() diff --git a/plugin/pkg/scheduler/scheduler.go b/plugin/pkg/scheduler/scheduler.go index 7d3830fcac2..57d99cbfa2f 100644 --- a/plugin/pkg/scheduler/scheduler.go +++ b/plugin/pkg/scheduler/scheduler.go @@ -73,6 +73,9 @@ type Config struct { Algorithm algorithm.ScheduleAlgorithm Binder Binder + // Rate at which we can create pods + BindPodsRateLimiter util.RateLimiter + // NextPod should be a function that blocks until the next pod // is available. We don't use a channel for this, because scheduling // a pod may take some amount of time and we don't want pods to get @@ -106,6 +109,10 @@ func (s *Scheduler) Run() { func (s *Scheduler) scheduleOne() { pod := s.config.NextPod() + if s.config.BindPodsRateLimiter != nil { + s.config.BindPodsRateLimiter.Accept() + } + glog.V(3).Infof("Attempting to schedule: %v", pod) start := time.Now() defer func() { @@ -142,7 +149,7 @@ func (s *Scheduler) scheduleOne() { s.config.Recorder.Eventf(pod, "scheduled", "Successfully assigned %v to %v", pod.Name, dest) // tell the model to assume that this binding took effect. 
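+		// The pod is copied before it is mutated below, so the object
+		// returned by NextPod stays untouched; only the scheduler's model
+		// sees the assumed NodeName.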
assumed := *pod - assumed.Spec.Host = dest + assumed.Spec.NodeName = dest s.config.Modeler.AssumePod(&assumed) }) } diff --git a/plugin/pkg/scheduler/scheduler_test.go b/plugin/pkg/scheduler/scheduler_test.go index 39093a4c2e8..c891c33a175 100644 --- a/plugin/pkg/scheduler/scheduler_test.go +++ b/plugin/pkg/scheduler/scheduler_test.go @@ -42,7 +42,7 @@ func podWithID(id, desiredHost string) *api.Pod { return &api.Pod{ ObjectMeta: api.ObjectMeta{Name: id, SelfLink: testapi.SelfLink("pods", id)}, Spec: api.PodSpec{ - Host: desiredHost, + NodeName: desiredHost, }, } } @@ -295,3 +295,67 @@ func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) { <-called events.Stop() } + +// Fake rate limiter that records the 'accept' tokens from the real rate limiter +type FakeRateLimiter struct { + r util.RateLimiter + acceptValues []bool +} + +func (fr *FakeRateLimiter) CanAccept() bool { + return true +} + +func (fr *FakeRateLimiter) Stop() {} + +func (fr *FakeRateLimiter) Accept() { + fr.acceptValues = append(fr.acceptValues, fr.r.CanAccept()) +} + +func TestSchedulerRateLimitsBinding(t *testing.T) { + scheduledPodStore := cache.NewStore(cache.MetaNamespaceKeyFunc) + scheduledPodLister := &cache.StoreToPodLister{scheduledPodStore} + queuedPodStore := cache.NewFIFO(cache.MetaNamespaceKeyFunc) + queuedPodLister := &cache.StoreToPodLister{queuedPodStore} + modeler := NewSimpleModeler(queuedPodLister, scheduledPodLister) + + algo := NewGenericScheduler( + map[string]algorithm.FitPredicate{}, + []algorithm.PriorityConfig{}, + modeler.PodLister(), + rand.New(rand.NewSource(time.Now().UnixNano()))) + + // Rate limit to 1 pod + fr := FakeRateLimiter{util.NewTokenBucketRateLimiter(0.02, 1), []bool{}} + c := &Config{ + Modeler: modeler, + MinionLister: algorithm.FakeMinionLister( + api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}}, + ), + Algorithm: algo, + Binder: fakeBinder{func(b *api.Binding) error { + return nil + }}, + NextPod: func() *api.Pod { + return queuedPodStore.Pop().(*api.Pod) + }, + Error: func(p *api.Pod, err error) { + t.Errorf("Unexpected error when scheduling pod %+v: %v", p, err) + }, + Recorder: &record.FakeRecorder{}, + BindPodsRateLimiter: &fr, + } + + s := New(c) + firstPod := podWithID("foo", "") + secondPod := podWithID("boo", "") + queuedPodStore.Add(firstPod) + queuedPodStore.Add(secondPod) + + for i, hitRateLimit := range []bool{true, false} { + s.scheduleOne() + if fr.acceptValues[i] != hitRateLimit { + t.Errorf("Unexpected rate limiting, expect rate limit to be: %v but found it was %v", hitRateLimit, fr.acceptValues[i]) + } + } +} diff --git a/test/e2e/dns.go b/test/e2e/dns.go index 2f133e9337c..bac3fe070c7 100644 --- a/test/e2e/dns.go +++ b/test/e2e/dns.go @@ -73,7 +73,9 @@ var _ = Describe("DNS", func() { probeCmd := "for i in `seq 1 600`; do " for _, name := range namesToResolve { - // Resolve by TCP and UDP DNS. + // Resolve by TCP and UDP DNS. Use $$(...) because $(...) is + // expanded by kubernetes (though this won't expand so should + // remain a literal, safe > sorry). 
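+			// For example, a hypothetical name "foo" yields the fragment:
+			//   test -n "$$(dig +notcp +noall +answer +search foo)" && echo OK > /results/udp@foo;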
+			probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s)" && echo OK > /results/udp@%s;`, name, name)
+			probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s)" && echo OK > /results/tcp@%s;`, name, name)
 		}
@@ -207,7 +209,7 @@ var _ = Describe("DNS", func() {
 				Name: testServiceName,
 			},
 			Spec: api.ServiceSpec{
-				PortalIP: "None",
+				ClusterIP: "None",
 				Ports: []api.ServicePort{
 					{Port: 80},
 				},
@@ -234,12 +236,13 @@ var _ = Describe("DNS", func() {
 		probeCmd := "for i in `seq 1 600`; do "
 		for _, name := range namesToResolve {
-			// Resolve by TCP and UDP DNS.
-			probeCmd += fmt.Sprintf(`test -n "$(dig +notcp +noall +answer +search %s)" && echo OK > /results/udp@%s;`, name, name)
-			probeCmd += fmt.Sprintf(`test -n "$(dig +tcp +noall +answer +search %s)" && echo OK > /results/tcp@%s;`, name, name)
+			// Resolve by TCP and UDP DNS. Use $$(...) because $(...) is
+			// expanded by kubernetes (though this won't expand so should
+			// remain a literal, safe > sorry).
+			probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s)" && echo OK > /results/udp@%s;`, name, name)
+			probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s)" && echo OK > /results/tcp@%s;`, name, name)
 		}
 		probeCmd += "sleep 1; done"
-		Logf("vishh: 1")
 		// Run a pod which probes DNS and exposes the results by HTTP.
 		By("creating a pod to probe DNS")
 		pod := &api.Pod{
diff --git a/test/e2e/es_cluster_logging.go b/test/e2e/es_cluster_logging.go
index c505dce648c..294f54b085b 100644
--- a/test/e2e/es_cluster_logging.go
+++ b/test/e2e/es_cluster_logging.go
@@ -203,7 +203,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
 						Command: []string{"bash", "-c", fmt.Sprintf("i=0; while ((i < %d)); do echo \"%d %s $i %s\"; i=$$(($i+1)); done", countTo, i, taintName, podName)},
 					},
 				},
-				Host:          node.Name,
+				NodeName:      node.Name,
 				RestartPolicy: api.RestartPolicyNever,
 			},
 		})
diff --git a/test/e2e/framework.go b/test/e2e/framework.go
index 203a48ac659..c1dd414efe2 100644
--- a/test/e2e/framework.go
+++ b/test/e2e/framework.go
@@ -63,6 +63,10 @@ func (f *Framework) beforeEach() {
 	Expect(err).NotTo(HaveOccurred())
 
 	f.Namespace = namespace
+
+	By("Waiting for a default service account to be provisioned in namespace")
+	err = waitForDefaultServiceAccountInNamespace(c, namespace.Name)
+	Expect(err).NotTo(HaveOccurred())
 }
 
 // afterEach deletes the namespace, after reading its events.
diff --git a/test/e2e/networking.go b/test/e2e/networking.go
index b507eff7a24..e762a820e76 100644
--- a/test/e2e/networking.go
+++ b/test/e2e/networking.go
@@ -31,7 +31,180 @@ import (
 	. "github.com/onsi/gomega"
 )
 
-func LaunchNetTestPodPerNode(f *Framework, nodes *api.NodeList, name string) []string {
+var _ = Describe("NetworkingNew", func() {
+	f := NewFramework("nettestnew")
+
+	var svcname = "nettest"
+
+	BeforeEach(func() {
+		// Assert basic external connectivity.
+		// Since this is not really a test of kubernetes in any way, we
+		// leave it as a pre-test assertion, rather than a Ginkgo test.
+		By("Executing a successful http request to the external internet")
+		resp, err := http.Get("http://google.com")
+		if err != nil {
+			Failf("Unable to connect/talk to the internet: %v", err)
+		}
+		if resp.StatusCode != http.StatusOK {
+			Failf("Unexpected error code: expected 200, got %v (%v)", resp.StatusCode, resp)
+		}
+	})
+
+	// First test because it has no dependencies on variables created later on.
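+	// It talks only to the apiserver, so a failure here points at the
+	// master itself rather than at the pod network exercised further below.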
+ It("should provide unchanging, static URL paths for kubernetes api services.", func() { + tests := []struct { + path string + }{ + {path: "/validate"}, + {path: "/healthz"}, + // TODO: test proxy links here + } + for _, test := range tests { + By(fmt.Sprintf("testing: %s", test.path)) + data, err := f.Client.RESTClient.Get(). + Namespace(f.Namespace.Name). + AbsPath(test.path). + DoRaw() + if err != nil { + Failf("Failed: %v\nBody: %s", err, string(data)) + } + } + }) + + //Now we can proceed with the test. + It("should function for intra-pod communication", func() { + if testContext.Provider == "vagrant" { + By("Skipping test which is broken for vagrant (See https://github.com/GoogleCloudPlatform/kubernetes/issues/3580)") + return + } + + By(fmt.Sprintf("Creating a service named %q in namespace %q", svcname, f.Namespace.Name)) + svc, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{ + ObjectMeta: api.ObjectMeta{ + Name: svcname, + Labels: map[string]string{ + "name": svcname, + }, + }, + Spec: api.ServiceSpec{ + Ports: []api.ServicePort{{ + Protocol: "TCP", + Port: 8080, + TargetPort: util.NewIntOrStringFromInt(8080), + }}, + Selector: map[string]string{ + "name": svcname, + }, + }, + }) + if err != nil { + Failf("unable to create test service named [%s] %v", svc.Name, err) + } + + // Clean up service + defer func() { + defer GinkgoRecover() + By("Cleaning up the service") + if err = f.Client.Services(f.Namespace.Name).Delete(svc.Name); err != nil { + Failf("unable to delete svc %v: %v", svc.Name, err) + } + }() + + By("Creating a webserver (pending) pod on each node") + + nodes, err := f.Client.Nodes().List(labels.Everything(), fields.Everything()) + if err != nil { + Failf("Failed to list nodes: %v", err) + } + + podNames := LaunchNetTestPodPerNode(f, nodes, svcname, "1.4") + + // Clean up the pods + defer func() { + defer GinkgoRecover() + By("Cleaning up the webserver pods") + for _, podName := range podNames { + if err = f.Client.Pods(f.Namespace.Name).Delete(podName, nil); err != nil { + Logf("Failed to delete pod %s: %v", podName, err) + } + } + }() + + By("Waiting for the webserver pods to transition to Running state") + for _, podName := range podNames { + err = f.WaitForPodRunning(podName) + Expect(err).NotTo(HaveOccurred()) + } + + By("Waiting for connectivity to be verified") + passed := false + + //once response OK, evaluate response body for pass/fail. + var body []byte + getDetails := func() ([]byte, error) { + return f.Client.Get(). + Namespace(f.Namespace.Name). + Prefix("proxy"). + Resource("services"). + Name(svc.Name). + Suffix("read"). + DoRaw() + } + + getStatus := func() ([]byte, error) { + return f.Client.Get(). + Namespace(f.Namespace.Name). + Prefix("proxy"). + Resource("services"). + Name(svc.Name). + Suffix("status"). + DoRaw() + } + + timeout := time.Now().Add(2 * time.Minute) + for i := 0; !passed && timeout.After(time.Now()); i++ { + time.Sleep(2 * time.Second) + Logf("About to make a proxy status call") + start := time.Now() + body, err = getStatus() + Logf("Proxy status call returned in %v", time.Since(start)) + if err != nil { + Logf("Attempt %v: service/pod still starting. (error: '%v')", i, err) + continue + } + // Finally, we pass/fail the test based on if the container's response body, as to wether or not it was able to find peers. + switch { + case string(body) == "pass": + Logf("Passed on attempt %v. 
Cleaning up.", i) + passed = true + case string(body) == "running": + Logf("Attempt %v: test still running", i) + case string(body) == "fail": + if body, err = getDetails(); err != nil { + Failf("Failed on attempt %v. Cleaning up. Error reading details: %v", i, err) + } else { + Failf("Failed on attempt %v. Cleaning up. Details:\n%s", i, string(body)) + } + case strings.Contains(string(body), "no endpoints available"): + Logf("Attempt %v: waiting on service/endpoints", i) + default: + Logf("Unexpected response:\n%s", body) + } + } + + if !passed { + if body, err = getDetails(); err != nil { + Failf("Timed out. Cleaning up. Error reading details: %v", err) + } else { + Failf("Timed out. Cleaning up. Details:\n%s", string(body)) + } + } + Expect(string(body)).To(Equal("pass")) + }) + +}) + +func LaunchNetTestPodPerNode(f *Framework, nodes *api.NodeList, name, version string) []string { podNames := []string{} totalPods := len(nodes.Items) @@ -50,7 +223,7 @@ func LaunchNetTestPodPerNode(f *Framework, nodes *api.NodeList, name string) []s Containers: []api.Container{ { Name: "webserver", - Image: "gcr.io/google_containers/nettest:1.3", + Image: "gcr.io/google_containers/nettest:" + version, Args: []string{ "-service=" + name, //peers >= totalPods should be asserted by the container. @@ -60,7 +233,7 @@ func LaunchNetTestPodPerNode(f *Framework, nodes *api.NodeList, name string) []s Ports: []api.ContainerPort{{ContainerPort: 8080}}, }, }, - Host: node.Name, + NodeName: node.Name, RestartPolicy: api.RestartPolicyNever, }, }) @@ -112,9 +285,7 @@ var _ = Describe("Networking", func() { }) //Now we can proceed with the test. - It("should function for intra-pod communication", func(done Done) { - defer close(done) - + It("should function for intra-pod communication", func() { if testContext.Provider == "vagrant" { By("Skipping test which is broken for vagrant (See https://github.com/GoogleCloudPlatform/kubernetes/issues/3580)") return @@ -159,7 +330,7 @@ var _ = Describe("Networking", func() { Failf("Failed to list nodes: %v", err) } - podNames := LaunchNetTestPodPerNode(f, nodes, svcname) + podNames := LaunchNetTestPodPerNode(f, nodes, svcname, "1.3") // Clean up the pods defer func() { @@ -203,7 +374,8 @@ var _ = Describe("Networking", func() { DoRaw() } - for i := 0; !passed; i++ { // Timeout will keep us from going forever. + timeout := time.Now().Add(2 * time.Minute) + for i := 0; !passed && timeout.After(time.Now()); i++ { time.Sleep(2 * time.Second) Logf("About to make a proxy status call") start := time.Now() @@ -241,6 +413,6 @@ var _ = Describe("Networking", func() { } } Expect(string(body)).To(Equal("pass")) - }, 120) + }) }) diff --git a/test/e2e/pd.go b/test/e2e/pd.go index 58441696a2e..d396d44665d 100644 --- a/test/e2e/pd.go +++ b/test/e2e/pd.go @@ -263,7 +263,7 @@ func testPDPod(diskName, targetHost string, readOnly bool) *api.Pod { }, }, }, - Host: targetHost, + NodeName: targetHost, }, } diff --git a/test/e2e/pods.go b/test/e2e/pods.go index f0414f35b97..62a7151a711 100644 --- a/test/e2e/pods.go +++ b/test/e2e/pods.go @@ -34,11 +34,22 @@ import ( . 
"github.com/onsi/gomega" ) +// createNamespaceIfDoesNotExist ensures that the namespace with specified name exists, or returns an error +func createNamespaceIfDoesNotExist(c *client.Client, name string) (*api.Namespace, error) { + namespace, err := c.Namespaces().Get(name) + if err != nil { + namespace, err = c.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: name}}) + } + return namespace, err +} + func runLivenessTest(c *client.Client, podDescr *api.Pod, expectRestart bool) { ns := "e2e-test-" + string(util.NewUUID()) + _, err := createNamespaceIfDoesNotExist(c, ns) + expectNoError(err, fmt.Sprintf("creating namespace %s", ns)) By(fmt.Sprintf("Creating pod %s in namespace %s", podDescr.Name, ns)) - _, err := c.Pods(ns).Create(podDescr) + _, err = c.Pods(ns).Create(podDescr) expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) // At the end of the test, clean up by removing the pod. @@ -85,10 +96,13 @@ func runLivenessTest(c *client.Client, podDescr *api.Pod, expectRestart bool) { // testHostIP tests that a pod gets a host IP func testHostIP(c *client.Client, pod *api.Pod) { ns := "e2e-test-" + string(util.NewUUID()) + _, err := createNamespaceIfDoesNotExist(c, ns) + expectNoError(err, fmt.Sprintf("creating namespace %s", ns)) + podClient := c.Pods(ns) By("creating pod") defer podClient.Delete(pod.Name, nil) - _, err := podClient.Create(pod) + _, err = podClient.Create(pod) if err != nil { Fail(fmt.Sprintf("Failed to create pod: %v", err)) } diff --git a/test/e2e/pre_stop.go b/test/e2e/pre_stop.go new file mode 100644 index 00000000000..43af2355f00 --- /dev/null +++ b/test/e2e/pre_stop.go @@ -0,0 +1,150 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/wait" + + . "github.com/onsi/ginkgo" +) + +// partially cloned from webserver.go +type State struct { + Received map[string]int +} + +func testPreStop(c *client.Client, ns string) { + // This is the server that will receive the preStop notification + podDescr := &api.Pod{ + ObjectMeta: api.ObjectMeta{ + Name: "server", + }, + Spec: api.PodSpec{ + Containers: []api.Container{ + { + Name: "server", + Image: "gcr.io/google_containers/nettest:1.3", + Ports: []api.ContainerPort{{ContainerPort: 8080}}, + }, + }, + }, + } + By(fmt.Sprintf("Creating server pod %s in namespace %s", podDescr.Name, ns)) + _, err := c.Pods(ns).Create(podDescr) + expectNoError(err, fmt.Sprintf("creating pod %s", podDescr.Name)) + + // At the end of the test, clean up by removing the pod. 
+	defer func() {
+		By("Deleting the server pod")
+		c.Pods(ns).Delete(podDescr.Name, nil)
+	}()
+
+	By("Waiting for pods to come up.")
+	err = waitForPodRunningInNamespace(c, podDescr.Name, ns)
+	expectNoError(err, "waiting for server pod to start")
+
+	val := "{\"Source\": \"prestop\"}"
+
+	podOut, err := c.Pods(ns).Get(podDescr.Name)
+	expectNoError(err, "getting pod info")
+
+	preStopDescr := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			Name: "tester",
+		},
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Name:    "tester",
+					Image:   "busybox",
+					Command: []string{"sleep", "600"},
+					Lifecycle: &api.Lifecycle{
+						PreStop: &api.Handler{
+							Exec: &api.ExecAction{
+								Command: []string{
+									"wget", "-O-", "--post-data=" + val, fmt.Sprintf("http://%s:8080/write", podOut.Status.PodIP),
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	By(fmt.Sprintf("Creating tester pod %s in namespace %s", preStopDescr.Name, ns))
+	_, err = c.Pods(ns).Create(preStopDescr)
+	expectNoError(err, fmt.Sprintf("creating pod %s", preStopDescr.Name))
+	deletePreStop := true
+
+	// At the end of the test, clean up by removing the pod.
+	defer func() {
+		if deletePreStop {
+			By("Deleting the tester pod")
+			c.Pods(ns).Delete(preStopDescr.Name, nil)
+		}
+	}()
+
+	err = waitForPodRunningInNamespace(c, preStopDescr.Name, ns)
+	expectNoError(err, "waiting for tester pod to start")
+
+	// Delete the pod with the preStop handler.
+	By("Deleting pre-stop pod")
+	if err = c.Pods(ns).Delete(preStopDescr.Name, nil); err == nil {
+		deletePreStop = false
+	}
+	expectNoError(err, fmt.Sprintf("deleting pod: %s", preStopDescr.Name))
+
+	// Validate that the server received the web poke.
+	err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
+		if body, err := c.Get().
+			Namespace(ns).Prefix("proxy").
+			Resource("pods").
+			Name(podDescr.Name).
+			Suffix("read").
+			DoRaw(); err != nil {
+			By(fmt.Sprintf("Error validating prestop: %v", err))
+		} else {
+			state := State{}
+			err := json.Unmarshal(body, &state)
+			if err != nil {
+				Logf("Error parsing: %v", err)
+				return false, nil
+			}
+			if state.Received["prestop"] != 0 {
+				return true, nil
+			}
+			Logf("Saw: %s", string(body))
+		}
+		return false, nil
+	})
+	expectNoError(err, "validating pre-stop.")
+}
+
+var _ = Describe("PreStop", func() {
+	f := NewFramework("prestop")
+
+	It("should call prestop when killing a pod", func() {
+		testPreStop(f.Client, f.Namespace.Name)
+	})
+})
diff --git a/test/e2e/reboot.go b/test/e2e/reboot.go
index da40a003dfc..44fd607066a 100644
--- a/test/e2e/reboot.go
+++ b/test/e2e/reboot.go
@@ -63,53 +63,102 @@ var _ = Describe("Reboot", func() {
 		Expect(err).NotTo(HaveOccurred())
 	})
 
-	It("should reboot each node and ensure they function upon restart", func() {
-		// This test requires SSH, so the provider check should be identical to
-		// there (the limiting factor is the implementation of util.go's
-		// getSigner(...)).
-		provider := testContext.Provider
-		if !providerIs("gce", "gke") {
-			By(fmt.Sprintf("Skipping reboot test, which is not implemented for %s", provider))
-			return
-		}
+	It("each node by ordering clean reboot and ensure they function upon restart", func() {
+		// clean shutdown and restart
+		testReboot(c, "sudo reboot")
+	})
 
-		// Get all nodes, and kick off the test on each.
-		nodelist, err := c.Nodes().List(labels.Everything(), fields.Everything())
-		if err != nil {
-			Failf("Error getting nodes: %v", err)
-		}
-		result := make(chan bool, len(nodelist.Items))
-		for _, n := range nodelist.Items {
-			go rebootNode(c, provider, n.ObjectMeta.Name, result)
-		}
+	It("each node by ordering unclean reboot and ensure they function upon restart", func() {
+		// unclean shutdown and restart
+		testReboot(c, "echo b | sudo tee /proc/sysrq-trigger")
+	})
 
-		// Wait for all to finish and check the final result.
-		failed := false
-		// TODO(mbforbes): Change to `for range` syntax and remove logging once
-		// we support only Go >= 1.4.
-		for _, n := range nodelist.Items {
-			if !<-result {
-				Failf("Node %s failed reboot test.", n.ObjectMeta.Name)
-				failed = true
-			}
-		}
-		if failed {
-			Failf("Test failed; at least one node failed to reboot in the time given.")
-		}
+	It("each node by triggering kernel panic and ensure they function upon restart", func() {
+		// kernel panic
+		testReboot(c, "echo c | sudo tee /proc/sysrq-trigger")
+	})
+
+	It("each node by switching off the network interface and ensure they function upon switch on", func() {
+		// switch the network interface off for a while to simulate a network outage
+		testReboot(c, "sudo ifdown eth0 && sleep 120 && sudo ifup eth0")
+	})
+
+	It("each node by dropping all inbound packets for a while and ensure they function afterwards", func() {
+		// tell the firewall to drop all inbound packets for a while
+		testReboot(c, "sudo iptables -A INPUT -j DROP && sleep 120 && sudo iptables -D INPUT -j DROP")
+	})
+
+	It("each node by dropping all outbound packets for a while and ensure they function afterwards", func() {
+		// tell the firewall to drop all outbound packets for a while
+		testReboot(c, "sudo iptables -A OUTPUT -j DROP && sleep 120 && sudo iptables -D OUTPUT -j DROP")
+	})
 })
 
+func testReboot(c *client.Client, rebootCmd string) {
+	// This test requires SSH, so the provider check should be identical to
+	// the set of providers supported by SSH (the limiting factor is the
+	// implementation of util.go's getSigner(...)).
+	provider := testContext.Provider
+	if !providerIs("gce", "gke") {
+		By(fmt.Sprintf("Skipping reboot test, which is not implemented for %s", provider))
+		return
+	}
+
+	// Get all nodes, and kick off the test on each.
+	nodelist, err := c.Nodes().List(labels.Everything(), fields.Everything())
+	if err != nil {
+		Failf("Error getting nodes: %v", err)
+	}
+	result := make(chan bool, len(nodelist.Items))
+	for _, n := range nodelist.Items {
+		go rebootNode(c, provider, n.ObjectMeta.Name, rebootCmd, result)
+	}
+
+	// Wait for all to finish and check the final result.
+	failed := false
+	// TODO(mbforbes): Change to `for range` syntax and remove logging once
+	// we support only Go >= 1.4.
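+	// Each rebootNode goroutine reports exactly one verdict on the
+	// buffered channel, so this loop collects one result per node.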
+ for _, n := range nodelist.Items { + if !<-result { + Failf("Node %s failed reboot test.", n.ObjectMeta.Name) + failed = true + } + } + if failed { + Failf("Test failed; at least one node failed to reboot in the time given.") + } +} + +func issueSSHCommand(node *api.Node, provider, cmd string) error { + Logf("Getting external IP address for %s", node.Name) + host := "" + for _, a := range node.Status.Addresses { + if a.Type == api.NodeExternalIP { + host = a.Address + ":22" + break + } + } + if host == "" { + return fmt.Errorf("couldn't find external IP address for node %s", node.Name) + } + Logf("Calling %s on %s", cmd, node.Name) + if _, _, code, err := SSH(cmd, host, provider); code != 0 || err != nil { + return fmt.Errorf("when running %s on %s, got %d and %v", cmd, node.Name, code, err) + } + return nil +} + // rebootNode takes node name on provider through the following steps using c: // - ensures the node is ready // - ensures all pods on the node are running and ready -// - reboots the node +// - reboots the node (by executing rebootCmd over ssh) // - ensures the node reaches some non-ready state // - ensures the node becomes ready again // - ensures all pods on the node become running and ready again // // It returns true through result only if all of the steps pass; at the first // failed step, it will return false through result and not run the rest. -func rebootNode(c *client.Client, provider, name string, result chan bool) { +func rebootNode(c *client.Client, provider, name, rebootCmd string, result chan bool) { // Get the node initially. Logf("Getting %s", name) node, err := c.Nodes().Get(name) @@ -147,26 +196,9 @@ func rebootNode(c *client.Client, provider, name string, result chan bool) { } // Reboot the node. - Logf("Getting external IP address for %s", name) - host := "" - for _, a := range node.Status.Addresses { - if a.Type == api.NodeExternalIP { - host = a.Address + ":22" - break - } - } - if host == "" { - Logf("Couldn't find external IP address for node %s", name) - result <- false - return - } - Logf("Calling reboot on %s", name) - rebootCmd := "sudo reboot" - if _, _, code, err := SSH(rebootCmd, host, provider); code != 0 || err != nil { - Failf("Expected 0 exit code and nil error when running %s on %s, got %d and %v", - rebootCmd, node, code, err) - result <- false - return + if err = issueSSHCommand(node, provider, rebootCmd); err != nil { + // Just log the error as reboot may cause unclean termination of ssh session, which is expected. + Logf("Error while issuing ssh command: %v", err) } // Wait for some kind of "not ready" status. diff --git a/test/e2e/util.go b/test/e2e/util.go index b23707a7bd1..a5792fca606 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -19,7 +19,6 @@ package e2e import ( "bytes" "fmt" - "io/ioutil" "math" "math/rand" "os" @@ -30,8 +29,6 @@ import ( "strings" "time" - "code.google.com/p/go-uuid/uuid" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd" @@ -42,6 +39,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" + "code.google.com/p/go-uuid/uuid" + "github.com/davecgh/go-spew/spew" "golang.org/x/crypto/ssh" . "github.com/onsi/ginkgo" @@ -58,6 +57,13 @@ const ( // How often to poll pods. 
podPoll = 5 * time.Second + + // service accounts are provisioned after namespace creation + // a service account is required to support pod creation in a namespace as part of admission control + serviceAccountProvisionTimeout = 2 * time.Minute + + // How often to poll for service accounts + serviceAccountPoll = 5 * time.Second ) type CloudConfig struct { @@ -128,8 +134,8 @@ func logPodStates(pods []api.Pod) { if len(pod.ObjectMeta.Name) > maxPodW { maxPodW = len(pod.ObjectMeta.Name) } - if len(pod.Spec.Host) > maxNodeW { - maxNodeW = len(pod.Spec.Host) + if len(pod.Spec.NodeName) > maxNodeW { + maxNodeW = len(pod.Spec.NodeName) } if len(pod.Status.Phase) > maxPhaseW { maxPhaseW = len(pod.Status.Phase) @@ -145,7 +151,7 @@ func logPodStates(pods []api.Pod) { maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", "CONDITIONS") for _, pod := range pods { Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %[7]s", - maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.Host, maxPhaseW, pod.Status.Phase, pod.Status.Conditions) + maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, pod.Status.Conditions) } Logf("") // Final empty line helps for readability. } @@ -156,12 +162,12 @@ func podRunningReady(p *api.Pod) (bool, error) { // Check the phase is running. if p.Status.Phase != api.PodRunning { return false, fmt.Errorf("want pod '%s' on '%s' to be '%v' but was '%v'", - p.ObjectMeta.Name, p.Spec.Host, api.PodRunning, p.Status.Phase) + p.ObjectMeta.Name, p.Spec.NodeName, api.PodRunning, p.Status.Phase) } // Check the ready condition is true. if !podReady(p) { return false, fmt.Errorf("pod '%s' on '%s' didn't have condition {%v %v}; conditions: %v", - p.ObjectMeta.Name, p.Spec.Host, api.PodReady, api.ConditionTrue, p.Status.Conditions) + p.ObjectMeta.Name, p.Spec.NodeName, api.PodReady, api.ConditionTrue, p.Status.Conditions) } return true, nil @@ -205,6 +211,20 @@ func waitForPodsRunningReady(ns string, minPods int, timeout time.Duration) erro return fmt.Errorf("Not all pods in namespace '%s' running and ready within %v", ns, timeout) } +func waitForServiceAccountInNamespace(c *client.Client, ns, serviceAccountName string, poll, timeout time.Duration) error { + Logf("Waiting up to %v for service account %s to be provisioned in ns %s", timeout, serviceAccountName, ns) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { + _, err := c.ServiceAccounts(ns).Get(serviceAccountName) + if err != nil { + Logf("Get service account %s in ns %s failed, ignoring for %v: %v", serviceAccountName, ns, poll, err) + continue + } + Logf("Service account %s in ns %s found. 
(%v)", serviceAccountName, ns, time.Since(start)) + return nil + } + return fmt.Errorf("Service account %s in namespace %s not ready within %v", serviceAccountName, ns, timeout) +} + func waitForPodCondition(c *client.Client, ns, podName, desc string, poll, timeout time.Duration, condition podCondition) error { Logf("Waiting up to %v for pod %s status to be %s", timeout, podName, desc) for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { @@ -223,7 +243,15 @@ func waitForPodCondition(c *client.Client, ns, podName, desc string, poll, timeo return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout) } +// waitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned +// the default service account is what is associated with pods when they do not specify a service account +// as a result, pods are not able to be provisioned in a namespace until the service account is provisioned +func waitForDefaultServiceAccountInNamespace(c *client.Client, namespace string) error { + return waitForServiceAccountInNamespace(c, namespace, "default", serviceAccountPoll, serviceAccountProvisionTimeout) +} + // createNS should be used by every test, note that we append a common prefix to the provided test name. +// Please see NewFramework instead of using this directly. func createTestingNS(baseName string, c *client.Client) (*api.Namespace, error) { namespaceObj := &api.Namespace{ ObjectMeta: api.ObjectMeta{ @@ -238,7 +266,13 @@ func createTestingNS(baseName string, c *client.Client) (*api.Namespace, error) func waitForPodRunningInNamespace(c *client.Client, podName string, namespace string) error { return waitForPodCondition(c, namespace, podName, "running", podPoll, podStartTimeout, func(pod *api.Pod) (bool, error) { - return (pod.Status.Phase == api.PodRunning), nil + if pod.Status.Phase == api.PodRunning { + return true, nil + } + if pod.Status.Phase == api.PodFailed { + return true, fmt.Errorf("Giving up; pod went into failed status: \n%s", spew.Sprintf("%#v", pod)) + } + return false, nil }) } @@ -265,15 +299,15 @@ func waitForPodSuccessInNamespace(c *client.Client, podName string, contName str if !ok { Logf("No Status.Info for container '%s' in pod '%s' yet", contName, podName) } else { - if ci.State.Termination != nil { - if ci.State.Termination.ExitCode == 0 { + if ci.State.Terminated != nil { + if ci.State.Terminated.ExitCode == 0 { By("Saw pod success") return true, nil } else { - return true, fmt.Errorf("pod '%s' terminated with failure: %+v", podName, ci.State.Termination) + return true, fmt.Errorf("pod '%s' terminated with failure: %+v", podName, ci.State.Terminated) } } else { - Logf("Nil State.Termination for container '%s' in pod '%s' in namespace '%s' so far", contName, podName, namespace) + Logf("Nil State.Terminated for container '%s' in pod '%s' in namespace '%s' so far", contName, podName, namespace) } } return false, nil @@ -532,8 +566,8 @@ func testContainerOutputInNamespace(scenarioName string, c *client.Client, pod * Failf("Failed to get pod status: %v", err) } - By(fmt.Sprintf("Trying to get logs from host %s pod %s container %s: %v", - podStatus.Spec.Host, podStatus.Name, containerName, err)) + By(fmt.Sprintf("Trying to get logs from node %s pod %s container %s: %v", + podStatus.Spec.NodeName, podStatus.Name, containerName, err)) var logs []byte start := time.Now() @@ -542,15 +576,15 @@ func testContainerOutputInNamespace(scenarioName string, c *client.Client, pod * logs, err = c.Get(). 
Prefix("proxy"). Resource("nodes"). - Name(podStatus.Spec.Host). + Name(podStatus.Spec.NodeName). Suffix("containerLogs", ns, podStatus.Name, containerName). Do(). Raw() fmt.Sprintf("pod logs:%v\n", string(logs)) By(fmt.Sprintf("pod logs:%v\n", string(logs))) if strings.Contains(string(logs), "Internal Error") { - By(fmt.Sprintf("Failed to get logs from host %q pod %q container %q: %v", - podStatus.Spec.Host, podStatus.Name, containerName, string(logs))) + By(fmt.Sprintf("Failed to get logs from node %q pod %q container %q: %v", + podStatus.Spec.NodeName, podStatus.Name, containerName, string(logs))) time.Sleep(5 * time.Second) continue } @@ -613,15 +647,15 @@ func Diff(oldPods *api.PodList, curPods *api.PodList) PodDiff { // New pods will show up in the curPods list but not in oldPods. They have oldhostname/phase == nonexist. for _, pod := range curPods.Items { - podInfoMap[pod.Name] = &podInfo{hostname: pod.Spec.Host, phase: string(pod.Status.Phase), oldHostname: nonExist, oldPhase: nonExist} + podInfoMap[pod.Name] = &podInfo{hostname: pod.Spec.NodeName, phase: string(pod.Status.Phase), oldHostname: nonExist, oldPhase: nonExist} } // Deleted pods will show up in the oldPods list but not in curPods. They have a hostname/phase == nonexist. for _, pod := range oldPods.Items { if info, ok := podInfoMap[pod.Name]; ok { - info.oldHostname, info.oldPhase = pod.Spec.Host, string(pod.Status.Phase) + info.oldHostname, info.oldPhase = pod.Spec.NodeName, string(pod.Status.Phase) } else { - podInfoMap[pod.Name] = &podInfo{hostname: nonExist, phase: nonExist, oldHostname: pod.Spec.Host, oldPhase: string(pod.Status.Phase)} + podInfoMap[pod.Name] = &podInfo{hostname: nonExist, phase: nonExist, oldHostname: pod.Spec.NodeName, oldPhase: string(pod.Status.Phase)} } } return podInfoMap @@ -733,7 +767,7 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int) error failedContainers = failedContainers + v.restarts } } else if p.Status.Phase == api.PodPending { - if p.Spec.Host == "" { + if p.Spec.NodeName == "" { waiting++ } else { pending++ @@ -869,10 +903,10 @@ func FailedContainers(pod api.Pod) map[string]ContainerFailures { return nil } else { for _, status := range statuses { - if status.State.Termination != nil { - states[status.ContainerID] = ContainerFailures{status: status.State.Termination} - } else if status.LastTerminationState.Termination != nil { - states[status.ContainerID] = ContainerFailures{status: status.LastTerminationState.Termination} + if status.State.Terminated != nil { + states[status.ContainerID] = ContainerFailures{status: status.State.Terminated} + } else if status.LastTerminationState.Terminated != nil { + states[status.ContainerID] = ContainerFailures{status: status.LastTerminationState.Terminated} } if status.RestartCount > 0 { var ok bool @@ -956,41 +990,7 @@ func SSH(cmd, host, provider string) (string, string, int, error) { return "", "", 0, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err) } - // Setup the config, dial the server, and open a session. - config := &ssh.ClientConfig{ - User: os.Getenv("USER"), - Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, - } - client, err := ssh.Dial("tcp", host, config) - if err != nil { - return "", "", 0, fmt.Errorf("error getting SSH client to host %s: '%v'", host, err) - } - session, err := client.NewSession() - if err != nil { - return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", host, err) - } - defer session.Close() - - // Run the command. 
- code := 0 - var bout, berr bytes.Buffer - session.Stdout, session.Stderr = &bout, &berr - if err = session.Run(cmd); err != nil { - // Check whether the command failed to run or didn't complete. - if exiterr, ok := err.(*ssh.ExitError); ok { - // If we got an ExitError and the exit code is nonzero, we'll - // consider the SSH itself successful (just that the command run - // errored on the host). - if code = exiterr.ExitStatus(); code != 0 { - err = nil - } - } else { - // Some other kind of error happened (e.g. an IOError); consider the - // SSH unsuccessful. - err = fmt.Errorf("failed running `%s` on %s: '%v'", cmd, host, err) - } - } - return bout.String(), berr.String(), code, err + return util.RunSSHCommand(cmd, host, signer) } // getSigner returns an ssh.Signer for the provider ("gce", etc.) that can be @@ -1012,21 +1012,7 @@ func getSigner(provider string) (ssh.Signer, error) { key := filepath.Join(keydir, keyfile) Logf("Using SSH key: %s", key) - // Create an actual signer. - file, err := os.Open(key) - if err != nil { - return nil, fmt.Errorf("error opening SSH key %s: '%v'", key, err) - } - defer file.Close() - buffer, err := ioutil.ReadAll(file) - if err != nil { - return nil, fmt.Errorf("error reading SSH key %s: '%v'", key, err) - } - signer, err := ssh.ParsePrivateKey(buffer) - if err != nil { - return nil, fmt.Errorf("error parsing SSH key %s: '%v'", key, err) - } - return signer, nil + return util.MakePrivateKeySigner(key) } // LatencyMetrics stores data about request latency at a given quantile diff --git a/test/integration/client_test.go b/test/integration/client_test.go index 9f193c211b6..097be311df6 100644 --- a/test/integration/client_test.go +++ b/test/integration/client_test.go @@ -102,7 +102,7 @@ func TestClient(t *testing.T) { if actual.Name != got.Name { t.Errorf("expected pod %#v, got %#v", got, actual) } - if actual.Spec.Host != "" { + if actual.Spec.NodeName != "" { t.Errorf("expected pod to be unscheduled, got %#v", actual) } } diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go index 97aad0693ef..3ff952e3547 100644 --- a/test/integration/framework/master_utils.go +++ b/test/integration/framework/master_utils.go @@ -242,7 +242,7 @@ func StartPods(numPods int, host string, restClient *client.Client) error { // Make the rc unique to the given host. controller.Spec.Replicas = numPods - controller.Spec.Template.Spec.Host = host + controller.Spec.Template.Spec.NodeName = host controller.Name = controller.Name + host controller.Spec.Selector["host"] = host controller.Spec.Template.Labels["host"] = host diff --git a/test/integration/scheduler_test.go b/test/integration/scheduler_test.go index bcff273feb1..e1733929c7e 100644 --- a/test/integration/scheduler_test.go +++ b/test/integration/scheduler_test.go @@ -104,7 +104,7 @@ func podScheduled(c *client.Client, podNamespace, podName string) wait.Condition // This could be a connection error so we want to retry. 
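+		// Returning (false, nil) tells the poller to keep retrying rather
+		// than abort with an error.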
return false, nil } - if pod.Spec.Host == "" { + if pod.Spec.NodeName == "" { return false, nil } return true, nil diff --git a/test/soak/serve_hostnames/serve_hostnames.go b/test/soak/serve_hostnames/serve_hostnames.go index 17bd5bc8859..629432f9d9b 100644 --- a/test/soak/serve_hostnames/serve_hostnames.go +++ b/test/soak/serve_hostnames/serve_hostnames.go @@ -185,7 +185,7 @@ func main() { Ports: []api.ContainerPort{{ContainerPort: 9376}}, }, }, - Host: node.Name, + NodeName: node.Name, }, }) glog.V(4).Infof("Pod create %s/%s request took %v", ns, podName, time.Since(t)) diff --git a/www/app/assets/js/app.js b/www/app/assets/js/app.js index d7630a67fe9..e92d09bb291 100644 --- a/www/app/assets/js/app.js +++ b/www/app/assets/js/app.js @@ -1602,7 +1602,7 @@ app.controller('ListPodsCtrl', [ app.controller('ListReplicationControllersCtrl', [ '$scope', '$routeParams', - 'k8sApi', + 'k8sv1Beta3Api', '$location', function($scope, $routeParams, k8sApi, $location) { 'use strict'; @@ -1658,27 +1658,26 @@ app.controller('ListReplicationControllersCtrl', [ var _name = '', _image = ''; - if (replicationController.desiredState.podTemplate.desiredState.manifest.containers) { - Object.keys(replicationController.desiredState.podTemplate.desiredState.manifest.containers) + if (replicationController.spec.template.spec.containers) { + Object.keys(replicationController.spec.template.spec.containers) .forEach(function(key) { - _name += replicationController.desiredState.podTemplate.desiredState.manifest.containers[key].name; - _image += replicationController.desiredState.podTemplate.desiredState.manifest.containers[key].image; + _name += replicationController.spec.template.spec.containers[key].name; + _image += replicationController.spec.template.spec.containers[key].image; }); } - var _name_selector = ''; + var _selectors = ''; - if (replicationController.desiredState.replicaSelector) { - Object.keys(replicationController.desiredState.replicaSelector) - .forEach(function(key) { _name_selector += replicationController.desiredState.replicaSelector[key]; }); + if (replicationController.spec.selector) { + _selectors = _.map(replicationController.spec.selector, function(v, k) { return k + '=' + v }).join(', '); } $scope.content.push({ - controller: replicationController.id, + controller: replicationController.metadata.name, containers: _name, images: _image, - selector: _name_selector, - replicas: replicationController.currentState.replicas + selector: _selectors, + replicas: replicationController.status.replicas }); }); @@ -1700,7 +1699,7 @@ app.controller('ListServicesCtrl', [ '$scope', '$interval', '$routeParams', - 'k8sApi', + 'k8sv1Beta3Api', '$rootScope', '$location', function($scope, $interval, $routeParams, k8sApi, $rootScope, $location) { @@ -1712,7 +1711,7 @@ app.controller('ListServicesCtrl', [ {name: 'Labels', field: 'labels'}, {name: 'Selector', field: 'selector'}, {name: 'IP', field: 'ip'}, - {name: 'Port', field: 'port'} + {name: 'Ports', field: 'port'} ]; $scope.custom = { @@ -1760,41 +1759,36 @@ app.controller('ListServicesCtrl', [ if (data.items.constructor === Array) { data.items.forEach(function(service) { - var _name = '', _uses = '', _component = '', _provider = ''; + var _labels = ''; - if (service.labels !== null && typeof service.labels === 'object') { - Object.keys(service.labels) - .forEach(function(key) { - if (key == 'name') { - _name += ',' + service.labels[key]; - } - if (key == 'component') { - _component += ',' + service.labels[key]; - } - if (key == 'provider') { - _provider += ',' 
+ service.labels[key]; - } - }); + if (service.metadata.labels) { + _labels = _.map(service.metadata.labels, function(v, k) { return k + '=' + v }).join(', '); } var _selectors = ''; - if (service.selector !== null && typeof service.selector === 'object') { - Object.keys(service.selector) - .forEach(function(key) { - if (key == 'name') { - _selectors += ',' + service.selector[key]; - } - }); + if (service.spec.selector) { + _selectors = _.map(service.spec.selector, function(v, k) { return k + '=' + v }).join(', '); + } + + var _ports = ''; + + if (service.spec.ports) { + _ports = _.map(service.spec.ports, function(p) { + var n = ''; + if(p.name) + n = p.name + ': '; + n = n + p.port; + return n; + }).join(', '); } $scope.content.push({ - name: service.id, - ip: service.portalIP, - port: service.port, - selector: addLabel(_fixComma(_selectors), 'name='), - labels: addLabel(_fixComma(_name), 'name=') + ' ' + addLabel(_fixComma(_component), 'component=') + ' ' + - addLabel(_fixComma(_provider), 'provider=') + name: service.metadata.name, + ip: service.spec.portalIP, + port: _ports, + selector: _selectors, + labels: _labels }); }); } @@ -1897,7 +1891,7 @@ ReplicationController.prototype.handleError = function(data, status, headers, co app.controller('ReplicationControllerCtrl', [ '$scope', '$routeParams', - 'k8sApi', + 'k8sv1Beta3Api', function($scope, $routeParams, k8sApi) { $scope.controller = new ReplicationController(); $scope.controller.k8sApi = k8sApi; @@ -1905,6 +1899,7 @@ app.controller('ReplicationControllerCtrl', [ $scope.controller.getData($routeParams.replicationControllerId); $scope.doTheBack = function() { window.history.back(); }; + $scope.getSelectorUrlFragment = function(sel){ return _.map(sel, function(v, k) { return k + '=' + v }).join(','); }; } ]); @@ -1933,7 +1928,7 @@ ServiceController.prototype.handleError = function(data, status, headers, config app.controller('ServiceCtrl', [ '$scope', '$routeParams', - 'k8sApi', + 'k8sv1Beta3Api', '$location', function($scope, $routeParams, k8sApi, $location) { $scope.controller = new ServiceController(); @@ -1942,6 +1937,8 @@ app.controller('ServiceCtrl', [ $scope.controller.getData($routeParams.serviceId); $scope.doTheBack = function() { window.history.back(); }; + $scope.go = function(d) { $location.path('/dashboard/services/' + d.metadata.name); } + $scope.getSelectorUrlFragment = function(sel){ return _.map(sel, function(v, k) { return k + '=' + v }).join(','); }; } ]); diff --git a/www/app/components/dashboard/views/replication.html b/www/app/components/dashboard/views/replication.html index 2704b92cf0f..4897bfca212 100644 --- a/www/app/components/dashboard/views/replication.html +++ b/www/app/components/dashboard/views/replication.html @@ -10,37 +10,46 @@
diff --git a/www/app/components/dashboard/views/replication.html b/www/app/components/dashboard/views/replication.html
index 2704b92cf0f..4897bfca212 100644
--- a/www/app/components/dashboard/views/replication.html
+++ b/www/app/components/dashboard/views/replication.html
@@ -10,37 +10,46 @@
 Replication Controller:
-  {{replicationController.id}}
+  {{replicationController.metadata.name}}
 Created
-  {{replicationController.creationTimestamp | date:'medium'}}
+  {{replicationController.metadata.creationTimestamp | date:'medium'}}
 Desired Replicas
-  {{replicationController.desiredState.replicas}}
+  {{replicationController.spec.replicas}}
 Current Replicas
-  {{replicationController.currentState.replicas}}
+  {{replicationController.status.replicas}}
+Selector
+  {{label}}={{value}}{{$last ? '' : ', '}}
 Labels
   {{label}}: {{value}}
 Related Pods
 Related Services
@@ -49,9 +58,13 @@
@@ -59,8 +72,13 @@
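The label and selector columns in both list controllers, and the new Selector rows in these templates, all lean on the same underscore.js idiom: `_.map` over a plain object calls the iteratee as `(value, key)`, which is why the callbacks are written `function(v, k)`. A standalone sketch, with a made-up selector object (in the dashboard, `_` is the global underscore; the `require` is only so this runs under Node):

```js
var _ = require('underscore');

var selector = {name: 'frontend', tier: 'web'}; // sample only

// _.map over an object yields one result per key; callback receives (value, key).
var display = _.map(selector, function(v, k) { return k + '=' + v; }).join(', ');

console.log(display); // "name=frontend, tier=web"
```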
diff --git a/www/app/components/dashboard/views/service.html b/www/app/components/dashboard/views/service.html
index 63dd7ad5379..4fb03046d59 100644
--- a/www/app/components/dashboard/views/service.html
+++ b/www/app/components/dashboard/views/service.html
@@ -10,68 +10,81 @@
 Service:
-  {{service.id}}
+  {{service.metadata.name}}
 Created
-  {{service.creationTimestamp | date:'medium'}}
+  {{service.metadata.creationTimestamp | date:'medium'}}
-Port
-  {{service.port}}
+Ports
+  {{port.name}}: {{port.port}}/{{port.protocol}}
-Container Port
-  {{service.containerPort}}
+IP
+  {{service.spec.portalIP}}
-Portal IP
-  {{service.portalIP}}
+Public IPs
+  {{service.spec.publicIPs | join:', '}}
-Protocol
-  {{service.protocol}}
 Session Affinity
-  {{service.sessionAffinity}}
+  {{service.spec.sessionAffinity}}
 Labels
   {{label}}: {{value}}
+Selector
+  {{label}}={{value}}{{$last ? '' : ', '}}
 Related Pods
 Related Replication Controllers
@@ -79,8 +92,12 @@
diff --git a/www/master/components/dashboard/js/modules/controllers/listReplicationControllersController.js b/www/master/components/dashboard/js/modules/controllers/listReplicationControllersController.js
index 99eaaf84667..dff65305338 100644
--- a/www/master/components/dashboard/js/modules/controllers/listReplicationControllersController.js
+++ b/www/master/components/dashboard/js/modules/controllers/listReplicationControllersController.js
@@ -6,7 +6,7 @@
 app.controller('ListReplicationControllersCtrl', [
   '$scope',
   '$routeParams',
-  'k8sApi',
+  'k8sv1Beta3Api',
   '$location',
   function($scope, $routeParams, k8sApi, $location) {
     'use strict';
@@ -62,27 +62,26 @@ app.controller('ListReplicationControllersCtrl', [
         var _name = '', _image = '';
 
-        if (replicationController.desiredState.podTemplate.desiredState.manifest.containers) {
-          Object.keys(replicationController.desiredState.podTemplate.desiredState.manifest.containers)
+        if (replicationController.spec.template.spec.containers) {
+          Object.keys(replicationController.spec.template.spec.containers)
              .forEach(function(key) {
-                _name += replicationController.desiredState.podTemplate.desiredState.manifest.containers[key].name;
-                _image += replicationController.desiredState.podTemplate.desiredState.manifest.containers[key].image;
+                _name += replicationController.spec.template.spec.containers[key].name;
+                _image += replicationController.spec.template.spec.containers[key].image;
              });
         }
 
-        var _name_selector = '';
+        var _selectors = '';
 
-        if (replicationController.desiredState.replicaSelector) {
-          Object.keys(replicationController.desiredState.replicaSelector)
-              .forEach(function(key) { _name_selector += replicationController.desiredState.replicaSelector[key]; });
+        if (replicationController.spec.selector) {
+          _selectors = _.map(replicationController.spec.selector, function(v, k) { return k + '=' + v }).join(', ');
         }
 
         $scope.content.push({
-          controller: replicationController.id,
+          controller: replicationController.metadata.name,
           containers: _name,
           images: _image,
-          selector: _name_selector,
-          replicas: replicationController.currentState.replicas
+          selector: _selectors,
+          replicas: replicationController.status.replicas
         });
       });
diff --git a/www/master/components/dashboard/js/modules/controllers/listServicesController.js b/www/master/components/dashboard/js/modules/controllers/listServicesController.js
index 5472d784023..0fada5198ba 100644
--- a/www/master/components/dashboard/js/modules/controllers/listServicesController.js
+++ b/www/master/components/dashboard/js/modules/controllers/listServicesController.js
@@ -7,7 +7,7 @@
 app.controller('ListServicesCtrl', [
   '$scope',
   '$interval',
   '$routeParams',
-  'k8sApi',
+  'k8sv1Beta3Api',
   '$rootScope',
   '$location',
   function($scope, $interval, $routeParams, k8sApi, $rootScope, $location) {
@@ -19,7 +19,7 @@ app.controller('ListServicesCtrl', [
       {name: 'Labels', field: 'labels'},
       {name: 'Selector', field: 'selector'},
       {name: 'IP', field: 'ip'},
-      {name: 'Port', field: 'port'}
+      {name: 'Ports', field: 'port'}
     ];
 
     $scope.custom = {
@@ -67,41 +67,36 @@ app.controller('ListServicesCtrl', [
       if (data.items.constructor === Array) {
         data.items.forEach(function(service) {
 
-          var _name = '', _uses = '', _component = '', _provider = '';
+          var _labels = '';
 
-          if (service.labels !== null && typeof service.labels === 'object') {
-            Object.keys(service.labels)
-                .forEach(function(key) {
-                  if (key == 'name') {
-                    _name += ',' + service.labels[key];
-                  }
-                  if (key == 'component') {
-                    _component += ',' + service.labels[key];
-                  }
-                  if (key == 'provider') {
-                    _provider += ',' + service.labels[key];
-                  }
-                });
+          if (service.metadata.labels) {
+            _labels = _.map(service.metadata.labels, function(v, k) { return k + '=' + v }).join(', ');
           }
 
           var _selectors = '';
 
-          if (service.selector !== null && typeof service.selector === 'object') {
-            Object.keys(service.selector)
-                .forEach(function(key) {
-                  if (key == 'name') {
-                    _selectors += ',' + service.selector[key];
-                  }
-                });
+          if (service.spec.selector) {
+            _selectors = _.map(service.spec.selector, function(v, k) { return k + '=' + v }).join(', ');
+          }
+
+          var _ports = '';
+
+          if (service.spec.ports) {
+            _ports = _.map(service.spec.ports, function(p) {
+              var n = '';
+              if(p.name)
+                n = p.name + ': ';
+              n = n + p.port;
+              return n;
+            }).join(', ');
           }
 
           $scope.content.push({
-            name: service.id,
-            ip: service.portalIP,
-            port: service.port,
-            selector: addLabel(_fixComma(_selectors), 'name='),
-            labels: addLabel(_fixComma(_name), 'name=') + ' ' + addLabel(_fixComma(_component), 'component=') + ' ' +
-                    addLabel(_fixComma(_provider), 'provider=')
+            name: service.metadata.name,
+            ip: service.spec.portalIP,
+            port: _ports,
+            selector: _selectors,
+            labels: _labels
          });
        });
      }
diff --git a/www/master/components/dashboard/js/modules/controllers/replicationController.js b/www/master/components/dashboard/js/modules/controllers/replicationController.js
index 781f50c4c87..60366ec8cf1 100644
--- a/www/master/components/dashboard/js/modules/controllers/replicationController.js
+++ b/www/master/components/dashboard/js/modules/controllers/replicationController.js
@@ -22,7 +22,7 @@ ReplicationController.prototype.handleError = function(data, status, headers, co
 app.controller('ReplicationControllerCtrl', [
   '$scope',
   '$routeParams',
-  'k8sApi',
+  'k8sv1Beta3Api',
   function($scope, $routeParams, k8sApi) {
     $scope.controller = new ReplicationController();
     $scope.controller.k8sApi = k8sApi;
@@ -30,6 +30,7 @@ app.controller('ReplicationControllerCtrl', [
     $scope.controller.getData($routeParams.replicationControllerId);
 
     $scope.doTheBack = function() { window.history.back(); };
+    $scope.getSelectorUrlFragment = function(sel){ return _.map(sel, function(v, k) { return k + '=' + v }).join(','); };
   }
 ]);
diff --git a/www/master/components/dashboard/js/modules/controllers/serviceController.js b/www/master/components/dashboard/js/modules/controllers/serviceController.js
index c4d829780a7..d8cb6a01021 100644
--- a/www/master/components/dashboard/js/modules/controllers/serviceController.js
+++ b/www/master/components/dashboard/js/modules/controllers/serviceController.js
@@ -22,7 +22,7 @@ ServiceController.prototype.handleError = function(data, status, headers, config
 app.controller('ServiceCtrl', [
   '$scope',
   '$routeParams',
-  'k8sApi',
+  'k8sv1Beta3Api',
   '$location',
   function($scope, $routeParams, k8sApi, $location) {
     $scope.controller = new ServiceController();
@@ -31,6 +31,8 @@ app.controller('ServiceCtrl', [
     $scope.controller.getData($routeParams.serviceId);
 
     $scope.doTheBack = function() { window.history.back(); };
+    $scope.go = function(d) { $location.path('/dashboard/services/' + d.metadata.name); }
+    $scope.getSelectorUrlFragment = function(sel){ return _.map(sel, function(v, k) { return k + '=' + v }).join(','); };
   }
 ]);
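The `getSelectorUrlFragment` helper added to both detail controllers serializes a selector into a comma-separated `key=value` string suitable for a URL. A sketch of the round trip; the `parseSelectorUrlFragment` inverse is hypothetical and only illustrates the intended format, it is not part of this patch:

```js
var _ = require('underscore');

// As added to ReplicationControllerCtrl and ServiceCtrl above.
function getSelectorUrlFragment(sel) {
  return _.map(sel, function(v, k) { return k + '=' + v; }).join(',');
}

// Hypothetical inverse, shown only to make the format concrete.
function parseSelectorUrlFragment(fragment) {
  var sel = {};
  fragment.split(',').forEach(function(pair) {
    var kv = pair.split('=');
    sel[kv[0]] = kv[1];
  });
  return sel;
}

var frag = getSelectorUrlFragment({name: 'frontend', tier: 'web'});
console.log(frag);                           // "name=frontend,tier=web"
console.log(parseSelectorUrlFragment(frag)); // { name: 'frontend', tier: 'web' }
```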
diff --git a/www/master/components/dashboard/views/replication.html b/www/master/components/dashboard/views/replication.html
index 2704b92cf0f..4897bfca212 100644
--- a/www/master/components/dashboard/views/replication.html
+++ b/www/master/components/dashboard/views/replication.html
@@ -10,37 +10,46 @@
 Replication Controller:
-  {{replicationController.id}}
+  {{replicationController.metadata.name}}
 Created
-  {{replicationController.creationTimestamp | date:'medium'}}
+  {{replicationController.metadata.creationTimestamp | date:'medium'}}
 Desired Replicas
-  {{replicationController.desiredState.replicas}}
+  {{replicationController.spec.replicas}}
 Current Replicas
-  {{replicationController.currentState.replicas}}
+  {{replicationController.status.replicas}}
+Selector
+  {{label}}={{value}}{{$last ? '' : ', '}}
 Labels
   {{label}}: {{value}}
 Related Pods
 Related Services
@@ -49,9 +58,13 @@
@@ -59,8 +72,13 @@
diff --git a/www/master/components/dashboard/views/service.html b/www/master/components/dashboard/views/service.html
index 63dd7ad5379..4fb03046d59 100644
--- a/www/master/components/dashboard/views/service.html
+++ b/www/master/components/dashboard/views/service.html
@@ -10,68 +10,81 @@
 Service:
-  {{service.id}}
+  {{service.metadata.name}}
 Created
-  {{service.creationTimestamp | date:'medium'}}
+  {{service.metadata.creationTimestamp | date:'medium'}}
-Port
-  {{service.port}}
+Ports
+  {{port.name}}: {{port.port}}/{{port.protocol}}
-Container Port
-  {{service.containerPort}}
+IP
+  {{service.spec.portalIP}}
-Portal IP
-  {{service.portalIP}}
+Public IPs
+  {{service.spec.publicIPs | join:', '}}
-Protocol
-  {{service.protocol}}
 Session Affinity
-  {{service.sessionAffinity}}
+  {{service.spec.sessionAffinity}}
 Labels
   {{label}}: {{value}}
+Selector
+  {{label}}={{value}}{{$last ? '' : ', '}}
 Related Pods
 Related Replication Controllers
@@ -79,8 +92,12 @@
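The new Ports column in the services list and the per-port rows in service.html both reduce `service.spec.ports` to a `name: port` style summary. A self-contained sketch of the same reduction; the sample service object is an abbreviated illustration, and `require` stands in for the dashboard's global `_`:

```js
var _ = require('underscore');

// Abbreviated v1beta3 service, for illustration only.
var service = {
  spec: {
    ports: [
      {name: 'http', port: 80, protocol: 'TCP'},
      {name: '', port: 443, protocol: 'TCP'}
    ]
  }
};

// Mirrors the list controller: prefix each port with its name when present.
var summary = _.map(service.spec.ports, function(p) {
  var n = '';
  if (p.name) n = p.name + ': ';
  return n + p.port;
}).join(', ');

console.log(summary); // "http: 80, 443"
```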