Merge branch 'master' of github.com:GoogleCloudPlatform/kubernetes into build-local

Conflicts:
	cluster/juju/bundles/local.yaml
Author: Matt Bruzek
Date: 2015-05-29 17:36:48 -05:00
288 changed files with 6530 additions and 3377 deletions

Godeps/Godeps.json (generated)
View File

@@ -157,8 +157,8 @@
}, },
{ {
"ImportPath": "github.com/emicklei/go-restful", "ImportPath": "github.com/emicklei/go-restful",
"Comment": "v1.1.3-45-gd487287", "Comment": "v1.1.3-54-gbdfb7d4",
"Rev": "d4872876992d385f0e69b007f154e5633bdb40af" "Rev": "bdfb7d41639a84ea7c36df648e5865cd9fbf21e2"
}, },
{ {
"ImportPath": "github.com/evanphx/json-patch", "ImportPath": "github.com/evanphx/json-patch",
@@ -475,7 +475,7 @@
}, },
{ {
"ImportPath": "golang.org/x/oauth2", "ImportPath": "golang.org/x/oauth2",
"Rev": "2e66694fea36dc820636630792a55cdc6987e05b" "Rev": "b5adcc2dcdf009d0391547edc6ecbaff889f5bb9"
}, },
{ {
"ImportPath": "google.golang.org/appengine", "ImportPath": "google.golang.org/appengine",

View File

@@ -0,0 +1,61 @@
package main
import (
"log"
"net/http"
"github.com/emicklei/go-restful"
"github.com/emicklei/go-restful/swagger"
)
type Book struct {
Title string
Author string
}
func main() {
ws := new(restful.WebService)
ws.Path("/books")
ws.Consumes(restful.MIME_JSON, restful.MIME_XML)
ws.Produces(restful.MIME_JSON, restful.MIME_XML)
restful.Add(ws)
ws.Route(ws.GET("/{medium}").To(noop).
Doc("Search all books").
Param(ws.PathParameter("medium", "digital or paperback").DataType("string")).
Param(ws.QueryParameter("language", "en,nl,de").DataType("string")).
Param(ws.HeaderParameter("If-Modified-Since", "last known timestamp").DataType("datetime")).
Do(returns200, returns500))
ws.Route(ws.PUT("/{medium}").To(noop).
Doc("Add a new book").
Param(ws.PathParameter("medium", "digital or paperback").DataType("string")).
Reads(Book{}))
// You can install the Swagger Service which provides a nice Web UI on your REST API
// You need to download the Swagger HTML5 assets and change the FilePath location in the config below.
// Open http://localhost:8080/apidocs and enter http://localhost:8080/apidocs.json in the api input field.
config := swagger.Config{
WebServices: restful.DefaultContainer.RegisteredWebServices(), // you control what services are visible
WebServicesUrl: "http://localhost:8080",
ApiPath: "/apidocs.json",
// Optionally, specify where the UI is located
SwaggerPath: "/apidocs/",
SwaggerFilePath: "/Users/emicklei/xProjects/swagger-ui/dist"}
swagger.RegisterSwaggerService(config, restful.DefaultContainer)
log.Printf("start listening on localhost:8080")
server := &http.Server{Addr: ":8080", Handler: restful.DefaultContainer}
log.Fatal(server.ListenAndServe())
}
func noop(req *restful.Request, resp *restful.Response) {}
func returns200(b *restful.RouteBuilder) {
b.Returns(http.StatusOK, "OK", Book{})
}
func returns500(b *restful.RouteBuilder) {
b.Returns(http.StatusInternalServerError, "Bummer, something went wrong", nil)
}

View File

@@ -35,6 +35,7 @@ type ParameterData struct {
Required bool Required bool
AllowableValues map[string]string AllowableValues map[string]string
AllowMultiple bool AllowMultiple bool
DefaultValue string
} }
// Data returns the state of the Parameter // Data returns the state of the Parameter
@@ -70,26 +71,32 @@ func (p *Parameter) beForm() *Parameter {
return p return p
} }
// Required sets the required field and return the receiver // Required sets the required field and returns the receiver
func (p *Parameter) Required(required bool) *Parameter { func (p *Parameter) Required(required bool) *Parameter {
p.data.Required = required p.data.Required = required
return p return p
} }
// AllowMultiple sets the allowMultiple field and return the receiver // AllowMultiple sets the allowMultiple field and returns the receiver
func (p *Parameter) AllowMultiple(multiple bool) *Parameter { func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
p.data.AllowMultiple = multiple p.data.AllowMultiple = multiple
return p return p
} }
// AllowableValues sets the allowableValues field and return the receiver // AllowableValues sets the allowableValues field and returns the receiver
func (p *Parameter) AllowableValues(values map[string]string) *Parameter { func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
p.data.AllowableValues = values p.data.AllowableValues = values
return p return p
} }
// DataType sets the dataType field and return the receiver // DataType sets the dataType field and returns the receiver
func (p *Parameter) DataType(typeName string) *Parameter { func (p *Parameter) DataType(typeName string) *Parameter {
p.data.DataType = typeName p.data.DataType = typeName
return p return p
} }
// DefaultValue sets the default value field and returns the receiver
func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
p.data.DefaultValue = stringRepresentation
return p
}
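
DefaultValue chains like the other Parameter builders and fills the new DefaultValue field of ParameterData. A minimal sketch of a route declaring a default for a query parameter follows; the service path, handler, and fallback logic are illustrative and not taken from this change:

package main

import (
	"io"
	"net/http"

	"github.com/emicklei/go-restful"
)

func listBooks(req *restful.Request, resp *restful.Response) {
	// The declared default is documentation metadata; apply the fallback
	// explicitly when the query parameter is absent.
	lang := req.QueryParameter("language")
	if lang == "" {
		lang = "en"
	}
	io.WriteString(resp, "listing books in "+lang)
}

func main() {
	ws := new(restful.WebService).Path("/books")
	ws.Route(ws.GET("/").To(listBooks).
		Doc("Search all books").
		Param(ws.QueryParameter("language", "en,nl,de").
			DataType("string").
			DefaultValue("en"))) // surfaces in the generated Swagger spec
	restful.Add(ws)
	http.ListenAndServe(":8080", restful.DefaultContainer)
}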

View File

@@ -4,6 +4,14 @@ import (
"testing" "testing"
) )
// accept should match produces
func TestMatchesAcceptPlainTextWhenProducePlainTextAsLast(t *testing.T) {
r := Route{Produces: []string{"application/json", "text/plain"}}
if !r.matchesAccept("text/plain") {
t.Errorf("accept should match text/plain")
}
}
// accept should match produces // accept should match produces
func TestMatchesAcceptStar(t *testing.T) { func TestMatchesAcceptStar(t *testing.T) {
r := Route{Produces: []string{"application/xml"}} r := Route{Produces: []string{"application/xml"}}

View File

@@ -1,5 +1,10 @@
Change history of swagger Change history of swagger
= =
2015-05-25
- (api break) changed the type of Properties in Model
- (api break) changed the type of Models in ApiDeclaration
- (api break) changed the parameter type of PostBuildDeclarationMapFunc
2015-04-09 2015-04-09
- add ModelBuildable interface for customization of Model - add ModelBuildable interface for customization of Model
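
For code that consumed the old map-based fields, the breaks listed under 2015-05-25 are mechanical: map indexing and range loops over Models and Properties become At, Put, and Do calls on the new list types. A rough sketch of the translation, assuming it is compiled inside the swagger package; the model id is illustrative:

package swagger

// migrationSketch is illustrative only and not part of this change.
func migrationSketch() {
	decl := ApiDeclaration{Models: ModelList{}}
	// before: decl.Models["swagger.Book"] = Model{Id: "swagger.Book"}
	decl.Models.Put("swagger.Book", Model{Id: "swagger.Book"})
	// before: m, ok := decl.Models["swagger.Book"]
	if m, ok := decl.Models.At("swagger.Book"); ok {
		_ = m.Id
	}
	// before: for name, m := range decl.Models { ... }
	decl.Models.Do(func(name string, m Model) {
		_, _ = name, m
	})
}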

View File

@@ -23,6 +23,6 @@ Now, you can install the Swagger WebService for serving the Swagger specification
Notes Notes
-- --
- Use RouteBuilder.Operation(..) to set the Nickname field of the API spec - The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..)
- The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints. - The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints.
- Use tag "description" to annotate a struct field with a description to show in the UI - Use tag "description" to annotate a struct field with a description to show in the UI

View File

@@ -0,0 +1,64 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// ApiDeclarationList maintains an ordered list of ApiDeclaration.
type ApiDeclarationList struct {
List []ApiDeclaration
}
// At returns the ApiDeclaration by its path unless absent, then ok is false
func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) {
for _, each := range l.List {
if each.ResourcePath == path {
return each, true
}
}
return a, false
}
// Put adds or replaces an ApiDeclaration with the given path
func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
// maybe replace existing
for i, each := range l.List {
if each.ResourcePath == path {
// replace
l.List[i] = a
return
}
}
// add
l.List = append(l.List, a)
}
// Do enumerates all the declarations, each with its assigned path
func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
for _, each := range l.List {
block(each.ResourcePath, each)
}
}
// MarshalJSON writes the ApiDeclarationList as if it was a map[string]ApiDeclaration
func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
buf.WriteString("{\n")
for i, each := range l.List {
buf.WriteString("\"")
buf.WriteString(each.ResourcePath)
buf.WriteString("\": ")
encoder.Encode(each)
if i < len(l.List)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("}")
return buf.Bytes(), nil
}
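
These ordered list types replace plain maps so that the output still marshals as a JSON object but the entry order is stable. A sketch of what the custom MarshalJSON produces, assuming it sits inside the swagger package; the resource paths are illustrative:

package swagger

import (
	"encoding/json"
	"fmt"
)

// declarationListSketch is illustrative only and not part of this change.
func declarationListSketch() {
	l := ApiDeclarationList{}
	l.Put("/books", ApiDeclaration{ResourcePath: "/books"})
	l.Put("/authors", ApiDeclaration{ResourcePath: "/authors"})
	if d, ok := l.At("/books"); ok {
		_ = d.ResourcePath
	}
	data, err := json.Marshal(l)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Marshals as an object keyed by resource path, in insertion order:
	// {"/books": {...}, "/authors": {...}}
	fmt.Println(string(data))
}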

View File

@@ -7,7 +7,7 @@ import (
) )
// PostBuildDeclarationMapFunc can be used to modify the api declaration map. // PostBuildDeclarationMapFunc can be used to modify the api declaration map.
type PostBuildDeclarationMapFunc func(apiDeclarationMap map[string]ApiDeclaration) type PostBuildDeclarationMapFunc func(apiDeclarationMap *ApiDeclarationList)
type Config struct { type Config struct {
// url where the services are available, e.g. http://localhost:8080 // url where the services are available, e.g. http://localhost:8080

View File

@@ -13,7 +13,7 @@ type ModelBuildable interface {
} }
type modelBuilder struct { type modelBuilder struct {
Models map[string]Model Models *ModelList
} }
// addModelFrom creates and adds a Model to the builder and detects and calls // addModelFrom creates and adds a Model to the builder and detects and calls
@@ -23,7 +23,7 @@ func (b modelBuilder) addModelFrom(sample interface{}) {
// allow customizations // allow customizations
if buildable, ok := sample.(ModelBuildable); ok { if buildable, ok := sample.(ModelBuildable); ok {
modelOrNil = buildable.PostBuildModel(modelOrNil) modelOrNil = buildable.PostBuildModel(modelOrNil)
b.Models[modelOrNil.Id] = *modelOrNil b.Models.Put(modelOrNil.Id, *modelOrNil)
} }
} }
} }
@@ -38,16 +38,16 @@ func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
return nil return nil
} }
// see if we already have visited this model // see if we already have visited this model
if _, ok := b.Models[modelName]; ok { if _, ok := b.Models.At(modelName); ok {
return nil return nil
} }
sm := Model{ sm := Model{
Id: modelName, Id: modelName,
Required: []string{}, Required: []string{},
Properties: map[string]ModelProperty{}} Properties: ModelPropertyList{}}
// reference the model before further initializing (enables recursive structs) // reference the model before further initializing (enables recursive structs)
b.Models[modelName] = sm b.Models.Put(modelName, sm)
// check for slice or array // check for slice or array
if st.Kind() == reflect.Slice || st.Kind() == reflect.Array { if st.Kind() == reflect.Slice || st.Kind() == reflect.Array {
@@ -70,11 +70,11 @@ func (b modelBuilder) addModel(st reflect.Type, nameOverride string) *Model {
if b.isPropertyRequired(field) { if b.isPropertyRequired(field) {
sm.Required = append(sm.Required, jsonName) sm.Required = append(sm.Required, jsonName)
} }
sm.Properties[jsonName] = prop sm.Properties.Put(jsonName, prop)
} }
} }
// update model builder with completed model // update model builder with completed model
b.Models[modelName] = sm b.Models.Put(modelName, sm)
return &sm return &sm
} }
@@ -179,13 +179,13 @@ func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonNam
if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) { if field.Name == fieldType.Name() && field.Anonymous && !hasNamedJSONTag(field) {
// embedded struct // embedded struct
sub := modelBuilder{map[string]Model{}} sub := modelBuilder{new(ModelList)}
sub.addModel(fieldType, "") sub.addModel(fieldType, "")
subKey := sub.keyFrom(fieldType) subKey := sub.keyFrom(fieldType)
// merge properties from sub // merge properties from sub
subModel := sub.Models[subKey] subModel, _ := sub.Models.At(subKey)
for k, v := range subModel.Properties { subModel.Properties.Do(func(k string, v ModelProperty) {
model.Properties[k] = v model.Properties.Put(k, v)
// if subModel says this property is required then include it // if subModel says this property is required then include it
required := false required := false
for _, each := range subModel.Required { for _, each := range subModel.Required {
@@ -197,15 +197,15 @@ func (b modelBuilder) buildStructTypeProperty(field reflect.StructField, jsonNam
if required { if required {
model.Required = append(model.Required, k) model.Required = append(model.Required, k)
} }
} })
// add all new referenced models // add all new referenced models
for key, sub := range sub.Models { sub.Models.Do(func(key string, sub Model) {
if key != subKey { if key != subKey {
if _, ok := b.Models[key]; !ok { if _, ok := b.Models.At(key); !ok {
b.Models[key] = sub b.Models.Put(key, sub)
}
} }
} }
})
// empty name signals skip property // empty name signals skip property
return "", prop return "", prop
} }

View File

@@ -0,0 +1,86 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// NamedModel associates a name with a Model (not using its Id)
type NamedModel struct {
Name string
Model Model
}
// ModelList encapsulates a list of NamedModel (association)
type ModelList struct {
List []NamedModel
}
// Put adds or replaces a Model by its name
func (l *ModelList) Put(name string, model Model) {
for i, each := range l.List {
if each.Name == name {
// replace
l.List[i] = NamedModel{name, model}
return
}
}
// add
l.List = append(l.List, NamedModel{name, model})
}
// At returns a Model by its name, ok is false if absent
func (l *ModelList) At(name string) (m Model, ok bool) {
for _, each := range l.List {
if each.Name == name {
return each.Model, true
}
}
return m, false
}
// Do enumerates all the models, each with its assigned name
func (l *ModelList) Do(block func(name string, value Model)) {
for _, each := range l.List {
block(each.Name, each.Model)
}
}
// MarshalJSON writes the ModelList as if it was a map[string]Model
func (l ModelList) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
buf.WriteString("{\n")
for i, each := range l.List {
buf.WriteString("\"")
buf.WriteString(each.Name)
buf.WriteString("\": ")
encoder.Encode(each.Model)
if i < len(l.List)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("}")
return buf.Bytes(), nil
}
// UnmarshalJSON reads back a ModelList. This is an expensive operation.
func (l *ModelList) UnmarshalJSON(data []byte) error {
raw := map[string]interface{}{}
json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
for k, v := range raw {
// produces JSON bytes for each value
data, err := json.Marshal(v)
if err != nil {
return err
}
var m Model
json.NewDecoder(bytes.NewReader(data)).Decode(&m)
l.Put(k, m)
}
return nil
}

View File

@@ -0,0 +1,48 @@
package swagger
import (
"encoding/json"
"testing"
)
func TestModelList(t *testing.T) {
m := Model{}
m.Id = "m"
l := ModelList{}
l.Put("m", m)
k, ok := l.At("m")
if !ok {
t.Error("want model back")
}
if got, want := k.Id, "m"; got != want {
t.Errorf("got %v want %v", got, want)
}
}
func TestModelList_Marshal(t *testing.T) {
l := ModelList{}
m := Model{Id: "myid"}
l.Put("myid", m)
data, err := json.Marshal(l)
if err != nil {
t.Error(err)
}
if got, want := string(data), `{"myid":{"id":"myid","properties":{}}}`; got != want {
t.Errorf("got %v want %v", got, want)
}
}
func TestModelList_Unmarshal(t *testing.T) {
data := `{"myid":{"id":"myid","properties":{}}}`
l := ModelList{}
if err := json.Unmarshal([]byte(data), &l); err != nil {
t.Error(err)
}
m, ok := l.At("myid")
if !ok {
t.Error("expected myid")
}
if got, want := m.Id, "myid"; got != want {
t.Errorf("got %v want %v", got, want)
}
}

View File

@@ -0,0 +1,87 @@
package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"encoding/json"
)
// NamedModelProperty associates a name to a ModelProperty
type NamedModelProperty struct {
Name string
Property ModelProperty
}
// ModelPropertyList encapsulates a list of NamedModelProperty (association)
type ModelPropertyList struct {
List []NamedModelProperty
}
// At returns the ModelProperty by its name unless absent, then ok is false
func (l *ModelPropertyList) At(name string) (p ModelProperty, ok bool) {
for _, each := range l.List {
if each.Name == name {
return each.Property, true
}
}
return p, false
}
// Put adds or replaces a ModelProperty with this name
func (l *ModelPropertyList) Put(name string, prop ModelProperty) {
// maybe replace existing
for i, each := range l.List {
if each.Name == name {
// replace
l.List[i] = NamedModelProperty{Name: name, Property: prop}
return
}
}
// add
l.List = append(l.List, NamedModelProperty{Name: name, Property: prop})
}
// Do enumerates all the properties, each with its assigned name
func (l *ModelPropertyList) Do(block func(name string, value ModelProperty)) {
for _, each := range l.List {
block(each.Name, each.Property)
}
}
// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
func (l ModelPropertyList) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
buf.WriteString("{\n")
for i, each := range l.List {
buf.WriteString("\"")
buf.WriteString(each.Name)
buf.WriteString("\": ")
encoder.Encode(each.Property)
if i < len(l.List)-1 {
buf.WriteString(",\n")
}
}
buf.WriteString("}")
return buf.Bytes(), nil
}
// UnmarshalJSON reads back a ModelPropertyList. This is an expensive operation.
func (l *ModelPropertyList) UnmarshalJSON(data []byte) error {
raw := map[string]interface{}{}
json.NewDecoder(bytes.NewReader(data)).Decode(&raw)
for k, v := range raw {
// produces JSON bytes for each value
data, err := json.Marshal(v)
if err != nil {
return err
}
var m ModelProperty
json.NewDecoder(bytes.NewReader(data)).Decode(&m)
l.Put(k, m)
}
return nil
}

View File

@@ -0,0 +1,47 @@
package swagger
import (
"encoding/json"
"testing"
)
func TestModelPropertyList(t *testing.T) {
l := ModelPropertyList{}
p := ModelProperty{Description: "d"}
l.Put("p", p)
q, ok := l.At("p")
if !ok {
t.Error("expected p")
}
if got, want := q.Description, "d"; got != want {
t.Errorf("got %v want %v", got, want)
}
}
func TestModelPropertyList_Marshal(t *testing.T) {
l := ModelPropertyList{}
p := ModelProperty{Description: "d"}
l.Put("p", p)
data, err := json.Marshal(l)
if err != nil {
t.Error(err)
}
if got, want := string(data), `{"p":{"description":"d"}}`; got != want {
t.Errorf("got %v want %v", got, want)
}
}
func TestModelPropertyList_Unmarshal(t *testing.T) {
data := `{"p":{"description":"d"}}`
l := ModelPropertyList{}
if err := json.Unmarshal([]byte(data), &l); err != nil {
t.Error(err)
}
m, ok := l.At("p")
if !ok {
t.Error("expected p")
}
if got, want := m.Description, "d"; got != want {
t.Errorf("got %v want %v", got, want)
}
}

View File

@@ -1,5 +1,9 @@
package swagger package swagger
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import "github.com/emicklei/go-restful" import "github.com/emicklei/go-restful"
type orderedRouteMap struct { type orderedRouteMap struct {

View File

@@ -1,29 +0,0 @@
package swagger
// Copyright 2014 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
type ParameterSorter []Parameter
func (s ParameterSorter) Len() int {
return len(s)
}
func (s ParameterSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
var typeToSortKey = map[string]string{
"path": "A",
"query": "B",
"form": "C",
"header": "D",
"body": "E",
}
func (s ParameterSorter) Less(i, j int) bool {
// use ordering path,query,form,header,body
pi := s[i]
pj := s[j]
return typeToSortKey[pi.ParamType]+pi.Name < typeToSortKey[pj.ParamType]+pj.Name
}

View File

@@ -1,52 +0,0 @@
package swagger
import (
"bytes"
"sort"
"testing"
)
func TestSortParameters(t *testing.T) {
unsorted := []Parameter{
Parameter{
Name: "form2",
ParamType: "form",
},
Parameter{
Name: "header1",
ParamType: "header",
},
Parameter{
Name: "path2",
ParamType: "path",
},
Parameter{
Name: "body",
ParamType: "body",
},
Parameter{
Name: "path1",
ParamType: "path",
},
Parameter{
Name: "form1",
ParamType: "form",
},
Parameter{
Name: "query2",
ParamType: "query",
},
Parameter{
Name: "query1",
ParamType: "query",
},
}
sort.Sort(ParameterSorter(unsorted))
var b bytes.Buffer
for _, p := range unsorted {
b.WriteString(p.Name + ".")
}
if "path1.path2.query1.query2.form1.form2.header1.body." != b.String() {
t.Fatal("sorting has changed:" + b.String())
}
}

View File

@@ -14,12 +14,12 @@ func (b Boat) PostBuildModel(m *Model) *Model {
// add model property (just to test it can be added; is this a real use case?) // add model property (just to test it can be added; is this a real use case?)
extraType := "string" extraType := "string"
m.Properties["extra"] = ModelProperty{ m.Properties.Put("extra", ModelProperty{
Description: "extra description", Description: "extra description",
DataTypeFields: DataTypeFields{ DataTypeFields: DataTypeFields{
Type: &extraType, Type: &extraType,
}, },
} })
return m return m
} }

View File

@@ -1,19 +0,0 @@
package swagger
// Copyright 2014 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
type ResourceSorter []Resource
func (s ResourceSorter) Len() int {
return len(s)
}
func (s ResourceSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s ResourceSorter) Less(i, j int) bool {
return s[i].Path < s[j].Path
}

View File

@@ -119,7 +119,7 @@ type ApiDeclaration struct {
BasePath string `json:"basePath"` BasePath string `json:"basePath"`
ResourcePath string `json:"resourcePath"` // must start with / ResourcePath string `json:"resourcePath"` // must start with /
Apis []Api `json:"apis,omitempty"` Apis []Api `json:"apis,omitempty"`
Models map[string]Model `json:"models,omitempty"` Models ModelList `json:"models,omitempty"`
Produces []string `json:"produces,omitempty"` Produces []string `json:"produces,omitempty"`
Consumes []string `json:"consumes,omitempty"` Consumes []string `json:"consumes,omitempty"`
Authorizations []Authorization `json:"authorizations,omitempty"` Authorizations []Authorization `json:"authorizations,omitempty"`
@@ -169,7 +169,7 @@ type Model struct {
Id string `json:"id"` Id string `json:"id"`
Description string `json:"description,omitempty"` Description string `json:"description,omitempty"`
Required []string `json:"required,omitempty"` Required []string `json:"required,omitempty"`
Properties map[string]ModelProperty `json:"properties"` Properties ModelPropertyList `json:"properties"`
SubTypes []string `json:"subTypes,omitempty"` SubTypes []string `json:"subTypes,omitempty"`
Discriminator string `json:"discriminator,omitempty"` Discriminator string `json:"discriminator,omitempty"`
} }

View File

@@ -26,7 +26,7 @@ func TestServiceToApi(t *testing.T) {
WebServicesUrl: "http://here.com", WebServicesUrl: "http://here.com",
ApiPath: "/apipath", ApiPath: "/apipath",
WebServices: []*restful.WebService{ws}, WebServices: []*restful.WebService{ws},
PostBuildHandler: func(in map[string]ApiDeclaration) {}, PostBuildHandler: func(in *ApiDeclarationList) {},
} }
sws := newSwaggerService(cfg) sws := newSwaggerService(cfg)
decl := sws.composeDeclaration(ws, "/tests") decl := sws.composeDeclaration(ws, "/tests")
@@ -73,7 +73,7 @@ func TestComposeResponseMessages(t *testing.T) {
responseErrors[400] = restful.ResponseError{Code: 400, Message: "Bad Request", Model: TestItem{}} responseErrors[400] = restful.ResponseError{Code: 400, Message: "Bad Request", Model: TestItem{}}
route := restful.Route{ResponseErrors: responseErrors} route := restful.Route{ResponseErrors: responseErrors}
decl := new(ApiDeclaration) decl := new(ApiDeclaration)
decl.Models = map[string]Model{} decl.Models = ModelList{}
msgs := composeResponseMessages(route, decl) msgs := composeResponseMessages(route, decl)
if msgs[0].ResponseModel != "swagger.TestItem" { if msgs[0].ResponseModel != "swagger.TestItem" {
t.Errorf("got %s want swagger.TestItem", msgs[0].ResponseModel) t.Errorf("got %s want swagger.TestItem", msgs[0].ResponseModel)
@@ -86,7 +86,7 @@ func TestComposeResponseMessageArray(t *testing.T) {
responseErrors[400] = restful.ResponseError{Code: 400, Message: "Bad Request", Model: []TestItem{}} responseErrors[400] = restful.ResponseError{Code: 400, Message: "Bad Request", Model: []TestItem{}}
route := restful.Route{ResponseErrors: responseErrors} route := restful.Route{ResponseErrors: responseErrors}
decl := new(ApiDeclaration) decl := new(ApiDeclaration)
decl.Models = map[string]Model{} decl.Models = ModelList{}
msgs := composeResponseMessages(route, decl) msgs := composeResponseMessages(route, decl)
if msgs[0].ResponseModel != "array[swagger.TestItem]" { if msgs[0].ResponseModel != "array[swagger.TestItem]" {
t.Errorf("got %s want swagger.TestItem", msgs[0].ResponseModel) t.Errorf("got %s want swagger.TestItem", msgs[0].ResponseModel)
@@ -95,23 +95,23 @@ func TestComposeResponseMessageArray(t *testing.T) {
func TestIssue78(t *testing.T) { func TestIssue78(t *testing.T) {
sws := newSwaggerService(Config{}) sws := newSwaggerService(Config{})
models := map[string]Model{} models := new(ModelList)
sws.addModelFromSampleTo(&Operation{}, true, Response{Items: &[]TestItem{}}, models) sws.addModelFromSampleTo(&Operation{}, true, Response{Items: &[]TestItem{}}, models)
model, ok := models["swagger.Response"] model, ok := models.At("swagger.Response")
if !ok { if !ok {
t.Fatal("missing response model") t.Fatal("missing response model")
} }
if "swagger.Response" != model.Id { if "swagger.Response" != model.Id {
t.Fatal("wrong model id:" + model.Id) t.Fatal("wrong model id:" + model.Id)
} }
code, ok := model.Properties["Code"] code, ok := model.Properties.At("Code")
if !ok { if !ok {
t.Fatal("missing code") t.Fatal("missing code")
} }
if "integer" != *code.Type { if "integer" != *code.Type {
t.Fatal("wrong code type:" + *code.Type) t.Fatal("wrong code type:" + *code.Type)
} }
items, ok := model.Properties["Items"] items, ok := model.Properties.At("Items")
if !ok { if !ok {
t.Fatal("missing items") t.Fatal("missing items")
} }

View File

@@ -15,13 +15,13 @@ import (
type SwaggerService struct { type SwaggerService struct {
config Config config Config
apiDeclarationMap map[string]ApiDeclaration apiDeclarationMap *ApiDeclarationList
} }
func newSwaggerService(config Config) *SwaggerService { func newSwaggerService(config Config) *SwaggerService {
return &SwaggerService{ return &SwaggerService{
config: config, config: config,
apiDeclarationMap: map[string]ApiDeclaration{}} apiDeclarationMap: new(ApiDeclarationList)}
} }
// LogInfo is the function that is called when this package needs to log. It defaults to log.Printf // LogInfo is the function that is called when this package needs to log. It defaults to log.Printf
@@ -66,13 +66,13 @@ func RegisterSwaggerService(config Config, wsContainer *restful.Container) {
// use routes // use routes
for _, route := range each.Routes() { for _, route := range each.Routes() {
entry := staticPathFromRoute(route) entry := staticPathFromRoute(route)
_, exists := sws.apiDeclarationMap[entry] _, exists := sws.apiDeclarationMap.At(entry)
if !exists { if !exists {
sws.apiDeclarationMap[entry] = sws.composeDeclaration(each, entry) sws.apiDeclarationMap.Put(entry, sws.composeDeclaration(each, entry))
} }
} }
} else { // use root path } else { // use root path
sws.apiDeclarationMap[each.RootPath()] = sws.composeDeclaration(each, each.RootPath()) sws.apiDeclarationMap.Put(each.RootPath(), sws.composeDeclaration(each, each.RootPath()))
} }
} }
} }
@@ -139,19 +139,22 @@ func enableCORS(req *restful.Request, resp *restful.Response, chain *restful.Fil
func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) { func (sws SwaggerService) getListing(req *restful.Request, resp *restful.Response) {
listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion} listing := ResourceListing{SwaggerVersion: swaggerVersion, ApiVersion: sws.config.ApiVersion}
for k, v := range sws.apiDeclarationMap { sws.apiDeclarationMap.Do(func(k string, v ApiDeclaration) {
ref := Resource{Path: k} ref := Resource{Path: k}
if len(v.Apis) > 0 { // use description of first (could still be empty) if len(v.Apis) > 0 { // use description of first (could still be empty)
ref.Description = v.Apis[0].Description ref.Description = v.Apis[0].Description
} }
listing.Apis = append(listing.Apis, ref) listing.Apis = append(listing.Apis, ref)
} })
sort.Sort(ResourceSorter(listing.Apis))
resp.WriteAsJson(listing) resp.WriteAsJson(listing)
} }
func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) { func (sws SwaggerService) getDeclarations(req *restful.Request, resp *restful.Response) {
decl := sws.apiDeclarationMap[composeRootPath(req)] decl, ok := sws.apiDeclarationMap.At(composeRootPath(req))
if !ok {
resp.WriteErrorString(http.StatusNotFound, "ApiDeclaration not found")
return
}
// unless WebServicesUrl is given // unless WebServicesUrl is given
if len(sws.config.WebServicesUrl) == 0 { if len(sws.config.WebServicesUrl) == 0 {
// update base path from the actual request // update base path from the actual request
@@ -180,7 +183,7 @@ func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix
SwaggerVersion: swaggerVersion, SwaggerVersion: swaggerVersion,
BasePath: sws.config.WebServicesUrl, BasePath: sws.config.WebServicesUrl,
ResourcePath: ws.RootPath(), ResourcePath: ws.RootPath(),
Models: map[string]Model{}, Models: ModelList{},
ApiVersion: ws.Version()} ApiVersion: ws.Version()}
// collect any path parameters // collect any path parameters
@@ -218,8 +221,6 @@ func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix
for _, param := range route.ParameterDocs { for _, param := range route.ParameterDocs {
operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data())) operation.Parameters = append(operation.Parameters, asSwaggerParameter(param.Data()))
} }
// sort parameters
sort.Sort(ParameterSorter(operation.Parameters))
sws.addModelsFromRouteTo(&operation, route, &decl) sws.addModelsFromRouteTo(&operation, route, &decl)
api.Operations = append(api.Operations, operation) api.Operations = append(api.Operations, operation)
@@ -253,7 +254,7 @@ func composeResponseMessages(route restful.Route, decl *ApiDeclaration) (message
if isCollection { if isCollection {
modelName = "array[" + modelName + "]" modelName = "array[" + modelName + "]"
} }
modelBuilder{decl.Models}.addModel(st, "") modelBuilder{&decl.Models}.addModel(st, "")
// reference the model // reference the model
message.ResponseModel = modelName message.ResponseModel = modelName
} }
@@ -265,10 +266,10 @@ func composeResponseMessages(route restful.Route, decl *ApiDeclaration) (message
// addModelsFromRoute takes any read or write sample from the Route and creates a Swagger model from it. // addModelsFromRoute takes any read or write sample from the Route and creates a Swagger model from it.
func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) { func (sws SwaggerService) addModelsFromRouteTo(operation *Operation, route restful.Route, decl *ApiDeclaration) {
if route.ReadSample != nil { if route.ReadSample != nil {
sws.addModelFromSampleTo(operation, false, route.ReadSample, decl.Models) sws.addModelFromSampleTo(operation, false, route.ReadSample, &decl.Models)
} }
if route.WriteSample != nil { if route.WriteSample != nil {
sws.addModelFromSampleTo(operation, true, route.WriteSample, decl.Models) sws.addModelFromSampleTo(operation, true, route.WriteSample, &decl.Models)
} }
} }
@@ -289,7 +290,7 @@ func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
} }
// addModelFromSample creates and adds (or overwrites) a Model from a sample resource // addModelFromSample creates and adds (or overwrites) a Model from a sample resource
func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models map[string]Model) { func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
st := reflect.TypeOf(sample) st := reflect.TypeOf(sample)
isCollection, st := detectCollectionType(st) isCollection, st := detectCollectionType(st)
modelName := modelBuilder{}.keyFrom(st) modelName := modelBuilder{}.keyFrom(st)
@@ -307,6 +308,7 @@ func asSwaggerParameter(param restful.ParameterData) Parameter {
DataTypeFields: DataTypeFields{ DataTypeFields: DataTypeFields{
Type: &param.DataType, Type: &param.DataType,
Format: asFormat(param.DataType), Format: asFormat(param.DataType),
DefaultValue: Special(param.DefaultValue),
}, },
Name: param.Name, Name: param.Name,
Description: param.Description, Description: param.Description,

View File

@@ -15,8 +15,8 @@ func testJsonFromStruct(t *testing.T, sample interface{}, expectedJson string) b
return compareJson(t, string(data), expectedJson) return compareJson(t, string(data), expectedJson)
} }
func modelsFromStruct(sample interface{}) map[string]Model { func modelsFromStruct(sample interface{}) *ModelList {
models := map[string]Model{} models := new(ModelList)
builder := modelBuilder{models} builder := modelBuilder{models}
builder.addModelFrom(sample) builder.addModelFrom(sample)
return models return models
@@ -28,12 +28,12 @@ func compareJson(t *testing.T, actualJsonAsString string, expectedJsonAsString s
var expectedMap map[string]interface{} var expectedMap map[string]interface{}
json.Unmarshal([]byte(expectedJsonAsString), &expectedMap) json.Unmarshal([]byte(expectedJsonAsString), &expectedMap)
if !reflect.DeepEqual(actualMap, expectedMap) { if !reflect.DeepEqual(actualMap, expectedMap) {
fmt.Println("---- expected -----") t.Log("---- expected -----")
fmt.Println(withLineNumbers(expectedJsonAsString)) t.Log(withLineNumbers(expectedJsonAsString))
fmt.Println("---- actual -----") t.Log("---- actual -----")
fmt.Println(withLineNumbers(actualJsonAsString)) t.Log(withLineNumbers(actualJsonAsString))
fmt.Println("---- raw -----") t.Log("---- raw -----")
fmt.Println(actualJsonAsString) t.Log(actualJsonAsString)
t.Error("there are differences") t.Error("there are differences")
return false return false
} }

View File

@@ -0,0 +1,18 @@
package restful
import "testing"
// Use like this:
//
// TraceLogger(testLogger{t})
type testLogger struct {
t *testing.T
}
func (l testLogger) Print(v ...interface{}) {
l.t.Log(v...)
}
func (l testLogger) Printf(format string, v ...interface{}) {
l.t.Logf(format, v...)
}

View File

@@ -108,6 +108,20 @@ func TestContentType415_POST_Issue170(t *testing.T) {
} }
} }
// go test -v -test.run TestContentType406PlainJson ...restful
func TestContentType406PlainJson(t *testing.T) {
tearDown()
TraceLogger(testLogger{t})
Add(newGetPlainTextOrJsonService())
httpRequest, _ := http.NewRequest("GET", "http://here.com/get", nil)
httpRequest.Header.Set("Accept", "text/plain")
httpWriter := httptest.NewRecorder()
DefaultContainer.dispatch(httpWriter, httpRequest)
if got, want := httpWriter.Code, 200; got != want {
t.Errorf("got %v, want %v", got, want)
}
}
// go test -v -test.run TestContentTypeOctet_Issue170 ...restful // go test -v -test.run TestContentTypeOctet_Issue170 ...restful
func TestContentTypeOctet_Issue170(t *testing.T) { func TestContentTypeOctet_Issue170(t *testing.T) {
tearDown() tearDown()
@@ -155,6 +169,13 @@ func newGetOnlyJsonOnlyService() *WebService {
return ws return ws
} }
func newGetPlainTextOrJsonService() *WebService {
ws := new(WebService).Path("")
ws.Produces("text/plain", "application/json")
ws.Route(ws.GET("/get").To(doNothing))
return ws
}
func newGetConsumingOctetStreamService() *WebService { func newGetConsumingOctetStreamService() *WebService {
ws := new(WebService).Path("") ws := new(WebService).Path("")
ws.Consumes("application/octet-stream") ws.Consumes("application/octet-stream")

View File

@@ -8,7 +8,7 @@ install:
- export GOPATH="$HOME/gopath" - export GOPATH="$HOME/gopath"
- mkdir -p "$GOPATH/src/golang.org/x" - mkdir -p "$GOPATH/src/golang.org/x"
- mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
- go get -v -t -d -tags='appengine appenginevm' golang.org/x/oauth2/... - go get -v -t -d golang.org/x/oauth2/...
script: script:
- go test -v -tags='appengine appenginevm' golang.org/x/oauth2/... - go test -v golang.org/x/oauth2/...

View File

@@ -1,25 +1,31 @@
# Contributing # Contributing to Go
We don't use GitHub pull requests but use Gerrit for code reviews, Go is an open source project.
similar to the Go project.
1. Sign one of the contributor license agreements below. It is the work of hundreds of contributors. We appreciate your help!
2. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
3. Get the package by running `go get -d golang.org/x/oauth2`.
Make changes and create a change by running `git codereview change <name>`, provide a commit message, and use `git codereview mail` to create a Gerrit CL.
Keep amending the change and mail as you receive feedback.
For more information about the workflow, see Go's [Contribution Guidelines](https://golang.org/doc/contribute.html).
Before we can accept any pull requests ## Filing issues
we have to jump through a couple of legal hurdles,
primarily a Contributor License Agreement (CLA):
- **If you are an individual writing original source code** When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
and you're sure you own the intellectual property,
then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html). 1. What version of Go are you using (`go version`)?
- **If you work for a company that wants to allow you to contribute your work**, 2. What operating system and processor architecture are you using?
then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html). 3. What did you do?
4. What did you expect to see?
5. What did you see instead?
General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.
## Contributing code
Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.
**We do not accept GitHub pull requests**
(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.
You can sign these electronically (just scroll to the bottom).
After that, we'll be able to accept your pull requests.

View File

@@ -16,3 +16,49 @@ See godoc for further documentation and examples.
* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) * [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
## App Engine
In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
of the [`context.Context`](https://golang.org/x/net/context#Context) type from
the `golang.org/x/net/context` package.
This means it's no longer possible to use the "Classic App Engine"
`appengine.Context` type with the `oauth2` package. (You're using
Classic App Engine if you import the package `"appengine"`.)
To work around this, you may use the new `"google.golang.org/appengine"`
package. This package has almost the same API as the `"appengine"` package,
but it can be fetched with `go get` and used on "Managed VMs" as well as
Classic App Engine.
See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
for information on updating your app.
If you don't want to update your entire app to use the new App Engine packages,
you may use both sets of packages in parallel, using only the new packages
with the `oauth2` package.
import (
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
newappengine "google.golang.org/appengine"
newurlfetch "google.golang.org/appengine/urlfetch"
"appengine"
)
func handler(w http.ResponseWriter, r *http.Request) {
var c appengine.Context = appengine.NewContext(r)
c.Infof("Logging a message with the old package")
var ctx context.Context = newappengine.NewContext(r)
client := &http.Client{
Transport: &oauth2.Transport{
Source: google.AppEngineTokenSource(ctx, "scope"),
Base: &newurlfetch.Transport{Context: ctx},
},
}
client.Get("...")
}

View File

@@ -2,38 +2,24 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build appengine,!appenginevm // +build appengine appenginevm
// App Engine hooks. // App Engine hooks.
package oauth2 package oauth2
import ( import (
"log"
"net/http" "net/http"
"sync"
"appengine" "golang.org/x/net/context"
"appengine/urlfetch" "golang.org/x/oauth2/internal"
"google.golang.org/appengine/urlfetch"
) )
var warnOnce sync.Once
func init() { func init() {
registerContextClientFunc(contextClientAppEngine) internal.RegisterContextClientFunc(contextClientAppEngine)
} }
func contextClientAppEngine(ctx Context) (*http.Client, error) { func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
if actx, ok := ctx.(appengine.Context); ok { return urlfetch.Client(ctx), nil
return urlfetch.Client(actx), nil
}
// The user did it wrong. We'll log once (and hope they see it
// in dev_appserver), but still return (nil, nil) in case some
// other contextClientFunc hook finds a way to proceed.
warnOnce.Do(gaeDoingItWrongHelp)
return nil, nil
}
func gaeDoingItWrongHelp() {
log.Printf("WARNING: you attempted to use the oauth2 package without passing a valid appengine.Context or *http.Request as the oauth2.Context. App Engine requires that all service RPCs (including urlfetch) be associated with an *http.Request/appengine.Context.")
} }

View File

@@ -0,0 +1,112 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package clientcredentials implements the OAuth2.0 "client credentials" token flow,
// also known as the "two-legged OAuth 2.0".
//
// This should be used when the client is acting on its own behalf or when the client
// is the resource owner. It may also be used when requesting access to protected
// resources based on an authorization previously arranged with the authorization
// server.
//
// See http://tools.ietf.org/html/draft-ietf-oauth-v2-31#section-4.4
package clientcredentials
import (
"net/http"
"net/url"
"strings"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/internal"
)
// tokenFromInternal maps an *internal.Token struct into
// an *oauth2.Token struct.
func tokenFromInternal(t *internal.Token) *oauth2.Token {
if t == nil {
return nil
}
tk := &oauth2.Token{
AccessToken: t.AccessToken,
TokenType: t.TokenType,
RefreshToken: t.RefreshToken,
Expiry: t.Expiry,
}
return tk.WithExtra(t.Raw)
}
// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
// This token is then mapped from *internal.Token into an *oauth2.Token which is
// returned along with an error.
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*oauth2.Token, error) {
tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.TokenURL, v)
if err != nil {
return nil, err
}
return tokenFromInternal(tk), nil
}
// Client Credentials Config describes a 2-legged OAuth2 flow, with both the
// client application information and the server's endpoint URLs.
type Config struct {
// ClientID is the application's ID.
ClientID string
// ClientSecret is the application's secret.
ClientSecret string
// TokenURL is the resource server's token endpoint
// URL. This is a constant specific to each server.
TokenURL string
// Scope specifies optional requested permissions.
Scopes []string
}
// Token uses client credentials to retrieve a token.
// The HTTP client to use is derived from the context.
// If nil, http.DefaultClient is used.
func (c *Config) Token(ctx context.Context) (*oauth2.Token, error) {
return retrieveToken(ctx, c, url.Values{
"grant_type": {"client_credentials"},
"scope": internal.CondVal(strings.Join(c.Scopes, " ")),
})
}
// Client returns an HTTP client using the provided token.
// The token will auto-refresh as necessary. The underlying
// HTTP transport will be obtained using the provided context.
// The returned client and its Transport should not be modified.
func (c *Config) Client(ctx context.Context) *http.Client {
return oauth2.NewClient(ctx, c.TokenSource(ctx))
}
// TokenSource returns a TokenSource that returns t until t expires,
// automatically refreshing it as necessary using the provided context and the
// client ID and client secret.
//
// Most users will use Config.Client instead.
func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
source := &tokenSource{
ctx: ctx,
conf: c,
}
return oauth2.ReuseTokenSource(nil, source)
}
type tokenSource struct {
ctx context.Context
conf *Config
}
// Token refreshes the token by using a new client credentials request.
// tokens received this way do not include a refresh token
func (c *tokenSource) Token() (*oauth2.Token, error) {
return retrieveToken(c.ctx, c.conf, url.Values{
"grant_type": {"client_credentials"},
"scope": internal.CondVal(strings.Join(c.conf.Scopes, " ")),
})
}
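
A sketch of how a caller might use the new package for a two-legged flow; the token URL, credentials, scopes, and resource URL are placeholders:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	// Endpoint and credentials are illustrative placeholders.
	conf := &clientcredentials.Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		TokenURL:     "https://provider.example.com/oauth2/token",
		Scopes:       []string{"scope1", "scope2"},
	}
	// Client returns an *http.Client that fetches and refreshes tokens as needed.
	client := conf.Client(oauth2.NoContext)
	resp, err := client.Get("https://provider.example.com/api/resource")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}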

View File

@@ -0,0 +1,96 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package clientcredentials
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"golang.org/x/oauth2"
)
func newConf(url string) *Config {
return &Config{
ClientID: "CLIENT_ID",
ClientSecret: "CLIENT_SECRET",
Scopes: []string{"scope1", "scope2"},
TokenURL: url + "/token",
}
}
type mockTransport struct {
rt func(req *http.Request) (resp *http.Response, err error)
}
func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
return t.rt(req)
}
func TestTokenRequest(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.String() != "/token" {
t.Errorf("authenticate client request URL = %q; want %q", r.URL, "/token")
}
headerAuth := r.Header.Get("Authorization")
if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
}
if got, want := r.Header.Get("Content-Type"), "application/x-www-form-urlencoded"; got != want {
t.Errorf("Content-Type header = %q; want %q", got, want)
}
body, err := ioutil.ReadAll(r.Body)
if err != nil {
r.Body.Close()
}
if err != nil {
t.Errorf("failed reading request body: %s.", err)
}
if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" {
t.Errorf("payload = %q; want %q", string(body), "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2")
}
w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&token_type=bearer"))
}))
defer ts.Close()
conf := newConf(ts.URL)
tok, err := conf.Token(oauth2.NoContext)
if err != nil {
t.Error(err)
}
if !tok.Valid() {
t.Fatalf("token invalid. got: %#v", tok)
}
if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
t.Errorf("Access token = %q; want %q", tok.AccessToken, "90d64460d14870c08c81352a05dedd3465940a7c")
}
if tok.TokenType != "bearer" {
t.Errorf("token type = %q; want %q", tok.TokenType, "bearer")
}
}
func TestTokenRefreshRequest(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.String() == "/somethingelse" {
return
}
if r.URL.String() != "/token" {
t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
}
headerContentType := r.Header.Get("Content-Type")
if headerContentType != "application/x-www-form-urlencoded" {
t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
}
body, _ := ioutil.ReadAll(r.Body)
if string(body) != "client_id=CLIENT_ID&grant_type=client_credentials&scope=scope1+scope2" {
t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
}
}))
defer ts.Close()
conf := newConf(ts.URL)
c := conf.Client(oauth2.NoContext)
c.Get(ts.URL + "/somethingelse")
}

View File

@@ -7,15 +7,10 @@ package oauth2_test
import ( import (
"fmt" "fmt"
"log" "log"
"testing"
"golang.org/x/oauth2" "golang.org/x/oauth2"
) )
// TODO(jbd): Remove after Go 1.4.
// Related to https://codereview.appspot.com/107320046
func TestA(t *testing.T) {}
func ExampleConfig() { func ExampleConfig() {
conf := &oauth2.Config{ conf := &oauth2.Config{
ClientID: "YOUR_CLIENT_ID", ClientID: "YOUR_CLIENT_ID",

View File

@@ -0,0 +1,16 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package facebook provides constants for using OAuth2 to access Facebook.
package facebook
import (
"golang.org/x/oauth2"
)
// Endpoint is Facebook's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://www.facebook.com/dialog/oauth",
TokenURL: "https://graph.facebook.com/oauth/access_token",
}
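
A sketch of plugging the new endpoint into a standard three-legged oauth2.Config; the app credentials, redirect URL, and scope are placeholders:

package main

import (
	"fmt"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/facebook"
)

func main() {
	// Credentials and redirect URL are illustrative placeholders.
	conf := &oauth2.Config{
		ClientID:     "APP_ID",
		ClientSecret: "APP_SECRET",
		RedirectURL:  "https://example.com/oauth2/callback",
		Scopes:       []string{"email"},
		Endpoint:     facebook.Endpoint,
	}
	// Direct the user to this URL to begin the authorization flow.
	fmt.Println(conf.AuthCodeURL("state"))
}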

View File

@@ -2,36 +2,82 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build appengine,!appenginevm
package google package google
import ( import (
"sort"
"strings"
"sync"
"time" "time"
"appengine" "golang.org/x/net/context"
"golang.org/x/oauth2" "golang.org/x/oauth2"
) )
// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
// AppEngineTokenSource returns a token source that fetches tokens // AppEngineTokenSource returns a token source that fetches tokens
// issued to the current App Engine application's service account. // issued to the current App Engine application's service account.
// If you are implementing a 3-legged OAuth 2.0 flow on App Engine // If you are implementing a 3-legged OAuth 2.0 flow on App Engine
// that involves user accounts, see oauth2.Config instead. // that involves user accounts, see oauth2.Config instead.
// //
// You are required to provide a valid appengine.Context as context. // The provided context must have come from appengine.NewContext.
func AppEngineTokenSource(ctx appengine.Context, scope ...string) oauth2.TokenSource { func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
if appengineTokenFunc == nil {
panic("google: AppEngineTokenSource can only be used on App Engine.")
}
scopes := append([]string{}, scope...)
sort.Strings(scopes)
return &appEngineTokenSource{ return &appEngineTokenSource{
ctx: ctx, ctx: ctx,
scopes: scope, scopes: scopes,
fetcherFunc: aeFetcherFunc, key: strings.Join(scopes, " "),
} }
} }
var aeFetcherFunc = func(ctx oauth2.Context, scope ...string) (string, time.Time, error) { // aeTokens helps the fetched tokens to be reused until their expiration.
c, ok := ctx.(appengine.Context) var (
aeTokensMu sync.Mutex
aeTokens = make(map[string]*tokenLock) // key is space-separated scopes
)
type tokenLock struct {
mu sync.Mutex // guards t; held while fetching or updating t
t *oauth2.Token
}
type appEngineTokenSource struct {
ctx context.Context
scopes []string
key string // to aeTokens map; space-separated scopes
}
func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
if appengineTokenFunc == nil {
panic("google: AppEngineTokenSource can only be used on App Engine.")
}
aeTokensMu.Lock()
tok, ok := aeTokens[ts.key]
if !ok { if !ok {
return "", time.Time{}, errInvalidContext tok = &tokenLock{}
aeTokens[ts.key] = tok
} }
return appengine.AccessToken(c, scope...) aeTokensMu.Unlock()
tok.mu.Lock()
defer tok.mu.Unlock()
if tok.t.Valid() {
return tok.t, nil
}
access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
if err != nil {
return nil, err
}
tok.t = &oauth2.Token{
AccessToken: access,
Expiry: exp,
}
return tok.t, nil
} }
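
With this rewrite the token source takes a context.Context produced by the new appengine package rather than a classic appengine.Context. A sketch of a handler wiring it into an http.Client, mirroring the pattern from the oauth2 README; the scope and request URL are placeholders:

package app

import (
	"net/http"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	newappengine "google.golang.org/appengine"
	newurlfetch "google.golang.org/appengine/urlfetch"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// NewContext from the new appengine package returns a context.Context,
	// which is what AppEngineTokenSource now expects.
	ctx := newappengine.NewContext(r)
	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/devstorage.read_only"),
			Base:   &newurlfetch.Transport{Context: ctx},
		},
	}
	client.Get("https://www.googleapis.com/...")
}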

View File

@@ -0,0 +1,13 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine appenginevm
package google
import "google.golang.org/appengine"
func init() {
appengineTokenFunc = appengine.AccessToken
}

View File

@@ -1,36 +0,0 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appenginevm !appengine
package google
import (
"time"
"golang.org/x/oauth2"
"google.golang.org/appengine"
)
// AppEngineTokenSource returns a token source that fetches tokens
// issued to the current App Engine application's service account.
// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
// that involves user accounts, see oauth2.Config instead.
//
// You are required to provide a valid appengine.Context as context.
func AppEngineTokenSource(ctx appengine.Context, scope ...string) oauth2.TokenSource {
return &appEngineTokenSource{
ctx: ctx,
scopes: scope,
fetcherFunc: aeVMFetcherFunc,
}
}
var aeVMFetcherFunc = func(ctx oauth2.Context, scope ...string) (string, time.Time, error) {
c, ok := ctx.(appengine.Context)
if !ok {
return "", time.Time{}, errInvalidContext
}
return appengine.AccessToken(c, scope...)
}

View File

@@ -0,0 +1,154 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"runtime"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jwt"
"google.golang.org/cloud/compute/metadata"
)
// DefaultClient returns an HTTP Client that uses the
// DefaultTokenSource to obtain authentication credentials.
//
// This client should be used when developing services
// that run on Google App Engine or Google Compute Engine
// and use "Application Default Credentials."
//
// For more details, see:
// https://developers.google.com/accounts/docs/application-default-credentials
//
func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
ts, err := DefaultTokenSource(ctx, scope...)
if err != nil {
return nil, err
}
return oauth2.NewClient(ctx, ts), nil
}
// DefaultTokenSource is a token source that uses
// "Application Default Credentials".
//
// It looks for credentials in the following places,
// preferring the first location found:
//
// 1. A JSON file whose path is specified by the
// GOOGLE_APPLICATION_CREDENTIALS environment variable.
// 2. A JSON file in a location known to the gcloud command-line tool.
// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
// On other systems, $HOME/.config/gcloud/application_default_credentials.json.
// 3. On Google App Engine it uses the appengine.AccessToken function.
// 4. On Google Compute Engine, it fetches credentials from the metadata server.
// (In this final case any provided scopes are ignored.)
//
// For more details, see:
// https://developers.google.com/accounts/docs/application-default-credentials
//
func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
// First, try the environment variable.
const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
if filename := os.Getenv(envVar); filename != "" {
ts, err := tokenSourceFromFile(ctx, filename, scope)
if err != nil {
return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
}
return ts, nil
}
// Second, try a well-known file.
filename := wellKnownFile()
_, err := os.Stat(filename)
if err == nil {
ts, err2 := tokenSourceFromFile(ctx, filename, scope)
if err2 == nil {
return ts, nil
}
err = err2
} else if os.IsNotExist(err) {
err = nil // ignore this error
}
if err != nil {
return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
}
// Third, if we're on Google App Engine use those credentials.
if appengineTokenFunc != nil {
return AppEngineTokenSource(ctx, scope...), nil
}
// Fourth, if we're on Google Compute Engine use the metadata server.
if metadata.OnGCE() {
return ComputeTokenSource(""), nil
}
// None are found; return helpful error.
const url = "https://developers.google.com/accounts/docs/application-default-credentials"
return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
}
func wellKnownFile() string {
const f = "application_default_credentials.json"
if runtime.GOOS == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
}
return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
}
func tokenSourceFromFile(ctx context.Context, filename string, scopes []string) (oauth2.TokenSource, error) {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
var d struct {
// Common fields
Type string
ClientID string `json:"client_id"`
// User Credential fields
ClientSecret string `json:"client_secret"`
RefreshToken string `json:"refresh_token"`
// Service Account fields
ClientEmail string `json:"client_email"`
PrivateKeyID string `json:"private_key_id"`
PrivateKey string `json:"private_key"`
}
if err := json.Unmarshal(b, &d); err != nil {
return nil, err
}
switch d.Type {
case "authorized_user":
cfg := &oauth2.Config{
ClientID: d.ClientID,
ClientSecret: d.ClientSecret,
Scopes: append([]string{}, scopes...), // copy
Endpoint: Endpoint,
}
tok := &oauth2.Token{RefreshToken: d.RefreshToken}
return cfg.TokenSource(ctx, tok), nil
case "service_account":
cfg := &jwt.Config{
Email: d.ClientEmail,
PrivateKey: []byte(d.PrivateKey),
Scopes: append([]string{}, scopes...), // copy
TokenURL: JWTTokenURL,
}
return cfg.TokenSource(ctx), nil
case "":
return nil, errors.New("missing 'type' field in credentials")
default:
return nil, fmt.Errorf("unknown credential type: %q", d.Type)
}
}
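As an editorial aside (not part of the file above), here is a minimal sketch of the first lookup path DefaultTokenSource documents: pointing GOOGLE_APPLICATION_CREDENTIALS at a service-account JSON key and requesting a token. The key path and storage scope are placeholders.
package main
import (
    "log"
    "os"
    "golang.org/x/net/context"
    "golang.org/x/oauth2/google"
)
func main() {
    // Lookup path 1: an explicit JSON key file named by the environment variable.
    os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", "/path/to/key.json")
    ts, err := google.DefaultTokenSource(context.Background(),
        "https://www.googleapis.com/auth/devstorage.read_only")
    if err != nil {
        log.Fatal(err)
    }
    tok, err := ts.Token()
    if err != nil {
        log.Fatal(err)
    }
    log.Println("token expires at:", tok.Expiry)
}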

View File

@@ -11,7 +11,6 @@ import (
"io/ioutil" "io/ioutil"
"log" "log"
"net/http" "net/http"
"testing"
"golang.org/x/oauth2" "golang.org/x/oauth2"
"golang.org/x/oauth2/google" "golang.org/x/oauth2/google"
@@ -20,9 +19,14 @@ import (
"google.golang.org/appengine/urlfetch" "google.golang.org/appengine/urlfetch"
) )
// Remove after Go 1.4. func ExampleDefaultClient() {
// Related to https://codereview.appspot.com/107320046 client, err := google.DefaultClient(oauth2.NoContext,
func TestA(t *testing.T) {} "https://www.googleapis.com/auth/devstorage.full_control")
if err != nil {
log.Fatal(err)
}
client.Get("...")
}
func Example_webServer() { func Example_webServer() {
// Your credentials should be obtained from the Google // Your credentials should be obtained from the Google
@@ -74,6 +78,19 @@ func ExampleJWTConfigFromJSON() {
client.Get("...") client.Get("...")
} }
func ExampleSDKConfig() {
// The credentials will be obtained from the first account that
// has been authorized with `gcloud auth login`.
conf, err := google.NewSDKConfig("")
if err != nil {
log.Fatal(err)
}
// Initiate an http.Client. The following GET request will be
// authorized and authenticated on the behalf of the SDK user.
client := conf.Client(oauth2.NoContext)
client.Get("...")
}
func Example_serviceAccount() { func Example_serviceAccount() {
// Your credentials should be obtained from the Google // Your credentials should be obtained from the Google
// Developer Console (https://console.developers.google.com). // Developer Console (https://console.developers.google.com).

View File

@@ -2,15 +2,16 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// Package google provides support for making // Package google provides support for making OAuth2 authorized and
// OAuth2 authorized and authenticated HTTP requests // authenticated HTTP requests to Google APIs.
// to Google APIs. It supports Web server, client-side, // It supports the Web server flow, client-side credentials, service accounts,
// service accounts, Google Compute Engine service accounts, // Google Compute Engine service accounts, and Google App Engine service
// and Google App Engine service accounts authorization // accounts.
// and authentications flows:
// //
// For more information, please read // For more information, please read
// https://developers.google.com/accounts/docs/OAuth2. // https://developers.google.com/accounts/docs/OAuth2
// and
// https://developers.google.com/accounts/docs/application-default-credentials.
package google package google
import ( import (
@@ -25,9 +26,6 @@ import (
"google.golang.org/cloud/compute/metadata" "google.golang.org/cloud/compute/metadata"
) )
// TODO(bradfitz,jbd): import "google.golang.org/cloud/compute/metadata" instead of
// the metaClient and metadata.google.internal stuff below.
// Endpoint is Google's OAuth 2.0 endpoint. // Endpoint is Google's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{ var Endpoint = oauth2.Endpoint{
AuthURL: "https://accounts.google.com/o/oauth2/auth", AuthURL: "https://accounts.google.com/o/oauth2/auth",
@@ -37,6 +35,50 @@ var Endpoint = oauth2.Endpoint{
// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. // JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
const JWTTokenURL = "https://accounts.google.com/o/oauth2/token" const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
// ConfigFromJSON uses a Google Developers Console client_credentials.json
// file to construct a config.
// client_credentials.json can be downloaded from https://console.developers.google.com,
// under "APIs & Auth" > "Credentials". Download the Web application credentials in the
// JSON format and provide the contents of the file as jsonKey.
func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
type cred struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
RedirectURIs []string `json:"redirect_uris"`
AuthURI string `json:"auth_uri"`
TokenURI string `json:"token_uri"`
}
var j struct {
Web *cred `json:"web"`
Installed *cred `json:"installed"`
}
if err := json.Unmarshal(jsonKey, &j); err != nil {
return nil, err
}
var c *cred
switch {
case j.Web != nil:
c = j.Web
case j.Installed != nil:
c = j.Installed
default:
return nil, fmt.Errorf("oauth2/google: no credentials found")
}
if len(c.RedirectURIs) < 1 {
return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
}
return &oauth2.Config{
ClientID: c.ClientID,
ClientSecret: c.ClientSecret,
RedirectURL: c.RedirectURIs[0],
Scopes: scope,
Endpoint: oauth2.Endpoint{
AuthURL: c.AuthURI,
TokenURL: c.TokenURI,
},
}, nil
}
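A short usage sketch (editorial, not part of the diff): reading a downloaded client_credentials.json, building the config, and producing a consent URL for the user. The file path, scope, and state value are placeholders.
package main
import (
    "fmt"
    "io/ioutil"
    "log"
    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
)
func main() {
    // client_credentials.json as downloaded from the Developers Console.
    b, err := ioutil.ReadFile("client_credentials.json")
    if err != nil {
        log.Fatal(err)
    }
    conf, err := google.ConfigFromJSON(b, "https://www.googleapis.com/auth/drive.readonly")
    if err != nil {
        log.Fatal(err)
    }
    // Send the user here; the provider redirects back with an authorization code
    // that conf.Exchange turns into a token.
    fmt.Println(conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline))
}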
// JWTConfigFromJSON uses a Google Developers service account JSON key file to read // JWTConfigFromJSON uses a Google Developers service account JSON key file to read
// the credentials that authorize and authenticate the requests. // the credentials that authorize and authenticate the requests.
// Create a service account on "Credentials" page under "APIs & Auth" for your // Create a service account on "Credentials" page under "APIs & Auth" for your

View File

@@ -0,0 +1,67 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"strings"
"testing"
)
var webJSONKey = []byte(`
{
"web": {
"auth_uri": "https://google.com/o/oauth2/auth",
"client_secret": "3Oknc4jS_wA2r9i",
"token_uri": "https://google.com/o/oauth2/token",
"client_email": "222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
"redirect_uris": ["https://www.example.com/oauth2callback"],
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/222-nprqovg5k43uum874cs9osjt2koe97g8@developer.gserviceaccount.com",
"client_id": "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"javascript_origins": ["https://www.example.com"]
}
}`)
var installedJSONKey = []byte(`{
"installed": {
"client_id": "222-installed.apps.googleusercontent.com",
"redirect_uris": ["https://www.example.com/oauth2callback"]
}
}`)
func TestConfigFromJSON(t *testing.T) {
conf, err := ConfigFromJSON(webJSONKey, "scope1", "scope2")
if err != nil {
t.Error(err)
}
if got, want := conf.ClientID, "222-nprqovg5k43uum874cs9osjt2koe97g8.apps.googleusercontent.com"; got != want {
t.Errorf("ClientID = %q; want %q", got, want)
}
if got, want := conf.ClientSecret, "3Oknc4jS_wA2r9i"; got != want {
t.Errorf("ClientSecret = %q; want %q", got, want)
}
if got, want := conf.RedirectURL, "https://www.example.com/oauth2callback"; got != want {
t.Errorf("RedictURL = %q; want %q", got, want)
}
if got, want := strings.Join(conf.Scopes, ","), "scope1,scope2"; got != want {
t.Errorf("Scopes = %q; want %q", got, want)
}
if got, want := conf.Endpoint.AuthURL, "https://google.com/o/oauth2/auth"; got != want {
t.Errorf("AuthURL = %q; want %q", got, want)
}
if got, want := conf.Endpoint.TokenURL, "https://google.com/o/oauth2/token"; got != want {
t.Errorf("TokenURL = %q; want %q", got, want)
}
}
func TestConfigFromJSON_Installed(t *testing.T) {
conf, err := ConfigFromJSON(installedJSONKey)
if err != nil {
t.Error(err)
}
if got, want := conf.ClientID, "222-installed.apps.googleusercontent.com"; got != want {
t.Errorf("ClientID = %q; want %q", got, want)
}
}

Godeps/_workspace/src/golang.org/x/oauth2/google/sdk.go
View File

@@ -0,0 +1,168 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"time"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/internal"
)
type sdkCredentials struct {
Data []struct {
Credential struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
TokenExpiry *time.Time `json:"token_expiry"`
} `json:"credential"`
Key struct {
Account string `json:"account"`
Scope string `json:"scope"`
} `json:"key"`
}
}
// An SDKConfig provides access to tokens from an account already
// authorized via the Google Cloud SDK.
type SDKConfig struct {
conf oauth2.Config
initialToken *oauth2.Token
}
// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
// account. If account is empty, the account currently active in
// Google Cloud SDK properties is used.
// Google Cloud SDK credentials must be created by running `gcloud auth`
// before using this function.
// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
func NewSDKConfig(account string) (*SDKConfig, error) {
configPath, err := sdkConfigPath()
if err != nil {
return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
}
credentialsPath := filepath.Join(configPath, "credentials")
f, err := os.Open(credentialsPath)
if err != nil {
return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
}
defer f.Close()
var c sdkCredentials
if err := json.NewDecoder(f).Decode(&c); err != nil {
return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
}
if len(c.Data) == 0 {
return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
}
if account == "" {
propertiesPath := filepath.Join(configPath, "properties")
f, err := os.Open(propertiesPath)
if err != nil {
return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
}
defer f.Close()
ini, err := internal.ParseINI(f)
if err != nil {
return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
}
core, ok := ini["core"]
if !ok {
return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
}
active, ok := core["account"]
if !ok {
return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
}
account = active
}
for _, d := range c.Data {
if account == "" || d.Key.Account == account {
if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
}
var expiry time.Time
if d.Credential.TokenExpiry != nil {
expiry = *d.Credential.TokenExpiry
}
return &SDKConfig{
conf: oauth2.Config{
ClientID: d.Credential.ClientID,
ClientSecret: d.Credential.ClientSecret,
Scopes: strings.Split(d.Key.Scope, " "),
Endpoint: Endpoint,
RedirectURL: "oob",
},
initialToken: &oauth2.Token{
AccessToken: d.Credential.AccessToken,
RefreshToken: d.Credential.RefreshToken,
Expiry: expiry,
},
}, nil
}
}
return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
}
// Client returns an HTTP client using Google Cloud SDK credentials to
// authorize requests. The token will auto-refresh as necessary. The
// underlying http.RoundTripper will be obtained using the provided
// context. The returned client and its Transport should not be
// modified.
func (c *SDKConfig) Client(ctx context.Context) *http.Client {
return &http.Client{
Transport: &oauth2.Transport{
Source: c.TokenSource(ctx),
},
}
}
// TokenSource returns an oauth2.TokenSource that retrieves tokens from
// Google Cloud SDK credentials using the provided context.
// It will return the current access token stored in the credentials,
// and refresh it when it expires, but it won't update the credentials
// with the new access token.
func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
return c.conf.TokenSource(ctx, c.initialToken)
}
// Scopes are the OAuth 2.0 scopes the current account is authorized for.
func (c *SDKConfig) Scopes() []string {
return c.conf.Scopes
}
// sdkConfigPath tries to guess where the gcloud config is located.
// It can be overridden during tests.
var sdkConfigPath = func() (string, error) {
if runtime.GOOS == "windows" {
return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
}
homeDir := guessUnixHomeDir()
if homeDir == "" {
return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
}
return filepath.Join(homeDir, ".config", "gcloud"), nil
}
func guessUnixHomeDir() string {
usr, err := user.Current()
if err == nil {
return usr.HomeDir
}
return os.Getenv("HOME")
}
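To complement the ExampleSDKConfig shown earlier in this change, a small editorial sketch of selecting an explicit gcloud account and inspecting what it was authorized for; the account string is a placeholder.
package main
import (
    "fmt"
    "log"
    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
)
func main() {
    // Use a specific account instead of the gcloud default.
    conf, err := google.NewSDKConfig("me@example.com")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("authorized scopes:", conf.Scopes())
    // The token source refreshes the stored token when it expires, but it
    // never writes the refreshed token back to the gcloud credentials file.
    if _, err := conf.TokenSource(oauth2.NoContext).Token(); err != nil {
        log.Fatal(err)
    }
}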

View File

@@ -0,0 +1,46 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import "testing"
func TestSDKConfig(t *testing.T) {
sdkConfigPath = func() (string, error) {
return "testdata/gcloud", nil
}
tests := []struct {
account string
accessToken string
err bool
}{
{"", "bar_access_token", false},
{"foo@example.com", "foo_access_token", false},
{"bar@example.com", "bar_access_token", false},
{"baz@serviceaccount.example.com", "", true},
}
for _, tt := range tests {
c, err := NewSDKConfig(tt.account)
if got, want := err != nil, tt.err; got != want {
if !tt.err {
t.Errorf("expected no error, got error: %v", tt.err, err)
} else {
t.Errorf("expected error, got none")
}
continue
}
if err != nil {
continue
}
tok := c.initialToken
if tok == nil {
t.Errorf("expected token %q, got: nil", tt.accessToken)
continue
}
if tok.AccessToken != tt.accessToken {
t.Errorf("expected token %q, got: %q", tt.accessToken, tok.AccessToken)
}
}
}

View File

@@ -1,71 +0,0 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"errors"
"sort"
"strings"
"sync"
"time"
"golang.org/x/oauth2"
)
var (
aeTokensMu sync.Mutex // guards aeTokens and appEngineTokenSource.key
// aeTokens helps the fetched tokens to be reused until their expiration.
aeTokens = make(map[string]*tokenLock) // key is '\0'-separated scopes
)
var errInvalidContext = errors.New("oauth2: a valid appengine.Context is required")
type tokenLock struct {
mu sync.Mutex // guards t; held while updating t
t *oauth2.Token
}
type appEngineTokenSource struct {
ctx oauth2.Context
// fetcherFunc makes the actual RPC to fetch a new access
// token with an expiry time. Provider of this function is
// responsible to assert that the given context is valid.
fetcherFunc func(ctx oauth2.Context, scope ...string) (accessToken string, expiry time.Time, err error)
// scopes and key are guarded by the package-level mutex aeTokensMu
scopes []string
key string
}
func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
aeTokensMu.Lock()
if ts.key == "" {
sort.Sort(sort.StringSlice(ts.scopes))
ts.key = strings.Join(ts.scopes, string(0))
}
tok, ok := aeTokens[ts.key]
if !ok {
tok = &tokenLock{}
aeTokens[ts.key] = tok
}
aeTokensMu.Unlock()
tok.mu.Lock()
defer tok.mu.Unlock()
if tok.t.Valid() {
return tok.t, nil
}
access, exp, err := ts.fetcherFunc(ts.ctx, ts.scopes...)
if err != nil {
return nil, err
}
tok.t = &oauth2.Token{
AccessToken: access,
Expiry: exp,
}
return tok.t, nil
}

View File

@@ -0,0 +1,122 @@
{
"data": [
{
"credential": {
"_class": "OAuth2Credentials",
"_module": "oauth2client.client",
"access_token": "foo_access_token",
"client_id": "foo_client_id",
"client_secret": "foo_client_secret",
"id_token": {
"at_hash": "foo_at_hash",
"aud": "foo_aud",
"azp": "foo_azp",
"cid": "foo_cid",
"email": "foo@example.com",
"email_verified": true,
"exp": 1420573614,
"iat": 1420569714,
"id": "1337",
"iss": "accounts.google.com",
"sub": "1337",
"token_hash": "foo_token_hash",
"verified_email": true
},
"invalid": false,
"refresh_token": "foo_refresh_token",
"revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
"token_expiry": "2015-01-09T00:51:51Z",
"token_response": {
"access_token": "foo_access_token",
"expires_in": 3600,
"id_token": "foo_id_token",
"token_type": "Bearer"
},
"token_uri": "https://accounts.google.com/o/oauth2/token",
"user_agent": "Cloud SDK Command Line Tool"
},
"key": {
"account": "foo@example.com",
"clientId": "foo_client_id",
"scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
"type": "google-cloud-sdk"
}
},
{
"credential": {
"_class": "OAuth2Credentials",
"_module": "oauth2client.client",
"access_token": "bar_access_token",
"client_id": "bar_client_id",
"client_secret": "bar_client_secret",
"id_token": {
"at_hash": "bar_at_hash",
"aud": "bar_aud",
"azp": "bar_azp",
"cid": "bar_cid",
"email": "bar@example.com",
"email_verified": true,
"exp": 1420573614,
"iat": 1420569714,
"id": "1337",
"iss": "accounts.google.com",
"sub": "1337",
"token_hash": "bar_token_hash",
"verified_email": true
},
"invalid": false,
"refresh_token": "bar_refresh_token",
"revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
"token_expiry": "2015-01-09T00:51:51Z",
"token_response": {
"access_token": "bar_access_token",
"expires_in": 3600,
"id_token": "bar_id_token",
"token_type": "Bearer"
},
"token_uri": "https://accounts.google.com/o/oauth2/token",
"user_agent": "Cloud SDK Command Line Tool"
},
"key": {
"account": "bar@example.com",
"clientId": "bar_client_id",
"scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
"type": "google-cloud-sdk"
}
},
{
"credential": {
"_class": "ServiceAccountCredentials",
"_kwargs": {},
"_module": "oauth2client.client",
"_private_key_id": "00000000000000000000000000000000",
"_private_key_pkcs8_text": "-----BEGIN RSA PRIVATE KEY-----\nMIICWwIBAAKBgQCt3fpiynPSaUhWSIKMGV331zudwJ6GkGmvQtwsoK2S2LbvnSwU\nNxgj4fp08kIDR5p26wF4+t/HrKydMwzftXBfZ9UmLVJgRdSswmS5SmChCrfDS5OE\nvFFcN5+6w1w8/Nu657PF/dse8T0bV95YrqyoR0Osy8WHrUOMSIIbC3hRuwIDAQAB\nAoGAJrGE/KFjn0sQ7yrZ6sXmdLawrM3mObo/2uI9T60+k7SpGbBX0/Pi6nFrJMWZ\nTVONG7P3Mu5aCPzzuVRYJB0j8aldSfzABTY3HKoWCczqw1OztJiEseXGiYz4QOyr\nYU3qDyEpdhS6q6wcoLKGH+hqRmz6pcSEsc8XzOOu7s4xW8kCQQDkc75HjhbarCnd\nJJGMe3U76+6UGmdK67ltZj6k6xoB5WbTNChY9TAyI2JC+ppYV89zv3ssj4L+02u3\nHIHFGxsHAkEAwtU1qYb1tScpchPobnYUFiVKJ7KA8EZaHVaJJODW/cghTCV7BxcJ\nbgVvlmk4lFKn3lPKAgWw7PdQsBTVBUcCrQJATPwoIirizrv3u5soJUQxZIkENAqV\nxmybZx9uetrzP7JTrVbFRf0SScMcyN90hdLJiQL8+i4+gaszgFht7sNMnwJAAbfj\nq0UXcauQwALQ7/h2oONfTg5S+MuGC/AxcXPSMZbMRGGoPh3D5YaCv27aIuS/ukQ+\n6dmm/9AGlCb64fsIWQJAPaokbjIifo+LwC5gyK73Mc4t8nAOSZDenzd/2f6TCq76\nS1dcnKiPxaED7W/y6LJiuBT2rbZiQ2L93NJpFZD/UA==\n-----END RSA PRIVATE KEY-----\n",
"_revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
"_scopes": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
"_service_account_email": "baz@serviceaccount.example.com",
"_service_account_id": "baz.serviceaccount.example.com",
"_token_uri": "https://accounts.google.com/o/oauth2/token",
"_user_agent": "Cloud SDK Command Line Tool",
"access_token": null,
"assertion_type": null,
"client_id": null,
"client_secret": null,
"id_token": null,
"invalid": false,
"refresh_token": null,
"revoke_uri": "https://accounts.google.com/o/oauth2/revoke",
"service_account_name": "baz@serviceaccount.example.com",
"token_expiry": null,
"token_response": null,
"user_agent": "Cloud SDK Command Line Tool"
},
"key": {
"account": "baz@serviceaccount.example.com",
"clientId": "baz_client_id",
"scope": "https://www.googleapis.com/auth/appengine.admin https://www.googleapis.com/auth/bigquery https://www.googleapis.com/auth/compute https://www.googleapis.com/auth/devstorage.full_control https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/ndev.cloudman https://www.googleapis.com/auth/cloud-platform https://www.googleapis.com/auth/sqlservice.admin https://www.googleapis.com/auth/prediction https://www.googleapis.com/auth/projecthosting",
"type": "google-cloud-sdk"
}
}
],
"file_version": 1
}

View File

@@ -0,0 +1,2 @@
[core]
account = bar@example.com

View File

@@ -6,10 +6,14 @@
package internal package internal
import ( import (
"bufio"
"crypto/rsa" "crypto/rsa"
"crypto/x509" "crypto/x509"
"encoding/pem" "encoding/pem"
"errors" "errors"
"fmt"
"io"
"strings"
) )
// ParseKey converts the binary contents of a private key file // ParseKey converts the binary contents of a private key file
@@ -26,12 +30,47 @@ func ParseKey(key []byte) (*rsa.PrivateKey, error) {
if err != nil { if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(key) parsedKey, err = x509.ParsePKCS1PrivateKey(key)
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
} }
} }
parsed, ok := parsedKey.(*rsa.PrivateKey) parsed, ok := parsedKey.(*rsa.PrivateKey)
if !ok { if !ok {
return nil, errors.New("oauth2: private key is invalid") return nil, errors.New("private key is invalid")
} }
return parsed, nil return parsed, nil
} }
func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
result := map[string]map[string]string{
"": map[string]string{}, // root section
}
scanner := bufio.NewScanner(ini)
currentSection := ""
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if strings.HasPrefix(line, ";") {
// comment.
continue
}
if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
currentSection = strings.TrimSpace(line[1 : len(line)-1])
result[currentSection] = map[string]string{}
continue
}
parts := strings.SplitN(line, "=", 2)
if len(parts) == 2 && parts[0] != "" {
result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("error scanning ini: %v", err)
}
return result, nil
}
func CondVal(v string) []string {
if v == "" {
return nil
}
return []string{v}
}
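A tiny editorial sketch of ParseINI's output shape on a gcloud-style properties snippet; the helper name and values are hypothetical and not part of the package.
package internal
import (
    "fmt"
    "strings"
)
// exampleParseINI shows that section headers become map keys and that
// keys and values are trimmed of surrounding whitespace.
func exampleParseINI() {
    props := "[core]\naccount = bar@example.com\n"
    ini, err := ParseINI(strings.NewReader(props))
    if err != nil {
        fmt.Println("parse error:", err)
        return
    }
    fmt.Println(ini["core"]["account"]) // bar@example.com
}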

View File

@@ -0,0 +1,62 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package internal contains support packages for oauth2 package.
package internal
import (
"reflect"
"strings"
"testing"
)
func TestParseINI(t *testing.T) {
tests := []struct {
ini string
want map[string]map[string]string
}{
{
`root = toor
[foo]
bar = hop
ini = nin
`,
map[string]map[string]string{
"": map[string]string{"root": "toor"},
"foo": map[string]string{"bar": "hop", "ini": "nin"},
},
},
{
`[empty]
[section]
empty=
`,
map[string]map[string]string{
"": map[string]string{},
"empty": map[string]string{},
"section": map[string]string{"empty": ""},
},
},
{
`ignore
[invalid
=stuff
;comment=true
`,
map[string]map[string]string{
"": map[string]string{},
},
},
}
for _, tt := range tests {
result, err := ParseINI(strings.NewReader(tt.ini))
if err != nil {
t.Errorf("ParseINI(%q) error %v, want: no error", tt.ini, err)
continue
}
if !reflect.DeepEqual(result, tt.want) {
t.Errorf("ParseINI(%q) = %#v, want: %#v", tt.ini, result, tt.want)
}
}
}

View File

@@ -0,0 +1,213 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package internal contains support packages for oauth2 package.
package internal
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"golang.org/x/net/context"
)
// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
// This type is a mirror of oauth2.Token and exists to break
// an otherwise-circular dependency. Other internal packages
// should convert this Token into an oauth2.Token before use.
type Token struct {
// AccessToken is the token that authorizes and authenticates
// the requests.
AccessToken string
// TokenType is the type of token.
// The Type method returns either this or "Bearer", the default.
TokenType string
// RefreshToken is a token that's used by the application
// (as opposed to the user) to refresh the access token
// if it expires.
RefreshToken string
// Expiry is the optional expiration time of the access token.
//
// If zero, TokenSource implementations will reuse the same
// token forever and RefreshToken or equivalent
// mechanisms for that TokenSource will not be used.
Expiry time.Time
// Raw optionally contains extra metadata from the server
// when updating a token.
Raw interface{}
}
// tokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type tokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
Expires expirationTime `json:"expires"` // broken Facebook spelling of expires_in
}
func (e *tokenJSON) expiry() (t time.Time) {
if v := e.ExpiresIn; v != 0 {
return time.Now().Add(time.Duration(v) * time.Second)
}
if v := e.Expires; v != 0 {
return time.Now().Add(time.Duration(v) * time.Second)
}
return
}
type expirationTime int32
func (e *expirationTime) UnmarshalJSON(b []byte) error {
var n json.Number
err := json.Unmarshal(b, &n)
if err != nil {
return err
}
i, err := n.Int64()
if err != nil {
return err
}
*e = expirationTime(i)
return nil
}
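An editorial sketch (hypothetical helper, not part of the package) of why expires_in is decoded through json.Number: both the numeric form and the string form that some providers return unmarshal to the same expirationTime.
package internal
import (
    "encoding/json"
    "fmt"
)
// exampleExpiresIn decodes a numeric and a string "expires_in" and prints
// the same value for both.
func exampleExpiresIn() {
    for _, body := range []string{
        `{"access_token":"t","expires_in":3600}`,
        `{"access_token":"t","expires_in":"3600"}`, // string form (e.g. PayPal)
    } {
        var tj tokenJSON
        if err := json.Unmarshal([]byte(body), &tj); err != nil {
            fmt.Println("unmarshal error:", err)
            continue
        }
        fmt.Println(int32(tj.ExpiresIn)) // 3600 in both cases
    }
}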
var brokenAuthHeaderProviders = []string{
"https://accounts.google.com/",
"https://www.googleapis.com/",
"https://github.com/",
"https://api.instagram.com/",
"https://www.douban.com/",
"https://api.dropbox.com/",
"https://api.soundcloud.com/",
"https://www.linkedin.com/",
"https://api.twitch.tv/",
"https://oauth.vk.com/",
"https://api.odnoklassniki.ru/",
"https://connect.stripe.com/",
"https://api.pushbullet.com/",
"https://oauth.sandbox.trainingpeaks.com/",
"https://oauth.trainingpeaks.com/",
"https://www.strava.com/oauth/",
"https://app.box.com/",
"https://test-sandbox.auth.corp.google.com",
"https://user.gini.net/",
}
// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
// implements the OAuth2 spec correctly
// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
// In summary:
// - Reddit only accepts client secret in the Authorization header
// - Dropbox accepts either it in URL param or Auth header, but not both.
// - Google only accepts URL param (not spec compliant?), not Auth header
// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
func providerAuthHeaderWorks(tokenURL string) bool {
for _, s := range brokenAuthHeaderProviders {
if strings.HasPrefix(tokenURL, s) {
// Some sites fail to implement the OAuth2 spec fully.
return false
}
}
// Assume the provider implements the spec properly
// otherwise. We can add more exceptions as they're
// discovered. We will _not_ be adding configurable hooks
// to this package to let users select server bugs.
return true
}
func RetrieveToken(ctx context.Context, ClientID, ClientSecret, TokenURL string, v url.Values) (*Token, error) {
hc, err := ContextClient(ctx)
if err != nil {
return nil, err
}
v.Set("client_id", ClientID)
bustedAuth := !providerAuthHeaderWorks(TokenURL)
if bustedAuth && ClientSecret != "" {
v.Set("client_secret", ClientSecret)
}
req, err := http.NewRequest("POST", TokenURL, strings.NewReader(v.Encode()))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
if !bustedAuth {
req.SetBasicAuth(ClientID, ClientSecret)
}
r, err := hc.Do(req)
if err != nil {
return nil, err
}
defer r.Body.Close()
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
if code := r.StatusCode; code < 200 || code > 299 {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
}
var token *Token
content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
switch content {
case "application/x-www-form-urlencoded", "text/plain":
vals, err := url.ParseQuery(string(body))
if err != nil {
return nil, err
}
token = &Token{
AccessToken: vals.Get("access_token"),
TokenType: vals.Get("token_type"),
RefreshToken: vals.Get("refresh_token"),
Raw: vals,
}
e := vals.Get("expires_in")
if e == "" {
// TODO(jbd): Facebook's OAuth2 implementation is broken and
// returns expires_in field in expires. Remove the fallback to expires,
// when Facebook fixes their implementation.
e = vals.Get("expires")
}
expires, _ := strconv.Atoi(e)
if expires != 0 {
token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
}
default:
var tj tokenJSON
if err = json.Unmarshal(body, &tj); err != nil {
return nil, err
}
token = &Token{
AccessToken: tj.AccessToken,
TokenType: tj.TokenType,
RefreshToken: tj.RefreshToken,
Expiry: tj.expiry(),
Raw: make(map[string]interface{}),
}
json.Unmarshal(body, &token.Raw) // no error checks for optional fields
}
// Don't overwrite `RefreshToken` with an empty value
// if this was a token refreshing request.
if token.RefreshToken == "" {
token.RefreshToken = v.Get("refresh_token")
}
return token, nil
}

View File

@@ -0,0 +1,28 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package internal contains support packages for oauth2 package.
package internal
import (
"fmt"
"testing"
)
func Test_providerAuthHeaderWorks(t *testing.T) {
for _, p := range brokenAuthHeaderProviders {
if providerAuthHeaderWorks(p) {
t.Errorf("URL: %s not found in list", p)
}
p := fmt.Sprintf("%ssomesuffix", p)
if providerAuthHeaderWorks(p) {
t.Errorf("URL: %s not found in list", p)
}
}
p := "https://api.not-in-the-list-example.com/"
if !providerAuthHeaderWorks(p) {
t.Errorf("URL: %s found in list", p)
}
}

View File

@@ -0,0 +1,67 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package internal contains support packages for oauth2 package.
package internal
import (
"net/http"
"golang.org/x/net/context"
)
// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
var HTTPClient ContextKey
// ContextKey is just an empty struct. It exists so HTTPClient can be
// an immutable public variable with a unique type. It's immutable
// because nobody else can create a ContextKey, being unexported.
type ContextKey struct{}
// ContextClientFunc is a func which tries to return an *http.Client
// given a Context value. If it returns an error, the search stops
// with that error. If it returns (nil, nil), the search continues
// down the list of registered funcs.
type ContextClientFunc func(context.Context) (*http.Client, error)
var contextClientFuncs []ContextClientFunc
func RegisterContextClientFunc(fn ContextClientFunc) {
contextClientFuncs = append(contextClientFuncs, fn)
}
func ContextClient(ctx context.Context) (*http.Client, error) {
for _, fn := range contextClientFuncs {
c, err := fn(ctx)
if err != nil {
return nil, err
}
if c != nil {
return c, nil
}
}
if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
return hc, nil
}
return http.DefaultClient, nil
}
func ContextTransport(ctx context.Context) http.RoundTripper {
hc, err := ContextClient(ctx)
// This is a rare error case (somebody using nil on App Engine).
if err != nil {
return ErrorTransport{err}
}
return hc.Transport
}
// ErrorTransport returns the specified error on RoundTrip.
// This RoundTripper should be used in rare error cases where
// error handling can be postponed to response handling time.
type ErrorTransport struct{ Err error }
func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
return nil, t.Err
}
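A brief editorial sketch (hypothetical helper, not part of the package) showing how a custom *http.Client attached to a context under the HTTPClient key is recovered by ContextClient; with no client in the context, http.DefaultClient would be returned instead.
package internal
import (
    "fmt"
    "net/http"
    "time"
    "golang.org/x/net/context"
)
// exampleContextClient stores a client under the HTTPClient key and reads
// it back through ContextClient.
func exampleContextClient() {
    custom := &http.Client{Timeout: 10 * time.Second}
    ctx := context.WithValue(context.Background(), HTTPClient, custom)
    hc, err := ContextClient(ctx)
    if err != nil {
        fmt.Println("context client error:", err)
        return
    }
    fmt.Println(hc == custom) // true
}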

View File

@@ -18,6 +18,7 @@ import (
"strings" "strings"
"time" "time"
"golang.org/x/net/context"
"golang.org/x/oauth2" "golang.org/x/oauth2"
"golang.org/x/oauth2/internal" "golang.org/x/oauth2/internal"
"golang.org/x/oauth2/jws" "golang.org/x/oauth2/jws"
@@ -57,7 +58,7 @@ type Config struct {
// TokenSource returns a JWT TokenSource using the configuration // TokenSource returns a JWT TokenSource using the configuration
// in c and the HTTP client from the provided context. // in c and the HTTP client from the provided context.
func (c *Config) TokenSource(ctx oauth2.Context) oauth2.TokenSource { func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
} }
@@ -66,14 +67,14 @@ func (c *Config) TokenSource(ctx oauth2.Context) oauth2.TokenSource {
// obtained from c. // obtained from c.
// //
// The returned client and its Transport should not be modified. // The returned client and its Transport should not be modified.
func (c *Config) Client(ctx oauth2.Context) *http.Client { func (c *Config) Client(ctx context.Context) *http.Client {
return oauth2.NewClient(ctx, c.TokenSource(ctx)) return oauth2.NewClient(ctx, c.TokenSource(ctx))
} }
// jwtSource is a source that always does a signed JWT request for a token. // jwtSource is a source that always does a signed JWT request for a token.
// It should typically be wrapped with a reuseTokenSource. // It should typically be wrapped with a reuseTokenSource.
type jwtSource struct { type jwtSource struct {
ctx oauth2.Context ctx context.Context
conf *Config conf *Config
} }

View File

@@ -0,0 +1,16 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package linkedin provides constants for using OAuth2 to access LinkedIn.
package linkedin
import (
"golang.org/x/oauth2"
)
// Endpoint is LinkedIn's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://www.linkedin.com/uas/oauth2/authorization",
TokenURL: "https://www.linkedin.com/uas/oauth2/accessToken",
}

View File

@@ -9,30 +9,19 @@ package oauth2
import ( import (
"bytes" "bytes"
"encoding/json"
"errors" "errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http" "net/http"
"net/url" "net/url"
"strconv"
"strings" "strings"
"sync" "sync"
"time"
"golang.org/x/net/context" "golang.org/x/net/context"
"golang.org/x/oauth2/internal"
) )
// Context can be an golang.org/x/net.Context, or an App Engine Context. // NoContext is the default context you should supply if not using
// If you don't care and aren't running on App Engine, you may use NoContext. // your own context.Context (see https://golang.org/x/net/context).
type Context interface{} var NoContext = context.TODO()
// NoContext is the default context. If you're not running this code
// on App Engine or not using golang.org/x/net.Context to provide a custom
// HTTP client, you should use NoContext.
var NoContext Context = nil
// Config describes a typical 3-legged OAuth2 flow, with both the // Config describes a typical 3-legged OAuth2 flow, with both the
// client application information and the server's endpoint URLs. // client application information and the server's endpoint URLs.
@@ -78,28 +67,34 @@ var (
// "access_type" field that gets sent in the URL returned by // "access_type" field that gets sent in the URL returned by
// AuthCodeURL. // AuthCodeURL.
// //
// Online (the default if neither is specified) is the default. // Online is the default if neither is specified. If your
// If your application needs to refresh access tokens when the // application needs to refresh access tokens when the user
// user is not present at the browser, then use offline. This // is not present at the browser, then use offline. This will
// will result in your application obtaining a refresh token // result in your application obtaining a refresh token the
// the first time your application exchanges an authorization // first time your application exchanges an authorization
// code for a user. // code for a user.
AccessTypeOnline AuthCodeOption = setParam{"access_type", "online"} AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online")
AccessTypeOffline AuthCodeOption = setParam{"access_type", "offline"} AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
// ApprovalForce forces the users to view the consent dialog // ApprovalForce forces the users to view the consent dialog
// and confirm the permissions request at the URL returned // and confirm the permissions request at the URL returned
// from AuthCodeURL, even if they've already done so. // from AuthCodeURL, even if they've already done so.
ApprovalForce AuthCodeOption = setParam{"approval_prompt", "force"} ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
) )
// An AuthCodeOption is passed to Config.AuthCodeURL.
type AuthCodeOption interface {
setValue(url.Values)
}
type setParam struct{ k, v string } type setParam struct{ k, v string }
func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
// An AuthCodeOption is passed to Config.AuthCodeURL. // SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
type AuthCodeOption interface { // to a provider's authorization endpoint.
setValue(url.Values) func SetAuthURLParam(key, value string) AuthCodeOption {
return setParam{key, value}
} }
// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
@@ -118,9 +113,9 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
v := url.Values{ v := url.Values{
"response_type": {"code"}, "response_type": {"code"},
"client_id": {c.ClientID}, "client_id": {c.ClientID},
"redirect_uri": condVal(c.RedirectURL), "redirect_uri": internal.CondVal(c.RedirectURL),
"scope": condVal(strings.Join(c.Scopes, " ")), "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
"state": condVal(state), "state": internal.CondVal(state),
} }
for _, opt := range opts { for _, opt := range opts {
opt.setValue(v) opt.setValue(v)
@@ -134,118 +129,106 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
return buf.String() return buf.String()
} }
// PasswordCredentialsToken converts a resource owner username and password
// pair into a token.
//
// Per the RFC, this grant type should only be used "when there is a high
// degree of trust between the resource owner and the client (e.g., the client
// is part of the device operating system or a highly privileged application),
// and when other authorization grant types are not available."
// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
//
// The HTTP client to use is derived from the context.
// If nil, http.DefaultClient is used.
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
return retrieveToken(ctx, c, url.Values{
"grant_type": {"password"},
"username": {username},
"password": {password},
"scope": internal.CondVal(strings.Join(c.Scopes, " ")),
})
}
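An editorial usage sketch for the resource owner password grant added above; the endpoint URL, client credentials, and user credentials are placeholders, and as the comment notes this grant should only be used with highly trusted clients.
package main
import (
    "fmt"
    "log"
    "golang.org/x/oauth2"
)
func main() {
    conf := &oauth2.Config{
        ClientID:     "CLIENT_ID",
        ClientSecret: "CLIENT_SECRET",
        Scopes:       []string{"scope1"},
        Endpoint: oauth2.Endpoint{
            TokenURL: "https://provider.example.com/token",
        },
    }
    tok, err := conf.PasswordCredentialsToken(oauth2.NoContext, "user1", "password1")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(tok.TokenType)
}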
// Exchange converts an authorization code into a token. // Exchange converts an authorization code into a token.
// //
// It is used after a resource provider redirects the user back // It is used after a resource provider redirects the user back
// to the Redirect URI (the URL obtained from AuthCodeURL). // to the Redirect URI (the URL obtained from AuthCodeURL).
// //
// The HTTP client to use is derived from the context. If nil, // The HTTP client to use is derived from the context.
// http.DefaultClient is used. See the Context type's documentation. // If a client is not provided via the context, http.DefaultClient is used.
// //
// The code will be in the *http.Request.FormValue("code"). Before // The code will be in the *http.Request.FormValue("code"). Before
// calling Exchange, be sure to validate FormValue("state"). // calling Exchange, be sure to validate FormValue("state").
func (c *Config) Exchange(ctx Context, code string) (*Token, error) { func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
return retrieveToken(ctx, c, url.Values{ return retrieveToken(ctx, c, url.Values{
"grant_type": {"authorization_code"}, "grant_type": {"authorization_code"},
"code": {code}, "code": {code},
"redirect_uri": condVal(c.RedirectURL), "redirect_uri": internal.CondVal(c.RedirectURL),
"scope": condVal(strings.Join(c.Scopes, " ")), "scope": internal.CondVal(strings.Join(c.Scopes, " ")),
}) })
} }
// contextClientFunc is a func which tries to return an *http.Client
// given a Context value. If it returns an error, the search stops
// with that error. If it returns (nil, nil), the search continues
// down the list of registered funcs.
type contextClientFunc func(Context) (*http.Client, error)
var contextClientFuncs []contextClientFunc
func registerContextClientFunc(fn contextClientFunc) {
contextClientFuncs = append(contextClientFuncs, fn)
}
func contextClient(ctx Context) (*http.Client, error) {
for _, fn := range contextClientFuncs {
c, err := fn(ctx)
if err != nil {
return nil, err
}
if c != nil {
return c, nil
}
}
if xc, ok := ctx.(context.Context); ok {
if hc, ok := xc.Value(HTTPClient).(*http.Client); ok {
return hc, nil
}
}
return http.DefaultClient, nil
}
func contextTransport(ctx Context) http.RoundTripper {
hc, err := contextClient(ctx)
if err != nil {
// This is a rare error case (somebody using nil on App Engine),
// so I'd rather not everybody do an error check on this Client
// method. They can get the error that they're doing it wrong
// later, at client.Get/PostForm time.
return errorTransport{err}
}
return hc.Transport
}
// Client returns an HTTP client using the provided token. // Client returns an HTTP client using the provided token.
// The token will auto-refresh as necessary. The underlying // The token will auto-refresh as necessary. The underlying
// HTTP transport will be obtained using the provided context. // HTTP transport will be obtained using the provided context.
// The returned client and its Transport should not be modified. // The returned client and its Transport should not be modified.
func (c *Config) Client(ctx Context, t *Token) *http.Client { func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
return NewClient(ctx, c.TokenSource(ctx, t)) return NewClient(ctx, c.TokenSource(ctx, t))
} }
// TokenSource returns a TokenSource that returns t until t expires, // TokenSource returns a TokenSource that returns t until t expires,
// automatically refreshing it as necessary using the provided context. // automatically refreshing it as necessary using the provided context.
// See the the Context documentation.
// //
// Most users will use Config.Client instead. // Most users will use Config.Client instead.
func (c *Config) TokenSource(ctx Context, t *Token) TokenSource { func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
nwn := &reuseTokenSource{t: t} tkr := &tokenRefresher{
nwn.new = tokenRefresher{
ctx: ctx, ctx: ctx,
conf: c, conf: c,
oldToken: &nwn.t,
} }
return nwn if t != nil {
tkr.refreshToken = t.RefreshToken
}
return &reuseTokenSource{
t: t,
new: tkr,
}
} }
// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" // tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
// HTTP requests to renew a token using a RefreshToken. // HTTP requests to renew a token using a RefreshToken.
type tokenRefresher struct { type tokenRefresher struct {
ctx Context // used to get HTTP requests ctx context.Context // used to get HTTP requests
conf *Config conf *Config
oldToken **Token // pointer to old *Token w/ RefreshToken refreshToken string
} }
func (tf tokenRefresher) Token() (*Token, error) { // WARNING: Token is not safe for concurrent access, as it
t := *tf.oldToken // updates the tokenRefresher's refreshToken field.
if t == nil { // Within this package, it is used by reuseTokenSource which
return nil, errors.New("oauth2: attempted use of nil Token") // synchronizes calls to this method with its own mutex.
} func (tf *tokenRefresher) Token() (*Token, error) {
if t.RefreshToken == "" { if tf.refreshToken == "" {
return nil, errors.New("oauth2: token expired and refresh token is not set") return nil, errors.New("oauth2: token expired and refresh token is not set")
} }
return retrieveToken(tf.ctx, tf.conf, url.Values{
tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
"grant_type": {"refresh_token"}, "grant_type": {"refresh_token"},
"refresh_token": {t.RefreshToken}, "refresh_token": {tf.refreshToken},
}) })
if err != nil {
return nil, err
}
if tf.refreshToken != tk.RefreshToken {
tf.refreshToken = tk.RefreshToken
}
return tk, err
} }
// reuseTokenSource is a TokenSource that holds a single token in memory // reuseTokenSource is a TokenSource that holds a single token in memory
// and validates its expiry before each call to retrieve it with // and validates its expiry before each call to retrieve it with
// Token. If it's expired, it will be auto-refreshed using the // Token. If it's expired, it will be auto-refreshed using the
// new TokenSource. // new TokenSource.
//
// The first call to TokenRefresher must be SetToken.
type reuseTokenSource struct { type reuseTokenSource struct {
new TokenSource // called when t is expired. new TokenSource // called when t is expired.
@@ -270,145 +253,25 @@ func (s *reuseTokenSource) Token() (*Token, error) {
return t, nil return t, nil
} }
func retrieveToken(ctx Context, c *Config, v url.Values) (*Token, error) { // StaticTokenSource returns a TokenSource that always returns the same token.
hc, err := contextClient(ctx) // Because the provided token t is never refreshed, StaticTokenSource is only
if err != nil { // useful for tokens that never expire.
return nil, err func StaticTokenSource(t *Token) TokenSource {
} return staticTokenSource{t}
v.Set("client_id", c.ClientID)
bustedAuth := !providerAuthHeaderWorks(c.Endpoint.TokenURL)
if bustedAuth && c.ClientSecret != "" {
v.Set("client_secret", c.ClientSecret)
}
req, err := http.NewRequest("POST", c.Endpoint.TokenURL, strings.NewReader(v.Encode()))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
if !bustedAuth && c.ClientSecret != "" {
req.SetBasicAuth(c.ClientID, c.ClientSecret)
}
r, err := hc.Do(req)
if err != nil {
return nil, err
}
defer r.Body.Close()
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
if code := r.StatusCode; code < 200 || code > 299 {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
} }
var token *Token // staticTokenSource is a TokenSource that always returns the same Token.
content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) type staticTokenSource struct {
switch content { t *Token
case "application/x-www-form-urlencoded", "text/plain":
vals, err := url.ParseQuery(string(body))
if err != nil {
return nil, err
}
token = &Token{
AccessToken: vals.Get("access_token"),
TokenType: vals.Get("token_type"),
RefreshToken: vals.Get("refresh_token"),
raw: vals,
}
e := vals.Get("expires_in")
if e == "" {
// TODO(jbd): Facebook's OAuth2 implementation is broken and
// returns expires_in field in expires. Remove the fallback to expires,
// when Facebook fixes their implementation.
e = vals.Get("expires")
}
expires, _ := strconv.Atoi(e)
if expires != 0 {
token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
}
default:
var tj tokenJSON
if err = json.Unmarshal(body, &tj); err != nil {
return nil, err
}
token = &Token{
AccessToken: tj.AccessToken,
TokenType: tj.TokenType,
RefreshToken: tj.RefreshToken,
Expiry: tj.expiry(),
raw: make(map[string]interface{}),
}
json.Unmarshal(body, &token.raw) // no error checks for optional fields
}
// Don't overwrite `RefreshToken` with an empty value
// if this was a token refreshing request.
if token.RefreshToken == "" {
token.RefreshToken = v.Get("refresh_token")
}
return token, nil
} }
// tokenJSON is the struct representing the HTTP response from OAuth2 func (s staticTokenSource) Token() (*Token, error) {
// providers returning a token in JSON form. return s.t, nil
type tokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"`
Expires int32 `json:"expires"` // broken Facebook spelling of expires_in
}
func (e *tokenJSON) expiry() (t time.Time) {
if v := e.ExpiresIn; v != 0 {
return time.Now().Add(time.Duration(v) * time.Second)
}
if v := e.Expires; v != 0 {
return time.Now().Add(time.Duration(v) * time.Second)
}
return
}
func condVal(v string) []string {
if v == "" {
return nil
}
return []string{v}
}
// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
// implements the OAuth2 spec correctly
// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
// In summary:
// - Reddit only accepts client secret in the Authorization header
// - Dropbox accepts either it in URL param or Auth header, but not both.
// - Google only accepts URL param (not spec compliant?), not Auth header
func providerAuthHeaderWorks(tokenURL string) bool {
if strings.HasPrefix(tokenURL, "https://accounts.google.com/") ||
strings.HasPrefix(tokenURL, "https://github.com/") ||
strings.HasPrefix(tokenURL, "https://api.instagram.com/") ||
strings.HasPrefix(tokenURL, "https://www.douban.com/") ||
strings.HasPrefix(tokenURL, "https://api.dropbox.com/") ||
strings.HasPrefix(tokenURL, "https://api.soundcloud.com/") ||
strings.HasPrefix(tokenURL, "https://www.linkedin.com/") {
// Some sites fail to implement the OAuth2 spec fully.
return false
}
// Assume the provider implements the spec properly
// otherwise. We can add more exceptions as they're
// discovered. We will _not_ be adding configurable hooks
// to this package to let users select server bugs.
return true
} }
// HTTPClient is the context key to use with golang.org/x/net/context's // HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context. // WithValue function to associate an *http.Client value with a context.
var HTTPClient contextKey var HTTPClient internal.ContextKey
// contextKey is just an empty struct. It exists so HTTPClient can be
// an immutable public variable with a unique type. It's immutable
// because nobody else can create a contextKey, being unexported.
type contextKey struct{}
// NewClient creates an *http.Client from a Context and TokenSource. // NewClient creates an *http.Client from a Context and TokenSource.
// The returned client is not valid beyond the lifetime of the context. // The returned client is not valid beyond the lifetime of the context.
@@ -416,17 +279,17 @@ type contextKey struct{}
// As a special case, if src is nil, a non-OAuth2 client is returned // As a special case, if src is nil, a non-OAuth2 client is returned
// using the provided context. This exists to support related OAuth2 // using the provided context. This exists to support related OAuth2
// packages. // packages.
func NewClient(ctx Context, src TokenSource) *http.Client { func NewClient(ctx context.Context, src TokenSource) *http.Client {
if src == nil { if src == nil {
c, err := contextClient(ctx) c, err := internal.ContextClient(ctx)
if err != nil { if err != nil {
return &http.Client{Transport: errorTransport{err}} return &http.Client{Transport: internal.ErrorTransport{err}}
} }
return c return c
} }
return &http.Client{ return &http.Client{
Transport: &Transport{ Transport: &Transport{
Base: contextTransport(ctx), Base: internal.ContextTransport(ctx),
Source: ReuseTokenSource(nil, src), Source: ReuseTokenSource(nil, src),
}, },
} }

View File

@@ -5,11 +5,16 @@
package oauth2 package oauth2
import ( import (
"encoding/json"
"errors" "errors"
"fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/http/httptest" "net/http/httptest"
"reflect"
"strconv"
"testing" "testing"
"time"
"golang.org/x/net/context" "golang.org/x/net/context"
) )
@@ -56,6 +61,15 @@ func TestAuthCodeURL(t *testing.T) {
} }
} }
func TestAuthCodeURL_CustomParam(t *testing.T) {
conf := newConf("server")
param := SetAuthURLParam("foo", "bar")
url := conf.AuthCodeURL("baz", param)
if url != "server/auth?client_id=CLIENT_ID&foo=bar&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=baz" {
t.Errorf("Auth code URL doesn't match the expected, found: %v", url)
}
}
func TestAuthCodeURL_Optional(t *testing.T) { func TestAuthCodeURL_Optional(t *testing.T) {
conf := &Config{ conf := &Config{
ClientID: "CLIENT_ID", ClientID: "CLIENT_ID",
@@ -158,6 +172,60 @@ func TestExchangeRequest_JSONResponse(t *testing.T) {
} }
} }
const day = 24 * time.Hour
func TestExchangeRequest_JSONResponse_Expiry(t *testing.T) {
seconds := int32(day.Seconds())
jsonNumberType := reflect.TypeOf(json.Number("0"))
for _, c := range []struct {
expires string
expect error
}{
{fmt.Sprintf(`"expires_in": %d`, seconds), nil},
{fmt.Sprintf(`"expires_in": "%d"`, seconds), nil}, // PayPal case
{fmt.Sprintf(`"expires": %d`, seconds), nil}, // Facebook case
{`"expires": false`, &json.UnmarshalTypeError{Value: "bool", Type: jsonNumberType}}, // wrong type
{`"expires": {}`, &json.UnmarshalTypeError{Value: "object", Type: jsonNumberType}}, // wrong type
{`"expires": "zzz"`, &strconv.NumError{Func: "ParseInt", Num: "zzz", Err: strconv.ErrSyntax}}, // wrong value
} {
testExchangeRequest_JSONResponse_expiry(t, c.expires, c.expect)
}
}
func testExchangeRequest_JSONResponse_expiry(t *testing.T, exp string, expect error) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(fmt.Sprintf(`{"access_token": "90d", "scope": "user", "token_type": "bearer", %s}`, exp)))
}))
defer ts.Close()
conf := newConf(ts.URL)
t1 := time.Now().Add(day)
tok, err := conf.Exchange(NoContext, "exchange-code")
t2 := time.Now().Add(day)
// Do a fmt.Sprint comparison so either side can be
// nil. fmt.Sprint just stringifies them to "<nil>", and no
// non-nil expected error ever stringifies as "<nil>", so this
// isn't terribly disgusting. We do this because Go 1.4 and
// Go 1.5 return a different deep value for
// json.UnmarshalTypeError. In Go 1.5, the
// json.UnmarshalTypeError contains a new field with a new
// non-zero value. Rather than ignore it here with reflect or
// add new files and +build tags, just look at the strings.
if fmt.Sprint(err) != fmt.Sprint(expect) {
t.Errorf("Error = %v; want %v", err, expect)
}
if err != nil {
return
}
if !tok.Valid() {
t.Fatalf("Token invalid. Got: %#v", tok)
}
expiry := tok.Expiry
if expiry.Before(t1) || expiry.After(t2) {
t.Errorf("Unexpected value for Expiry: %v (shold be between %v and %v)", expiry, t1, t2)
}
}
func TestExchangeRequest_BadResponse(t *testing.T) { func TestExchangeRequest_BadResponse(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
@@ -210,6 +278,53 @@ func TestExchangeRequest_NonBasicAuth(t *testing.T) {
conf.Exchange(ctx, "code") conf.Exchange(ctx, "code")
} }
func TestPasswordCredentialsTokenRequest(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
expected := "/token"
if r.URL.String() != expected {
t.Errorf("URL = %q; want %q", r.URL, expected)
}
headerAuth := r.Header.Get("Authorization")
expected = "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ="
if headerAuth != expected {
t.Errorf("Authorization header = %q; want %q", headerAuth, expected)
}
headerContentType := r.Header.Get("Content-Type")
expected = "application/x-www-form-urlencoded"
if headerContentType != expected {
t.Errorf("Content-Type header = %q; want %q", headerContentType, expected)
}
body, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Errorf("Failed reading request body: %s.", err)
}
expected = "client_id=CLIENT_ID&grant_type=password&password=password1&scope=scope1+scope2&username=user1"
if string(body) != expected {
t.Errorf("res.Body = %q; want %q", string(body), expected)
}
w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
}))
defer ts.Close()
conf := newConf(ts.URL)
tok, err := conf.PasswordCredentialsToken(NoContext, "user1", "password1")
if err != nil {
t.Error(err)
}
if !tok.Valid() {
t.Fatalf("Token invalid. Got: %#v", tok)
}
expected := "90d64460d14870c08c81352a05dedd3465940a7c"
if tok.AccessToken != expected {
t.Errorf("AccessToken = %q; want %q", tok.AccessToken, expected)
}
expected = "bearer"
if tok.TokenType != expected {
t.Errorf("TokenType = %q; want %q", tok.TokenType, expected)
}
}
func TestTokenRefreshRequest(t *testing.T) { func TestTokenRefreshRequest(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.String() == "/somethingelse" { if r.URL.String() == "/somethingelse" {
@@ -258,3 +373,50 @@ func TestFetchWithNoRefreshToken(t *testing.T) {
t.Errorf("Fetch should return an error if no refresh token is set") t.Errorf("Fetch should return an error if no refresh token is set")
} }
} }
func TestRefreshToken_RefreshTokenReplacement(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"access_token":"ACCESS TOKEN", "scope": "user", "token_type": "bearer", "refresh_token": "NEW REFRESH TOKEN"}`))
return
}))
defer ts.Close()
conf := newConf(ts.URL)
tkr := tokenRefresher{
conf: conf,
ctx: NoContext,
refreshToken: "OLD REFRESH TOKEN",
}
tk, err := tkr.Token()
if err != nil {
t.Errorf("Unexpected refreshToken error returned: %v", err)
return
}
if tk.RefreshToken != tkr.refreshToken {
t.Errorf("tokenRefresher.refresh_token = %s; want %s", tkr.refreshToken, tk.RefreshToken)
}
}
func TestConfigClientWithToken(t *testing.T) {
tok := &Token{
AccessToken: "abc123",
}
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if got, want := r.Header.Get("Authorization"), fmt.Sprintf("Bearer %s", tok.AccessToken); got != want {
t.Errorf("Authorization header = %q; want %q", got, want)
}
return
}))
defer ts.Close()
conf := newConf(ts.URL)
c := conf.Client(NoContext, tok)
req, err := http.NewRequest("GET", ts.URL, nil)
if err != nil {
t.Error(err)
}
_, err = c.Do(req)
if err != nil {
t.Error(err)
}
}

View File

@@ -0,0 +1,16 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package odnoklassniki provides constants for using OAuth2 to access Odnoklassniki.
package odnoklassniki
import (
"golang.org/x/oauth2"
)
// Endpoint is Odnoklassniki's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://www.odnoklassniki.ru/oauth/authorize",
TokenURL: "https://api.odnoklassniki.ru/oauth/token.do",
}

View File

@@ -0,0 +1,22 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package paypal provides constants for using OAuth2 to access PayPal.
package paypal
import (
"golang.org/x/oauth2"
)
// Endpoint is PayPal's OAuth 2.0 endpoint in live (production) environment.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://www.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
TokenURL: "https://api.paypal.com/v1/identity/openidconnect/tokenservice",
}
// SandboxEndpoint is PayPal's OAuth 2.0 endpoint in sandbox (testing) environment.
var SandboxEndpoint = oauth2.Endpoint{
AuthURL: "https://www.sandbox.paypal.com/webapps/auth/protocol/openidconnect/v1/authorize",
TokenURL: "https://api.sandbox.paypal.com/v1/identity/openidconnect/tokenservice",
}

View File

@@ -7,9 +7,18 @@ package oauth2
import ( import (
"net/http" "net/http"
"net/url" "net/url"
"strings"
"time" "time"
"golang.org/x/net/context"
"golang.org/x/oauth2/internal"
) )
// expiryDelta determines how much earlier a token should be considered
// expired than its actual expiration time. It is used to avoid late
// expirations due to client-server time mismatches.
const expiryDelta = 10 * time.Second
// Token represents the credentials used to authorize // Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0 // the requests to access protected resources on the OAuth 2.0
// provider's backend. // provider's backend.
@@ -45,6 +54,15 @@ type Token struct {
// Type returns t.TokenType if non-empty, else "Bearer". // Type returns t.TokenType if non-empty, else "Bearer".
func (t *Token) Type() string { func (t *Token) Type() string {
if strings.EqualFold(t.TokenType, "bearer") {
return "Bearer"
}
if strings.EqualFold(t.TokenType, "mac") {
return "MAC"
}
if strings.EqualFold(t.TokenType, "basic") {
return "Basic"
}
if t.TokenType != "" { if t.TokenType != "" {
return t.TokenType return t.TokenType
} }
@@ -90,10 +108,36 @@ func (t *Token) expired() bool {
if t.Expiry.IsZero() { if t.Expiry.IsZero() {
return false return false
} }
return t.Expiry.Before(time.Now()) return t.Expiry.Add(-expiryDelta).Before(time.Now())
} }
// Valid reports whether t is non-nil, has an AccessToken, and is not expired. // Valid reports whether t is non-nil, has an AccessToken, and is not expired.
func (t *Token) Valid() bool { func (t *Token) Valid() bool {
return t != nil && t.AccessToken != "" && !t.expired() return t != nil && t.AccessToken != "" && !t.expired()
} }
// tokenFromInternal maps an *internal.Token struct into
// a *Token struct.
func tokenFromInternal(t *internal.Token) *Token {
if t == nil {
return nil
}
return &Token{
AccessToken: t.AccessToken,
TokenType: t.TokenType,
RefreshToken: t.RefreshToken,
Expiry: t.Expiry,
raw: t.Raw,
}
}
// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
// with an error.
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
if err != nil {
return nil, err
}
return tokenFromInternal(tk), nil
}

View File

@@ -4,7 +4,10 @@
package oauth2 package oauth2
import "testing" import (
"testing"
"time"
)
func TestTokenExtra(t *testing.T) { func TestTokenExtra(t *testing.T) {
type testCase struct { type testCase struct {
@@ -28,3 +31,20 @@ func TestTokenExtra(t *testing.T) {
} }
} }
} }
func TestTokenExpiry(t *testing.T) {
now := time.Now()
cases := []struct {
name string
tok *Token
want bool
}{
{name: "12 seconds", tok: &Token{Expiry: now.Add(12 * time.Second)}, want: false},
{name: "10 seconds", tok: &Token{Expiry: now.Add(expiryDelta)}, want: true},
}
for _, tc := range cases {
if got, want := tc.tok.expired(), tc.want; got != want {
t.Errorf("expired (%q) = %v; want %v", tc.name, got, want)
}
}
}

View File

@@ -130,9 +130,3 @@ func (r *onEOFReader) runFunc() {
r.fn = nil r.fn = nil
} }
} }
type errorTransport struct{ err error }
func (t errorTransport) RoundTrip(*http.Request) (*http.Response, error) {
return nil, t.err
}

View File

@@ -32,6 +32,39 @@ func TestTransportTokenSource(t *testing.T) {
client.Get(server.URL) client.Get(server.URL)
} }
// Test for case-sensitive token types, per https://github.com/golang/oauth2/issues/113
func TestTransportTokenSourceTypes(t *testing.T) {
const val = "abc"
tests := []struct {
key string
val string
want string
}{
{key: "bearer", val: val, want: "Bearer abc"},
{key: "mac", val: val, want: "MAC abc"},
{key: "basic", val: val, want: "Basic abc"},
}
for _, tc := range tests {
ts := &tokenSource{
token: &Token{
AccessToken: tc.val,
TokenType: tc.key,
},
}
tr := &Transport{
Source: ts,
}
server := newMockServer(func(w http.ResponseWriter, r *http.Request) {
if got, want := r.Header.Get("Authorization"), tc.want; got != want {
t.Errorf("Authorization header (%q) = %q; want %q", val, got, want)
}
})
defer server.Close()
client := http.Client{Transport: tr}
client.Get(server.URL)
}
}
func TestTokenValidNoAccessToken(t *testing.T) { func TestTokenValidNoAccessToken(t *testing.T) {
token := &Token{} token := &Token{}
if token.Valid() { if token.Valid() {

16
Godeps/_workspace/src/golang.org/x/oauth2/vk/vk.go generated vendored Normal file
View File

@@ -0,0 +1,16 @@
// Copyright 2015 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package vk provides constants for using OAuth2 to access VK.com.
package vk
import (
"golang.org/x/oauth2"
)
// Endpoint is VK's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://oauth.vk.com/authorize",
TokenURL: "https://oauth.vk.com/access_token",
}
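These provider packages only export endpoint constants; wiring one into an authorization-code flow is the same for each. A hedged sketch against the VK endpoint added above (client credentials and redirect URL are placeholders):

package vksketch

import (
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/vk"
)

// vkAuthURL builds a Config around vk.Endpoint and returns the URL a user
// would visit; Exchange (not shown) would later trade the code for a token.
func vkAuthURL() string {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",     // placeholder
		ClientSecret: "CLIENT_SECRET", // placeholder
		RedirectURL:  "https://example.com/callback",
		Scopes:       []string{"email"},
		Endpoint:     vk.Endpoint,
	}
	return conf.AuthCodeURL("state-token")
}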

View File

@@ -631,6 +631,9 @@ function kube::release::create_docker_images_for_server() {
echo $md5_sum > ${1}/${binary_name}.docker_tag echo $md5_sum > ${1}/${binary_name}.docker_tag
rm -rf ${docker_build_path} rm -rf ${docker_build_path}
kube::log::status "Deleting docker image ${docker_image_tag}"
"${DOCKER[@]}" rmi ${docker_image_tag}
) & ) &
done done

View File

@@ -24,19 +24,18 @@ function pop_dir {
} }
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/.. KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
if [[ -z "${1:-}" ]]; then if [[ -z "${1:-}" ]]; then
echo "Usage: ${0} <pr-number>" echo "Usage: ${0} <pr-number> [opts]"
exit 1 exit 1
fi fi
pushd . > /dev/null pushd . > /dev/null
trap 'pop_dir' INT TERM EXIT trap 'pop_dir' INT TERM EXIT
cd ${KUBE_ROOT}/contrib/release-notes kube::golang::build_binaries contrib/release-notes
# TODO: vendor these dependencies, but using godep again will be annoying... kube::golang::place_bins
GOPATH=$PWD go get github.com/google/go-github/github releasenotes=$(kube::util::find-binary "release-notes")
GOPATH=$PWD go get github.com/google/go-querystring/query "${releasenotes}" --last-release-pr=${1} ${@}
GOPATH=$PWD go build release-notes.go
./release-notes --last-release-pr=${1}

View File

@@ -213,7 +213,7 @@ func (ks *kube2sky) handleEndpointAdd(obj interface{}) {
func (ks *kube2sky) generateRecordsForPortalService(subdomain string, service *kapi.Service) error { func (ks *kube2sky) generateRecordsForPortalService(subdomain string, service *kapi.Service) error {
for i := range service.Spec.Ports { for i := range service.Spec.Ports {
b, err := json.Marshal(getSkyMsg(service.Spec.PortalIP, service.Spec.Ports[i].Port)) b, err := json.Marshal(getSkyMsg(service.Spec.ClusterIP, service.Spec.Ports[i].Port))
if err != nil { if err != nil {
return err return err
} }
@@ -229,7 +229,7 @@ func (ks *kube2sky) addDNS(subdomain string, service *kapi.Service) error {
if len(service.Spec.Ports) == 0 { if len(service.Spec.Ports) == 0 {
glog.Fatalf("unexpected service with no ports: %v", service) glog.Fatalf("unexpected service with no ports: %v", service)
} }
// if PortalIP is not set, a DNS entry should not be created // if ClusterIP is not set, a DNS entry should not be created
if !kapi.IsServiceIPSet(service) { if !kapi.IsServiceIPSet(service) {
return ks.newHeadlessService(subdomain, service) return ks.newHeadlessService(subdomain, service)
} }

View File

@@ -94,7 +94,7 @@ type hostPort struct {
func getHostPort(service *kapi.Service) *hostPort { func getHostPort(service *kapi.Service) *hostPort {
return &hostPort{ return &hostPort{
Host: service.Spec.PortalIP, Host: service.Spec.ClusterIP,
Port: service.Spec.Ports[0].Port, Port: service.Spec.Ports[0].Port,
} }
} }
@@ -134,7 +134,7 @@ func TestHeadlessService(t *testing.T) {
Namespace: testNamespace, Namespace: testNamespace,
}, },
Spec: kapi.ServiceSpec{ Spec: kapi.ServiceSpec{
PortalIP: "None", ClusterIP: "None",
Ports: []kapi.ServicePort{ Ports: []kapi.ServicePort{
{Port: 80}, {Port: 80},
}, },
@@ -187,7 +187,7 @@ func TestHeadlessServiceEndpointsUpdate(t *testing.T) {
Namespace: testNamespace, Namespace: testNamespace,
}, },
Spec: kapi.ServiceSpec{ Spec: kapi.ServiceSpec{
PortalIP: "None", ClusterIP: "None",
Ports: []kapi.ServicePort{ Ports: []kapi.ServicePort{
{Port: 80}, {Port: 80},
}, },
@@ -244,7 +244,7 @@ func TestHeadlessServiceWithDelayedEndpointsAddition(t *testing.T) {
Namespace: testNamespace, Namespace: testNamespace,
}, },
Spec: kapi.ServiceSpec{ Spec: kapi.ServiceSpec{
PortalIP: "None", ClusterIP: "None",
Ports: []kapi.ServicePort{ Ports: []kapi.ServicePort{
{Port: 80}, {Port: 80},
}, },
@@ -308,7 +308,7 @@ func TestAddSinglePortService(t *testing.T) {
Port: 80, Port: 80,
}, },
}, },
PortalIP: "1.2.3.4", ClusterIP: "1.2.3.4",
}, },
} }
k2s.newService(&service) k2s.newService(&service)
@@ -334,12 +334,12 @@ func TestUpdateSinglePortService(t *testing.T) {
Port: 80, Port: 80,
}, },
}, },
PortalIP: "1.2.3.4", ClusterIP: "1.2.3.4",
}, },
} }
k2s.newService(&service) k2s.newService(&service)
assert.Len(t, ec.writes, 2) assert.Len(t, ec.writes, 2)
service.Spec.PortalIP = "0.0.0.0" service.Spec.ClusterIP = "0.0.0.0"
k2s.newService(&service) k2s.newService(&service)
expectedValue := getHostPort(&service) expectedValue := getHostPort(&service)
assertDnsServiceEntryInEtcd(t, ec, testService, testNamespace, expectedValue) assertDnsServiceEntryInEtcd(t, ec, testService, testNamespace, expectedValue)
@@ -363,7 +363,7 @@ func TestDeleteSinglePortService(t *testing.T) {
Port: 80, Port: 80,
}, },
}, },
PortalIP: "1.2.3.4", ClusterIP: "1.2.3.4",
}, },
} }
// Add the service // Add the service

View File

@@ -41,7 +41,7 @@ MINION_TAG="${INSTANCE_PREFIX}-minion"
MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
MINION_SCOPES="" MINION_SCOPES=""
POLL_SLEEP_INTERVAL=3 POLL_SLEEP_INTERVAL=3
PORTAL_NET="10.0.0.0/16" SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
# If set to Elastic IP, master instance will be associated with this IP. # If set to Elastic IP, master instance will be associated with this IP.
# If set to auto, a new Elastic IP will be acquired # If set to auto, a new Elastic IP will be acquired
@@ -75,7 +75,7 @@ DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1 DNS_REPLICAS=1
# Admission Controllers to invoke prior to persisting objects in cluster # Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
# Optional: Enable/disable public IP assignment for minions. # Optional: Enable/disable public IP assignment for minions.
# Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes!

View File

@@ -37,7 +37,7 @@ MINION_TAG="${INSTANCE_PREFIX}-minion"
MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
MINION_SCOPES="" MINION_SCOPES=""
POLL_SLEEP_INTERVAL=3 POLL_SLEEP_INTERVAL=3
PORTAL_NET="10.0.0.0/16" SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}" MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
# If set to Elastic IP, master instance will be associated with this IP. # If set to Elastic IP, master instance will be associated with this IP.
# If set to auto, a new Elastic IP will be acquired # If set to auto, a new Elastic IP will be acquired
@@ -72,7 +72,7 @@ DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1 DNS_REPLICAS=1
# Admission Controllers to invoke prior to persisting objects in cluster # Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
# Optional: Enable/disable public IP assignment for minions. # Optional: Enable/disable public IP assignment for minions.
# Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes! # Important Note: disable only if you have setup a NAT instance for internet access and configured appropriate routes!

View File

@@ -22,7 +22,7 @@ mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")' node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")' service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")' enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")' enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")' enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'

View File

@@ -507,7 +507,7 @@ function kube-up {
echo "readonly ZONE='${ZONE}'" echo "readonly ZONE='${ZONE}'"
echo "readonly KUBE_USER='${KUBE_USER}'" echo "readonly KUBE_USER='${KUBE_USER}'"
echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD}'" echo "readonly KUBE_PASSWORD='${KUBE_PASSWORD}'"
echo "readonly PORTAL_NET='${PORTAL_NET}'" echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
echo "readonly ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING:-false}'" echo "readonly ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING:-false}'"
echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'" echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'"
echo "readonly ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'" echo "readonly ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'"

View File

@@ -35,7 +35,7 @@ MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24")) MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
MINION_SCOPES="" MINION_SCOPES=""
PORTAL_NET="10.250.0.0/16" SERVICE_CLUSTER_IP_RANGE="10.250.0.0/16" # formerly PORTAL_NET
# Optional: Install node logging # Optional: Install node logging
ENABLE_NODE_LOGGING=false ENABLE_NODE_LOGGING=false
@@ -49,4 +49,4 @@ ELASTICSEARCH_LOGGING_REPLICAS=1
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-true}" ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-true}"
# Admission Controllers to invoke prior to persisting objects in cluster # Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota

View File

@@ -22,7 +22,7 @@ mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")' instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_instance_prefix: $NODE_INSTANCE_PREFIX node_instance_prefix: $NODE_INSTANCE_PREFIX
portal_net: $PORTAL_NET service_cluster_ip_range: $SERVICE_CLUSTER_IP_RANGE
admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")' admission_control: '$(echo "$ADMISSION_CONTROL" | sed -e "s/'/''/g")'
EOF EOF

View File

@@ -322,7 +322,7 @@ function kube-up {
echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'" echo "readonly SERVER_BINARY_TAR_URL='${SERVER_BINARY_TAR_URL}'"
echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'" echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'" echo "readonly MASTER_HTPASSWD='${htpasswd}'"
echo "readonly PORTAL_NET='${PORTAL_NET}'" echo "readonly SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'" echo "readonly ADMISSION_CONTROL='${ADMISSION_CONTROL:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh" grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/common.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/create-dynamic-salt-files.sh" grep -v "^#" "${KUBE_ROOT}/cluster/azure/templates/create-dynamic-salt-files.sh"

View File

@@ -44,7 +44,7 @@ CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/monitoring" "https://www.googleapis.com/auth/logging.write") MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/monitoring" "https://www.googleapis.com/auth/logging.write")
# Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default. # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
POLL_SLEEP_INTERVAL=3 POLL_SLEEP_INTERVAL=3
PORTAL_NET="10.0.0.0/16" SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
ALLOCATE_NODE_CIDRS=true ALLOCATE_NODE_CIDRS=true
# When set to true, Docker Cache is enabled by default as part of the cluster bring up. # When set to true, Docker Cache is enabled by default as part of the cluster bring up.
@@ -77,4 +77,4 @@ DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1 DNS_REPLICAS=1
# Admission Controllers to invoke prior to persisting objects in cluster # Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota

View File

@@ -44,7 +44,7 @@ MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/logging.write" "https://www.googleapis.com/auth/monitoring") MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/logging.write" "https://www.googleapis.com/auth/monitoring")
# Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default. # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
POLL_SLEEP_INTERVAL=3 POLL_SLEEP_INTERVAL=3
PORTAL_NET="10.0.0.0/16" SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
# When set to true, Docker Cache is enabled by default as part of the cluster bring up. # When set to true, Docker Cache is enabled by default as part of the cluster bring up.
ENABLE_DOCKER_REGISTRY_CACHE=true ENABLE_DOCKER_REGISTRY_CACHE=true
@@ -75,4 +75,4 @@ DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local" DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1 DNS_REPLICAS=1
ADMISSION_CONTROL=NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota

View File

@@ -250,7 +250,7 @@ instance_prefix: '$(echo "$INSTANCE_PREFIX" | sed -e "s/'/''/g")'
node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")' node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")' cluster_cidr: '$(echo "$CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")' allocate_node_cidrs: '$(echo "$ALLOCATE_NODE_CIDRS" | sed -e "s/'/''/g")'
portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")' service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")' enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")' enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")' enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
@@ -437,6 +437,9 @@ function download-release() {
# store it when we download, and then when it's different infer that # store it when we download, and then when it's different infer that
# a push occurred (otherwise it's a simple reboot). # a push occurred (otherwise it's a simple reboot).
# If unpacking the Salt tree (the last command in the
# "until" block) fails, retry downloading both the release and Salt tars.
until
echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)" echo "Downloading binary release tar ($SERVER_BINARY_TAR_URL)"
download-or-bust "$SERVER_BINARY_TAR_URL" download-or-bust "$SERVER_BINARY_TAR_URL"
@@ -446,6 +449,10 @@ function download-release() {
echo "Unpacking Salt tree" echo "Unpacking Salt tree"
rm -rf kubernetes rm -rf kubernetes
tar xzf "${SALT_TAR_URL##*/}" tar xzf "${SALT_TAR_URL##*/}"
do
sleep 15
echo "Couldn't unpack Salt tree. Retrying..."
done
echo "Running release install script" echo "Running release install script"
sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}" sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR_URL##*/}"

View File

@@ -31,7 +31,7 @@ NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16}) CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL}) SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL}) SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
PORTAL_NET: $(yaml-quote ${PORTAL_NET}) SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false}) ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none}) ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false}) ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false})
@@ -64,7 +64,7 @@ ENV_TIMESTAMP=$(yaml-quote $(date -u +%Y-%m-%dT%T%z))
INSTANCE_PREFIX=$(yaml-quote ${INSTANCE_PREFIX}) INSTANCE_PREFIX=$(yaml-quote ${INSTANCE_PREFIX})
NODE_INSTANCE_PREFIX=$(yaml-quote ${NODE_INSTANCE_PREFIX}) NODE_INSTANCE_PREFIX=$(yaml-quote ${NODE_INSTANCE_PREFIX})
SERVER_BINARY_TAR_URL=$(yaml-quote ${SERVER_BINARY_TAR_URL}) SERVER_BINARY_TAR_URL=$(yaml-quote ${SERVER_BINARY_TAR_URL})
PORTAL_NET=$(yaml-quote ${PORTAL_NET}) SERVICE_CLUSTER_IP_RANGE=$(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
ENABLE_CLUSTER_MONITORING=$(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none}) ENABLE_CLUSTER_MONITORING=$(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
ENABLE_NODE_MONITORING=$(yaml-quote ${ENABLE_NODE_MONITORING:-false}) ENABLE_NODE_MONITORING=$(yaml-quote ${ENABLE_NODE_MONITORING:-false})
ENABLE_CLUSTER_LOGGING=$(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false}) ENABLE_CLUSTER_LOGGING=$(yaml-quote ${ENABLE_CLUSTER_LOGGING:-false})
@@ -121,12 +121,11 @@ function create-master-instance {
--image "${MASTER_IMAGE}" \ --image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \ --tags "${MASTER_TAG}" \
--network "${NETWORK}" \ --network "${NETWORK}" \
--scopes "storage-ro" "compute-rw" \ --scopes "storage-ro,compute-rw" \
--can-ip-forward \ --can-ip-forward \
--metadata-from-file \ --metadata-from-file \
"startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" \ "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh,kube-env=${KUBE_TEMP}/master-kube-env.yaml" \
"kube-env=${KUBE_TEMP}/master-kube-env.yaml" \ --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"
--disk name="${MASTER_NAME}-pd" device-name=master-pd mode=rw boot=no auto-delete=no
} }
# TODO(dawnchen): Check $CONTAINER_RUNTIME to decide which # TODO(dawnchen): Check $CONTAINER_RUNTIME to decide which

View File

@@ -29,7 +29,7 @@ NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16}) CLUSTER_IP_RANGE: $(yaml-quote ${CLUSTER_IP_RANGE:-10.244.0.0/16})
SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL}) SERVER_BINARY_TAR_URL: $(yaml-quote ${SERVER_BINARY_TAR_URL})
SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL}) SALT_TAR_URL: $(yaml-quote ${SALT_TAR_URL})
PORTAL_NET: $(yaml-quote ${PORTAL_NET}) SERVICE_CLUSTER_IP_RANGE: $(yaml-quote ${SERVICE_CLUSTER_IP_RANGE})
ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false}) ALLOCATE_NODE_CIDRS: $(yaml-quote ${ALLOCATE_NODE_CIDRS:-false})
ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none}) ENABLE_CLUSTER_MONITORING: $(yaml-quote ${ENABLE_CLUSTER_MONITORING:-none})
ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false}) ENABLE_NODE_MONITORING: $(yaml-quote ${ENABLE_NODE_MONITORING:-false})
@@ -99,12 +99,11 @@ function create-master-instance {
--image "${MASTER_IMAGE}" \ --image "${MASTER_IMAGE}" \
--tags "${MASTER_TAG}" \ --tags "${MASTER_TAG}" \
--network "${NETWORK}" \ --network "${NETWORK}" \
--scopes "storage-ro" "compute-rw" \ --scopes "storage-ro,compute-rw" \
--can-ip-forward \ --can-ip-forward \
--metadata-from-file \ --metadata-from-file \
"startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" \ "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh,kube-env=${KUBE_TEMP}/master-kube-env.yaml" \
"kube-env=${KUBE_TEMP}/master-kube-env.yaml" \ --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"
--disk name="${MASTER_NAME}-pd" device-name=master-pd mode=rw boot=no auto-delete=no
} }
# TODO(mbforbes): Make $1 required. # TODO(mbforbes): Make $1 required.

View File

@@ -159,7 +159,7 @@ function upgrade-nodes() {
# TODO(mbforbes): Refactor setting scope flags. # TODO(mbforbes): Refactor setting scope flags.
local -a scope_flags=() local -a scope_flags=()
if (( "${#MINION_SCOPES[@]}" > 0 )); then if (( "${#MINION_SCOPES[@]}" > 0 )); then
scope_flags=("--scopes" "${MINION_SCOPES[@]}") scope_flags=("--scopes" "$(join_csv ${MINION_SCOPES[@]})")
else else
scope_flags=("--no-scopes") scope_flags=("--no-scopes")
fi fi

View File

@@ -36,6 +36,10 @@ ALLOCATE_NODE_CIDRS=true
KUBE_PROMPT_FOR_UPDATE=y KUBE_PROMPT_FOR_UPDATE=y
KUBE_SKIP_UPDATE=${KUBE_SKIP_UPDATE-"n"} KUBE_SKIP_UPDATE=${KUBE_SKIP_UPDATE-"n"}
function join_csv {
local IFS=','; echo "$*";
}
# Verify prereqs # Verify prereqs
function verify-prereqs { function verify-prereqs {
local cmd local cmd
@@ -145,10 +149,10 @@ function already-staged() {
local -r file=$1 local -r file=$1
local -r newsum=$2 local -r newsum=$2
[[ -e "${file}.sha1" ]] || return 1 [[ -e "${file}.uploaded.sha1" ]] || return 1
local oldsum local oldsum
oldsum=$(cat "${file}.sha1") oldsum=$(cat "${file}.uploaded.sha1")
[[ "${oldsum}" == "${newsum}" ]] [[ "${oldsum}" == "${newsum}" ]]
} }
@@ -166,6 +170,7 @@ function copy-if-not-staged() {
echo "${server_hash}" > "${tar}.sha1" echo "${server_hash}" > "${tar}.sha1"
gsutil -m -q -h "Cache-Control:private, max-age=0" cp "${tar}" "${tar}.sha1" "${staging_path}" gsutil -m -q -h "Cache-Control:private, max-age=0" cp "${tar}" "${tar}.sha1" "${staging_path}"
gsutil -m acl ch -g all:R "${gs_url}" "${gs_url}.sha1" >/dev/null 2>&1 gsutil -m acl ch -g all:R "${gs_url}" "${gs_url}.sha1" >/dev/null 2>&1
echo "${server_hash}" > "${tar}.uploaded.sha1"
fi fi
} }
@@ -363,7 +368,7 @@ function create-firewall-rule {
--network "${NETWORK}" \ --network "${NETWORK}" \
--source-ranges "$2" \ --source-ranges "$2" \
--target-tags "$3" \ --target-tags "$3" \
--allow tcp udp icmp esp ah sctp; then --allow tcp,udp,icmp,esp,ah,sctp; then
if (( attempt > 5 )); then if (( attempt > 5 )); then
echo -e "${color_red}Failed to create firewall rule $1 ${color_norm}" echo -e "${color_red}Failed to create firewall rule $1 ${color_norm}"
exit 2 exit 2
@@ -459,7 +464,7 @@ function add-instance-metadata-from-file {
if ! gcloud compute instances add-metadata "${instance}" \ if ! gcloud compute instances add-metadata "${instance}" \
--project "${PROJECT}" \ --project "${PROJECT}" \
--zone "${ZONE}" \ --zone "${ZONE}" \
--metadata-from-file $(IFS=, ; echo "${kvs[*]}"); then --metadata-from-file "$(join_csv ${kvs[@]})"; then
if (( attempt > 5 )); then if (( attempt > 5 )); then
echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}" echo -e "${color_red}Failed to add instance metadata in ${instance} ${color_norm}"
exit 2 exit 2
@@ -575,7 +580,7 @@ function kube-up {
--project "${PROJECT}" \ --project "${PROJECT}" \
--network "${NETWORK}" \ --network "${NETWORK}" \
--source-ranges "10.0.0.0/8" \ --source-ranges "10.0.0.0/8" \
--allow "tcp:1-65535" "udp:1-65535" "icmp" & --allow "tcp:1-65535,udp:1-65535,icmp" &
fi fi
if ! gcloud compute firewall-rules describe --project "${PROJECT}" "${NETWORK}-default-ssh" &>/dev/null; then if ! gcloud compute firewall-rules describe --project "${PROJECT}" "${NETWORK}-default-ssh" &>/dev/null; then
@@ -637,7 +642,7 @@ function kube-up {
# TODO(mbforbes): Refactor setting scope flags. # TODO(mbforbes): Refactor setting scope flags.
local -a scope_flags=() local -a scope_flags=()
if (( "${#MINION_SCOPES[@]}" > 0 )); then if (( "${#MINION_SCOPES[@]}" > 0 )); then
scope_flags=("--scopes" "${MINION_SCOPES[@]}") scope_flags=("--scopes" "$(join_csv ${MINION_SCOPES[@]})")
else else
scope_flags=("--no-scopes") scope_flags=("--no-scopes")
fi fi
@@ -665,8 +670,18 @@ function kube-up {
echo " up." echo " up."
echo echo
# curl in mavericks is borked.
secure=""
if which sw_vers > /dev/null; then
if [[ $(sw_vers | grep ProductVersion | awk '{print $2}') = "10.9."* ]]; then
secure="--insecure"
fi
fi
until curl --cacert "${CERT_DIR}/pki/ca.crt" \ until curl --cacert "${CERT_DIR}/pki/ca.crt" \
-H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \ -H "Authorization: Bearer ${KUBE_BEARER_TOKEN}" \
${secure} \
--max-time 5 --fail --output /dev/null --silent \ --max-time 5 --fail --output /dev/null --silent \
"https://${KUBE_MASTER_IP}/api/v1beta3/pods"; do "https://${KUBE_MASTER_IP}/api/v1beta3/pods"; do
printf "." printf "."

View File

@@ -167,7 +167,7 @@ function test-setup() {
# collisions here? # collisions here?
"${GCLOUD}" compute firewall-rules create \ "${GCLOUD}" compute firewall-rules create \
"${MINION_TAG}-${USER}-http-alt" \ "${MINION_TAG}-${USER}-http-alt" \
--allow tcp:80 tcp:8080 \ --allow tcp:80,tcp:8080 \
--project "${PROJECT}" \ --project "${PROJECT}" \
--target-tags "${MINION_TAG}" \ --target-tags "${MINION_TAG}" \
--network="${NETWORK}" --network="${NETWORK}"

View File

@@ -23,7 +23,7 @@
"command": [ "command": [
"/hyperkube", "/hyperkube",
"apiserver", "apiserver",
"--portal_net=10.0.0.1/24", "--service-cluster-ip-range=10.0.0.1/24",
"--address=0.0.0.0", "--address=0.0.0.0",
"--etcd_servers=http://127.0.0.1:4001", "--etcd_servers=http://127.0.0.1:4001",
"--cluster_name=kubernetes", "--cluster_name=kubernetes",

View File

@@ -23,7 +23,7 @@
"command": [ "command": [
"/hyperkube", "/hyperkube",
"apiserver", "apiserver",
"--portal_net=10.0.0.1/24", "--service-cluster-ip-range=10.0.0.1/24",
"--address=127.0.0.1", "--address=127.0.0.1",
"--etcd_servers=http://127.0.0.1:4001", "--etcd_servers=http://127.0.0.1:4001",
"--cluster_name=kubernetes", "--cluster_name=kubernetes",

View File

@@ -11,7 +11,7 @@ exec /usr/local/bin/apiserver \
--address=%(api_bind_address)s \ --address=%(api_bind_address)s \
--etcd_servers=%(etcd_servers)s \ --etcd_servers=%(etcd_servers)s \
--logtostderr=true \ --logtostderr=true \
--portal_net=10.244.240.0/20 --service-cluster-ip-range=10.244.240.0/20

View File

@@ -59,7 +59,7 @@ case "$(uname -m)" in
host_arch=arm host_arch=arm
;; ;;
i?86*) i?86*)
host_arch=x86 host_arch=386
;; ;;
*) *)
echo "Unsupported host arch. Must be x86_64, 386 or arm." >&2 echo "Unsupported host arch. Must be x86_64, 386 or arm." >&2

View File

@@ -46,7 +46,7 @@ for ((i=0; i < NUM_MINIONS; i++)) do
done done
MINION_CONTAINER_SUBNETS[$NUM_MINIONS]=$MASTER_CONTAINER_SUBNET MINION_CONTAINER_SUBNETS[$NUM_MINIONS]=$MASTER_CONTAINER_SUBNET
PORTAL_NET=10.11.0.0/16 SERVICE_CLUSTER_IP_RANGE=10.11.0.0/16 # formerly PORTAL_NET
# Optional: Install node monitoring. # Optional: Install node monitoring.
ENABLE_NODE_MONITORING=true ENABLE_NODE_MONITORING=true

View File

@@ -1,44 +1,37 @@
apiVersion: v1beta3
kind: ReplicationController kind: ReplicationController
apiVersion: v1beta1 metadata:
id: skydns labels:
k8s-app: skydns
name: skydns
namespace: default namespace: default
labels: spec:
k8s-app: skydns
desiredState:
replicas: ${DNS_REPLICAS} replicas: ${DNS_REPLICAS}
replicaSelector: selector:
k8s-app: skydns k8s-app: skydns
podTemplate: template:
metadata:
labels: labels:
k8s-app: skydns k8s-app: skydns
desiredState: spec:
manifest:
version: v1beta2
id: skydns
dnsPolicy: "Default" # Don't use cluster DNS.
containers: containers:
- name: etcd - args:
- \"/etcd\"
- \"-bind-addr=127.0.0.1\"
- \"-peer-bind-addr=127.0.0.1\"
image: quay.io/coreos/etcd:latest image: quay.io/coreos/etcd:latest
command: [ name: etcd
\"/etcd\", - args:
\"-bind-addr=127.0.0.1\", - \"-domain=${DNS_DOMAIN}\"
\"-peer-bind-addr=127.0.0.1\",
]
- name: kube2sky
image: kubernetes/kube2sky:1.0 image: kubernetes/kube2sky:1.0
command: [ name: kube2sky
# entrypoint = \"/kube2sky\", - args:
\"-domain=${DNS_DOMAIN}\", - \"-machines=http://localhost:4001\"
] - \"-addr=0.0.0.0:53\"
- name: skydns - \"-domain=${DNS_DOMAIN}.\"
image: kubernetes/skydns:2014-12-23-001 image: kubernetes/skydns:2014-12-23-001
command: [ name: skydns
# entrypoint = \"/skydns\",
\"-machines=http://localhost:4001\",
\"-addr=0.0.0.0:53\",
\"-domain=${DNS_DOMAIN}.\",
]
ports: ports:
- name: dns - containerPort: 53
containerPort: 53 name: dns
protocol: UDP protocol: UDP

View File

@@ -1,12 +1,15 @@
apiVersion: v1beta3
kind: Service kind: Service
apiVersion: v1beta1 metadata:
id: skydns
namespace: default
protocol: UDP
port: 53
portalIP: ${DNS_SERVER_IP}
containerPort: 53
labels: labels:
k8s-app: skydns k8s-app: skydns
name: skydns
namespace: default
spec:
portalIP: ${DNS_SERVER_IP}
ports:
- port: 53
protocol: UDP
targetPort: 53
selector: selector:
k8s-app: skydns k8s-app: skydns

View File

@@ -18,7 +18,7 @@ coreos:
--port=8080 \ --port=8080 \
--etcd_servers=http://127.0.0.1:4001 \ --etcd_servers=http://127.0.0.1:4001 \
--kubelet_port=10250 \ --kubelet_port=10250 \
--portal_net=${PORTAL_NET} --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}
Restart=always Restart=always
RestartSec=2 RestartSec=2

View File

@@ -93,7 +93,7 @@ coreos:
--etcd_servers=http://127.0.0.1:4001 \ --etcd_servers=http://127.0.0.1:4001 \
--logtostderr=true \ --logtostderr=true \
--port=8080 \ --port=8080 \
--portal_net=PORTAL_NET \ --service-cluster-ip-range=SERVICE_CLUSTER_IP_RANGE \
--token-auth-file=/var/lib/kube-apiserver/known_tokens.csv \ --token-auth-file=/var/lib/kube-apiserver/known_tokens.csv \
--v=2 --v=2
Restart=always Restart=always

View File

@@ -36,7 +36,7 @@ RAX_NUM_MINIONS="${RAX_NUM_MINIONS-4}"
MINION_TAG="tags=${INSTANCE_PREFIX}-minion" MINION_TAG="tags=${INSTANCE_PREFIX}-minion"
MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${RAX_NUM_MINIONS}})) MINION_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${RAX_NUM_MINIONS}}))
KUBE_NETWORK="10.240.0.0/16" KUBE_NETWORK="10.240.0.0/16"
PORTAL_NET="10.0.0.0/16" SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
# Optional: Install node monitoring. # Optional: Install node monitoring.
ENABLE_NODE_MONITORING=true ENABLE_NODE_MONITORING=true

View File

@@ -164,7 +164,7 @@ rax-boot-master() {
-e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\\&}|" \ -e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\\&}|" \
-e "s|KUBE_USER|${KUBE_USER}|" \ -e "s|KUBE_USER|${KUBE_USER}|" \
-e "s|KUBE_PASSWORD|${KUBE_PASSWORD}|" \ -e "s|KUBE_PASSWORD|${KUBE_PASSWORD}|" \
-e "s|PORTAL_NET|${PORTAL_NET}|" \ -e "s|SERVICE_CLUSTER_IP_RANGE|${SERVICE_CLUSTER_IP_RANGE}|" \
-e "s|OS_AUTH_URL|${OS_AUTH_URL}|" \ -e "s|OS_AUTH_URL|${OS_AUTH_URL}|" \
-e "s|OS_USERNAME|${OS_USERNAME}|" \ -e "s|OS_USERNAME|${OS_USERNAME}|" \
-e "s|OS_PASSWORD|${OS_PASSWORD}|" \ -e "s|OS_PASSWORD|${OS_PASSWORD}|" \

View File

@@ -36,9 +36,9 @@
{% set etcd_servers = "--etcd_servers=http://127.0.0.1:4001" -%} {% set etcd_servers = "--etcd_servers=http://127.0.0.1:4001" -%}
{% set portal_net = "" -%} {% set service_cluster_ip_range = "" -%}
{% if pillar['portal_net'] is defined -%} {% if pillar['service_cluster_ip_range'] is defined -%}
{% set portal_net = "--portal_net=" + pillar['portal_net'] -%} {% set service_cluster_ip_range = "--service-cluster-ip-range=" + pillar['service_cluster_ip_range'] -%}
{% endif -%} {% endif -%}
{% set cert_file = "--tls_cert_file=/srv/kubernetes/server.cert" -%} {% set cert_file = "--tls_cert_file=/srv/kubernetes/server.cert" -%}
@@ -74,7 +74,7 @@
{% set runtime_config = "--runtime_config=" + grains.runtime_config -%} {% set runtime_config = "--runtime_config=" + grains.runtime_config -%}
{% endif -%} {% endif -%}
{% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + portal_net + " " + client_ca_file + " " + basic_auth_file -%} {% set params = address + " " + etcd_servers + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + service_cluster_ip_range + " " + client_ca_file + " " + basic_auth_file -%}
{% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure_port=" + secure_port + " " + token_auth_file + " " + publicAddressOverride + " " + pillar['log_level'] -%} {% set params = params + " " + cluster_name + " " + cert_file + " " + key_file + " --secure_port=" + secure_port + " " + token_auth_file + " " + publicAddressOverride + " " + pillar['log_level'] -%}

View File

@@ -40,6 +40,13 @@ DAEMON_USER=root
# #
do_start() do_start()
{ {
# Avoid a potential race at boot time when both monit and init.d start
# the same service
PIDS=$(pidof $DAEMON)
for PID in ${PIDS}; do
kill -9 $PID
done
# Raise the file descriptor limit - we expect to open a lot of sockets! # Raise the file descriptor limit - we expect to open a lot of sockets!
ulimit -n 65536 ulimit -n 65536

View File

@@ -58,10 +58,12 @@
{% set configure_cbr0 = "--configure-cbr0=" + pillar['allocate_node_cidrs'] -%} {% set configure_cbr0 = "--configure-cbr0=" + pillar['allocate_node_cidrs'] -%}
{% endif -%} {% endif -%}
# Run containers under the root cgroup. # Run containers under the root cgroup and create a system container.
{% set system_container = "" -%}
{% set cgroup_root = "" -%} {% set cgroup_root = "" -%}
{% if grains['os_family'] == 'Debian' -%} {% if grains['os_family'] == 'Debian' -%}
{% set system_container = "--system-container=/system" -%}
{% set cgroup_root = "--cgroup_root=/" -%} {% set cgroup_root = "--cgroup_root=/" -%}
{% endif -%} {% endif -%}
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{hostname_override}} {{cloud_provider}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{configure_cbr0}} {{cgroup_root}}" DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{hostname_override}} {{cloud_provider}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}}"

View File

@@ -39,6 +39,13 @@ DAEMON_USER=root
# #
do_start() do_start()
{ {
# Avoid a potential race at boot time when both monit and init.d start
# the same service
PIDS=$(pidof $DAEMON)
for PID in ${PIDS}; do
kill -9 $PID
done
# Return # Return
# 0 if daemon has been started # 0 if daemon has been started
# 1 if daemon was already running # 1 if daemon was already running

View File

@@ -23,10 +23,10 @@ export nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"
export roles=("ai" "i" "i") export roles=("ai" "i" "i")
# Define minion numbers # Define minion numbers
export NUM_MINIONS=${NUM_MINIONS:-3} export NUM_MINIONS=${NUM_MINIONS:-3}
# define the IP range used for service portal. # define the IP range used for service cluster IPs.
# according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here. # according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here.
export PORTAL_NET=192.168.3.0/24 export SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24 # formerly PORTAL_NET
# define the IP range used for flannel overlay network, should not conflict with above PORTAL_NET range # define the IP range used for flannel overlay network, should not conflict with above SERVICE_CLUSTER_IP_RANGE
export FLANNEL_NET=172.16.0.0/16 export FLANNEL_NET=172.16.0.0/16
# Admission Controllers to invoke prior to persisting objects in cluster # Admission Controllers to invoke prior to persisting objects in cluster
@@ -52,7 +52,7 @@ DOCKER_OPTS=""
# Optional: Install cluster DNS. # Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true ENABLE_CLUSTER_DNS=true
# DNS_SERVER_IP must be an IP in PORTAL_NET range # DNS_SERVER_IP must be an IP in SERVICE_CLUSTER_IP_RANGE
DNS_SERVER_IP="192.168.3.10" DNS_SERVER_IP="192.168.3.10"
DNS_DOMAIN="cluster.local" DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1 DNS_REPLICAS=1

View File

@@ -210,7 +210,7 @@ KUBE_APISERVER_OPTS="--address=0.0.0.0 \
--port=8080 \ --port=8080 \
--etcd_servers=http://127.0.0.1:4001 \ --etcd_servers=http://127.0.0.1:4001 \
--logtostderr=true \ --logtostderr=true \
--portal_net=${1}" --service-cluster-ip-range=${1}"
EOF EOF
} }
@@ -377,7 +377,7 @@ function provision-master() {
ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \ ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
setClusterInfo; \ setClusterInfo; \
create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \ create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \
create-kube-apiserver-opts "${PORTAL_NET}"; \ create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}"; \
create-kube-controller-manager-opts "${MINION_IPS}"; \ create-kube-controller-manager-opts "${MINION_IPS}"; \
create-kube-scheduler-opts; \ create-kube-scheduler-opts; \
sudo -p '[sudo] password to copy files and start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \ sudo -p '[sudo] password to copy files and start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \
@@ -416,7 +416,7 @@ function provision-masterandminion() {
ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \ ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
setClusterInfo; \ setClusterInfo; \
create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \ create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \
create-kube-apiserver-opts "${PORTAL_NET}"; \ create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}"; \
create-kube-controller-manager-opts "${MINION_IPS}"; \ create-kube-controller-manager-opts "${MINION_IPS}"; \
create-kube-scheduler-opts; \ create-kube-scheduler-opts; \
create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}"; create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}";

View File

@@ -43,14 +43,14 @@ for ((i=0; i < NUM_MINIONS; i++)) do
VAGRANT_MINION_NAMES[$i]="minion-$((i+1))" VAGRANT_MINION_NAMES[$i]="minion-$((i+1))"
done done
PORTAL_NET=10.247.0.0/16 SERVICE_CLUSTER_IP_RANGE=10.247.0.0/16 # formerly PORTAL_NET
# Since this isn't exposed on the network, default to a simple user/passwd # Since this isn't exposed on the network, default to a simple user/passwd
MASTER_USER=vagrant MASTER_USER=vagrant
MASTER_PASSWD=vagrant MASTER_PASSWD=vagrant
# Admission Controllers to invoke prior to persisting objects in cluster # Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
# Optional: Install node monitoring. # Optional: Install node monitoring.
ENABLE_NODE_MONITORING=true ENABLE_NODE_MONITORING=true

View File

@@ -85,7 +85,7 @@ EOF
mkdir -p /srv/salt-overlay/pillar mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")' service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")' cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")' enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")' enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")'

View File

@@ -127,7 +127,7 @@ function create-provision-scripts {
echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'" echo "CONTAINER_ADDR='${MASTER_CONTAINER_ADDR}'"
echo "MINION_CONTAINER_NETMASKS='${MINION_CONTAINER_NETMASKS[@]}'" echo "MINION_CONTAINER_NETMASKS='${MINION_CONTAINER_NETMASKS[@]}'"
echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})" echo "MINION_CONTAINER_SUBNETS=(${MINION_CONTAINER_SUBNETS[@]})"
echo "PORTAL_NET='${PORTAL_NET}'" echo "SERVICE_CLUSTER_IP_RANGE='${SERVICE_CLUSTER_IP_RANGE}'"
echo "MASTER_USER='${MASTER_USER}'" echo "MASTER_USER='${MASTER_USER}'"
echo "MASTER_PASSWD='${MASTER_PASSWD}'" echo "MASTER_PASSWD='${MASTER_PASSWD}'"
echo "ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'" echo "ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'"

View File

@@ -45,7 +45,8 @@ while true; do
if (( ${found} == "${NUM_MINIONS}" )) && (( ${ready} == "${NUM_MINIONS}")); then if (( ${found} == "${NUM_MINIONS}" )) && (( ${ready} == "${NUM_MINIONS}")); then
break break
else else
if (( attempt > 20 )); then # Set the timeout to ~10 minutes (40 x 15 seconds) to avoid timeouts for 100-node clusters.
if (( attempt > 40 )); then
echo -e "${color_red}Detected ${ready} ready nodes, found ${found} nodes out of expected ${NUM_MINIONS}. Your cluster may not be working.${color_norm}" echo -e "${color_red}Detected ${ready} ready nodes, found ${found} nodes out of expected ${NUM_MINIONS}. Your cluster may not be working.${color_norm}"
cat -n "${MINIONS_FILE}" cat -n "${MINIONS_FILE}"
exit 2 exit 2

Some files were not shown because too many files have changed in this diff