cloudprovider/gce: use golang.org/x/oauth2

code.google.com/p/goauth2 is deprecated.
Use golang.org/x/oauth2 instead.

hooks/prepare-commit-msg: ignore Godeps for sh's boilerplate check.
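
For reference, a minimal sketch of the replacement pattern with golang.org/x/oauth2 (not the exact change in pkg/cloudprovider/gce; the request URL and project name are placeholders). google.ComputeTokenSource pulls access tokens from the GCE metadata server, and oauth2.NewClient wraps it in an *http.Client, which is what the deleted goauth2 serviceaccount.NewClient used to return:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// Token source for the instance's default service account; tokens are
	// fetched and refreshed from the GCE metadata server automatically.
	ts := google.ComputeTokenSource("")
	// http.Client that adds "Authorization: Bearer <token>" to each request.
	client := oauth2.NewClient(oauth2.NoContext, ts)
	// Placeholder request; the GCE cloud provider talks to the Compute API.
	resp, err := client.Get("https://www.googleapis.com/compute/v1/projects/example-project/zones")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}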
Fumitoshi Ukai 2015-01-15 16:17:28 +09:00
parent b1f5b3f287
commit 54f498acd5
181 changed files with 47310 additions and 3074 deletions

Godeps/Godeps.json generated

@@ -1,6 +1,6 @@
 {
 	"ImportPath": "github.com/GoogleCloudPlatform/kubernetes",
-	"GoVersion": "go1.3",
+	"GoVersion": "go1.4",
 	"Packages": [
 		"./..."
 	],
@@ -14,16 +14,6 @@
 			"Comment": "null-12",
 			"Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9"
 		},
-		{
-			"ImportPath": "code.google.com/p/goauth2/compute/serviceaccount",
-			"Comment": "weekly-50",
-			"Rev": "7fc9d958c83464bd7650240569bf93a102266e6a"
-		},
-		{
-			"ImportPath": "code.google.com/p/goauth2/oauth",
-			"Comment": "weekly-50",
-			"Rev": "7fc9d958c83464bd7650240569bf93a102266e6a"
-		},
 		{
 			"ImportPath": "code.google.com/p/google-api-go-client/compute/v1",
 			"Comment": "release-96",
@@ -115,6 +105,10 @@
 			"ImportPath": "github.com/golang/glog",
 			"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
 		},
+		{
+			"ImportPath": "github.com/golang/protobuf/proto",
+			"Rev": "7f07925444bb51fa4cf9dfe6f7661876f8852275"
+		},
 		{
 			"ImportPath": "github.com/google/cadvisor/client",
 			"Comment": "0.6.2",
@@ -146,10 +140,6 @@
 			"ImportPath": "github.com/mitchellh/goamz/ec2",
 			"Rev": "703cfb45985762869e465f37ed030ff01615ff1e"
 		},
-		{
-			"ImportPath": "github.com/mitchellh/goamz/elb",
-			"Rev": "703cfb45985762869e465f37ed030ff01615ff1e"
-		},
 		{
 			"ImportPath": "github.com/mitchellh/mapstructure",
 			"Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf"
@@ -209,6 +199,22 @@
 			"ImportPath": "golang.org/x/net/websocket",
 			"Rev": "cbcac7bb8415db9b6cb4d1ebab1dc9afbd688b97"
 		},
+		{
+			"ImportPath": "golang.org/x/oauth2",
+			"Rev": "2e66694fea36dc820636630792a55cdc6987e05b"
+		},
+		{
+			"ImportPath": "google.golang.org/appengine",
+			"Rev": "6aa67407028217c352e215f5af320a429d0bcf5f"
+		},
+		{
+			"ImportPath": "google.golang.org/cloud/compute/metadata",
+			"Rev": "2e43671e4ad874a7bca65746ff3edb38e6e93762"
+		},
+		{
+			"ImportPath": "google.golang.org/cloud/internal",
+			"Rev": "2e43671e4ad874a7bca65746ff3edb38e6e93762"
+		},
 		{
 			"ImportPath": "gopkg.in/yaml.v2",
 			"Rev": "d466437aa4adc35830964cffc5b5f262c63ddcb4"


@@ -1,172 +0,0 @@
// Copyright 2013 The goauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package serviceaccount provides support for making OAuth2-authorized
// HTTP requests from Google Compute Engine instances using service accounts.
//
// See: https://developers.google.com/compute/docs/authentication
//
// Example usage:
//
// client, err := serviceaccount.NewClient(&serviceaccount.Options{})
// if err != nil {
// c.Errorf("failed to create service account client: %q", err)
// return err
// }
// client.Post("https://www.googleapis.com/compute/...", ...)
// client.Post("https://www.googleapis.com/bigquery/...", ...)
//
package serviceaccount
import (
"encoding/json"
"net/http"
"net/url"
"path"
"sync"
"time"
"code.google.com/p/goauth2/oauth"
)
const (
metadataServer = "metadata"
serviceAccountPath = "/computeMetadata/v1/instance/service-accounts"
)
// Options configures a service account Client.
type Options struct {
// Underlying transport of service account Client.
// If nil, http.DefaultTransport is used.
Transport http.RoundTripper
// Service account name.
// If empty, "default" is used.
Account string
}
// NewClient returns an *http.Client authorized with the service account
// configured in the Google Compute Engine instance.
func NewClient(opt *Options) (*http.Client, error) {
tr := http.DefaultTransport
account := "default"
if opt != nil {
if opt.Transport != nil {
tr = opt.Transport
}
if opt.Account != "" {
account = opt.Account
}
}
t := &transport{
Transport: tr,
Account: account,
}
// Get the initial access token.
if _, err := fetchToken(t); err != nil {
return nil, err
}
return &http.Client{
Transport: t,
}, nil
}
type tokenData struct {
AccessToken string `json:"access_token"`
ExpiresIn float64 `json:"expires_in"`
TokenType string `json:"token_type"`
}
// transport is an oauth.Transport with a custom Refresh and RoundTrip implementation.
type transport struct {
Transport http.RoundTripper
Account string
mu sync.Mutex
*oauth.Token
}
// Refresh renews the transport's AccessToken.
// t.mu should be held when this is called.
func (t *transport) refresh() error {
// https://developers.google.com/compute/docs/metadata#transitioning
// v1 requires "Metadata-Flavor: Google" header.
tokenURL := &url.URL{
Scheme: "http",
Host: metadataServer,
Path: path.Join(serviceAccountPath, t.Account, "token"),
}
req, err := http.NewRequest("GET", tokenURL.String(), nil)
if err != nil {
return err
}
req.Header.Add("Metadata-Flavor", "Google")
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
d := json.NewDecoder(resp.Body)
var token tokenData
err = d.Decode(&token)
if err != nil {
return err
}
t.Token = &oauth.Token{
AccessToken: token.AccessToken,
Expiry: time.Now().Add(time.Duration(token.ExpiresIn) * time.Second),
}
return nil
}
// Refresh renews the transport's AccessToken.
func (t *transport) Refresh() error {
t.mu.Lock()
defer t.mu.Unlock()
return t.refresh()
}
// Fetch token from cache or generate a new one if cache miss or expired.
func fetchToken(t *transport) (*oauth.Token, error) {
// Get a new token using Refresh in case of a cache miss or if it has expired.
t.mu.Lock()
defer t.mu.Unlock()
if t.Token == nil || t.Expired() {
if err := t.refresh(); err != nil {
return nil, err
}
}
return t.Token, nil
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return r2
}
// RoundTrip issues an authorized HTTP request and returns its response.
func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
token, err := fetchToken(t)
if err != nil {
return nil, err
}
// To set the Authorization header, we must make a copy of the Request
// so that we don't modify the Request we were given.
// This is required by the specification of http.RoundTripper.
newReq := cloneRequest(req)
newReq.Header.Set("Authorization", "Bearer "+token.AccessToken)
// Make the HTTP request.
return t.Transport.RoundTrip(newReq)
}


@@ -1,100 +0,0 @@
// Copyright 2011 The goauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This program makes a call to the specified API, authenticated with OAuth2.
// A list of example APIs can be found at https://code.google.com/oauthplayground/
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"code.google.com/p/goauth2/oauth"
)
var (
clientId = flag.String("id", "", "Client ID")
clientSecret = flag.String("secret", "", "Client Secret")
scope = flag.String("scope", "https://www.googleapis.com/auth/userinfo.profile", "OAuth scope")
redirectURL = flag.String("redirect_url", "oob", "Redirect URL")
authURL = flag.String("auth_url", "https://accounts.google.com/o/oauth2/auth", "Authentication URL")
tokenURL = flag.String("token_url", "https://accounts.google.com/o/oauth2/token", "Token URL")
requestURL = flag.String("request_url", "https://www.googleapis.com/oauth2/v1/userinfo", "API request")
code = flag.String("code", "", "Authorization Code")
cachefile = flag.String("cache", "cache.json", "Token cache file")
)
const usageMsg = `
To obtain a request token you must specify both -id and -secret.
To obtain Client ID and Secret, see the "OAuth 2 Credentials" section under
the "API Access" tab on this page: https://code.google.com/apis/console/
Once you have completed the OAuth flow, the credentials should be stored inside
the file specified by -cache and you may run without the -id and -secret flags.
`
func main() {
flag.Parse()
// Set up a configuration.
config := &oauth.Config{
ClientId: *clientId,
ClientSecret: *clientSecret,
RedirectURL: *redirectURL,
Scope: *scope,
AuthURL: *authURL,
TokenURL: *tokenURL,
TokenCache: oauth.CacheFile(*cachefile),
}
// Set up a Transport using the config.
transport := &oauth.Transport{Config: config}
// Try to pull the token from the cache; if this fails, we need to get one.
token, err := config.TokenCache.Token()
if err != nil {
if *clientId == "" || *clientSecret == "" {
flag.Usage()
fmt.Fprint(os.Stderr, usageMsg)
os.Exit(2)
}
if *code == "" {
// Get an authorization code from the data provider.
// ("Please ask the user if I can access this resource.")
url := config.AuthCodeURL("")
fmt.Print("Visit this URL to get a code, then run again with -code=YOUR_CODE\n\n")
fmt.Println(url)
return
}
// Exchange the authorization code for an access token.
// ("Here's the code you gave the user, now give me a token!")
token, err = transport.Exchange(*code)
if err != nil {
log.Fatal("Exchange:", err)
}
// (The Exchange method will automatically cache the token.)
fmt.Printf("Token is cached in %v\n", config.TokenCache)
}
// Make the actual request using the cached token to authenticate.
// ("Here's the token, let me in!")
transport.Token = token
// Make the request.
r, err := transport.Client().Get(*requestURL)
if err != nil {
log.Fatal("Get:", err)
}
defer r.Body.Close()
// Write the response to standard output.
io.Copy(os.Stdout, r.Body)
// Send final carriage return, just to be neat.
fmt.Println()
}


@@ -1 +0,0 @@
{"web":{"auth_uri":"https://accounts.google.com/o/oauth2/auth","token_uri":"https://accounts.google.com/o/oauth2/token","client_email":"XXXXXXXXXXXX@developer.gserviceaccount.com","client_x509_cert_url":"https://www.googleapis.com/robot/v1/metadata/x509/XXXXXXXXXXXX@developer.gserviceaccount.com","client_id":"XXXXXXXXXXXX.apps.googleusercontent.com","auth_provider_x509_cert_url":"https://www.googleapis.com/oauth2/v1/certs"}}


@@ -1,20 +0,0 @@
Bag Attributes
friendlyName: privatekey
localKeyID: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
Key Attributes: <No Attributes>
-----BEGIN PRIVATE KEY-----
XXXXxyXXXXXXXxxyxxxX9y0XXYXXXXYXXxXyxxXxXxXXXyXXXXx4yx1xy1xyYxxY
1XxYy38YxXxxxyXxyyxx+xxxxyx1Y1xYx7yx2/Y1XyyXYYYxY5YXxX0xY/Y642yX
zYYxYXzXYxY0Y8y9YxyYXxxX40YyXxxXX4XXxx7XxXxxXyXxYYXxXyxX5XY0Yy2X
1YX0XXyy6YXyXx9XxXxyXX9XXYXxXxXXXXXXxYXYY3Y8Yy311XYYY81XyY14Xyyx
xXyx7xxXXXxxxxyyyX4YYYXyYyYXyxX4XYXYyxXYyx9xy23xXYyXyxYxXxx1XXXY
y98yX6yYxyyyX4Xyx1Xy/0yxxYxXxYYx2xx7yYXXXxYXXXxyXyyYYxx5XX2xxyxy
y6Yyyx0XX3YYYyx9YYXXXX7y0yxXXy+90XYz1y2xyx7yXxX+8X0xYxXXYxxyxYYy
YXx8Yy4yX0Xyxxx6yYX92yxy1YYYzyyyyxy55x/yyXXXYYXYXXzXXxYYxyXY8XXX
+y9+yXxX7XxxyYYxxXYxyY623xxXxYX59x5Y6yYyXYY4YxXXYXXXYxXYxXxXXx6x
YXX7XxXX2X0XY7YXyYy1XXxYXxXxYY1xXXxxxyy+07zXYxYxxXyyxxyxXx1XYy5X
5XYzyxYxXXYyX9XX7xX8xXxx+XXYyYXXXX5YY1x8Yxyx54Xy/1XXyyYXY5YxYyxY
XyyxXyX/YxxXXXxXXYXxyxx63xX/xxyYXXyYzx0XY+YxX5xyYyyxxxXXYX/94XXy
Xx63xYxXyXY3/XXxyyXX15XXXyz08XYY5YYXY/YXy/96x68XyyXXxYyXy4xYXx5x
7yxxyxxYxXxyx3y=
-----END PRIVATE KEY-----


@@ -1,114 +0,0 @@
// Copyright 2011 The goauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This program makes a read only call to the Google Cloud Storage API,
// authenticated with OAuth2. A list of example APIs can be found at
// https://code.google.com/oauthplayground/
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
"code.google.com/p/goauth2/oauth/jwt"
)
const scope = "https://www.googleapis.com/auth/devstorage.read_only"
var (
secretsFile = flag.String("s", "", "JSON encoded secrets for the service account")
pemFile = flag.String("k", "", "private pem key file for the service account")
)
const usageMsg = `
You must specify -k and -s.
To obtain client secrets and pem, see the "OAuth 2 Credentials" section under
the "API Access" tab on this page: https://code.google.com/apis/console/
Google Cloud Storage must also be turned on in the API console.
`
func main() {
flag.Parse()
if *secretsFile == "" || *pemFile == "" {
flag.Usage()
fmt.Println(usageMsg)
return
}
// Read the secret file bytes into the config.
secretBytes, err := ioutil.ReadFile(*secretsFile)
if err != nil {
log.Fatal("error reading secerets file:", err)
}
var config struct {
Web struct {
ClientEmail string `json:"client_email"`
ClientID string `json:"client_id"`
TokenURI string `json:"token_uri"`
}
}
err = json.Unmarshal(secretBytes, &config)
if err != nil {
log.Fatal("error unmarshalling secerets:", err)
}
// Get the project ID from the client ID.
projectID := strings.SplitN(config.Web.ClientID, "-", 2)[0]
// Read the pem file bytes for the private key.
keyBytes, err := ioutil.ReadFile(*pemFile)
if err != nil {
log.Fatal("error reading private key file:", err)
}
// Craft the ClaimSet and JWT token.
t := jwt.NewToken(config.Web.ClientEmail, scope, keyBytes)
t.ClaimSet.Aud = config.Web.TokenURI
// We need to provide a client.
c := &http.Client{}
// Get the access token.
o, err := t.Assert(c)
if err != nil {
log.Fatal("assertion error:", err)
}
// Refresh token will be missing, but this access_token will be good
// for one hour.
fmt.Printf("access_token = %v\n", o.AccessToken)
fmt.Printf("refresh_token = %v\n", o.RefreshToken)
fmt.Printf("expires %v\n", o.Expiry)
// Form the request to list Google Cloud Storage buckets.
req, err := http.NewRequest("GET", "https://storage.googleapis.com/", nil)
if err != nil {
log.Fatal("http.NewRequest:", err)
}
req.Header.Set("Authorization", "OAuth "+o.AccessToken)
req.Header.Set("x-goog-api-version", "2")
req.Header.Set("x-goog-project-id", projectID)
// Make the request.
r, err := c.Do(req)
if err != nil {
log.Fatal("API request error:", err)
}
defer r.Body.Close()
// Write the response to standard output.
res, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Fatal("error reading API request results:", err)
}
fmt.Printf("\nRESULT:\n%s\n", res)
}


@@ -1,511 +0,0 @@
// Copyright 2012 The goauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The jwt package provides support for creating credentials for OAuth2 service
// account requests.
//
// For examples of the package usage please see jwt_test.go.
// Example usage (error handling omitted for brevity):
//
// // Craft the ClaimSet and JWT token.
// iss := "XXXXXXXXXXXX@developer.gserviceaccount.com"
// scope := "https://www.googleapis.com/auth/devstorage.read_only"
// t := jwt.NewToken(iss, scope, pemKeyBytes)
//
// // We need to provide a client.
// c := &http.Client{}
//
// // Get the access token.
// o, _ := t.Assert(c)
//
// // Form the request to the service.
// req, _ := http.NewRequest("GET", "https://storage.googleapis.com/", nil)
// req.Header.Set("Authorization", "OAuth "+o.AccessToken)
// req.Header.Set("x-goog-api-version", "2")
// req.Header.Set("x-goog-project-id", "XXXXXXXXXXXX")
//
// // Make the request.
// result, _ := c.Do(req)
//
// For info on OAuth2 service accounts please see the online documentation.
// https://developers.google.com/accounts/docs/OAuth2ServiceAccount
//
package jwt
import (
"bytes"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"time"
"code.google.com/p/goauth2/oauth"
)
// These are the default/standard values for this to work for Google service accounts.
const (
stdAlgorithm = "RS256"
stdType = "JWT"
stdAssertionType = "http://oauth.net/grant_type/jwt/1.0/bearer"
stdGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
stdAud = "https://accounts.google.com/o/oauth2/token"
)
var (
ErrInvalidKey = errors.New("Invalid Key")
)
// base64Encode returns a Base64url encoded version of the input with any
// trailing "=" stripped.
func base64Encode(b []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
// base64Decode decodes the Base64url encoded string
func base64Decode(s string) ([]byte, error) {
// add back missing padding
switch len(s) % 4 {
case 2:
s += "=="
case 3:
s += "="
}
return base64.URLEncoding.DecodeString(s)
}
// The JWT claim set contains information about the JWT including the
// permissions being requested (scopes), the target of the token, the issuer,
// the time the token was issued, and the lifetime of the token.
//
// Aud is usually https://accounts.google.com/o/oauth2/token
type ClaimSet struct {
Iss string `json:"iss"` // email address of the client_id of the application making the access token request
Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional).
Prn string `json:"prn,omitempty"` // email for which the application is requesting delegated access (Optional).
Exp int64 `json:"exp"`
Iat int64 `json:"iat"`
Typ string `json:"typ,omitempty"`
Sub string `json:"sub,omitempty"` // Add support for googleapi delegation support
// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
// This array is marshalled using custom code (see (c *ClaimSet) encode()).
PrivateClaims map[string]interface{} `json:"-"`
exp time.Time
iat time.Time
}
// setTimes sets iat and exp to time.Now() and iat.Add(time.Hour) respectively.
//
// Note that these times have nothing to do with the expiration time for the
// access_token returned by the server. These have to do with the lifetime of
// the encoded JWT.
//
// A JWT can be re-used for up to one hour after it was encoded. The access
// token that is granted will also be good for one hour so there is little point
// in trying to use the JWT a second time.
func (c *ClaimSet) setTimes(t time.Time) {
c.iat = t
c.exp = c.iat.Add(time.Hour)
}
var (
jsonStart = []byte{'{'}
jsonEnd = []byte{'}'}
)
// encode returns the Base64url encoded form of the Signature.
func (c *ClaimSet) encode() string {
if c.exp.IsZero() || c.iat.IsZero() {
c.setTimes(time.Now())
}
if c.Aud == "" {
c.Aud = stdAud
}
c.Exp = c.exp.Unix()
c.Iat = c.iat.Unix()
b, err := json.Marshal(c)
if err != nil {
panic(err)
}
if len(c.PrivateClaims) == 0 {
return base64Encode(b)
}
// Marshal private claim set and then append it to b.
prv, err := json.Marshal(c.PrivateClaims)
if err != nil {
panic(fmt.Errorf("Invalid map of private claims %v", c.PrivateClaims))
}
// Concatenate public and private claim JSON objects.
if !bytes.HasSuffix(b, jsonEnd) {
panic(fmt.Errorf("Invalid JSON %s", b))
}
if !bytes.HasPrefix(prv, jsonStart) {
panic(fmt.Errorf("Invalid JSON %s", prv))
}
b[len(b)-1] = ',' // Replace closing curly brace with a comma.
b = append(b, prv[1:]...) // Append private claims.
return base64Encode(b)
}
// Header describes the algorithm and type of token being generated,
// and optionally a KeyID describing additional parameters for the
// signature.
type Header struct {
Algorithm string `json:"alg"`
Type string `json:"typ"`
KeyId string `json:"kid,omitempty"`
}
func (h *Header) encode() string {
b, err := json.Marshal(h)
if err != nil {
panic(err)
}
return base64Encode(b)
}
// A JWT is composed of three parts: a header, a claim set, and a signature.
// The well formed and encoded JWT can then be exchanged for an access token.
//
// The Token is not a JWT, but it is encoded to produce a well formed JWT.
//
// When obtaining a key from the Google API console it will be downloaded in a
// PKCS12 encoding. To use this key you will need to convert it to a PEM file.
// This can be achieved with openssl.
//
// $ openssl pkcs12 -in <key.p12> -nocerts -passin pass:notasecret -nodes -out <key.pem>
//
// The contents of this file can then be used as the Key.
type Token struct {
ClaimSet *ClaimSet // claim set used to construct the JWT
Header *Header // header used to construct the JWT
Key []byte // PEM printable encoding of the private key
pKey *rsa.PrivateKey
header string
claim string
sig string
useExternalSigner bool
signer Signer
}
// NewToken returns a filled in *Token based on the standard header,
// and sets the Iat and Exp times based on when the call to Assert is
// made.
func NewToken(iss, scope string, key []byte) *Token {
c := &ClaimSet{
Iss: iss,
Scope: scope,
Aud: stdAud,
}
h := &Header{
Algorithm: stdAlgorithm,
Type: stdType,
}
t := &Token{
ClaimSet: c,
Header: h,
Key: key,
}
return t
}
// Signer is an interface that given a JWT token, returns the header &
// claim (serialized and urlEncoded to a byte slice), along with the
// signature and an error (if any occurred). It could modify any data
// to sign (typically the KeyID).
//
// Example usage where a SHA256 hash of the original url-encoded token
// with an added KeyID and secret data is used as a signature:
//
// var privateData = "secret data added to hash, indexed by KeyID"
//
// type SigningService struct{}
//
// func (ss *SigningService) Sign(in *jwt.Token) (newTokenData, sig []byte, err error) {
// in.Header.KeyId = "signing service"
// newTokenData = in.EncodeWithoutSignature()
// dataToSign := fmt.Sprintf("%s.%s", newTokenData, privateData)
// h := sha256.New()
// _, err = h.Write([]byte(dataToSign))
// sig = h.Sum(nil)
// return
// }
type Signer interface {
Sign(in *Token) (tokenData, signature []byte, err error)
}
// NewSignerToken returns a *Token, using an external signer function
func NewSignerToken(iss, scope string, signer Signer) *Token {
t := NewToken(iss, scope, nil)
t.useExternalSigner = true
t.signer = signer
return t
}
// Expired returns a boolean value letting us know if the token has expired.
func (t *Token) Expired() bool {
return t.ClaimSet.exp.Before(time.Now())
}
// Encode constructs and signs a Token returning a JWT ready to use for
// requesting an access token.
func (t *Token) Encode() (string, error) {
var tok string
t.header = t.Header.encode()
t.claim = t.ClaimSet.encode()
err := t.sign()
if err != nil {
return tok, err
}
tok = fmt.Sprintf("%s.%s.%s", t.header, t.claim, t.sig)
return tok, nil
}
// EncodeWithoutSignature returns the url-encoded value of the Token
// before signing has occurred (typically for use by external signers).
func (t *Token) EncodeWithoutSignature() string {
t.header = t.Header.encode()
t.claim = t.ClaimSet.encode()
return fmt.Sprintf("%s.%s", t.header, t.claim)
}
// sign computes the signature for a Token. The details for this can be found
// in the OAuth2 Service Account documentation.
// https://developers.google.com/accounts/docs/OAuth2ServiceAccount#computingsignature
func (t *Token) sign() error {
if t.useExternalSigner {
fulldata, sig, err := t.signer.Sign(t)
if err != nil {
return err
}
split := strings.Split(string(fulldata), ".")
if len(split) != 2 {
return errors.New("no token returned")
}
t.header = split[0]
t.claim = split[1]
t.sig = base64Encode(sig)
return err
}
ss := fmt.Sprintf("%s.%s", t.header, t.claim)
if t.pKey == nil {
err := t.parsePrivateKey()
if err != nil {
return err
}
}
h := sha256.New()
h.Write([]byte(ss))
b, err := rsa.SignPKCS1v15(rand.Reader, t.pKey, crypto.SHA256, h.Sum(nil))
t.sig = base64Encode(b)
return err
}
// parsePrivateKey converts the Token's Key ([]byte) into a parsed
// rsa.PrivateKey. If the key is not well formed this method will return an
// ErrInvalidKey error.
func (t *Token) parsePrivateKey() error {
block, _ := pem.Decode(t.Key)
if block == nil {
return ErrInvalidKey
}
parsedKey, err := x509.ParsePKCS8PrivateKey(block.Bytes)
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
return err
}
}
var ok bool
t.pKey, ok = parsedKey.(*rsa.PrivateKey)
if !ok {
return ErrInvalidKey
}
return nil
}
// Assert obtains an *oauth.Token from the remote server by encoding and sending
// a JWT. The access_token will expire in one hour (3600 seconds) and cannot be
// refreshed (no refresh_token is returned with the response). Once this token
// expires call this method again to get a fresh one.
func (t *Token) Assert(c *http.Client) (*oauth.Token, error) {
var o *oauth.Token
t.ClaimSet.setTimes(time.Now())
u, v, err := t.buildRequest()
if err != nil {
return o, err
}
resp, err := c.PostForm(u, v)
if err != nil {
return o, err
}
o, err = handleResponse(resp)
return o, err
}
// buildRequest sets up the URL values and the proper URL string for making our
// access_token request.
func (t *Token) buildRequest() (string, url.Values, error) {
v := url.Values{}
j, err := t.Encode()
if err != nil {
return t.ClaimSet.Aud, v, err
}
v.Set("grant_type", stdGrantType)
v.Set("assertion", j)
return t.ClaimSet.Aud, v, nil
}
// Used for decoding the response body.
type respBody struct {
IdToken string `json:"id_token"`
Access string `json:"access_token"`
Type string `json:"token_type"`
ExpiresIn time.Duration `json:"expires_in"`
}
// handleResponse returns a filled in *oauth.Token given the *http.Response from
// a *http.Request created by buildRequest.
func handleResponse(r *http.Response) (*oauth.Token, error) {
o := &oauth.Token{}
defer r.Body.Close()
if r.StatusCode != 200 {
return o, errors.New("invalid response: " + r.Status)
}
b := &respBody{}
err := json.NewDecoder(r.Body).Decode(b)
if err != nil {
return o, err
}
o.AccessToken = b.Access
if b.IdToken != "" {
// decode returned id token to get expiry
o.AccessToken = b.IdToken
s := strings.Split(b.IdToken, ".")
if len(s) < 2 {
return nil, errors.New("invalid token received")
}
d, err := base64Decode(s[1])
if err != nil {
return o, err
}
c := &ClaimSet{}
err = json.NewDecoder(bytes.NewBuffer(d)).Decode(c)
if err != nil {
return o, err
}
o.Expiry = time.Unix(c.Exp, 0)
return o, nil
}
o.Expiry = time.Now().Add(b.ExpiresIn * time.Second)
return o, nil
}
// Transport implements http.RoundTripper. When configured with a valid
// JWT and OAuth tokens it can be used to make authenticated HTTP requests.
//
// t := &jwt.Transport{jwtToken, oauthToken}
// r, _, err := t.Client().Get("http://example.org/url/requiring/auth")
//
// It will automatically refresh the OAuth token if it can, updating in place.
type Transport struct {
JWTToken *Token
OAuthToken *oauth.Token
// Transport is the HTTP transport to use when making requests.
// It will default to http.DefaultTransport if nil.
Transport http.RoundTripper
}
// Creates a new authenticated transport.
func NewTransport(token *Token) (*Transport, error) {
oa, err := token.Assert(new(http.Client))
if err != nil {
return nil, err
}
return &Transport{
JWTToken: token,
OAuthToken: oa,
}, nil
}
// Client returns an *http.Client that makes OAuth-authenticated requests.
func (t *Transport) Client() *http.Client {
return &http.Client{Transport: t}
}
// Fetches the internal transport.
func (t *Transport) transport() http.RoundTripper {
if t.Transport != nil {
return t.Transport
}
return http.DefaultTransport
}
// RoundTrip executes a single HTTP transaction using the Transport's
// OAuthToken as authorization headers.
//
// This method will attempt to renew the token if it has expired and may return
// an error related to that token renewal before attempting the client request.
// If the token cannot be renewed a non-nil os.Error value will be returned.
// If the token is invalid callers should expect HTTP-level errors,
// as indicated by the Response's StatusCode.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
// Sanity check the two tokens
if t.JWTToken == nil {
return nil, fmt.Errorf("no JWT token supplied")
}
if t.OAuthToken == nil {
return nil, fmt.Errorf("no OAuth token supplied")
}
// Refresh the OAuth token if it has expired
if t.OAuthToken.Expired() {
if oa, err := t.JWTToken.Assert(new(http.Client)); err != nil {
return nil, err
} else {
t.OAuthToken = oa
}
}
// To set the Authorization header, we must make a copy of the Request
// so that we don't modify the Request we were given.
// This is required by the specification of http.RoundTripper.
req = cloneRequest(req)
req.Header.Set("Authorization", "Bearer "+t.OAuthToken.AccessToken)
// Make the HTTP request.
return t.transport().RoundTrip(req)
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return r2
}
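
In golang.org/x/oauth2 the role of this deleted jwt package (two-legged OAuth with a service-account key) is played by the jwt.Config type; a minimal sketch, with the key file, account email, and scope as placeholders:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"golang.org/x/oauth2/jwt"
)

func main() {
	// Placeholder PEM private key for the service account.
	key, err := ioutil.ReadFile("key.pem")
	if err != nil {
		log.Fatal(err)
	}
	conf := &jwt.Config{
		Email:      "XXXXXXXXXXXX@developer.gserviceaccount.com",
		PrivateKey: key,
		Scopes:     []string{"https://www.googleapis.com/auth/devstorage.read_only"},
		TokenURL:   google.JWTTokenURL,
	}
	// The client signs a JWT assertion, exchanges it for an access token,
	// and refreshes it when it expires (the goauth2 NewToken/Assert flow).
	client := conf.Client(oauth2.NoContext)
	resp, err := client.Get("https://storage.googleapis.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}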


@@ -1,486 +0,0 @@
// Copyright 2012 The goauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// For package documentation please see jwt.go.
//
package jwt
import (
"bytes"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/json"
"encoding/pem"
"io/ioutil"
"net/http"
"testing"
"time"
)
const (
stdHeaderStr = `{"alg":"RS256","typ":"JWT"}`
iss = "761326798069-r5mljlln1rd4lrbhg75efgigp36m78j5@developer.gserviceaccount.com"
scope = "https://www.googleapis.com/auth/prediction"
exp = 1328554385
iat = 1328550785 // one hour before exp
)
// Base64url encoded Header
const headerEnc = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9"
// Base64url encoded ClaimSet
const claimSetEnc = "eyJpc3MiOiI3NjEzMjY3OTgwNjktcjVtbGpsbG4xcmQ0bHJiaGc3NWVmZ2lncDM2bTc4ajVAZGV2ZWxvcGVyLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzY29wZSI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2F1dGgvcHJlZGljdGlvbiIsImF1ZCI6Imh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsImV4cCI6MTMyODU1NDM4NSwiaWF0IjoxMzI4NTUwNzg1fQ"
// Base64url encoded Signature
const sigEnc = "olukbHreNiYrgiGCTEmY3eWGeTvYDSUHYoE84Jz3BRPBSaMdZMNOn_0CYK7UHPO7OdvUofjwft1dH59UxE9GWS02pjFti1uAQoImaqjLZoTXr8qiF6O_kDa9JNoykklWlRAIwGIZkDupCS-8cTAnM_ksSymiH1coKJrLDUX_BM0x2f4iMFQzhL5vT1ll-ZipJ0lNlxb5QsyXxDYcxtHYguF12-vpv3ItgT0STfcXoWzIGQoEbhwB9SBp9JYcQ8Ygz6pYDjm0rWX9LrchmTyDArCodpKLFtutNgcIFUP9fWxvwd1C2dNw5GjLcKr9a_SAERyoJ2WnCR1_j9N0wD2o0g"
// Base64url encoded Token
const tokEnc = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI3NjEzMjY3OTgwNjktcjVtbGpsbG4xcmQ0bHJiaGc3NWVmZ2lncDM2bTc4ajVAZGV2ZWxvcGVyLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzY29wZSI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2F1dGgvcHJlZGljdGlvbiIsImF1ZCI6Imh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsImV4cCI6MTMyODU1NDM4NSwiaWF0IjoxMzI4NTUwNzg1fQ.olukbHreNiYrgiGCTEmY3eWGeTvYDSUHYoE84Jz3BRPBSaMdZMNOn_0CYK7UHPO7OdvUofjwft1dH59UxE9GWS02pjFti1uAQoImaqjLZoTXr8qiF6O_kDa9JNoykklWlRAIwGIZkDupCS-8cTAnM_ksSymiH1coKJrLDUX_BM0x2f4iMFQzhL5vT1ll-ZipJ0lNlxb5QsyXxDYcxtHYguF12-vpv3ItgT0STfcXoWzIGQoEbhwB9SBp9JYcQ8Ygz6pYDjm0rWX9LrchmTyDArCodpKLFtutNgcIFUP9fWxvwd1C2dNw5GjLcKr9a_SAERyoJ2WnCR1_j9N0wD2o0g"
// Private key for testing
const privateKeyPem = `-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj
7wZgkdmM7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/
xmVU1WeruQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYs
SliS5qQpgyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18
pe+zpyl4+WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xk
SBc//fy3ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQABAoIBAQDGGHzQxGKX+ANk
nQi53v/c6632dJKYXVJC+PDAz4+bzU800Y+n/bOYsWf/kCp94XcG4Lgsdd0Gx+Zq
HD9CI1IcqqBRR2AFscsmmX6YzPLTuEKBGMW8twaYy3utlFxElMwoUEsrSWRcCA1y
nHSDzTt871c7nxCXHxuZ6Nm/XCL7Bg8uidRTSC1sQrQyKgTPhtQdYrPQ4WZ1A4J9
IisyDYmZodSNZe5P+LTJ6M1SCgH8KH9ZGIxv3diMwzNNpk3kxJc9yCnja4mjiGE2
YCNusSycU5IhZwVeCTlhQGcNeV/skfg64xkiJE34c2y2ttFbdwBTPixStGaF09nU
Z422D40BAoGBAPvVyRRsC3BF+qZdaSMFwI1yiXY7vQw5+JZh01tD28NuYdRFzjcJ
vzT2n8LFpj5ZfZFvSMLMVEFVMgQvWnN0O6xdXvGov6qlRUSGaH9u+TCPNnIldjMP
B8+xTwFMqI7uQr54wBB+Poq7dVRP+0oHb0NYAwUBXoEuvYo3c/nDoRcZAoGBAOWl
aLHjMv4CJbArzT8sPfic/8waSiLV9Ixs3Re5YREUTtnLq7LoymqB57UXJB3BNz/2
eCueuW71avlWlRtE/wXASj5jx6y5mIrlV4nZbVuyYff0QlcG+fgb6pcJQuO9DxMI
aqFGrWP3zye+LK87a6iR76dS9vRU+bHZpSVvGMKJAoGAFGt3TIKeQtJJyqeUWNSk
klORNdcOMymYMIlqG+JatXQD1rR6ThgqOt8sgRyJqFCVT++YFMOAqXOBBLnaObZZ
CFbh1fJ66BlSjoXff0W+SuOx5HuJJAa5+WtFHrPajwxeuRcNa8jwxUsB7n41wADu
UqWWSRedVBg4Ijbw3nWwYDECgYB0pLew4z4bVuvdt+HgnJA9n0EuYowVdadpTEJg
soBjNHV4msLzdNqbjrAqgz6M/n8Ztg8D2PNHMNDNJPVHjJwcR7duSTA6w2p/4k28
bvvk/45Ta3XmzlxZcZSOct3O31Cw0i2XDVc018IY5be8qendDYM08icNo7vQYkRH
504kQQKBgQDjx60zpz8ozvm1XAj0wVhi7GwXe+5lTxiLi9Fxq721WDxPMiHDW2XL
YXfFVy/9/GIMvEiGYdmarK1NW+VhWl1DC5xhDg0kvMfxplt4tynoq1uTsQTY31Mx
BeF5CT/JuNYk3bEBF0H/Q3VGO1/ggVS+YezdFbLWIRoMnLj6XCFEGg==
-----END RSA PRIVATE KEY-----`
// Public key to go with the private key for testing
const publicKeyPem = `-----BEGIN CERTIFICATE-----
MIIDIzCCAgugAwIBAgIJAMfISuBQ5m+5MA0GCSqGSIb3DQEBBQUAMBUxEzARBgNV
BAMTCnVuaXQtdGVzdHMwHhcNMTExMjA2MTYyNjAyWhcNMjExMjAzMTYyNjAyWjAV
MRMwEQYDVQQDEwp1bml0LXRlc3RzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEA4ej0p7bQ7L/r4rVGUz9RN4VQWoej1Bg1mYWIDYslvKrk1gpj7wZgkdmM
7oVK2OfgrSj/FCTkInKPqaCR0gD7K80q+mLBrN3PUkDrJQZpvRZIff3/xmVU1Wer
uQLFJjnFb2dqu0s/FY/2kWiJtBCakXvXEOb7zfbINuayL+MSsCGSdVYsSliS5qQp
gyDap+8b5fpXZVJkq92hrcNtbkg7hCYUJczt8n9hcCTJCfUpApvaFQ18pe+zpyl4
+WzkP66I28hniMQyUlA1hBiskT7qiouq0m8IOodhv2fagSZKjOTTU2xkSBc//fy3
ZpsL7WqgsZS7Q+0VRK8gKfqkxg5OYQIDAQABo3YwdDAdBgNVHQ4EFgQU2RQ8yO+O
gN8oVW2SW7RLrfYd9jEwRQYDVR0jBD4wPIAU2RQ8yO+OgN8oVW2SW7RLrfYd9jGh
GaQXMBUxEzARBgNVBAMTCnVuaXQtdGVzdHOCCQDHyErgUOZvuTAMBgNVHRMEBTAD
AQH/MA0GCSqGSIb3DQEBBQUAA4IBAQBRv+M/6+FiVu7KXNjFI5pSN17OcW5QUtPr
odJMlWrJBtynn/TA1oJlYu3yV5clc/71Vr/AxuX5xGP+IXL32YDF9lTUJXG/uUGk
+JETpKmQviPbRsvzYhz4pf6ZIOZMc3/GIcNq92ECbseGO+yAgyWUVKMmZM0HqXC9
ovNslqe0M8C1sLm1zAR5z/h/litE7/8O2ietija3Q/qtl2TOXJdCA6sgjJX2WUql
ybrC55ct18NKf3qhpcEkGQvFU40rVYApJpi98DiZPYFdx1oBDp/f4uZ3ojpxRVFT
cDwcJLfNRCPUhormsY7fDS9xSyThiHsW9mjJYdcaKQkwYZ0F11yB
-----END CERTIFICATE-----`
var (
privateKeyPemBytes = []byte(privateKeyPem)
publicKeyPemBytes = []byte(publicKeyPem)
stdHeader = &Header{Algorithm: stdAlgorithm, Type: stdType}
)
// Testing the urlEncode function.
func TestUrlEncode(t *testing.T) {
enc := base64Encode([]byte(stdHeaderStr))
b := []byte(enc)
if b[len(b)-1] == 61 {
t.Error("TestUrlEncode: last chat == \"=\"")
}
if enc != headerEnc {
t.Error("TestUrlEncode: enc != headerEnc")
t.Errorf(" enc = %s", enc)
t.Errorf(" headerEnc = %s", headerEnc)
}
}
// Test that the times are set properly.
func TestClaimSetSetTimes(t *testing.T) {
c := &ClaimSet{
Iss: iss,
Scope: scope,
}
iat := time.Unix(iat, 0)
c.setTimes(iat)
if c.exp.Unix() != exp {
t.Error("TestClaimSetSetTimes: c.exp != exp")
t.Errorf(" c.Exp = %d", c.exp.Unix())
t.Errorf(" exp = %d", exp)
}
}
// Given a well formed ClaimSet, test for proper encoding.
func TestClaimSetEncode(t *testing.T) {
c := &ClaimSet{
Iss: iss,
Scope: scope,
exp: time.Unix(exp, 0),
iat: time.Unix(iat, 0),
}
enc := c.encode()
re, err := base64Decode(enc)
if err != nil {
t.Fatalf("error decoding encoded claim set: %v", err)
}
wa, err := base64Decode(claimSetEnc)
if err != nil {
t.Fatalf("error decoding encoded expected claim set: %v", err)
}
if enc != claimSetEnc {
t.Error("TestClaimSetEncode: enc != claimSetEnc")
t.Errorf(" enc = %s", string(re))
t.Errorf(" claimSetEnc = %s", string(wa))
}
}
// Test that claim sets with private claim names are encoded correctly.
func TestClaimSetWithPrivateNameEncode(t *testing.T) {
iatT := time.Unix(iat, 0)
expT := time.Unix(exp, 0)
i, err := json.Marshal(iatT.Unix())
if err != nil {
t.Fatalf("error marshaling iatT value of %v: %v", iatT.Unix(), err)
}
iatStr := string(i)
e, err := json.Marshal(expT.Unix())
if err != nil {
t.Fatalf("error marshaling expT value of %v: %v", expT.Unix(), err)
}
expStr := string(e)
testCases := []struct {
desc string
input map[string]interface{}
want string
}{
// Test a simple int field.
{
"single simple field",
map[string]interface{}{"amount": 22},
`{` +
`"iss":"` + iss + `",` +
`"scope":"` + scope + `",` +
`"aud":"` + stdAud + `",` +
`"exp":` + expStr + `,` +
`"iat":` + iatStr + `,` +
`"amount":22` +
`}`,
},
{
"multiple simple fields",
map[string]interface{}{"tracking_code": "axZf", "amount": 22},
`{` +
`"iss":"` + iss + `",` +
`"scope":"` + scope + `",` +
`"aud":"` + stdAud + `",` +
`"exp":` + expStr + `,` +
`"iat":` + iatStr + `,` +
`"amount":22,` +
`"tracking_code":"axZf"` +
`}`,
},
{
"nested struct fields",
map[string]interface{}{
"tracking_code": "axZf",
"purchase": struct {
Description string `json:"desc"`
Quantity int32 `json:"q"`
Time int64 `json:"t"`
}{
"toaster",
5,
iat,
},
},
`{` +
`"iss":"` + iss + `",` +
`"scope":"` + scope + `",` +
`"aud":"` + stdAud + `",` +
`"exp":` + expStr + `,` +
`"iat":` + iatStr + `,` +
`"purchase":{"desc":"toaster","q":5,"t":` + iatStr + `},` +
`"tracking_code":"axZf"` +
`}`,
},
}
for _, testCase := range testCases {
c := &ClaimSet{
Iss: iss,
Scope: scope,
Aud: stdAud,
iat: iatT,
exp: expT,
PrivateClaims: testCase.input,
}
cJSON, err := base64Decode(c.encode())
if err != nil {
t.Fatalf("error decoding claim set: %v", err)
}
if string(cJSON) != testCase.want {
t.Errorf("TestClaimSetWithPrivateNameEncode: enc != want in case %s", testCase.desc)
t.Errorf(" enc = %s", cJSON)
t.Errorf(" want = %s", testCase.want)
}
}
}
// Test the NewToken constructor.
func TestNewToken(t *testing.T) {
tok := NewToken(iss, scope, privateKeyPemBytes)
if tok.ClaimSet.Iss != iss {
t.Error("TestNewToken: tok.ClaimSet.Iss != iss")
t.Errorf(" tok.ClaimSet.Iss = %s", tok.ClaimSet.Iss)
t.Errorf(" iss = %s", iss)
}
if tok.ClaimSet.Scope != scope {
t.Error("TestNewToken: tok.ClaimSet.Scope != scope")
t.Errorf(" tok.ClaimSet.Scope = %s", tok.ClaimSet.Scope)
t.Errorf(" scope = %s", scope)
}
if tok.ClaimSet.Aud != stdAud {
t.Error("TestNewToken: tok.ClaimSet.Aud != stdAud")
t.Errorf(" tok.ClaimSet.Aud = %s", tok.ClaimSet.Aud)
t.Errorf(" stdAud = %s", stdAud)
}
if !bytes.Equal(tok.Key, privateKeyPemBytes) {
t.Error("TestNewToken: tok.Key != privateKeyPemBytes")
t.Errorf(" tok.Key = %s", tok.Key)
t.Errorf(" privateKeyPemBytes = %s", privateKeyPemBytes)
}
}
// Make sure the private key parsing functions work.
func TestParsePrivateKey(t *testing.T) {
tok := &Token{
Key: privateKeyPemBytes,
}
err := tok.parsePrivateKey()
if err != nil {
t.Errorf("TestParsePrivateKey:tok.parsePrivateKey: %v", err)
}
}
// Test that the token signature generated matches the golden standard.
func TestTokenSign(t *testing.T) {
tok := &Token{
Key: privateKeyPemBytes,
claim: claimSetEnc,
header: headerEnc,
}
err := tok.parsePrivateKey()
if err != nil {
t.Errorf("TestTokenSign:tok.parsePrivateKey: %v", err)
}
err = tok.sign()
if err != nil {
t.Errorf("TestTokenSign:tok.sign: %v", err)
}
if tok.sig != sigEnc {
t.Error("TestTokenSign: tok.sig != sigEnc")
t.Errorf(" tok.sig = %s", tok.sig)
t.Errorf(" sigEnc = %s", sigEnc)
}
}
// Test that the token expiration function is working.
func TestTokenExpired(t *testing.T) {
c := &ClaimSet{}
tok := &Token{
ClaimSet: c,
}
now := time.Now()
c.setTimes(now)
if tok.Expired() != false {
t.Error("TestTokenExpired: tok.Expired != false")
}
// Set the times as if they were set 2 hours ago.
c.setTimes(now.Add(-2 * time.Hour))
if tok.Expired() != true {
t.Error("TestTokenExpired: tok.Expired != true")
}
}
// Given a well formed Token, test for proper encoding.
func TestTokenEncode(t *testing.T) {
c := &ClaimSet{
Iss: iss,
Scope: scope,
exp: time.Unix(exp, 0),
iat: time.Unix(iat, 0),
}
tok := &Token{
ClaimSet: c,
Header: stdHeader,
Key: privateKeyPemBytes,
}
enc, err := tok.Encode()
if err != nil {
t.Errorf("TestTokenEncode:tok.Assertion: %v", err)
}
if enc != tokEnc {
t.Error("TestTokenEncode: enc != tokEnc")
t.Errorf(" enc = %s", enc)
t.Errorf(" tokEnc = %s", tokEnc)
}
}
// Given a well formed Token we should get back a well formed request.
func TestBuildRequest(t *testing.T) {
c := &ClaimSet{
Iss: iss,
Scope: scope,
exp: time.Unix(exp, 0),
iat: time.Unix(iat, 0),
}
tok := &Token{
ClaimSet: c,
Header: stdHeader,
Key: privateKeyPemBytes,
}
u, v, err := tok.buildRequest()
if err != nil {
t.Errorf("TestBuildRequest:BuildRequest: %v", err)
}
if u != c.Aud {
t.Error("TestBuildRequest: u != c.Aud")
t.Errorf(" u = %s", u)
t.Errorf(" c.Aud = %s", c.Aud)
}
if v.Get("grant_type") != stdGrantType {
t.Error("TestBuildRequest: grant_type != stdGrantType")
t.Errorf(" grant_type = %s", v.Get("grant_type"))
t.Errorf(" stdGrantType = %s", stdGrantType)
}
if v.Get("assertion") != tokEnc {
t.Error("TestBuildRequest: assertion != tokEnc")
t.Errorf(" assertion = %s", v.Get("assertion"))
t.Errorf(" tokEnc = %s", tokEnc)
}
}
// Given a well formed access request response we should get back an oauth.Token.
func TestHandleResponse(t *testing.T) {
rb := &respBody{
Access: "1/8xbJqaOZXSUZbHLl5EOtu1pxz3fmmetKx9W8CV4t79M",
Type: "Bearer",
ExpiresIn: 3600,
}
b, err := json.Marshal(rb)
if err != nil {
t.Errorf("TestHandleResponse:json.Marshal: %v", err)
}
r := &http.Response{
Status: "200 OK",
StatusCode: 200,
Body: ioutil.NopCloser(bytes.NewReader(b)),
}
o, err := handleResponse(r)
if err != nil {
t.Errorf("TestHandleResponse:handleResponse: %v", err)
}
if o.AccessToken != rb.Access {
t.Error("TestHandleResponse: o.AccessToken != rb.Access")
t.Errorf(" o.AccessToken = %s", o.AccessToken)
t.Errorf(" rb.Access = %s", rb.Access)
}
if o.Expired() {
t.Error("TestHandleResponse: o.Expired == true")
}
}
// passthrough signature for test
type FakeSigner struct{}
func (f FakeSigner) Sign(tok *Token) ([]byte, []byte, error) {
block, _ := pem.Decode(privateKeyPemBytes)
pKey, _ := x509.ParsePKCS1PrivateKey(block.Bytes)
ss := headerEnc + "." + claimSetEnc
h := sha256.New()
h.Write([]byte(ss))
b, _ := rsa.SignPKCS1v15(rand.Reader, pKey, crypto.SHA256, h.Sum(nil))
return []byte(ss), b, nil
}
// Given an external signer, get back a valid and signed JWT
func TestExternalSigner(t *testing.T) {
tok := NewSignerToken(iss, scope, FakeSigner{})
enc, _ := tok.Encode()
if enc != tokEnc {
t.Errorf("TestExternalSigner: enc != tokEnc")
t.Errorf(" enc = %s", enc)
t.Errorf(" tokEnc = %s", tokEnc)
}
}
func TestHandleResponseWithNewExpiry(t *testing.T) {
rb := &respBody{
IdToken: tokEnc,
}
b, err := json.Marshal(rb)
if err != nil {
t.Errorf("TestHandleResponse:json.Marshal: %v", err)
}
r := &http.Response{
Status: "200 OK",
StatusCode: 200,
Body: ioutil.NopCloser(bytes.NewReader(b)),
}
o, err := handleResponse(r)
if err != nil {
t.Errorf("TestHandleResponse:handleResponse: %v", err)
}
if o.Expiry != time.Unix(exp, 0) {
t.Error("TestHandleResponse: o.Expiry != exp")
t.Errorf(" o.Expiry = %s", o.Expiry)
t.Errorf(" exp = %s", time.Unix(exp, 0))
}
}
// Placeholder for future Assert tests.
func TestAssert(t *testing.T) {
// Since this method makes a call to BuildRequest, an http.Client, and
// finally HandleResponse there is not much more to test. This is here
// as a placeholder if that changes.
}
// Benchmark for the end-to-end encoding of a well formed token.
func BenchmarkTokenEncode(b *testing.B) {
b.StopTimer()
c := &ClaimSet{
Iss: iss,
Scope: scope,
exp: time.Unix(exp, 0),
iat: time.Unix(iat, 0),
}
tok := &Token{
ClaimSet: c,
Key: privateKeyPemBytes,
}
b.StartTimer()
for i := 0; i < b.N; i++ {
tok.Encode()
}
}


@@ -1,405 +0,0 @@
// Copyright 2011 The goauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The oauth package provides support for making
// OAuth2-authenticated HTTP requests.
//
// Example usage:
//
// // Specify your configuration. (typically as a global variable)
// var config = &oauth.Config{
// ClientId: YOUR_CLIENT_ID,
// ClientSecret: YOUR_CLIENT_SECRET,
// Scope: "https://www.googleapis.com/auth/buzz",
// AuthURL: "https://accounts.google.com/o/oauth2/auth",
// TokenURL: "https://accounts.google.com/o/oauth2/token",
// RedirectURL: "http://you.example.org/handler",
// }
//
// // A landing page redirects to the OAuth provider to get the auth code.
// func landing(w http.ResponseWriter, r *http.Request) {
// http.Redirect(w, r, config.AuthCodeURL("foo"), http.StatusFound)
// }
//
// // The user will be redirected back to this handler, that takes the
// // "code" query parameter and Exchanges it for an access token.
// func handler(w http.ResponseWriter, r *http.Request) {
// t := &oauth.Transport{Config: config}
// t.Exchange(r.FormValue("code"))
// // The Transport now has a valid Token. Create an *http.Client
// // with which we can make authenticated API requests.
// c := t.Client()
// c.Post(...)
// // ...
// // btw, r.FormValue("state") == "foo"
// }
//
package oauth
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"net/url"
"os"
"strings"
"time"
)
type OAuthError struct {
prefix string
msg string
}
func (oe OAuthError) Error() string {
return "OAuthError: " + oe.prefix + ": " + oe.msg
}
// Cache specifies the methods that implement a Token cache.
type Cache interface {
Token() (*Token, error)
PutToken(*Token) error
}
// CacheFile implements Cache. Its value is the name of the file in which
// the Token is stored in JSON format.
type CacheFile string
func (f CacheFile) Token() (*Token, error) {
file, err := os.Open(string(f))
if err != nil {
return nil, OAuthError{"CacheFile.Token", err.Error()}
}
defer file.Close()
tok := &Token{}
if err := json.NewDecoder(file).Decode(tok); err != nil {
return nil, OAuthError{"CacheFile.Token", err.Error()}
}
return tok, nil
}
func (f CacheFile) PutToken(tok *Token) error {
file, err := os.OpenFile(string(f), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return OAuthError{"CacheFile.PutToken", err.Error()}
}
if err := json.NewEncoder(file).Encode(tok); err != nil {
file.Close()
return OAuthError{"CacheFile.PutToken", err.Error()}
}
if err := file.Close(); err != nil {
return OAuthError{"CacheFile.PutToken", err.Error()}
}
return nil
}
// Config is the configuration of an OAuth consumer.
type Config struct {
// ClientId is the OAuth client identifier used when communicating with
// the configured OAuth provider.
ClientId string
// ClientSecret is the OAuth client secret used when communicating with
// the configured OAuth provider.
ClientSecret string
// Scope identifies the level of access being requested. Multiple scope
// values should be provided as a space-delimited string.
Scope string
// AuthURL is the URL the user will be directed to in order to grant
// access.
AuthURL string
// TokenURL is the URL used to retrieve OAuth tokens.
TokenURL string
// RedirectURL is the URL to which the user will be returned after
// granting (or denying) access.
RedirectURL string
// TokenCache allows tokens to be cached for subsequent requests.
TokenCache Cache
AccessType string // Optional, "online" (default) or "offline", no refresh token if "online"
// ApprovalPrompt indicates whether the user should be
// re-prompted for consent. If set to "auto" (default) the
// user will be prompted only if they haven't previously
// granted consent and the code can only be exchanged for an
// access token.
// If set to "force" the user will always be prompted, and the
// code can be exchanged for a refresh token.
ApprovalPrompt string
}
// Token contains an end-user's tokens.
// This is the data you must store to persist authentication.
type Token struct {
AccessToken string
RefreshToken string
Expiry time.Time // If zero the token has no (known) expiry time.
Extra map[string]string // May be nil.
}
func (t *Token) Expired() bool {
if t.Expiry.IsZero() {
return false
}
return t.Expiry.Before(time.Now())
}
// Transport implements http.RoundTripper. When configured with a valid
// Config and Token it can be used to make authenticated HTTP requests.
//
// t := &oauth.Transport{config}
// t.Exchange(code)
// // t now contains a valid Token
// r, _, err := t.Client().Get("http://example.org/url/requiring/auth")
//
// It will automatically refresh the Token if it can,
// updating the supplied Token in place.
type Transport struct {
*Config
*Token
// Transport is the HTTP transport to use when making requests.
// It will default to http.DefaultTransport if nil.
// (It should never be an oauth.Transport.)
Transport http.RoundTripper
}
// Client returns an *http.Client that makes OAuth-authenticated requests.
func (t *Transport) Client() *http.Client {
return &http.Client{Transport: t}
}
func (t *Transport) transport() http.RoundTripper {
if t.Transport != nil {
return t.Transport
}
return http.DefaultTransport
}
// AuthCodeURL returns a URL that the end-user should be redirected to,
// so that they may obtain an authorization code.
func (c *Config) AuthCodeURL(state string) string {
url_, err := url.Parse(c.AuthURL)
if err != nil {
panic("AuthURL malformed: " + err.Error())
}
q := url.Values{
"response_type": {"code"},
"client_id": {c.ClientId},
"redirect_uri": {c.RedirectURL},
"scope": {c.Scope},
"state": {state},
"access_type": {c.AccessType},
"approval_prompt": {c.ApprovalPrompt},
}.Encode()
if url_.RawQuery == "" {
url_.RawQuery = q
} else {
url_.RawQuery += "&" + q
}
return url_.String()
}
// Exchange takes a code and gets access Token from the remote server.
func (t *Transport) Exchange(code string) (*Token, error) {
if t.Config == nil {
return nil, OAuthError{"Exchange", "no Config supplied"}
}
// If the transport or the cache already has a token, it is
// passed to `updateToken` to preserve existing refresh token.
tok := t.Token
if tok == nil && t.TokenCache != nil {
tok, _ = t.TokenCache.Token()
}
if tok == nil {
tok = new(Token)
}
err := t.updateToken(tok, url.Values{
"grant_type": {"authorization_code"},
"redirect_uri": {t.RedirectURL},
"scope": {t.Scope},
"code": {code},
})
if err != nil {
return nil, err
}
t.Token = tok
if t.TokenCache != nil {
return tok, t.TokenCache.PutToken(tok)
}
return tok, nil
}
// RoundTrip executes a single HTTP transaction using the Transport's
// Token as authorization headers.
//
// This method will attempt to renew the Token if it has expired and may return
// an error related to that Token renewal before attempting the client request.
// If the Token cannot be renewed a non-nil os.Error value will be returned.
// If the Token is invalid callers should expect HTTP-level errors,
// as indicated by the Response's StatusCode.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
if t.Token == nil {
if t.Config == nil {
return nil, OAuthError{"RoundTrip", "no Config supplied"}
}
if t.TokenCache == nil {
return nil, OAuthError{"RoundTrip", "no Token supplied"}
}
var err error
t.Token, err = t.TokenCache.Token()
if err != nil {
return nil, err
}
}
// Refresh the Token if it has expired.
if t.Expired() {
if err := t.Refresh(); err != nil {
return nil, err
}
}
// To set the Authorization header, we must make a copy of the Request
// so that we don't modify the Request we were given.
// This is required by the specification of http.RoundTripper.
req = cloneRequest(req)
req.Header.Set("Authorization", "Bearer "+t.AccessToken)
// Make the HTTP request.
return t.transport().RoundTrip(req)
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return r2
}
// Refresh renews the Transport's AccessToken using its RefreshToken.
func (t *Transport) Refresh() error {
if t.Token == nil {
return OAuthError{"Refresh", "no existing Token"}
}
if t.RefreshToken == "" {
return OAuthError{"Refresh", "Token expired; no Refresh Token"}
}
if t.Config == nil {
return OAuthError{"Refresh", "no Config supplied"}
}
err := t.updateToken(t.Token, url.Values{
"grant_type": {"refresh_token"},
"refresh_token": {t.RefreshToken},
})
if err != nil {
return err
}
if t.TokenCache != nil {
return t.TokenCache.PutToken(t.Token)
}
return nil
}
// AuthenticateClient gets an access Token using the client_credentials grant
// type.
func (t *Transport) AuthenticateClient() error {
if t.Config == nil {
return OAuthError{"Exchange", "no Config supplied"}
}
if t.Token == nil {
t.Token = &Token{}
}
return t.updateToken(t.Token, url.Values{"grant_type": {"client_credentials"}})
}
func (t *Transport) updateToken(tok *Token, v url.Values) error {
v.Set("client_id", t.ClientId)
v.Set("client_secret", t.ClientSecret)
client := &http.Client{Transport: t.transport()}
req, err := http.NewRequest("POST", t.TokenURL, strings.NewReader(v.Encode()))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.SetBasicAuth(t.ClientId, t.ClientSecret)
r, err := client.Do(req)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode != 200 {
return OAuthError{"updateToken", r.Status}
}
var b struct {
Access string `json:"access_token"`
Refresh string `json:"refresh_token"`
ExpiresIn time.Duration `json:"expires_in"`
Id string `json:"id_token"`
}
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
if err != nil {
return err
}
content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
switch content {
case "application/x-www-form-urlencoded", "text/plain":
vals, err := url.ParseQuery(string(body))
if err != nil {
return err
}
b.Access = vals.Get("access_token")
b.Refresh = vals.Get("refresh_token")
b.ExpiresIn, _ = time.ParseDuration(vals.Get("expires_in") + "s")
b.Id = vals.Get("id_token")
default:
if err = json.Unmarshal(body, &b); err != nil {
return fmt.Errorf("got bad response from server: %q", body)
}
// The JSON parser treats the unitless ExpiresIn like 'ns' instead of 's' as above,
// so compensate here.
b.ExpiresIn *= time.Second
}
if b.Access == "" {
return errors.New("received empty access token from authorization server")
}
tok.AccessToken = b.Access
// Don't overwrite `RefreshToken` with an empty value
if len(b.Refresh) > 0 {
tok.RefreshToken = b.Refresh
}
if b.ExpiresIn == 0 {
tok.Expiry = time.Time{}
} else {
tok.Expiry = time.Now().Add(b.ExpiresIn)
}
if b.Id != "" {
if tok.Extra == nil {
tok.Extra = make(map[string]string)
}
tok.Extra["id_token"] = b.Id
}
return nil
}
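
The three-legged flow that this deleted oauth package implemented maps onto oauth2.Config in golang.org/x/oauth2; a minimal sketch, with the client credentials and the pasted authorization code as placeholders (x/oauth2 has no built-in CacheFile, so persisting the token is left to the caller):

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	// Counterpart of the deleted oauth.Config.
	conf := &oauth2.Config{
		ClientID:     "YOUR_CLIENT_ID",
		ClientSecret: "YOUR_CLIENT_SECRET",
		RedirectURL:  "http://you.example.org/handler",
		Scopes:       []string{"https://www.googleapis.com/auth/userinfo.profile"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://accounts.google.com/o/oauth2/auth",
			TokenURL: "https://accounts.google.com/o/oauth2/token",
		},
	}
	// Step 1: send the user to the consent page (was Config.AuthCodeURL).
	fmt.Println("Visit:", conf.AuthCodeURL("state"))
	// Step 2: exchange the authorization code (was Transport.Exchange).
	tok, err := conf.Exchange(oauth2.NoContext, "PASTE_CODE_HERE")
	if err != nil {
		log.Fatal(err)
	}
	// Step 3: authenticated client that refreshes the token as needed
	// (was Transport.Client).
	client := conf.Client(oauth2.NoContext, tok)
	_ = client
}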


@@ -1,214 +0,0 @@
// Copyright 2011 The goauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package oauth
import (
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"runtime"
"testing"
"time"
)
var requests = []struct {
path, query, auth string // request
contenttype, body string // response
}{
{
path: "/token",
query: "grant_type=authorization_code&code=c0d3&client_id=cl13nt1d&client_secret=s3cr3t",
contenttype: "application/json",
auth: "Basic Y2wxM250MWQ6czNjcjN0",
body: `
{
"access_token":"token1",
"refresh_token":"refreshtoken1",
"id_token":"idtoken1",
"expires_in":3600
}
`,
},
{path: "/secure", auth: "Bearer token1", body: "first payload"},
{
path: "/token",
query: "grant_type=refresh_token&refresh_token=refreshtoken1&client_id=cl13nt1d&client_secret=s3cr3t",
contenttype: "application/json",
auth: "Basic Y2wxM250MWQ6czNjcjN0",
body: `
{
"access_token":"token2",
"refresh_token":"refreshtoken2",
"id_token":"idtoken2",
"expires_in":3600
}
`,
},
{path: "/secure", auth: "Bearer token2", body: "second payload"},
{
path: "/token",
query: "grant_type=refresh_token&refresh_token=refreshtoken2&client_id=cl13nt1d&client_secret=s3cr3t",
contenttype: "application/x-www-form-urlencoded",
body: "access_token=token3&refresh_token=refreshtoken3&id_token=idtoken3&expires_in=3600",
auth: "Basic Y2wxM250MWQ6czNjcjN0",
},
{path: "/secure", auth: "Bearer token3", body: "third payload"},
{
path: "/token",
query: "grant_type=client_credentials&client_id=cl13nt1d&client_secret=s3cr3t",
contenttype: "application/json",
auth: "Basic Y2wxM250MWQ6czNjcjN0",
body: `
{
"access_token":"token4",
"expires_in":3600
}
`,
},
{path: "/secure", auth: "Bearer token4", body: "fourth payload"},
}
func TestOAuth(t *testing.T) {
// Set up test server.
n := 0
handler := func(w http.ResponseWriter, r *http.Request) {
if n >= len(requests) {
t.Errorf("too many requests: %d", n)
return
}
req := requests[n]
n++
// Check request.
if g, w := r.URL.Path, req.path; g != w {
t.Errorf("request[%d] got path %s, want %s", n, g, w)
}
want, _ := url.ParseQuery(req.query)
for k := range want {
if g, w := r.FormValue(k), want.Get(k); g != w {
t.Errorf("query[%s] = %s, want %s", k, g, w)
}
}
if g, w := r.Header.Get("Authorization"), req.auth; w != "" && g != w {
t.Errorf("Authorization: %v, want %v", g, w)
}
// Send response.
w.Header().Set("Content-Type", req.contenttype)
io.WriteString(w, req.body)
}
server := httptest.NewServer(http.HandlerFunc(handler))
defer server.Close()
config := &Config{
ClientId: "cl13nt1d",
ClientSecret: "s3cr3t",
Scope: "https://example.net/scope",
AuthURL: server.URL + "/auth",
TokenURL: server.URL + "/token",
}
// TODO(adg): test AuthCodeURL
transport := &Transport{Config: config}
_, err := transport.Exchange("c0d3")
if err != nil {
t.Fatalf("Exchange: %v", err)
}
checkToken(t, transport.Token, "token1", "refreshtoken1", "idtoken1")
c := transport.Client()
resp, err := c.Get(server.URL + "/secure")
if err != nil {
t.Fatalf("Get: %v", err)
}
checkBody(t, resp, "first payload")
// test automatic refresh
transport.Expiry = time.Now().Add(-time.Hour)
resp, err = c.Get(server.URL + "/secure")
if err != nil {
t.Fatalf("Get: %v", err)
}
checkBody(t, resp, "second payload")
checkToken(t, transport.Token, "token2", "refreshtoken2", "idtoken2")
// refresh one more time, but get URL-encoded token instead of JSON
transport.Expiry = time.Now().Add(-time.Hour)
resp, err = c.Get(server.URL + "/secure")
if err != nil {
t.Fatalf("Get: %v", err)
}
checkBody(t, resp, "third payload")
checkToken(t, transport.Token, "token3", "refreshtoken3", "idtoken3")
transport.Token = &Token{}
err = transport.AuthenticateClient()
if err != nil {
t.Fatalf("AuthenticateClient: %v", err)
}
checkToken(t, transport.Token, "token4", "", "")
resp, err = c.Get(server.URL + "/secure")
if err != nil {
t.Fatalf("Get: %v", err)
}
checkBody(t, resp, "fourth payload")
}
func checkToken(t *testing.T, tok *Token, access, refresh, id string) {
if g, w := tok.AccessToken, access; g != w {
t.Errorf("AccessToken = %q, want %q", g, w)
}
if g, w := tok.RefreshToken, refresh; g != w {
t.Errorf("RefreshToken = %q, want %q", g, w)
}
if g, w := tok.Extra["id_token"], id; g != w {
t.Errorf("Extra['id_token'] = %q, want %q", g, w)
}
exp := tok.Expiry.Sub(time.Now())
if (time.Hour-time.Second) > exp || exp > time.Hour {
t.Errorf("Expiry = %v, want ~1 hour", exp)
}
}
func checkBody(t *testing.T, r *http.Response, body string) {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Errorf("reading reponse body: %v, want %q", err, body)
}
if g, w := string(b), body; g != w {
t.Errorf("request body mismatch: got %q, want %q", g, w)
}
}
func TestCachePermissions(t *testing.T) {
if runtime.GOOS == "windows" {
// Windows doesn't support file mode bits.
return
}
td, err := ioutil.TempDir("", "oauth-test")
if err != nil {
t.Fatalf("ioutil.TempDir: %v", err)
}
defer os.RemoveAll(td)
tempFile := filepath.Join(td, "cache-file")
cf := CacheFile(tempFile)
if err := cf.PutToken(new(Token)); err != nil {
t.Fatalf("PutToken: %v", err)
}
fi, err := os.Stat(tempFile)
if err != nil {
t.Fatalf("os.Stat: %v", err)
}
if fi.Mode()&0077 != 0 {
t.Errorf("Created cache file has mode %#o, want non-accessible to group+other", fi.Mode())
}
}

View File

@ -0,0 +1,43 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
install:
go install
test: install generate-test-pbs
go test
generate-test-pbs:
make install
make -C testdata
make -C proto3_proto
make

File diff suppressed because it is too large

View File

@ -0,0 +1,197 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer deep copy.
// TODO: MessageSet and RawMessage.
package proto
import (
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
func Clone(pb Message) Message {
in := reflect.ValueOf(pb)
if in.IsNil() {
return pb
}
out := reflect.New(in.Type().Elem())
// out is empty so a merge is a deep copy.
mergeStruct(out.Elem(), in.Elem())
return out.Interface().(Message)
}
// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
// Explicit test prior to mergeStruct so that mistyped nils will fail
panic("proto: type mismatch")
}
if in.IsNil() {
// Merging nil into non-nil is a quiet no-op
return
}
mergeStruct(out.Elem(), in.Elem())
}
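// For example, a client of this package, using the generated test types from
// clone_test.go below, could write:
//
//	dst := &pb.MyMessage{Name: proto.String("Dave"), Pet: []string{"bunny"}}
//	src := &pb.MyMessage{Count: proto.Int32(42), Pet: []string{"kitty"}}
//	proto.Merge(dst, src)
//	// dst now has Name "Dave", Count 42, and Pet ["bunny", "kitty"]:
//	// scalar fields set in src overwrite dst, repeated fields are appended,
//	// and fields unset in src leave dst untouched.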
func mergeStruct(out, in reflect.Value) {
for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mergeAny(out.Field(i), in.Field(i))
}
if emIn, ok := in.Addr().Interface().(extendableProto); ok {
emOut := out.Addr().Interface().(extendableProto)
mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
}
uf := in.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return
}
uin := uf.Bytes()
if len(uin) > 0 {
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
}
}
func mergeAny(out, in reflect.Value) {
if in.Type() == protoMessageType {
if !in.IsNil() {
if out.IsNil() {
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
} else {
Merge(out.Interface().(Message), in.Interface().(Message))
}
}
return
}
switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
out.Set(in)
case reflect.Map:
if in.Len() == 0 {
return
}
if out.IsNil() {
out.Set(reflect.MakeMap(in.Type()))
}
// For maps with value types of *T or []byte we need to deep copy each value.
elemKind := in.Type().Elem().Kind()
for _, key := range in.MapKeys() {
var val reflect.Value
switch elemKind {
case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem())
mergeAny(val, in.MapIndex(key))
case reflect.Slice:
val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
default:
val = in.MapIndex(key)
}
out.SetMapIndex(key, val)
}
case reflect.Ptr:
if in.IsNil() {
return
}
if out.IsNil() {
out.Set(reflect.New(in.Elem().Type()))
}
mergeAny(out.Elem(), in.Elem())
case reflect.Slice:
if in.IsNil() {
return
}
if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field.
// Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result.
out.SetBytes(append([]byte{}, in.Bytes()...))
return
}
n := in.Len()
if out.IsNil() {
out.Set(reflect.MakeSlice(in.Type(), 0, n))
}
switch in.Type().Elem().Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
out.Set(reflect.AppendSlice(out, in))
default:
for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem()))
mergeAny(x, in.Index(i))
out.Set(reflect.Append(out, x))
}
}
case reflect.Struct:
mergeStruct(out, in)
default:
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to copy %v", in)
}
}
func mergeExtension(out, in map[int32]Extension) {
for extNum, eIn := range in {
eOut := Extension{desc: eIn.desc}
if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
mergeAny(v, reflect.ValueOf(eIn.value))
eOut.value = v.Interface()
}
if eIn.enc != nil {
eOut.enc = make([]byte, len(eIn.enc))
copy(eOut.enc, eIn.enc)
}
out[extNum] = eOut
}
}

View File

@ -0,0 +1,227 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"testing"
"github.com/golang/protobuf/proto"
pb "./testdata"
)
var cloneTestMessage = &pb.MyMessage{
Count: proto.Int32(42),
Name: proto.String("Dave"),
Pet: []string{"bunny", "kitty", "horsey"},
Inner: &pb.InnerMessage{
Host: proto.String("niles"),
Port: proto.Int32(9099),
Connected: proto.Bool(true),
},
Others: []*pb.OtherMessage{
{
Value: []byte("some bytes"),
},
},
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: proto.Int32(6),
},
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
}
func init() {
ext := &pb.Ext{
Data: proto.String("extension"),
}
if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
panic("SetExtension: " + err.Error())
}
}
func TestClone(t *testing.T) {
m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
if !proto.Equal(m, cloneTestMessage) {
t.Errorf("Clone(%v) = %v", cloneTestMessage, m)
}
// Verify it was a deep copy.
*m.Inner.Port++
if proto.Equal(m, cloneTestMessage) {
t.Error("Mutating clone changed the original")
}
// Byte fields and repeated fields should be copied.
if &m.Pet[0] == &cloneTestMessage.Pet[0] {
t.Error("Pet: repeated field not copied")
}
if &m.Others[0] == &cloneTestMessage.Others[0] {
t.Error("Others: repeated field not copied")
}
if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {
t.Error("Others[0].Value: bytes field not copied")
}
if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {
t.Error("RepBytes: repeated field not copied")
}
if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] {
t.Error("RepBytes[0]: bytes field not copied")
}
}
func TestCloneNil(t *testing.T) {
var m *pb.MyMessage
if c := proto.Clone(m); !proto.Equal(m, c) {
t.Errorf("Clone(%v) = %v", m, c)
}
}
var mergeTests = []struct {
src, dst, want proto.Message
}{
{
src: &pb.MyMessage{
Count: proto.Int32(42),
},
dst: &pb.MyMessage{
Name: proto.String("Dave"),
},
want: &pb.MyMessage{
Count: proto.Int32(42),
Name: proto.String("Dave"),
},
},
{
src: &pb.MyMessage{
Inner: &pb.InnerMessage{
Host: proto.String("hey"),
Connected: proto.Bool(true),
},
Pet: []string{"horsey"},
Others: []*pb.OtherMessage{
{
Value: []byte("some bytes"),
},
},
},
dst: &pb.MyMessage{
Inner: &pb.InnerMessage{
Host: proto.String("niles"),
Port: proto.Int32(9099),
},
Pet: []string{"bunny", "kitty"},
Others: []*pb.OtherMessage{
{
Key: proto.Int64(31415926535),
},
{
// Explicitly test a src=nil field
Inner: nil,
},
},
},
want: &pb.MyMessage{
Inner: &pb.InnerMessage{
Host: proto.String("hey"),
Connected: proto.Bool(true),
Port: proto.Int32(9099),
},
Pet: []string{"bunny", "kitty", "horsey"},
Others: []*pb.OtherMessage{
{
Key: proto.Int64(31415926535),
},
{},
{
Value: []byte("some bytes"),
},
},
},
},
{
src: &pb.MyMessage{
RepBytes: [][]byte{[]byte("wow")},
},
dst: &pb.MyMessage{
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: proto.Int32(6),
},
RepBytes: [][]byte{[]byte("sham")},
},
want: &pb.MyMessage{
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: proto.Int32(6),
},
RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
},
},
// Check that a scalar bytes field replaces rather than appends.
{
src: &pb.OtherMessage{Value: []byte("foo")},
dst: &pb.OtherMessage{Value: []byte("bar")},
want: &pb.OtherMessage{Value: []byte("foo")},
},
{
src: &pb.MessageWithMap{
NameMapping: map[int32]string{6: "Nigel"},
MsgMapping: map[int64]*pb.FloatingPoint{
0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
},
ByteMapping: map[bool][]byte{true: []byte("wowsa")},
},
dst: &pb.MessageWithMap{
NameMapping: map[int32]string{
6: "Bruce", // should be overwritten
7: "Andrew",
},
},
want: &pb.MessageWithMap{
NameMapping: map[int32]string{
6: "Nigel",
7: "Andrew",
},
MsgMapping: map[int64]*pb.FloatingPoint{
0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
},
ByteMapping: map[bool][]byte{true: []byte("wowsa")},
},
},
}
func TestMerge(t *testing.T) {
for _, m := range mergeTests {
got := proto.Clone(m.dst)
proto.Merge(got, m.src)
if !proto.Equal(got, m.want) {
t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want)
}
}
}

View File

@ -0,0 +1,823 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for decoding protocol buffer data to construct in-memory representations.
*/
import (
"errors"
"fmt"
"io"
"os"
"reflect"
)
// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")
// The fundamental decoders that interpret bytes on the wire.
// Those that take integer types all return uint64 and are
// therefore of type valueDecoder.
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
// x, n already 0
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
}
b := uint64(buf[n])
n++
x |= (b & 0x7F) << shift
if (b & 0x80) == 0 {
return x, n
}
}
// The number is too large to represent in a 64-bit value.
return 0, 0
}
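// For example, the bytes 0x96 0x01 decode to 150: the first byte's high bit
// marks a continuation, its low seven bits contribute 0x16, the second byte
// contributes 0x01 << 7, and 0x16 | 0x01<<7 == 150 with n == 2.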
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
// x, err already 0
i := p.index
l := len(p.buf)
for shift := uint(0); shift < 64; shift += 7 {
if i >= l {
err = io.ErrUnexpectedEOF
return
}
b := p.buf[i]
i++
x |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
p.index = i
return
}
}
// The number is too large to represent in a 64-bit value.
err = errOverflow
return
}
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
// x, err already 0
i := p.index + 8
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-8])
x |= uint64(p.buf[i-7]) << 8
x |= uint64(p.buf[i-6]) << 16
x |= uint64(p.buf[i-5]) << 24
x |= uint64(p.buf[i-4]) << 32
x |= uint64(p.buf[i-3]) << 40
x |= uint64(p.buf[i-2]) << 48
x |= uint64(p.buf[i-1]) << 56
return
}
// DecodeFixed32 reads a 32-bit integer from the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
// x, err already 0
i := p.index + 4
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-4])
x |= uint64(p.buf[i-3]) << 8
x |= uint64(p.buf[i-2]) << 16
x |= uint64(p.buf[i-1]) << 24
return
}
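// For example, the little-endian bytes 0x01 0x00 0x00 0x00 decode to x == 1;
// for the float type the same four bytes carry the IEEE-754 bit pattern, so
// 0x00 0x00 0x80 0x3f yields x == 0x3f800000, the bits of float32(1.0).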
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
return
}
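// For example, zigzag interleaves signed values as 0 -> 0, -1 -> 1, 1 -> 2,
// -2 -> 3, 2 -> 4, ..., so a wire value of x == 3 decodes to -2 (all bits set
// except the lowest, reinterpreted as int64) and x == 4 decodes to 2.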
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
// from the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
return
}
// These are not ValueDecoders: they produce an array of bytes or a string.
// bytes, embedded messages
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
n, err := p.DecodeVarint()
if err != nil {
return nil, err
}
nb := int(n)
if nb < 0 {
return nil, fmt.Errorf("proto: bad byte length %d", nb)
}
end := p.index + nb
if end < p.index || end > len(p.buf) {
return nil, io.ErrUnexpectedEOF
}
if !alloc {
// todo: check if can get more uses of alloc=false
buf = p.buf[p.index:end]
p.index += nb
return
}
buf = make([]byte, nb)
copy(buf, p.buf[p.index:])
p.index += nb
return
}
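// For example, the bytes 0x03 0x66 0x6f 0x6f decode as the varint length 3
// followed by the payload "foo"; with alloc=false the returned slice aliases
// p.buf, with alloc=true it is a fresh copy.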
// DecodeStringBytes reads an encoded string from the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) DecodeStringBytes() (s string, err error) {
buf, err := p.DecodeRawBytes(false)
if err != nil {
return
}
return string(buf), nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
// If the protocol buffer has extensions, and the field matches, add it as an extension.
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
oi := o.index
err := o.skip(t, tag, wire)
if err != nil {
return err
}
if !unrecField.IsValid() {
return nil
}
ptr := structPointer_Bytes(base, unrecField)
// Add the skipped field to struct field
obuf := o.buf
o.buf = *ptr
o.EncodeVarint(uint64(tag<<3 | wire))
*ptr = append(o.buf, obuf[oi:o.index]...)
o.buf = obuf
return nil
}
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
var u uint64
var err error
switch wire {
case WireVarint:
_, err = o.DecodeVarint()
case WireFixed64:
_, err = o.DecodeFixed64()
case WireBytes:
_, err = o.DecodeRawBytes(false)
case WireFixed32:
_, err = o.DecodeFixed32()
case WireStartGroup:
for {
u, err = o.DecodeVarint()
if err != nil {
break
}
fwire := int(u & 0x7)
if fwire == WireEndGroup {
break
}
ftag := int(u >> 3)
err = o.skip(t, ftag, fwire)
if err != nil {
break
}
}
default:
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
}
return err
}
// Unmarshaler is the interface representing objects that can
// unmarshal themselves. The method should reset the receiver before
// decoding starts. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
type Unmarshaler interface {
Unmarshal([]byte) error
}
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// Unmarshal resets pb before starting to unmarshal, so any
// existing data in pb is always removed. Use UnmarshalMerge
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
return UnmarshalMerge(buf, pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
// writes the decoded result to pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
}
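// For example (a sketch, assuming a generated type like the MyMessage used in
// the package tests, with errors elided):
//
//	m := &pb.MyMessage{Pet: []string{"bunny"}}
//	proto.Unmarshal(data, m)      // Reset is called first: m holds only what is in data
//	proto.UnmarshalMerge(data, m) // data is merged into whatever m already holds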
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(Unmarshaler); ok {
err := u.Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
typ, base, err := getbase(pb)
if err != nil {
return err
}
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
if collectStats {
stats.Decode++
}
return err
}
// unmarshalType does the work of unmarshaling a structure.
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
var state errorState
required, reqFields := prop.reqCount, uint64(0)
var err error
for err == nil && o.index < len(o.buf) {
oi := o.index
var u uint64
u, err = o.DecodeVarint()
if err != nil {
break
}
wire := int(u & 0x7)
if wire == WireEndGroup {
if is_group {
return nil // input is satisfied
}
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
}
tag := int(u >> 3)
if tag <= 0 {
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
}
fieldnum, ok := prop.decoderTags.get(tag)
if !ok {
// Maybe it's an extension?
if prop.extendable {
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
if err = o.skip(st, tag, wire); err == nil {
ext := e.ExtensionMap()[int32(tag)] // may be missing
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
e.ExtensionMap()[int32(tag)] = ext
}
continue
}
}
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
continue
}
p := prop.Prop[fieldnum]
if p.dec == nil {
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
continue
}
dec := p.dec
if wire != WireStartGroup && wire != p.WireType {
if wire == WireBytes && p.packedDec != nil {
// a packable field
dec = p.packedDec
} else {
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
continue
}
}
decErr := dec(o, p, base)
if decErr != nil && !state.shouldContinue(decErr, p) {
err = decErr
}
if err == nil && p.Required {
// Successfully decoded a required field.
if tag <= 64 {
// use bitmap for fields 1-64 to catch field reuse.
var mask uint64 = 1 << uint64(tag-1)
if reqFields&mask == 0 {
// new required field
reqFields |= mask
required--
}
} else {
// This is imprecise. It can be fooled by a required field
// with a tag > 64 that is encoded twice; that's very rare.
// A fully correct implementation would require allocating
// a data structure, which we would like to avoid.
required--
}
}
}
if err == nil {
if is_group {
return io.ErrUnexpectedEOF
}
if state.err != nil {
return state.err
}
if required > 0 {
// Not enough information to determine the exact field. If we use extra
// CPU, we could determine the field only if the missing required field
// has a tag <= 64 and we check reqFields.
return &RequiredNotSetError{"{Unknown}"}
}
}
return err
}
// Individual type decoders
// For each,
// u is the decoded value,
// v is a pointer to the field (pointer) in the struct
// Sizes of the pools to allocate inside the Buffer.
// The goal is modest amortization and allocation
// on at least 16-byte boundaries.
const (
boolPoolSize = 16
uint32PoolSize = 8
uint64PoolSize = 4
)
// Decode a bool.
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
if len(o.bools) == 0 {
o.bools = make([]bool, boolPoolSize)
}
o.bools[0] = u != 0
*structPointer_Bool(base, p.field) = &o.bools[0]
o.bools = o.bools[1:]
return nil
}
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
*structPointer_BoolVal(base, p.field) = u != 0
return nil
}
// Decode an int32.
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
return nil
}
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
return nil
}
// Decode an int64.
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64_Set(structPointer_Word64(base, p.field), o, u)
return nil
}
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
return nil
}
// Decode a string.
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
sp := new(string)
*sp = s
*structPointer_String(base, p.field) = sp
return nil
}
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
*structPointer_StringVal(base, p.field) = s
return nil
}
// Decode a slice of bytes ([]byte).
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
*structPointer_Bytes(base, p.field) = b
return nil
}
// Decode a slice of bools ([]bool).
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
v := structPointer_BoolSlice(base, p.field)
*v = append(*v, u != 0)
return nil
}
// Decode a slice of bools ([]bool) in packed format.
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
v := structPointer_BoolSlice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded bools
y := *v
for i := 0; i < nb; i++ {
u, err := p.valDec(o)
if err != nil {
return err
}
y = append(y, u != 0)
}
*v = y
return nil
}
// Decode a slice of int32s ([]int32).
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word32Slice(base, p.field).Append(uint32(u))
return nil
}
// Decode a slice of int32s ([]int32) in packed format.
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
v := structPointer_Word32Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int32s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(uint32(u))
}
return nil
}
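// For example, a packed repeated int32 field holding the values 3 and 270
// arrives as a WireBytes payload: the varint byte count 0x03 followed by the
// varints 0x03 and 0x8e 0x02, which the loop above appends one by one.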
// Decode a slice of int64s ([]int64).
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
u, err := p.valDec(o)
if err != nil {
return err
}
structPointer_Word64Slice(base, p.field).Append(u)
return nil
}
// Decode a slice of int64s ([]int64) in packed format.
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
v := structPointer_Word64Slice(base, p.field)
nn, err := o.DecodeVarint()
if err != nil {
return err
}
nb := int(nn) // number of bytes of encoded int64s
fin := o.index + nb
if fin < o.index {
return errOverflow
}
for o.index < fin {
u, err := p.valDec(o)
if err != nil {
return err
}
v.Append(u)
}
return nil
}
// Decode a slice of strings ([]string).
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
s, err := o.DecodeStringBytes()
if err != nil {
return err
}
v := structPointer_StringSlice(base, p.field)
*v = append(*v, s)
return nil
}
// Decode a slice of slice of bytes ([][]byte).
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
b, err := o.DecodeRawBytes(true)
if err != nil {
return err
}
v := structPointer_BytesSlice(base, p.field)
*v = append(*v, b)
return nil
}
// Decode a map field.
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
oi := o.index // index at the end of this map entry
o.index -= len(raw) // move buffer back to start of map entry
mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V
if mptr.Elem().IsNil() {
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
}
v := mptr.Elem() // map[K]V
// Prepare addressable doubly-indirect placeholders for the key and value types.
// See enc_new_map for why.
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
keybase := toStructPointer(keyptr.Addr()) // **K
var valbase structPointer
var valptr reflect.Value
switch p.mtype.Elem().Kind() {
case reflect.Slice:
// []byte
var dummy []byte
valptr = reflect.ValueOf(&dummy) // *[]byte
valbase = toStructPointer(valptr) // *[]byte
case reflect.Ptr:
// message; valptr is **Msg; need to allocate the intermediate pointer
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valptr.Set(reflect.New(valptr.Type().Elem()))
valbase = toStructPointer(valptr)
default:
// everything else
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
valbase = toStructPointer(valptr.Addr()) // **V
}
// Decode.
// This parses a restricted wire format, namely the encoding of a message
// with two fields. See enc_new_map for the format.
for o.index < oi {
// tagcode for key and value properties are always a single byte
// because they have tags 1 and 2.
tagcode := o.buf[o.index]
o.index++
switch tagcode {
case p.mkeyprop.tagcode[0]:
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
return err
}
case p.mvalprop.tagcode[0]:
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
return err
}
default:
// TODO: Should we silently skip this instead?
return fmt.Errorf("proto: bad map data tag %d", raw[0])
}
}
v.SetMapIndex(keyptr.Elem(), valptr.Elem())
return nil
}
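// For example, an entry of a map<int32, string> field such as {6: "Nigel"}
// (see MessageWithMap in the tests) arrives on the wire as an embedded
// message with field 1 holding the key 6 and field 2 holding the value
// "Nigel"; the loop above dispatches on those two single-byte tagcodes.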
// Decode a group.
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
return o.unmarshalType(p.stype, p.sprop, true, bas)
}
// Decode an embedded message.
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
raw, e := o.DecodeRawBytes(false)
if e != nil {
return e
}
bas := structPointer_GetStructPointer(base, p.field)
if structPointer_IsNil(bas) {
// allocate new nested message
bas = toStructPointer(reflect.New(p.stype))
structPointer_SetStructPointer(base, p.field, bas)
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := structPointer_Interface(bas, p.stype)
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, false, bas)
o.buf = obuf
o.index = oi
return err
}
// Decode a slice of embedded messages.
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, false, base)
}
// Decode a slice of embedded groups.
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
return o.dec_slice_struct(p, true, base)
}
// Decode a slice of structs ([]*struct).
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
v := reflect.New(p.stype)
bas := toStructPointer(v)
structPointer_StructPointerSlice(base, p.field).Append(bas)
if is_group {
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
return err
}
raw, err := o.DecodeRawBytes(false)
if err != nil {
return err
}
// If the object can unmarshal itself, let it.
if p.isUnmarshaler {
iv := v.Interface()
return iv.(Unmarshaler).Unmarshal(raw)
}
obuf := o.buf
oi := o.index
o.buf = raw
o.index = 0
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
o.buf = obuf
o.index = oi
return err
}

File diff suppressed because it is too large

View File

@ -0,0 +1,256 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer comparison.
// TODO: MessageSet.
package proto
import (
"bytes"
"log"
"reflect"
"strings"
)
/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.
Equality is defined in this way:
- Two messages are equal iff they are the same type,
corresponding fields are equal, unknown field sets
are equal, and extensions sets are equal.
- Two set scalar fields are equal iff their values are equal.
If the fields are of a floating-point type, remember that
NaN != x for all x, including NaN.
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal (a "bytes" field,
although represented by []byte, is not a repeated field)
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
- Every other combination of things is not equal.
The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
if a == nil || b == nil {
return a == b
}
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
if v1.Type() != v2.Type() {
return false
}
if v1.Kind() == reflect.Ptr {
if v1.IsNil() {
return v2.IsNil()
}
if v2.IsNil() {
return false
}
v1, v2 = v1.Elem(), v2.Elem()
}
if v1.Kind() != reflect.Struct {
return false
}
return equalStruct(v1, v2)
}
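// For example (see equal_test.go below), a nil repeated field and an empty
// one compare equal, while a nil bytes field and an empty non-nil one do not,
// because the []byte short circuit in equalAny checks IsNil before comparing
// contents, whereas other slices are compared only by length and elements.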
// v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool {
for i := 0; i < v1.NumField(); i++ {
f := v1.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
f1, f2 := v1.Field(i), v2.Field(i)
if f.Type.Kind() == reflect.Ptr {
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
// both unset
continue
} else if n1 != n2 {
// set/unset mismatch
return false
}
b1, ok := f1.Interface().(raw)
if ok {
b2 := f2.Interface().(raw)
// RawMessage
if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
return false
}
continue
}
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2) {
return false
}
}
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_extensions")
if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
return false
}
}
uf := v1.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return true
}
u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
if !bytes.Equal(u1, u2) {
return false
}
return true
}
// v1 and v2 are known to have the same type.
func equalAny(v1, v2 reflect.Value) bool {
if v1.Type() == protoMessageType {
m1, _ := v1.Interface().(Message)
m2, _ := v2.Interface().(Message)
return Equal(m1, m2)
}
switch v1.Kind() {
case reflect.Bool:
return v1.Bool() == v2.Bool()
case reflect.Float32, reflect.Float64:
return v1.Float() == v2.Float()
case reflect.Int32, reflect.Int64:
return v1.Int() == v2.Int()
case reflect.Map:
if v1.Len() != v2.Len() {
return false
}
for _, key := range v1.MapKeys() {
val2 := v2.MapIndex(key)
if !val2.IsValid() {
// This key was not found in the second map.
return false
}
if !equalAny(v1.MapIndex(key), val2) {
return false
}
}
return true
case reflect.Ptr:
return equalAny(v1.Elem(), v2.Elem())
case reflect.Slice:
if v1.Type().Elem().Kind() == reflect.Uint8 {
// short circuit: []byte
if v1.IsNil() != v2.IsNil() {
return false
}
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
}
if v1.Len() != v2.Len() {
return false
}
for i := 0; i < v1.Len(); i++ {
if !equalAny(v1.Index(i), v2.Index(i)) {
return false
}
}
return true
case reflect.String:
return v1.Interface().(string) == v2.Interface().(string)
case reflect.Struct:
return equalStruct(v1, v2)
case reflect.Uint32, reflect.Uint64:
return v1.Uint() == v2.Uint()
}
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to compare %v", v1)
return false
}
// base is the struct type that the extensions are based on.
// em1 and em2 are extension maps.
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
if len(em1) != len(em2) {
return false
}
for extNum, e1 := range em1 {
e2, ok := em2[extNum]
if !ok {
return false
}
m1, m2 := e1.value, e2.value
if m1 != nil && m2 != nil {
// Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
return false
}
continue
}
// At least one is encoded. To do a semantically correct comparison
// we need to unmarshal them first.
var desc *ExtensionDesc
if m := extensionMaps[base]; m != nil {
desc = m[extNum]
}
if desc == nil {
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
continue
}
var err error
if m1 == nil {
m1, err = decodeExtension(e1.enc, desc)
}
if m2 == nil && err == nil {
m2, err = decodeExtension(e2.enc, desc)
}
if err != nil {
// The encoded form is invalid.
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
return false
}
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
return false
}
}
return true
}

View File

@ -0,0 +1,191 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"testing"
pb "./testdata"
. "github.com/golang/protobuf/proto"
)
// Four identical base messages.
// The init function adds extensions to some of them.
var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)}
var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)}
var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)}
var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)}
// Two messages with non-message extensions.
var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)}
var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)}
func init() {
ext1 := &pb.Ext{Data: String("Kirk")}
ext2 := &pb.Ext{Data: String("Picard")}
// messageWithExtension1a has ext1, but never marshals it.
if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil {
panic("SetExtension on 1a failed: " + err.Error())
}
// messageWithExtension1b is the unmarshaled form of messageWithExtension1a.
if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil {
panic("SetExtension on 1b failed: " + err.Error())
}
buf, err := Marshal(messageWithExtension1b)
if err != nil {
panic("Marshal of 1b failed: " + err.Error())
}
messageWithExtension1b.Reset()
if err := Unmarshal(buf, messageWithExtension1b); err != nil {
panic("Unmarshal of 1b failed: " + err.Error())
}
// messageWithExtension2 has ext2.
if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil {
panic("SetExtension on 2 failed: " + err.Error())
}
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil {
panic("SetExtension on Int32-1 failed: " + err.Error())
}
if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil {
panic("SetExtension on Int32-2 failed: " + err.Error())
}
}
var EqualTests = []struct {
desc string
a, b Message
exp bool
}{
{"different types", &pb.GoEnum{}, &pb.GoTestField{}, false},
{"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true},
{"nil vs nil", nil, nil, true},
{"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true},
{"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false},
{"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false},
{"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false},
{"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false},
{"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false},
{"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true},
{"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false},
{"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false},
{"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false},
{"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true},
{"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true},
{"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true},
{"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true},
{
"nested, different",
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}},
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}},
false,
},
{
"nested, equal",
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
&pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
true,
},
{"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true},
{"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true},
{"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false},
{
"repeated bytes",
&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
&pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
true,
},
{"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false},
{"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true},
{"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false},
{"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true},
{"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false},
{
"message with group",
&pb.MyMessage{
Count: Int32(1),
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: Int32(5),
},
},
&pb.MyMessage{
Count: Int32(1),
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: Int32(5),
},
},
true,
},
{
"map same",
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
true,
},
{
"map different entry",
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
&pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}},
false,
},
{
"map different key only",
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
&pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}},
false,
},
{
"map different value only",
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
&pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}},
false,
},
}
func TestEqual(t *testing.T) {
for _, tc := range EqualTests {
if res := Equal(tc.a, tc.b); res != tc.exp {
t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp)
}
}
}

View File

@ -0,0 +1,353 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Types and routines for supporting protocol buffer extensions.
*/
import (
"errors"
"reflect"
"strconv"
"sync"
)
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
var ErrMissingExtension = errors.New("proto: missing extension")
// ExtensionRange represents a range of message extensions for a protocol buffer.
// Used in code generated by the protocol compiler.
type ExtensionRange struct {
Start, End int32 // both inclusive
}
// extendableProto is an interface implemented by any protocol buffer that may be extended.
type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
ExtensionMap() map[int32]Extension
}
var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
ExtendedType Message // nil pointer to the type that is being extended
ExtensionType interface{} // nil pointer to the extension type
Field int32 // field number
Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style
}
func (ed *ExtensionDesc) repeated() bool {
t := reflect.TypeOf(ed.ExtensionType)
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
}
// Extension represents an extension in a message.
type Extension struct {
// When an extension is stored in a message using SetExtension
// only desc and value are set. When the message is marshaled
// enc will be set to the encoded form of the message.
//
// When a message is unmarshaled and contains extensions, each
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
desc *ExtensionDesc
value interface{}
enc []byte
}
// SetRawExtension is for testing only.
func SetRawExtension(base extendableProto, id int32, b []byte) {
base.ExtensionMap()[id] = Extension{enc: b}
}
// isExtensionField returns true iff the given field number is in an extension range.
func isExtensionField(pb extendableProto, field int32) bool {
for _, er := range pb.ExtensionRangeArray() {
if er.Start <= field && field <= er.End {
return true
}
}
return false
}
// checkExtensionTypes checks that the given extension is valid for pb.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
// Check the extended type.
if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
return errors.New("proto: bad extension number; not in declared ranges")
}
return nil
}
// extPropKey is sufficient to uniquely identify an extension.
type extPropKey struct {
base reflect.Type
field int32
}
var extProp = struct {
sync.RWMutex
m map[extPropKey]*Properties
}{
m: make(map[extPropKey]*Properties),
}
func extensionProperties(ed *ExtensionDesc) *Properties {
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
extProp.RLock()
if prop, ok := extProp.m[key]; ok {
extProp.RUnlock()
return prop
}
extProp.RUnlock()
extProp.Lock()
defer extProp.Unlock()
// Check again.
if prop, ok := extProp.m[key]; ok {
return prop
}
prop := new(Properties)
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
extProp.m[key] = prop
return prop
}
// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
func encodeExtensionMap(m map[int32]Extension) error {
for k, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
p := NewBuffer(nil)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
if err := props.enc(p, props, toStructPointer(x)); err != nil {
return err
}
e.enc = p.buf
m[k] = e
}
return nil
}
func sizeExtensionMap(m map[int32]Extension) (n int) {
for _, e := range m {
if e.value == nil || e.desc == nil {
// Extension is only in its encoded form.
n += len(e.enc)
continue
}
// We don't skip extensions that have an encoded form set,
// because the extension value may have been mutated after
// the last time this function was called.
et := reflect.TypeOf(e.desc.ExtensionType)
props := extensionProperties(e.desc)
// If e.value has type T, the encoder expects a *struct{ X T }.
// Pass a *T with a zero field and hope it all works out.
x := reflect.New(et)
x.Elem().Set(reflect.ValueOf(e.value))
n += props.size(props, toStructPointer(x))
}
return
}
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
_, ok := pb.ExtensionMap()[extension.Field]
return ok
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
// TODO: Check types, field numbers, etc.?
delete(pb.ExtensionMap(), extension.Field)
}
// GetExtension parses and returns the given extension of pb.
// If the extension is not present it returns ErrMissingExtension.
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
if err := checkExtensionTypes(pb, extension); err != nil {
return nil, err
}
emap := pb.ExtensionMap()
e, ok := emap[extension.Field]
if !ok {
return nil, ErrMissingExtension
}
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
// This shouldn't happen. If it does, it means that
// GetExtension was called twice with two different
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
return e.value, nil
}
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
}
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = v
e.desc = extension
e.enc = nil
emap[extension.Field] = e
return e.value, nil
}
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
o := NewBuffer(b)
t := reflect.TypeOf(extension.ExtensionType)
rep := extension.repeated()
props := extensionProperties(extension)
// t is a pointer to a struct, pointer to basic type or a slice.
// Allocate a "field" to store the pointer/slice itself; the
// pointer/slice will be stored here. We pass
// the address of this field to props.dec.
// This passes a zero field and a *t and lets props.dec
// interpret it as a *struct{ x t }.
value := reflect.New(t).Elem()
for {
// Discard wire type and field number varint. It isn't needed.
if _, err := o.DecodeVarint(); err != nil {
return nil, err
}
if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
return nil, err
}
if !rep || o.index >= len(o.buf) {
break
}
}
return value.Interface(), nil
}
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, ok := pb.(extendableProto)
if !ok {
err = errors.New("proto: not an extendable proto")
return
}
extensions = make([]interface{}, len(es))
for i, e := range es {
extensions[i], err = GetExtension(epb, e)
if err == ErrMissingExtension {
err = nil
}
if err != nil {
return
}
}
return
}
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
if err := checkExtensionTypes(pb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return errors.New("proto: bad extension value type")
}
pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
return nil
}
// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
st := reflect.TypeOf(desc.ExtendedType).Elem()
m := extensionMaps[st]
if m == nil {
m = make(map[int32]*ExtensionDesc)
extensionMaps[st] = m
}
if _, ok := m[desc.Field]; ok {
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
}
m[desc.Field] = desc
}
// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}

View File

@ -0,0 +1,137 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2014 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"testing"
pb "./testdata"
"github.com/golang/protobuf/proto"
)
func TestGetExtensionsWithMissingExtensions(t *testing.T) {
msg := &pb.MyMessage{}
ext1 := &pb.Ext{}
if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
t.Fatalf("Could not set ext1: %s", ext1)
}
exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{
pb.E_Ext_More,
pb.E_Ext_Text,
})
if err != nil {
t.Fatalf("GetExtensions() failed: %s", err)
}
if exts[0] != ext1 {
t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0])
}
if exts[1] != nil {
t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1])
}
}
func TestGetExtensionStability(t *testing.T) {
check := func(m *pb.MyMessage) bool {
ext1, err := proto.GetExtension(m, pb.E_Ext_More)
if err != nil {
t.Fatalf("GetExtension() failed: %s", err)
}
ext2, err := proto.GetExtension(m, pb.E_Ext_More)
if err != nil {
t.Fatalf("GetExtension() failed: %s", err)
}
return ext1 == ext2
}
msg := &pb.MyMessage{Count: proto.Int32(4)}
ext0 := &pb.Ext{}
if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {
t.Fatalf("Could not set ext1: %s", ext0)
}
if !check(msg) {
t.Errorf("GetExtension() not stable before marshaling")
}
bb, err := proto.Marshal(msg)
if err != nil {
t.Fatalf("Marshal() failed: %s", err)
}
msg1 := &pb.MyMessage{}
err = proto.Unmarshal(bb, msg1)
if err != nil {
t.Fatalf("Unmarshal() failed: %s", err)
}
if !check(msg1) {
t.Errorf("GetExtension() not stable after unmarshaling")
}
}
func TestExtensionsRoundTrip(t *testing.T) {
msg := &pb.MyMessage{}
ext1 := &pb.Ext{
Data: proto.String("hi"),
}
ext2 := &pb.Ext{
Data: proto.String("there"),
}
exists := proto.HasExtension(msg, pb.E_Ext_More)
if exists {
t.Error("Extension More present unexpectedly")
}
if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
t.Error(err)
}
if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {
t.Error(err)
}
e, err := proto.GetExtension(msg, pb.E_Ext_More)
if err != nil {
t.Error(err)
}
x, ok := e.(*pb.Ext)
if !ok {
t.Errorf("e has type %T, expected testdata.Ext", e)
} else if *x.Data != "there" {
t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x)
}
proto.ClearExtension(msg, pb.E_Ext_More)
if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {
t.Errorf("got %v, expected ErrMissingExtension", e)
}
if _, err := proto.GetExtension(msg, pb.E_X215); err == nil {
t.Error("expected bad extension error, got nil")
}
if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {
t.Error("expected extension err")
}
if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {
t.Error("expected some sort of type mismatch error, got nil")
}
}

View File

@ -0,0 +1,751 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package proto converts data structures to and from the wire format of
protocol buffers. It works in concert with the Go source code generated
for .proto files by the protocol compiler.
A summary of the properties of the protocol buffer interface
for a protocol buffer variable v:
- Names are turned from camel_case to CamelCase for export.
- There are no methods on v to set fields; just treat
them as structure fields.
- There are getters that return a field's value if set,
and return the field's default value if unset.
The getters work even if the receiver is a nil message.
- The zero value for a struct is its correct initialization state.
All desired fields must be set before marshaling.
- A Reset() method will restore a protobuf struct to its zero state.
- Non-repeated fields are pointers to the values; nil means unset.
That is, optional or required field int32 f becomes F *int32.
- Repeated fields are slices.
- Helper functions are available to aid the setting of fields.
Helpers for getting values are superseded by the
GetFoo methods and their use is deprecated.
msg.Foo = proto.String("hello") // set field
- Constants are defined to hold the default values of all fields that
have them. They have the form Default_StructName_FieldName.
Because the getter methods handle defaulted values,
direct use of these constants should be rare.
- Enums are given type names and maps from names to values.
Enum values are prefixed with the enum's type name. Enum types have
a String method, and an Enum method to assist in message construction.
- Nested groups and enums have type names prefixed with the name of
the surrounding message type.
- Extensions are given descriptor names that start with E_,
followed by an underscore-delimited list of the nested messages
that contain it (if any) followed by the CamelCased name of the
extension field itself. HasExtension, ClearExtension, GetExtension
and SetExtension are functions for manipulating extensions.
- Marshal and Unmarshal are functions to encode and decode the wire format.
The simplest way to describe this is to see an example.
Given file test.proto, containing
package example;
enum FOO { X = 17; };
message Test {
required string label = 1;
optional int32 type = 2 [default=77];
repeated int64 reps = 3;
optional group OptionalGroup = 4 {
required string RequiredField = 5;
}
}
The resulting file, test.pb.go, is:
package example
import "github.com/golang/protobuf/proto"
type FOO int32
const (
FOO_X FOO = 17
)
var FOO_name = map[int32]string{
17: "X",
}
var FOO_value = map[string]int32{
"X": 17,
}
func (x FOO) Enum() *FOO {
p := new(FOO)
*p = x
return p
}
func (x FOO) String() string {
return proto.EnumName(FOO_name, int32(x))
}
type Test struct {
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (this *Test) Reset() { *this = Test{} }
func (this *Test) String() string { return proto.CompactTextString(this) }
const Default_Test_Type int32 = 77
func (this *Test) GetLabel() string {
if this != nil && this.Label != nil {
return *this.Label
}
return ""
}
func (this *Test) GetType() int32 {
if this != nil && this.Type != nil {
return *this.Type
}
return Default_Test_Type
}
func (this *Test) GetOptionalgroup() *Test_OptionalGroup {
if this != nil {
return this.Optionalgroup
}
return nil
}
type Test_OptionalGroup struct {
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (this *Test_OptionalGroup) Reset() { *this = Test_OptionalGroup{} }
func (this *Test_OptionalGroup) String() string { return proto.CompactTextString(this) }
func (this *Test_OptionalGroup) GetRequiredField() string {
if this != nil && this.RequiredField != nil {
return *this.RequiredField
}
return ""
}
func init() {
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
}
To create and play with a Test object:
package main
import (
"log"
"github.com/golang/protobuf/proto"
"./example.pb"
)
func main() {
test := &example.Test{
Label: proto.String("hello"),
Type: proto.Int32(17),
Optionalgroup: &example.Test_OptionalGroup{
RequiredField: proto.String("good bye"),
},
}
data, err := proto.Marshal(test)
if err != nil {
log.Fatal("marshaling error: ", err)
}
newTest := new(example.Test)
err = proto.Unmarshal(data, newTest)
if err != nil {
log.Fatal("unmarshaling error: ", err)
}
// Now test and newTest contain the same data.
if test.GetLabel() != newTest.GetLabel() {
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
}
// etc.
}
*/
package proto
import (
"encoding/json"
"fmt"
"log"
"reflect"
"strconv"
"sync"
)
// Message is implemented by generated protocol buffer messages.
type Message interface {
Reset()
String() string
ProtoMessage()
}
// Stats records allocation details about the protocol buffer encoders
// and decoders. Useful for tuning the library itself.
type Stats struct {
Emalloc uint64 // mallocs in encode
Dmalloc uint64 // mallocs in decode
Encode uint64 // number of encodes
Decode uint64 // number of decodes
Chit uint64 // number of cache hits
Cmiss uint64 // number of cache misses
Size uint64 // number of sizes
}
// Set to true to enable stats collection.
const collectStats = false
var stats Stats
// GetStats returns a copy of the global Stats structure.
func GetStats() Stats { return stats }
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
// the global functions Marshal and Unmarshal create a
// temporary Buffer and are fine for most applications.
type Buffer struct {
buf []byte // encode/decode byte stream
index int // write point
// pools of basic types to amortize allocation.
bools []bool
uint32s []uint32
uint64s []uint64
// extra pools, only used with pointer_reflect.go
int32s []int32
int64s []int64
float32s []float32
float64s []float64
}
// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
return &Buffer{buf: e}
}
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
func (p *Buffer) Reset() {
p.buf = p.buf[0:0] // for reading/writing
p.index = 0 // for reading
}
// SetBuf replaces the internal buffer with the slice,
// ready for unmarshaling the contents of the slice.
func (p *Buffer) SetBuf(s []byte) {
p.buf = s
p.index = 0
}
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
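// marshalAll is an illustrative sketch, not part of the upstream file: it
// shows how a single Buffer can be reused across marshals to amortize
// allocations, as the Buffer doc comment above suggests. Buffer.Marshal is
// defined in encode.go of this package; send is an assumed callback and must
// consume the bytes before the next iteration resets the buffer.
func marshalAll(msgs []Message, send func([]byte)) error {
	var b Buffer
	for _, m := range msgs {
		b.Reset()
		if err := b.Marshal(m); err != nil {
			return err
		}
		send(b.Bytes())
	}
	return nil
}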
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
return &v
}
// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 {
return &v
}
// Int is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it, but unlike Int32
// its argument value is an int.
func Int(v int) *int32 {
p := new(int32)
*p = int32(v)
return p
}
// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 {
return &v
}
// Float32 is a helper routine that allocates a new float32 value
// to store v and returns a pointer to it.
func Float32(v float32) *float32 {
return &v
}
// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 {
return &v
}
// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 {
return &v
}
// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 {
return &v
}
// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string {
return &v
}
// EnumName is a helper function to simplify printing protocol buffer enums
// by name. Given an enum map and a value, it returns a useful string.
func EnumName(m map[int32]string, v int32) string {
s, ok := m[v]
if ok {
return s
}
return strconv.Itoa(int(v))
}
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
// from their JSON-encoded representation. Given a map from the enum's symbolic
// names to its int values, and a byte buffer containing the JSON-encoded
// value, it returns an int32 that can be cast to the enum type by the caller.
//
// The function can deal with both JSON representations, numeric and symbolic.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
if data[0] == '"' {
// New style: enums are strings.
var repr string
if err := json.Unmarshal(data, &repr); err != nil {
return -1, err
}
val, ok := m[repr]
if !ok {
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
}
return val, nil
}
// Old style: enums are ints.
var val int32
if err := json.Unmarshal(data, &val); err != nil {
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
}
return val, nil
}
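// Illustrative sketch, not part of the upstream file: generated enum types are
// assumed to wire UnmarshalJSONEnum into their UnmarshalJSON methods roughly
// like this (FOO and FOO_value come from the package documentation example
// above):
//
//	func (x *FOO) UnmarshalJSON(data []byte) error {
//		value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
//		if err != nil {
//			return err
//		}
//		*x = FOO(value)
//		return nil
//	}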
// DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging.
func (o *Buffer) DebugPrint(s string, b []byte) {
var u uint64
obuf := o.buf
index := o.index
o.buf = b
o.index = 0
depth := 0
fmt.Printf("\n--- %s ---\n", s)
out:
for {
for i := 0; i < depth; i++ {
fmt.Print(" ")
}
index := o.index
if index == len(o.buf) {
break
}
op, err := o.DecodeVarint()
if err != nil {
fmt.Printf("%3d: fetching op err %v\n", index, err)
break out
}
tag := op >> 3
wire := op & 7
switch wire {
default:
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
index, tag, wire)
break out
case WireBytes:
var r []byte
r, err = o.DecodeRawBytes(false)
if err != nil {
break out
}
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
if len(r) <= 6 {
for i := 0; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
} else {
for i := 0; i < 3; i++ {
fmt.Printf(" %.2x", r[i])
}
fmt.Printf(" ..")
for i := len(r) - 3; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
}
fmt.Printf("\n")
case WireFixed32:
u, err = o.DecodeFixed32()
if err != nil {
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
case WireFixed64:
u, err = o.DecodeFixed64()
if err != nil {
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
case WireVarint:
u, err = o.DecodeVarint()
if err != nil {
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
case WireStartGroup:
if err != nil {
fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d start\n", index, tag)
depth++
case WireEndGroup:
depth--
if err != nil {
fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d end\n", index, tag)
}
}
if depth != 0 {
fmt.Printf("%3d: start-end not balanced %d\n", o.index, depth)
}
fmt.Printf("\n")
o.buf = obuf
o.index = index
}
// SetDefaults sets unset protocol buffer fields to their default values.
// It only modifies fields that are both unset and have defined defaults.
// It recursively sets default values in any non-nil sub-messages.
func SetDefaults(pb Message) {
setDefaults(reflect.ValueOf(pb), true, false)
}
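// Illustrative usage sketch, not part of the upstream file (example.Test is
// the message from the package documentation example; data is assumed to hold
// its wire encoding):
//
//	msg := new(example.Test)
//	if err := proto.Unmarshal(data, msg); err != nil {
//		return err
//	}
//	// Fill in declared defaults (e.g. Type's default of 77) for any fields
//	// left unset on the wire, so direct field reads agree with the getters.
//	proto.SetDefaults(msg)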
// v is a pointer to a struct.
func setDefaults(v reflect.Value, recur, zeros bool) {
v = v.Elem()
defaultMu.RLock()
dm, ok := defaults[v.Type()]
defaultMu.RUnlock()
if !ok {
dm = buildDefaultMessage(v.Type())
defaultMu.Lock()
defaults[v.Type()] = dm
defaultMu.Unlock()
}
for _, sf := range dm.scalars {
f := v.Field(sf.index)
if !f.IsNil() {
// field already set
continue
}
dv := sf.value
if dv == nil && !zeros {
// no explicit default, and don't want to set zeros
continue
}
fptr := f.Addr().Interface() // **T
// TODO: Consider batching the allocations we do here.
switch sf.kind {
case reflect.Bool:
b := new(bool)
if dv != nil {
*b = dv.(bool)
}
*(fptr.(**bool)) = b
case reflect.Float32:
f := new(float32)
if dv != nil {
*f = dv.(float32)
}
*(fptr.(**float32)) = f
case reflect.Float64:
f := new(float64)
if dv != nil {
*f = dv.(float64)
}
*(fptr.(**float64)) = f
case reflect.Int32:
// might be an enum
if ft := f.Type(); ft != int32PtrType {
// enum
f.Set(reflect.New(ft.Elem()))
if dv != nil {
f.Elem().SetInt(int64(dv.(int32)))
}
} else {
// int32 field
i := new(int32)
if dv != nil {
*i = dv.(int32)
}
*(fptr.(**int32)) = i
}
case reflect.Int64:
i := new(int64)
if dv != nil {
*i = dv.(int64)
}
*(fptr.(**int64)) = i
case reflect.String:
s := new(string)
if dv != nil {
*s = dv.(string)
}
*(fptr.(**string)) = s
case reflect.Uint8:
// exceptional case: []byte
var b []byte
if dv != nil {
db := dv.([]byte)
b = make([]byte, len(db))
copy(b, db)
} else {
b = []byte{}
}
*(fptr.(*[]byte)) = b
case reflect.Uint32:
u := new(uint32)
if dv != nil {
*u = dv.(uint32)
}
*(fptr.(**uint32)) = u
case reflect.Uint64:
u := new(uint64)
if dv != nil {
*u = dv.(uint64)
}
*(fptr.(**uint64)) = u
default:
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
}
}
for _, ni := range dm.nested {
f := v.Field(ni)
if f.IsNil() {
continue
}
// f is *T or []*T
if f.Kind() == reflect.Ptr {
setDefaults(f, recur, zeros)
} else {
for i := 0; i < f.Len(); i++ {
e := f.Index(i)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
}
}
}
var (
// defaults maps a protocol buffer struct type to a slice of the fields,
// with its scalar fields set to their proto-declared non-zero default values.
defaultMu sync.RWMutex
defaults = make(map[reflect.Type]defaultMessage)
int32PtrType = reflect.TypeOf((*int32)(nil))
)
// defaultMessage represents information about the default values of a message.
type defaultMessage struct {
scalars []scalarField
nested []int // struct field index of nested messages
}
type scalarField struct {
index int // struct field index
kind reflect.Kind // element type (the T in *T or []T)
value interface{} // the proto-declared default value, or nil
}
func ptrToStruct(t reflect.Type) bool {
return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}
// t is a struct type.
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
sprop := GetProperties(t)
for _, prop := range sprop.Prop {
fi, ok := sprop.decoderTags.get(prop.Tag)
if !ok {
// XXX_unrecognized
continue
}
ft := t.Field(fi).Type
// nested messages
if ptrToStruct(ft) || (ft.Kind() == reflect.Slice && ptrToStruct(ft.Elem())) {
dm.nested = append(dm.nested, fi)
continue
}
sf := scalarField{
index: fi,
kind: ft.Elem().Kind(),
}
// scalar fields without defaults
if !prop.HasDefault {
dm.scalars = append(dm.scalars, sf)
continue
}
// a scalar field: either *T or []byte
switch ft.Elem().Kind() {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
log.Printf("proto: bad default bool %q: %v", prop.Default, err)
continue
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
log.Printf("proto: bad default float32 %q: %v", prop.Default, err)
continue
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
log.Printf("proto: bad default float64 %q: %v", prop.Default, err)
continue
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
log.Printf("proto: bad default int32 %q: %v", prop.Default, err)
continue
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
log.Printf("proto: bad default int64 %q: %v", prop.Default, err)
continue
}
sf.value = x
case reflect.String:
sf.value = prop.Default
case reflect.Uint8:
// []byte (not *uint8)
sf.value = []byte(prop.Default)
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
log.Printf("proto: bad default uint32 %q: %v", prop.Default, err)
continue
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
log.Printf("proto: bad default uint64 %q: %v", prop.Default, err)
continue
}
sf.value = x
default:
log.Printf("proto: unhandled def kind %v", ft.Elem().Kind())
continue
}
dm.scalars = append(dm.scalars, sf)
}
return dm
}
// Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.
type mapKeys []reflect.Value
func (s mapKeys) Len() int { return len(s) }
func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s mapKeys) Less(i, j int) bool {
return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
}

View File

@ -0,0 +1,287 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Support for message sets.
*/
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
)
// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set.
var ErrNoMessageTypeId = errors.New("proto does not have a message type ID")
// The first two types (_MessageSet_Item and MessageSet)
// model what the protocol compiler produces for the following protocol message:
// message MessageSet {
// repeated group Item = 1 {
// required int32 type_id = 2;
// required string message = 3;
// };
// }
// That is the MessageSet wire format. We can't use a proto to generate these
// because that would introduce a circular dependency between it and this package.
//
// When a proto1 proto has a field that looks like:
// optional message<MessageSet> info = 3;
// the protocol compiler produces a field in the generated struct that looks like:
// Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"`
// The package is automatically inserted so there is no need for that proto file to
// import this package.
type _MessageSet_Item struct {
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
Message []byte `protobuf:"bytes,3,req,name=message"`
}
type MessageSet struct {
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
XXX_unrecognized []byte
// TODO: caching?
}
// Make sure MessageSet is a Message.
var _ Message = (*MessageSet)(nil)
// messageTypeIder is an interface satisfied by a protocol buffer type
// that may be stored in a MessageSet.
type messageTypeIder interface {
MessageTypeId() int32
}
func (ms *MessageSet) find(pb Message) *_MessageSet_Item {
mti, ok := pb.(messageTypeIder)
if !ok {
return nil
}
id := mti.MessageTypeId()
for _, item := range ms.Item {
if *item.TypeId == id {
return item
}
}
return nil
}
func (ms *MessageSet) Has(pb Message) bool {
if ms.find(pb) != nil {
return true
}
return false
}
func (ms *MessageSet) Unmarshal(pb Message) error {
if item := ms.find(pb); item != nil {
return Unmarshal(item.Message, pb)
}
if _, ok := pb.(messageTypeIder); !ok {
return ErrNoMessageTypeId
}
return nil // TODO: return error instead?
}
func (ms *MessageSet) Marshal(pb Message) error {
msg, err := Marshal(pb)
if err != nil {
return err
}
if item := ms.find(pb); item != nil {
// reuse existing item
item.Message = msg
return nil
}
mti, ok := pb.(messageTypeIder)
if !ok {
return ErrNoMessageTypeId
}
mtid := mti.MessageTypeId()
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: &mtid,
Message: msg,
})
return nil
}
func (ms *MessageSet) Reset() { *ms = MessageSet{} }
func (ms *MessageSet) String() string { return CompactTextString(ms) }
func (*MessageSet) ProtoMessage() {}
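// Illustrative sketch, not part of the upstream file: storing and retrieving a
// message in a MessageSet. The generated type pb.Info is hypothetical and is
// assumed to implement messageTypeIder via a MessageTypeId method.
//
//	info := &pb.Info{Data: proto.String("payload")}
//	var set proto.MessageSet
//	if err := set.Marshal(info); err != nil {
//		return err
//	}
//	if set.Has(info) {
//		// Unmarshal decodes the stored bytes back into info.
//		if err := set.Unmarshal(info); err != nil {
//			return err
//		}
//	}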
// Support for the message_set_wire_format message option.
func skipVarint(buf []byte) []byte {
i := 0
for ; buf[i]&0x80 != 0; i++ {
}
return buf[i+1:]
}
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
if err := encodeExtensionMap(m); err != nil {
return nil, err
}
// Sort extension IDs to provide a deterministic encoding.
// See also enc_map in encode.go.
ids := make([]int, 0, len(m))
for id := range m {
ids = append(ids, int(id))
}
sort.Ints(ids)
ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
for _, id := range ids {
e := m[int32(id)]
// Remove the wire type and field number varint, as well as the length varint.
msg := skipVarint(skipVarint(e.enc))
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: Int32(int32(id)),
Message: msg,
})
}
return Marshal(ms)
}
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
ms := new(MessageSet)
if err := Unmarshal(buf, ms); err != nil {
return err
}
for _, item := range ms.Item {
id := *item.TypeId
msg := item.Message
// Restore wire type and field number varint, plus length varint.
// Be careful to preserve duplicate items.
b := EncodeVarint(uint64(id)<<3 | WireBytes)
if ext, ok := m[id]; ok {
// Existing data; rip off the tag and length varint
// so we join the new data correctly.
// We can assume that ext.enc is set because we are unmarshaling.
o := ext.enc[len(b):] // skip wire type and field number
_, n := DecodeVarint(o) // calculate length of length varint
o = o[n:] // skip length varint
msg = append(o, msg...) // join old data and new data
}
b = append(b, EncodeVarint(uint64(len(msg)))...)
b = append(b, msg...)
m[id] = Extension{enc: b}
}
return nil
}
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
var b bytes.Buffer
b.WriteByte('{')
// Process the map in key order for deterministic output.
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
for i, id := range ids {
ext := m[id]
if i > 0 {
b.WriteByte(',')
}
msd, ok := messageSetMap[id]
if !ok {
// Unknown type; we can't render it, so skip it.
continue
}
fmt.Fprintf(&b, `"[%s]":`, msd.name)
x := ext.value
if x == nil {
x = reflect.New(msd.t.Elem()).Interface()
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
return nil, err
}
}
d, err := json.Marshal(x)
if err != nil {
return nil, err
}
b.Write(d)
}
b.WriteByte('}')
return b.Bytes(), nil
}
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
// Common-case fast path.
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
return nil
}
// This is fairly tricky, and it's not clear that it is needed.
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}
// A global registry of types that can be used in a MessageSet.
var messageSetMap = make(map[int32]messageSetDesc)
type messageSetDesc struct {
t reflect.Type // pointer to struct
name string
}
// RegisterMessageSetType is called from the generated code.
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
messageSetMap[fieldNum] = messageSetDesc{
t: reflect.TypeOf(m),
name: name,
}
}

View File

@ -0,0 +1,66 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2014 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"bytes"
"testing"
)
func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
// Check that a repeated message set entry will be concatenated.
in := &MessageSet{
Item: []*_MessageSet_Item{
{TypeId: Int32(12345), Message: []byte("hoo")},
{TypeId: Int32(12345), Message: []byte("hah")},
},
}
b, err := Marshal(in)
if err != nil {
t.Fatalf("Marshal: %v", err)
}
t.Logf("Marshaled bytes: %q", b)
m := make(map[int32]Extension)
if err := UnmarshalMessageSet(b, m); err != nil {
t.Fatalf("UnmarshalMessageSet: %v", err)
}
ext, ok := m[12345]
if !ok {
t.Fatalf("Didn't retrieve extension 12345; map is %v", m)
}
// Skip wire type/field number and length varints.
got := skipVarint(skipVarint(ext.enc))
if want := []byte("hoohah"); !bytes.Equal(got, want) {
t.Errorf("Combined extension is %q, want %q", got, want)
}
}

View File

@ -0,0 +1,479 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build appengine,!appenginevm
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.
package proto
import (
"math"
"reflect"
)
// A structPointer is a pointer to a struct.
type structPointer struct {
v reflect.Value
}
// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
return structPointer{v}
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p.v.IsNil()
}
// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
return p.v.Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return f.Index
}
// invalidField is an invalid field identifier.
var invalidField = field(nil)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
// Special case: an extension map entry with a value of type T
// passes a *T to the struct-handling code with a zero field,
// expecting that it will be treated as equivalent to *struct{ X T },
// which has the same memory layout. We have to handle that case
// specially, because reflect will panic if we call FieldByIndex on a
// non-struct.
if f == nil {
return p.v.Elem()
}
return p.v.Elem().FieldByIndex(f)
}
// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
return structPointer_field(p, f).Addr().Interface()
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return structPointer_ifield(p, f).(*[]byte)
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return structPointer_ifield(p, f).(*[][]byte)
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return structPointer_ifield(p, f).(**bool)
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return structPointer_ifield(p, f).(*bool)
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return structPointer_ifield(p, f).(*[]bool)
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return structPointer_ifield(p, f).(**string)
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return structPointer_ifield(p, f).(*string)
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return structPointer_ifield(p, f).(*[]string)
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return structPointer_ifield(p, f).(*map[int32]Extension)
}
// Map returns the reflect.Value for the address of a map field in the struct.
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
return structPointer_field(p, f).Addr()
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
structPointer_field(p, f).Set(q.v)
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return structPointer{structPointer_field(p, f)}
}
// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
return structPointerSlice{structPointer_field(p, f)}
}
// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
v reflect.Value
}
func (p structPointerSlice) Len() int { return p.v.Len() }
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
func (p structPointerSlice) Append(q structPointer) {
p.v.Set(reflect.Append(p.v, q.v))
}
var (
int32Type = reflect.TypeOf(int32(0))
uint32Type = reflect.TypeOf(uint32(0))
float32Type = reflect.TypeOf(float32(0))
int64Type = reflect.TypeOf(int64(0))
uint64Type = reflect.TypeOf(uint64(0))
float64Type = reflect.TypeOf(float64(0))
)
// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
v reflect.Value
}
// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
return p.v.IsNil()
}
// Set sets p to point at a newly allocated word with bits set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
t := p.v.Type().Elem()
switch t {
case int32Type:
if len(o.int32s) == 0 {
o.int32s = make([]int32, uint32PoolSize)
}
o.int32s[0] = int32(x)
p.v.Set(reflect.ValueOf(&o.int32s[0]))
o.int32s = o.int32s[1:]
return
case uint32Type:
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
p.v.Set(reflect.ValueOf(&o.uint32s[0]))
o.uint32s = o.uint32s[1:]
return
case float32Type:
if len(o.float32s) == 0 {
o.float32s = make([]float32, uint32PoolSize)
}
o.float32s[0] = math.Float32frombits(x)
p.v.Set(reflect.ValueOf(&o.float32s[0]))
o.float32s = o.float32s[1:]
return
}
// must be enum
p.v.Set(reflect.New(t))
p.v.Elem().SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32{structPointer_field(p, f)}
}
// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
v reflect.Value
}
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
switch p.v.Type() {
case int32Type:
p.v.SetInt(int64(x))
return
case uint32Type:
p.v.SetUint(uint64(x))
return
case float32Type:
p.v.SetFloat(float64(math.Float32frombits(x)))
return
}
// must be enum
p.v.SetInt(int64(int32(x)))
}
// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
elem := p.v
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val{structPointer_field(p, f)}
}
// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
v reflect.Value
}
func (p word32Slice) Append(x uint32) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int32:
elem.SetInt(int64(int32(x)))
case reflect.Uint32:
elem.SetUint(uint64(x))
case reflect.Float32:
elem.SetFloat(float64(math.Float32frombits(x)))
}
}
func (p word32Slice) Len() int {
return p.v.Len()
}
func (p word32Slice) Index(i int) uint32 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int32:
return uint32(elem.Int())
case reflect.Uint32:
return uint32(elem.Uint())
case reflect.Float32:
return math.Float32bits(float32(elem.Float()))
}
panic("unreachable")
}
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
return word32Slice{structPointer_field(p, f)}
}
// word64 is like word32 but for 64-bit values.
type word64 struct {
v reflect.Value
}
func word64_Set(p word64, o *Buffer, x uint64) {
t := p.v.Type().Elem()
switch t {
case int64Type:
if len(o.int64s) == 0 {
o.int64s = make([]int64, uint64PoolSize)
}
o.int64s[0] = int64(x)
p.v.Set(reflect.ValueOf(&o.int64s[0]))
o.int64s = o.int64s[1:]
return
case uint64Type:
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
p.v.Set(reflect.ValueOf(&o.uint64s[0]))
o.uint64s = o.uint64s[1:]
return
case float64Type:
if len(o.float64s) == 0 {
o.float64s = make([]float64, uint64PoolSize)
}
o.float64s[0] = math.Float64frombits(x)
p.v.Set(reflect.ValueOf(&o.float64s[0]))
o.float64s = o.float64s[1:]
return
}
panic("unreachable")
}
func word64_IsNil(p word64) bool {
return p.v.IsNil()
}
func word64_Get(p word64) uint64 {
elem := p.v.Elem()
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64{structPointer_field(p, f)}
}
// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
v reflect.Value
}
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
switch p.v.Type() {
case int64Type:
p.v.SetInt(int64(x))
return
case uint64Type:
p.v.SetUint(x)
return
case float64Type:
p.v.SetFloat(math.Float64frombits(x))
return
}
panic("unreachable")
}
func word64Val_Get(p word64Val) uint64 {
elem := p.v
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val{structPointer_field(p, f)}
}
type word64Slice struct {
v reflect.Value
}
func (p word64Slice) Append(x uint64) {
n, m := p.v.Len(), p.v.Cap()
if n < m {
p.v.SetLen(n + 1)
} else {
t := p.v.Type().Elem()
p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
}
elem := p.v.Index(n)
switch elem.Kind() {
case reflect.Int64:
elem.SetInt(int64(x))
case reflect.Uint64:
elem.SetUint(x)
case reflect.Float64:
elem.SetFloat(math.Float64frombits(x))
}
}
func (p word64Slice) Len() int {
return p.v.Len()
}
func (p word64Slice) Index(i int) uint64 {
elem := p.v.Index(i)
switch elem.Kind() {
case reflect.Int64:
return uint64(elem.Int())
case reflect.Uint64:
return elem.Uint()
case reflect.Float64:
return math.Float64bits(elem.Float())
}
panic("unreachable")
}
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
return word64Slice{structPointer_field(p, f)}
}

View File

@ -0,0 +1,266 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !appengine appenginevm
// This file contains the implementation of the proto field accesses using package unsafe.
package proto
import (
"reflect"
"unsafe"
)
// NOTE: These type_Foo functions would more idiomatically be methods,
// but Go does not allow methods on pointer types, and we must preserve
// some pointer type for the garbage collector. We use these
// funcs with clunky names as our poor approximation to methods.
//
// An alternative would be
// type structPointer struct { p unsafe.Pointer }
// but that does not registerize as well.
// A structPointer is a pointer to a struct.
type structPointer unsafe.Pointer
// toStructPointer returns a structPointer equivalent to the given reflect value.
func toStructPointer(v reflect.Value) structPointer {
return structPointer(unsafe.Pointer(v.Pointer()))
}
// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
return p == nil
}
// Interface returns the struct pointer, assumed to have element type t,
// as an interface value.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return field(f.Offset)
}
// invalidField is an invalid field identifier.
const invalidField = ^field(0)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
return f != ^field(0)
}
// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// Map returns the reflect.Value for the address of a map field in the struct.
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}
// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// StructPointerSlice returns the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer
func (v *structPointerSlice) Len() int { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
// A word32 is the address of a "pointer to 32-bit value" field.
type word32 **uint32
// IsNil reports whether *p is nil.
func word32_IsNil(p word32) bool {
return *p == nil
}
// Set sets *p to point at a newly allocated word set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
if len(o.uint32s) == 0 {
o.uint32s = make([]uint32, uint32PoolSize)
}
o.uint32s[0] = x
*p = &o.uint32s[0]
o.uint32s = o.uint32s[1:]
}
// Get gets the value pointed at by *p.
func word32_Get(p word32) uint32 {
return **p
}
// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// A word32Val is the address of a 32-bit value field.
type word32Val *uint32
// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
*p = x
}
// Get gets the value pointed at by p.
func word32Val_Get(p word32Val) uint32 {
return *p
}
// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32
func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
func (v *word32Slice) Len() int { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
// word64 is like word32 but for 64-bit values.
type word64 **uint64
func word64_Set(p word64, o *Buffer, x uint64) {
if len(o.uint64s) == 0 {
o.uint64s = make([]uint64, uint64PoolSize)
}
o.uint64s[0] = x
*p = &o.uint64s[0]
o.uint64s = o.uint64s[1:]
}
func word64_IsNil(p word64) bool {
return *p == nil
}
func word64_Get(p word64) uint64 {
return **p
}
func structPointer_Word64(p structPointer, f field) word64 {
return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// word64Val is like word32Val but for 64-bit values.
type word64Val *uint64
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
*p = x
}
func word64Val_Get(p word64Val) uint64 {
return *p
}
func structPointer_Word64Val(p structPointer, f field) word64Val {
return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
// word64Slice is like word32Slice but for 64-bit values.
type word64Slice []uint64
func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
func (v *word64Slice) Len() int { return len(*v) }
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
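To make the offset arithmetic above concrete, here is a minimal, hypothetical sketch (not part of the vendored file; the type example and the function readName are invented for illustration) of how reflect supplies a field's byte offset and the structPointer_* helpers reinterpret base+offset as a typed pointer:
// Illustration only: reads a *string field of a struct via its byte offset.
type example struct {
	Name *string
}

func readName(m *example) *string {
	base := toStructPointer(reflect.ValueOf(m))     // structPointer to *m
	sf, _ := reflect.TypeOf(*m).FieldByName("Name") // reflect.StructField for Name
	pp := structPointer_String(base, toField(&sf))  // **string at base + offset
	return *pp
}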

View File

@ -0,0 +1,724 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for encoding data into the wire format for protocol buffers.
*/
import (
"fmt"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync"
)
const debug bool = false
// Constants that identify the encoding of a value on the wire.
const (
WireVarint = 0
WireFixed64 = 1
WireBytes = 2
WireStartGroup = 3
WireEndGroup = 4
WireFixed32 = 5
)
const startSize = 10 // initial slice/string sizes
// Encoders are defined in encode.go
// An encoder outputs the full representation of a field, including its
// tag and encoder type.
type encoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueEncoder encodes a single integer in a particular encoding.
type valueEncoder func(o *Buffer, x uint64) error
// Sizers are defined in encode.go
// A sizer returns the encoded size of a field, including its tag and encoder
// type.
type sizer func(prop *Properties, base structPointer) int
// A valueSizer returns the encoded size of a single integer in a particular
// encoding.
type valueSizer func(x uint64) int
// Decoders are defined in decode.go
// A decoder creates a value from its wire representation.
// Unrecognized subelements are saved in unrec.
type decoder func(p *Buffer, prop *Properties, base structPointer) error
// A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error)
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
type tagMap struct {
fastTags []int
slowTags map[int]int
}
// tagMapFastLimit is the upper bound on the tag number that will be stored in
// the tagMap slice rather than its map.
const tagMapFastLimit = 1024
func (p *tagMap) get(t int) (int, bool) {
if t > 0 && t < tagMapFastLimit {
if t >= len(p.fastTags) {
return 0, false
}
fi := p.fastTags[t]
return fi, fi >= 0
}
fi, ok := p.slowTags[t]
return fi, ok
}
func (p *tagMap) put(t int, fi int) {
if t > 0 && t < tagMapFastLimit {
for len(p.fastTags) < t+1 {
p.fastTags = append(p.fastTags, -1)
}
p.fastTags[t] = fi
return
}
if p.slowTags == nil {
p.slowTags = make(map[int]int)
}
p.slowTags[t] = fi
}
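A quick hypothetical usage sketch of the fast/slow split described above (not part of the vendored code): small tags land in the fastTags slice, anything at or above tagMapFastLimit falls back to the map.
var tm tagMap
tm.put(3, 0)    // tag 3 < tagMapFastLimit: stored in the fastTags slice
tm.put(5000, 1) // tag 5000 >= tagMapFastLimit: stored in the slowTags map
if fi, ok := tm.get(3); ok {
	_ = fi // 0
}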
// StructProperties represents properties for all the fields of a struct.
// decoderTags and decoderOrigNames should only be used by the decoder.
type StructProperties struct {
Prop []*Properties // properties for each field
reqCount int // required count
decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order
unrecField field // field id of the XXX_unrecognized []byte field
extendable bool // is this an extendable proto
}
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
// See encode.go, (*Buffer).enc_struct.
func (sp *StructProperties) Len() int { return len(sp.order) }
func (sp *StructProperties) Less(i, j int) bool {
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
}
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
// Properties represents the protocol-specific behavior of a single struct field.
type Properties struct {
Name string // name of the field, for error messages
OrigName string // original name before protocol compiler (always set)
Wire string
WireType int
Tag int
Required bool
Optional bool
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field; set for []byte only
Default string // default value
HasDefault bool // whether an explicit default was provided
def_uint64 uint64
enc encoder
valEnc valueEncoder // set for bool and numeric types only
field field
tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
tagbuf [8]byte
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
isMarshaler bool
isUnmarshaler bool
mtype reflect.Type // set for map types only
mkeyprop *Properties // set for map types only
mvalprop *Properties // set for map types only
size sizer
valSize valueSizer // set for bool and numeric types only
dec decoder
valDec valueDecoder // set for bool and numeric types only
// If this is a packable field, this will be the decoder for the packed version of the field.
packedDec decoder
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
s = ","
s += strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
if p.Optional {
s += ",opt"
}
if p.Repeated {
s += ",rep"
}
if p.Packed {
s += ",packed"
}
if p.OrigName != p.Name {
s += ",name=" + p.OrigName
}
if p.proto3 {
s += ",proto3"
}
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
if p.HasDefault {
s += ",def=" + p.Default
}
return s
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(s string) {
// "bytes,49,opt,name=foo,def=hello!"
fields := strings.Split(s, ",") // breaks def=, but handled below.
if len(fields) < 2 {
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
return
}
p.Wire = fields[0]
switch p.Wire {
case "varint":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeVarint
p.valDec = (*Buffer).DecodeVarint
p.valSize = sizeVarint
case "fixed32":
p.WireType = WireFixed32
p.valEnc = (*Buffer).EncodeFixed32
p.valDec = (*Buffer).DecodeFixed32
p.valSize = sizeFixed32
case "fixed64":
p.WireType = WireFixed64
p.valEnc = (*Buffer).EncodeFixed64
p.valDec = (*Buffer).DecodeFixed64
p.valSize = sizeFixed64
case "zigzag32":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag32
p.valDec = (*Buffer).DecodeZigzag32
p.valSize = sizeZigzag32
case "zigzag64":
p.WireType = WireVarint
p.valEnc = (*Buffer).EncodeZigzag64
p.valDec = (*Buffer).DecodeZigzag64
p.valSize = sizeZigzag64
case "bytes", "group":
p.WireType = WireBytes
// no numeric converter for non-numeric types
default:
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
return
}
var err error
p.Tag, err = strconv.Atoi(fields[1])
if err != nil {
return
}
for i := 2; i < len(fields); i++ {
f := fields[i]
switch {
case f == "req":
p.Required = true
case f == "opt":
p.Optional = true
case f == "rep":
p.Repeated = true
case f == "packed":
p.Packed = true
case strings.HasPrefix(f, "name="):
p.OrigName = f[5:]
case strings.HasPrefix(f, "enum="):
p.Enum = f[5:]
case f == "proto3":
p.proto3 = true
case strings.HasPrefix(f, "def="):
p.HasDefault = true
p.Default = f[4:] // rest of string
if i+1 < len(fields) {
// Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",")
break
}
}
}
}
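For example, parsing a typical generated struct tag (a hypothetical snippet using the exported Properties type):
var p Properties
p.Parse("bytes,49,opt,name=foo,def=hello!")
// p.Wire == "bytes", p.WireType == WireBytes, p.Tag == 49,
// p.Optional == true, p.OrigName == "foo",
// p.HasDefault == true, p.Default == "hello!"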
func logNoSliceEnc(t1, t2 reflect.Type) {
fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
}
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
// Initialize the fields for encoding and decoding.
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
p.enc = nil
p.dec = nil
p.size = nil
switch t1 := typ; t1.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
// proto3 scalar types
case reflect.Bool:
p.enc = (*Buffer).enc_proto3_bool
p.dec = (*Buffer).dec_proto3_bool
p.size = size_proto3_bool
case reflect.Int32:
p.enc = (*Buffer).enc_proto3_int32
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_proto3_uint32
p.dec = (*Buffer).dec_proto3_int32 // can reuse
p.size = size_proto3_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_proto3_int64
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.Float32:
p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int32
p.size = size_proto3_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
p.dec = (*Buffer).dec_proto3_int64
p.size = size_proto3_int64
case reflect.String:
p.enc = (*Buffer).enc_proto3_string
p.dec = (*Buffer).dec_proto3_string
p.size = size_proto3_string
case reflect.Ptr:
switch t2 := t1.Elem(); t2.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
break
case reflect.Bool:
p.enc = (*Buffer).enc_bool
p.dec = (*Buffer).dec_bool
p.size = size_bool
case reflect.Int32:
p.enc = (*Buffer).enc_int32
p.dec = (*Buffer).dec_int32
p.size = size_int32
case reflect.Uint32:
p.enc = (*Buffer).enc_uint32
p.dec = (*Buffer).dec_int32 // can reuse
p.size = size_uint32
case reflect.Int64, reflect.Uint64:
p.enc = (*Buffer).enc_int64
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.Float32:
p.enc = (*Buffer).enc_uint32 // can just treat them as bits
p.dec = (*Buffer).dec_int32
p.size = size_uint32
case reflect.Float64:
p.enc = (*Buffer).enc_int64 // can just treat them as bits
p.dec = (*Buffer).dec_int64
p.size = size_int64
case reflect.String:
p.enc = (*Buffer).enc_string
p.dec = (*Buffer).dec_string
p.size = size_string
case reflect.Struct:
p.stype = t1.Elem()
p.isMarshaler = isMarshaler(t1)
p.isUnmarshaler = isUnmarshaler(t1)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_struct_message
p.dec = (*Buffer).dec_struct_message
p.size = size_struct_message
} else {
p.enc = (*Buffer).enc_struct_group
p.dec = (*Buffer).dec_struct_group
p.size = size_struct_group
}
}
case reflect.Slice:
switch t2 := t1.Elem(); t2.Kind() {
default:
logNoSliceEnc(t1, t2)
break
case reflect.Bool:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_bool
p.size = size_slice_packed_bool
} else {
p.enc = (*Buffer).enc_slice_bool
p.size = size_slice_bool
}
p.dec = (*Buffer).dec_slice_bool
p.packedDec = (*Buffer).dec_slice_packed_bool
case reflect.Int32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int32
p.size = size_slice_packed_int32
} else {
p.enc = (*Buffer).enc_slice_int32
p.size = size_slice_int32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Uint32:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case reflect.Int64, reflect.Uint64:
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_byte
p.dec = (*Buffer).dec_slice_byte
p.size = size_slice_byte
if p.proto3 {
p.enc = (*Buffer).enc_proto3_slice_byte
p.size = size_proto3_slice_byte
}
case reflect.Float32, reflect.Float64:
switch t2.Bits() {
case 32:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_uint32
p.size = size_slice_packed_uint32
} else {
p.enc = (*Buffer).enc_slice_uint32
p.size = size_slice_uint32
}
p.dec = (*Buffer).dec_slice_int32
p.packedDec = (*Buffer).dec_slice_packed_int32
case 64:
// can just treat them as bits
if p.Packed {
p.enc = (*Buffer).enc_slice_packed_int64
p.size = size_slice_packed_int64
} else {
p.enc = (*Buffer).enc_slice_int64
p.size = size_slice_int64
}
p.dec = (*Buffer).dec_slice_int64
p.packedDec = (*Buffer).dec_slice_packed_int64
default:
logNoSliceEnc(t1, t2)
break
}
case reflect.String:
p.enc = (*Buffer).enc_slice_string
p.dec = (*Buffer).dec_slice_string
p.size = size_slice_string
case reflect.Ptr:
switch t3 := t2.Elem(); t3.Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
break
case reflect.Struct:
p.stype = t2.Elem()
p.isMarshaler = isMarshaler(t2)
p.isUnmarshaler = isUnmarshaler(t2)
if p.Wire == "bytes" {
p.enc = (*Buffer).enc_slice_struct_message
p.dec = (*Buffer).dec_slice_struct_message
p.size = size_slice_struct_message
} else {
p.enc = (*Buffer).enc_slice_struct_group
p.dec = (*Buffer).dec_slice_struct_group
p.size = size_slice_struct_group
}
}
case reflect.Slice:
switch t2.Elem().Kind() {
default:
fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
break
case reflect.Uint8:
p.enc = (*Buffer).enc_slice_slice_byte
p.dec = (*Buffer).dec_slice_slice_byte
p.size = size_slice_slice_byte
}
}
case reflect.Map:
p.enc = (*Buffer).enc_new_map
p.dec = (*Buffer).dec_new_map
p.size = size_new_map
p.mtype = t1
p.mkeyprop = &Properties{}
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.mvalprop = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
// precalculate tag code
wire := p.WireType
if p.Packed {
wire = WireBytes
}
x := uint32(p.Tag)<<3 | uint32(wire)
i := 0
for i = 0; x > 127; i++ {
p.tagbuf[i] = 0x80 | uint8(x&0x7F)
x >>= 7
}
p.tagbuf[i] = uint8(x)
p.tagcode = p.tagbuf[0 : i+1]
if p.stype != nil {
if lockGetProp {
p.sprop = GetProperties(p.stype)
} else {
p.sprop = getPropertiesLocked(p.stype)
}
}
}
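As a worked example of the tag-code precalculation near the end of setEncAndDec (my own illustration, not vendored code): for Tag = 49 and wire type WireBytes, the loop varint-encodes 49<<3 | 2 = 394 into two bytes.
x := uint32(49)<<3 | uint32(WireBytes) // 394
// varint(394) = 0x8a 0x03, so p.tagcode becomes []byte{0x8a, 0x03}
_ = x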
var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
)
// isMarshaler reports whether type t implements Marshaler.
func isMarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isMarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isMarshaler")
}
return t.Implements(marshalerType)
}
// isUnmarshaler reports whether type t implements Unmarshaler.
func isUnmarshaler(t reflect.Type) bool {
// We're checking for (likely) pointer-receiver methods
// so if t is not a pointer, something is very wrong.
// The calls above only invoke isUnmarshaler on pointer types.
if t.Kind() != reflect.Ptr {
panic("proto: misuse of isUnmarshaler")
}
return t.Implements(unmarshalerType)
}
// Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.init(typ, name, tag, f, true)
}
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
// "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
if f != nil {
p.field = toField(f)
}
if tag == "" {
return
}
p.Parse(tag)
p.setEncAndDec(typ, f, lockGetProp)
}
var (
mutex sync.Mutex
propertiesMap = make(map[reflect.Type]*StructProperties)
)
// GetProperties returns the list of properties for the type represented by t.
// t must represent a generated struct type of a protocol message.
func GetProperties(t reflect.Type) *StructProperties {
if t.Kind() != reflect.Struct {
panic("proto: type must have kind struct")
}
mutex.Lock()
sprop := getPropertiesLocked(t)
mutex.Unlock()
return sprop
}
// getPropertiesLocked requires that mutex is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
if collectStats {
stats.Chit++
}
return prop
}
if collectStats {
stats.Cmiss++
}
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
propertiesMap[t] = prop
// build properties
prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
prop.unrecField = invalidField
prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField())
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
p := new(Properties)
name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
if f.Name == "XXX_extensions" { // special case
p.enc = (*Buffer).enc_map
p.dec = nil // not needed
p.size = size_map
}
if f.Name == "XXX_unrecognized" { // special case
prop.unrecField = toField(&f)
}
prop.Prop[i] = p
prop.order[i] = i
if debug {
print(i, " ", f.Name, " ", t.String(), " ")
if p.Tag > 0 {
print(p.String())
}
print("\n")
}
if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") {
fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
}
}
// Re-order prop.order.
sort.Sort(prop)
// build required counts
// build tags
reqCount := 0
prop.decoderOrigNames = make(map[string]int)
for i, p := range prop.Prop {
if strings.HasPrefix(p.Name, "XXX_") {
// Internal fields should not appear in tags/origNames maps.
// They are handled specially when encoding and decoding.
continue
}
if p.Required {
reqCount++
}
prop.decoderTags.put(p.Tag, i)
prop.decoderOrigNames[p.OrigName] = i
}
prop.reqCount = reqCount
return prop
}
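A hedged usage sketch of GetProperties (the message type pb.MyMessage is assumed from the testdata package shown later in this diff; output details are illustrative):
sprops := proto.GetProperties(reflect.TypeOf(pb.MyMessage{}))
for _, prop := range sprops.Prop {
	fmt.Println(prop.OrigName, prop.String()) // field name and its tag-style properties
}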
// Return the Properties object for the x[0]'th field of the structure.
func propByIndex(t reflect.Type, x []int) *Properties {
if len(x) != 1 {
fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
return nil
}
prop := GetProperties(t)
return prop.Prop[x[0]]
}
// Get the address and type of a pointer to a struct from an interface.
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
if pb == nil {
err = ErrNil
return
}
// get the reflect type of the pointer to the struct.
t = reflect.TypeOf(pb)
// get the address of the struct.
value := reflect.ValueOf(pb)
b = toStructPointer(value)
return
}
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.
var enumValueMaps = make(map[string]map[string]int32)
// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
if _, ok := enumValueMaps[typeName]; ok {
panic("proto: duplicate enum registered: " + typeName)
}
enumValueMaps[typeName] = valueMap
}
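Generated code typically registers its enums from an init function; an illustrative call, assuming the FOO_name and FOO_value maps that protoc-gen-go would emit for the FOO enum in testdata:
func init() {
	proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value)
}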

View File

@ -0,0 +1,44 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2014 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include ../../Make.protobuf
all: regenerate
regenerate:
rm -f proto3.pb.go
make proto3.pb.go
# The following rules are just aids to development. Not needed for typical testing.
diff: regenerate
git diff proto3.pb.go

View File

@ -0,0 +1,58 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2014 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package proto3_proto;
message Message {
enum Humour {
UNKNOWN = 0;
PUNS = 1;
SLAPSTICK = 2;
BILL_BAILEY = 3;
}
string name = 1;
Humour hilarity = 2;
uint32 height_in_cm = 3;
bytes data = 4;
int64 result_count = 7;
bool true_scotsman = 8;
float score = 9;
repeated uint64 key = 5;
Nested nested = 6;
}
message Nested {
string bunny = 1;
}

View File

@ -0,0 +1,93 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2014 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"testing"
pb "./proto3_proto"
"github.com/golang/protobuf/proto"
)
func TestProto3ZeroValues(t *testing.T) {
tests := []struct {
desc string
m proto.Message
}{
{"zero message", &pb.Message{}},
{"empty bytes field", &pb.Message{Data: []byte{}}},
}
for _, test := range tests {
b, err := proto.Marshal(test.m)
if err != nil {
t.Errorf("%s: proto.Marshal: %v", test.desc, err)
continue
}
if len(b) > 0 {
t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
}
}
}
func TestRoundTripProto3(t *testing.T) {
m := &pb.Message{
Name: "David", // (2 | 1<<3): 0x0a 0x05 "David"
Hilarity: pb.Message_PUNS, // (0 | 2<<3): 0x10 0x01
HeightInCm: 178, // (0 | 3<<3): 0x18 0xb2 0x01
Data: []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto"
ResultCount: 47, // (0 | 7<<3): 0x38 0x2f
TrueScotsman: true, // (0 | 8<<3): 0x40 0x01
Score: 8.1, // (5 | 9<<3): 0x4d <8.1>
Key: []uint64{1, 0xdeadbeef},
Nested: &pb.Nested{
Bunny: "Monty",
},
}
t.Logf(" m: %v", m)
b, err := proto.Marshal(m)
if err != nil {
t.Fatalf("proto.Marshal: %v", err)
}
t.Logf(" b: %q", b)
m2 := new(pb.Message)
if err := proto.Unmarshal(b, m2); err != nil {
t.Fatalf("proto.Unmarshal: %v", err)
}
t.Logf("m2: %v", m2)
if !proto.Equal(m, m2) {
t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
}
}

View File

@ -0,0 +1,63 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"testing"
)
// This is a separate file and package from size_test.go because that one uses
// generated messages and thus may not be in package proto without having a circular
// dependency, whereas this file tests unexported details of size.go.
func TestVarintSize(t *testing.T) {
// Check the edge cases carefully.
testCases := []struct {
n uint64
size int
}{
{0, 1},
{1, 1},
{127, 1},
{128, 2},
{16383, 2},
{16384, 3},
{1<<63 - 1, 9},
{1 << 63, 10},
}
for _, tc := range testCases {
size := sizeVarint(tc.n)
if size != tc.size {
t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size)
}
}
}
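The unexported sizeVarint under test simply counts 7-bit groups; a minimal equivalent sketch (my own, not the vendored implementation):
func sizeVarintSketch(x uint64) int {
	n := 1
	for x >= 1<<7 {
		x >>= 7
		n++
	}
	return n // e.g. 127 -> 1, 128 -> 2, 1<<63 -> 10
}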

View File

@ -0,0 +1,135 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"log"
"testing"
proto3pb "./proto3_proto"
pb "./testdata"
. "github.com/golang/protobuf/proto"
)
var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}
// messageWithExtension2 is in equal_test.go.
var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}
func init() {
if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil {
log.Panicf("SetExtension: %v", err)
}
if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil {
log.Panicf("SetExtension: %v", err)
}
// Force messageWithExtension3 to have the extension encoded.
Marshal(messageWithExtension3)
}
var SizeTests = []struct {
desc string
pb Message
}{
{"empty", &pb.OtherMessage{}},
// Basic types.
{"bool", &pb.Defaults{F_Bool: Bool(true)}},
{"int32", &pb.Defaults{F_Int32: Int32(12)}},
{"negative int32", &pb.Defaults{F_Int32: Int32(-1)}},
{"small int64", &pb.Defaults{F_Int64: Int64(1)}},
{"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}},
{"negative int64", &pb.Defaults{F_Int64: Int64(-1)}},
{"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}},
{"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}},
{"uint32", &pb.Defaults{F_Uint32: Uint32(123)}},
{"uint64", &pb.Defaults{F_Uint64: Uint64(124)}},
{"float", &pb.Defaults{F_Float: Float32(12.6)}},
{"double", &pb.Defaults{F_Double: Float64(13.9)}},
{"string", &pb.Defaults{F_String: String("niles")}},
{"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}},
{"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}},
{"sint32", &pb.Defaults{F_Sint32: Int32(65)}},
{"sint64", &pb.Defaults{F_Sint64: Int64(67)}},
{"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},
// Repeated.
{"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}},
{"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}},
{"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},
{"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},
{"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},
{"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{
// Need enough large numbers to verify that the header is counting the number of bytes
// for the field, not the number of elements.
1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
}}},
{"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}},
{"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},
// Nested.
{"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}},
{"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},
// Other things.
{"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},
{"extension (unencoded)", messageWithExtension1},
{"extension (encoded)", messageWithExtension3},
// proto3 message
{"proto3 empty", &proto3pb.Message{}},
{"proto3 bool", &proto3pb.Message{TrueScotsman: true}},
{"proto3 int64", &proto3pb.Message{ResultCount: 1}},
{"proto3 uint32", &proto3pb.Message{HeightInCm: 123}},
{"proto3 float", &proto3pb.Message{Score: 12.6}},
{"proto3 string", &proto3pb.Message{Name: "Snezana"}},
{"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}},
{"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}},
{"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
{"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}},
{"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},
{"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}},
}
func TestSize(t *testing.T) {
for _, tc := range SizeTests {
size := Size(tc.pb)
b, err := Marshal(tc.pb)
if err != nil {
t.Errorf("%v: Marshal failed: %v", tc.desc, err)
continue
}
if size != len(b) {
t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b))
t.Logf("%v: bytes: %#v", tc.desc, b)
}
}
}

View File

@ -0,0 +1,50 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include ../../Make.protobuf
all: regenerate
regenerate:
rm -f test.pb.go
make test.pb.go
# The following rules are just aids to development. Not needed for typical testing.
diff: regenerate
git diff test.pb.go
restore:
cp test.pb.go.golden test.pb.go
preserve:
cp test.pb.go test.pb.go.golden

View File

@ -0,0 +1,86 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Verify that the compiler output for test.proto is unchanged.
package testdata
import (
"crypto/sha1"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
)
// sum returns in string form (for easy comparison) the SHA-1 hash of the named file.
func sum(t *testing.T, name string) string {
data, err := ioutil.ReadFile(name)
if err != nil {
t.Fatal(err)
}
t.Logf("sum(%q): length is %d", name, len(data))
hash := sha1.New()
_, err = hash.Write(data)
if err != nil {
t.Fatal(err)
}
return fmt.Sprintf("% x", hash.Sum(nil))
}
func run(t *testing.T, name string, args ...string) {
cmd := exec.Command(name, args...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
t.Fatal(err)
}
}
func TestGolden(t *testing.T) {
// Compute the original checksum.
goldenSum := sum(t, "test.pb.go")
// Run the proto compiler.
run(t, "protoc", "--go_out="+os.TempDir(), "test.proto")
newFile := filepath.Join(os.TempDir(), "test.pb.go")
defer os.Remove(newFile)
// Compute the new checksum.
newSum := sum(t, newFile)
// Verify
if newSum != goldenSum {
run(t, "diff", "-u", "test.pb.go", newFile)
t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go")
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,434 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// A feature-rich test file for the protocol compiler and libraries.
syntax = "proto2";
package testdata;
enum FOO { FOO1 = 1; };
message GoEnum {
required FOO foo = 1;
}
message GoTestField {
required string Label = 1;
required string Type = 2;
}
message GoTest {
// An enum, for completeness.
enum KIND {
VOID = 0;
// Basic types
BOOL = 1;
BYTES = 2;
FINGERPRINT = 3;
FLOAT = 4;
INT = 5;
STRING = 6;
TIME = 7;
// Groupings
TUPLE = 8;
ARRAY = 9;
MAP = 10;
// Table types
TABLE = 11;
// Functions
FUNCTION = 12; // last tag
};
// Some typical parameters
required KIND Kind = 1;
optional string Table = 2;
optional int32 Param = 3;
// Required, repeated and optional foreign fields.
required GoTestField RequiredField = 4;
repeated GoTestField RepeatedField = 5;
optional GoTestField OptionalField = 6;
// Required fields of all basic types
required bool F_Bool_required = 10;
required int32 F_Int32_required = 11;
required int64 F_Int64_required = 12;
required fixed32 F_Fixed32_required = 13;
required fixed64 F_Fixed64_required = 14;
required uint32 F_Uint32_required = 15;
required uint64 F_Uint64_required = 16;
required float F_Float_required = 17;
required double F_Double_required = 18;
required string F_String_required = 19;
required bytes F_Bytes_required = 101;
required sint32 F_Sint32_required = 102;
required sint64 F_Sint64_required = 103;
// Repeated fields of all basic types
repeated bool F_Bool_repeated = 20;
repeated int32 F_Int32_repeated = 21;
repeated int64 F_Int64_repeated = 22;
repeated fixed32 F_Fixed32_repeated = 23;
repeated fixed64 F_Fixed64_repeated = 24;
repeated uint32 F_Uint32_repeated = 25;
repeated uint64 F_Uint64_repeated = 26;
repeated float F_Float_repeated = 27;
repeated double F_Double_repeated = 28;
repeated string F_String_repeated = 29;
repeated bytes F_Bytes_repeated = 201;
repeated sint32 F_Sint32_repeated = 202;
repeated sint64 F_Sint64_repeated = 203;
// Optional fields of all basic types
optional bool F_Bool_optional = 30;
optional int32 F_Int32_optional = 31;
optional int64 F_Int64_optional = 32;
optional fixed32 F_Fixed32_optional = 33;
optional fixed64 F_Fixed64_optional = 34;
optional uint32 F_Uint32_optional = 35;
optional uint64 F_Uint64_optional = 36;
optional float F_Float_optional = 37;
optional double F_Double_optional = 38;
optional string F_String_optional = 39;
optional bytes F_Bytes_optional = 301;
optional sint32 F_Sint32_optional = 302;
optional sint64 F_Sint64_optional = 303;
// Default-valued fields of all basic types
optional bool F_Bool_defaulted = 40 [default=true];
optional int32 F_Int32_defaulted = 41 [default=32];
optional int64 F_Int64_defaulted = 42 [default=64];
optional fixed32 F_Fixed32_defaulted = 43 [default=320];
optional fixed64 F_Fixed64_defaulted = 44 [default=640];
optional uint32 F_Uint32_defaulted = 45 [default=3200];
optional uint64 F_Uint64_defaulted = 46 [default=6400];
optional float F_Float_defaulted = 47 [default=314159.];
optional double F_Double_defaulted = 48 [default=271828.];
optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"];
optional bytes F_Bytes_defaulted = 401 [default="Bignose"];
optional sint32 F_Sint32_defaulted = 402 [default = -32];
optional sint64 F_Sint64_defaulted = 403 [default = -64];
// Packed repeated fields (no string or bytes).
repeated bool F_Bool_repeated_packed = 50 [packed=true];
repeated int32 F_Int32_repeated_packed = 51 [packed=true];
repeated int64 F_Int64_repeated_packed = 52 [packed=true];
repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true];
repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true];
repeated uint32 F_Uint32_repeated_packed = 55 [packed=true];
repeated uint64 F_Uint64_repeated_packed = 56 [packed=true];
repeated float F_Float_repeated_packed = 57 [packed=true];
repeated double F_Double_repeated_packed = 58 [packed=true];
repeated sint32 F_Sint32_repeated_packed = 502 [packed=true];
repeated sint64 F_Sint64_repeated_packed = 503 [packed=true];
// Required, repeated, and optional groups.
required group RequiredGroup = 70 {
required string RequiredField = 71;
};
repeated group RepeatedGroup = 80 {
required string RequiredField = 81;
};
optional group OptionalGroup = 90 {
required string RequiredField = 91;
};
}
// For testing skipping of unrecognized fields.
// Numbers are all big, larger than tag numbers in GoTestField,
// the message used in the corresponding test.
message GoSkipTest {
required int32 skip_int32 = 11;
required fixed32 skip_fixed32 = 12;
required fixed64 skip_fixed64 = 13;
required string skip_string = 14;
required group SkipGroup = 15 {
required int32 group_int32 = 16;
required string group_string = 17;
}
}
// For testing packed/non-packed decoder switching.
// A serialized instance of one should be deserializable as the other.
message NonPackedTest {
repeated int32 a = 1;
}
message PackedTest {
repeated int32 b = 1 [packed=true];
}
message MaxTag {
// Maximum possible tag number.
optional string last_field = 536870911;
}
message OldMessage {
message Nested {
optional string name = 1;
}
optional Nested nested = 1;
optional int32 num = 2;
}
// NewMessage is wire compatible with OldMessage;
// imagine it as a future version.
message NewMessage {
message Nested {
optional string name = 1;
optional string food_group = 2;
}
optional Nested nested = 1;
// This is an int32 in OldMessage.
optional int64 num = 2;
}
// Smaller tests for ASCII formatting.
message InnerMessage {
required string host = 1;
optional int32 port = 2 [default=4000];
optional bool connected = 3;
}
message OtherMessage {
optional int64 key = 1;
optional bytes value = 2;
optional float weight = 3;
optional InnerMessage inner = 4;
}
message MyMessage {
required int32 count = 1;
optional string name = 2;
optional string quote = 3;
repeated string pet = 4;
optional InnerMessage inner = 5;
repeated OtherMessage others = 6;
repeated InnerMessage rep_inner = 12;
enum Color {
RED = 0;
GREEN = 1;
BLUE = 2;
};
optional Color bikeshed = 7;
optional group SomeGroup = 8 {
optional int32 group_field = 9;
}
// This field becomes [][]byte in the generated code.
repeated bytes rep_bytes = 10;
optional double bigfloat = 11;
extensions 100 to max;
}
message Ext {
extend MyMessage {
optional Ext more = 103;
optional string text = 104;
optional int32 number = 105;
}
optional string data = 1;
}
extend MyMessage {
repeated string greeting = 106;
}
message MyMessageSet {
option message_set_wire_format = true;
extensions 100 to max;
}
message Empty {
}
extend MyMessageSet {
optional Empty x201 = 201;
optional Empty x202 = 202;
optional Empty x203 = 203;
optional Empty x204 = 204;
optional Empty x205 = 205;
optional Empty x206 = 206;
optional Empty x207 = 207;
optional Empty x208 = 208;
optional Empty x209 = 209;
optional Empty x210 = 210;
optional Empty x211 = 211;
optional Empty x212 = 212;
optional Empty x213 = 213;
optional Empty x214 = 214;
optional Empty x215 = 215;
optional Empty x216 = 216;
optional Empty x217 = 217;
optional Empty x218 = 218;
optional Empty x219 = 219;
optional Empty x220 = 220;
optional Empty x221 = 221;
optional Empty x222 = 222;
optional Empty x223 = 223;
optional Empty x224 = 224;
optional Empty x225 = 225;
optional Empty x226 = 226;
optional Empty x227 = 227;
optional Empty x228 = 228;
optional Empty x229 = 229;
optional Empty x230 = 230;
optional Empty x231 = 231;
optional Empty x232 = 232;
optional Empty x233 = 233;
optional Empty x234 = 234;
optional Empty x235 = 235;
optional Empty x236 = 236;
optional Empty x237 = 237;
optional Empty x238 = 238;
optional Empty x239 = 239;
optional Empty x240 = 240;
optional Empty x241 = 241;
optional Empty x242 = 242;
optional Empty x243 = 243;
optional Empty x244 = 244;
optional Empty x245 = 245;
optional Empty x246 = 246;
optional Empty x247 = 247;
optional Empty x248 = 248;
optional Empty x249 = 249;
optional Empty x250 = 250;
}
message MessageList {
repeated group Message = 1 {
required string name = 2;
required int32 count = 3;
}
}
message Strings {
optional string string_field = 1;
optional bytes bytes_field = 2;
}
message Defaults {
enum Color {
RED = 0;
GREEN = 1;
BLUE = 2;
}
// Default-valued fields of all basic types.
// Same as GoTest, but copied here to make testing easier.
optional bool F_Bool = 1 [default=true];
optional int32 F_Int32 = 2 [default=32];
optional int64 F_Int64 = 3 [default=64];
optional fixed32 F_Fixed32 = 4 [default=320];
optional fixed64 F_Fixed64 = 5 [default=640];
optional uint32 F_Uint32 = 6 [default=3200];
optional uint64 F_Uint64 = 7 [default=6400];
optional float F_Float = 8 [default=314159.];
optional double F_Double = 9 [default=271828.];
optional string F_String = 10 [default="hello, \"world!\"\n"];
optional bytes F_Bytes = 11 [default="Bignose"];
optional sint32 F_Sint32 = 12 [default=-32];
optional sint64 F_Sint64 = 13 [default=-64];
optional Color F_Enum = 14 [default=GREEN];
// More fields with crazy defaults.
optional float F_Pinf = 15 [default=inf];
optional float F_Ninf = 16 [default=-inf];
optional float F_Nan = 17 [default=nan];
// Sub-message.
optional SubDefaults sub = 18;
// Redundant but explicit defaults.
optional string str_zero = 19 [default=""];
}
message SubDefaults {
optional int64 n = 1 [default=7];
}
message RepeatedEnum {
enum Color {
RED = 1;
}
repeated Color color = 1;
}
message MoreRepeated {
repeated bool bools = 1;
repeated bool bools_packed = 2 [packed=true];
repeated int32 ints = 3;
repeated int32 ints_packed = 4 [packed=true];
repeated int64 int64s_packed = 7 [packed=true];
repeated string strings = 5;
repeated fixed32 fixeds = 6;
}
// GroupOld and GroupNew have the same wire format.
// GroupNew has a new field inside a group.
message GroupOld {
optional group G = 101 {
optional int32 x = 2;
}
}
message GroupNew {
optional group G = 101 {
optional int32 x = 2;
optional int32 y = 3;
}
}
message FloatingPoint {
required double f = 1;
}
message MessageWithMap {
map<int32, string> name_mapping = 1;
map<sint64, FloatingPoint> msg_mapping = 2;
map<bool, bytes> byte_mapping = 3;
}

View File

@ -0,0 +1,789 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for writing the text protocol buffer format.
import (
"bufio"
"bytes"
"encoding"
"fmt"
"io"
"log"
"math"
"os"
"reflect"
"sort"
"strings"
)
var (
newline = []byte("\n")
spaces = []byte(" ")
gtNewline = []byte(">\n")
endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'}
backslashT = []byte{'\\', 't'}
backslashDQ = []byte{'\\', '"'}
backslashBS = []byte{'\\', '\\'}
posInf = []byte("inf")
negInf = []byte("-inf")
nan = []byte("nan")
)
type writer interface {
io.Writer
WriteByte(byte) error
}
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
ind int
complete bool // if the current position is a complete line
compact bool // whether to write out as a one-liner
w writer
}
func (w *textWriter) WriteString(s string) (n int, err error) {
if !strings.Contains(s, "\n") {
if !w.compact && w.complete {
w.writeIndent()
}
w.complete = false
return io.WriteString(w.w, s)
}
// WriteString is typically called without newlines, so this
// codepath and its copy are rare. We copy to avoid
// duplicating all of Write's logic here.
return w.Write([]byte(s))
}
func (w *textWriter) Write(p []byte) (n int, err error) {
newlines := bytes.Count(p, newline)
if newlines == 0 {
if !w.compact && w.complete {
w.writeIndent()
}
n, err = w.w.Write(p)
w.complete = false
return n, err
}
frags := bytes.SplitN(p, newline, newlines+1)
if w.compact {
for i, frag := range frags {
if i > 0 {
if err := w.w.WriteByte(' '); err != nil {
return n, err
}
n++
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
}
return n, nil
}
for i, frag := range frags {
if w.complete {
w.writeIndent()
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
if i+1 < len(frags) {
if err := w.w.WriteByte('\n'); err != nil {
return n, err
}
n++
}
}
w.complete = len(frags[len(frags)-1]) == 0
return n, nil
}
func (w *textWriter) WriteByte(c byte) error {
if w.compact && c == '\n' {
c = ' '
}
if !w.compact && w.complete {
w.writeIndent()
}
err := w.w.WriteByte(c)
w.complete = c == '\n'
return err
}
func (w *textWriter) indent() { w.ind++ }
func (w *textWriter) unindent() {
if w.ind == 0 {
log.Printf("proto: textWriter unindented too far")
return
}
w.ind--
}
func writeName(w *textWriter, props *Properties) error {
if _, err := w.WriteString(props.OrigName); err != nil {
return err
}
if props.Wire != "group" {
return w.WriteByte(':')
}
return nil
}
var (
messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem()
)
// raw is the interface satisfied by RawMessage.
type raw interface {
Bytes() []byte
}
func writeStruct(w *textWriter, sv reflect.Value) error {
if sv.Type() == messageSetType {
return writeMessageSet(w, sv.Addr().Interface().(*MessageSet))
}
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < sv.NumField(); i++ {
fv := sv.Field(i)
props := sprops.Prop[i]
name := st.Field(i).Name
if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields:
// XXX_unrecognized []byte
// XXX_extensions map[int32]proto.Extension
// The first is handled here;
// the second is handled at the bottom of this function.
if name == "XXX_unrecognized" && !fv.IsNil() {
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Field not filled in. This could be an optional field or
// a required field that wasn't filled in. Either way, there
// isn't anything we can show for it.
continue
}
if fv.Kind() == reflect.Slice && fv.IsNil() {
// Repeated field that is empty, or a bytes field that is unused.
continue
}
if props.Repeated && fv.Kind() == reflect.Slice {
// Repeated field.
for j := 0; j < fv.Len(); j++ {
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
v := fv.Index(j)
if v.Kind() == reflect.Ptr && v.IsNil() {
// A nil message in a repeated field is not valid,
// but we can handle that more gracefully than panicking.
if _, err := w.Write([]byte("<nil>\n")); err != nil {
return err
}
continue
}
if err := writeAny(w, v, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Map {
// Map fields are rendered as a repeated struct with key/value fields.
keys := fv.MapKeys()
sort.Sort(mapKeys(keys)) // sort keys for deterministic output
for _, key := range keys {
val := fv.MapIndex(key)
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
// open struct
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
// key
if _, err := w.WriteString("key:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, key, props.mkeyprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
// value
if _, err := w.WriteString("value:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
// close struct
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
// empty bytes field
continue
}
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
// proto3 non-repeated scalar field; skip if zero value
switch fv.Kind() {
case reflect.Bool:
if !fv.Bool() {
continue
}
case reflect.Int32, reflect.Int64:
if fv.Int() == 0 {
continue
}
case reflect.Uint32, reflect.Uint64:
if fv.Uint() == 0 {
continue
}
case reflect.Float32, reflect.Float64:
if fv.Float() == 0 {
continue
}
case reflect.String:
if fv.String() == "" {
continue
}
}
}
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if b, ok := fv.Interface().(raw); ok {
if err := writeRaw(w, b.Bytes()); err != nil {
return err
}
continue
}
// Enums have a String method, so writeAny will work fine.
if err := writeAny(w, fv, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// Extensions (the XXX_extensions field).
pv := sv.Addr()
if pv.Type().Implements(extendableProtoType) {
if err := writeExtensions(w, pv); err != nil {
return err
}
}
return nil
}
// writeRaw writes an uninterpreted raw message.
func writeRaw(w *textWriter, b []byte) error {
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if err := writeUnknownStruct(w, b); err != nil {
return err
}
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
return nil
}
// writeAny writes an arbitrary field.
func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
// Floats have special cases.
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
x := v.Float()
var b []byte
switch {
case math.IsInf(x, 1):
b = posInf
case math.IsInf(x, -1):
b = negInf
case math.IsNaN(x):
b = nan
}
if b != nil {
_, err := w.Write(b)
return err
}
// Other values are handled below.
}
// We don't attempt to serialise every possible value type; only those
// that can occur in protocol buffers.
switch v.Kind() {
case reflect.Slice:
// Should only be a []byte; repeated fields are handled in writeStruct.
if err := writeString(w, string(v.Interface().([]byte))); err != nil {
return err
}
case reflect.String:
if err := writeString(w, v.String()); err != nil {
return err
}
case reflect.Struct:
// Required/optional group/message.
var bra, ket byte = '<', '>'
if props != nil && props.Wire == "group" {
bra, ket = '{', '}'
}
if err := w.WriteByte(bra); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
if _, err = w.Write(text); err != nil {
return err
}
} else if err := writeStruct(w, v); err != nil {
return err
}
w.unindent()
if err := w.WriteByte(ket); err != nil {
return err
}
default:
_, err := fmt.Fprint(w, v.Interface())
return err
}
return nil
}
// equivalent to C's isprint.
func isprint(c byte) bool {
return c >= 0x20 && c < 0x7f
}
// writeString writes a string in the protocol buffer text format.
// It is similar to strconv.Quote except we don't use Go escape sequences,
// we treat the string as a byte sequence, and we use octal escapes.
// These differences are to maintain interoperability with the other
// languages' implementations of the text format.
func writeString(w *textWriter, s string) error {
// use WriteByte here to get any needed indent
if err := w.WriteByte('"'); err != nil {
return err
}
// Loop over the bytes, not the runes.
for i := 0; i < len(s); i++ {
var err error
// Divergence from C++: we don't escape apostrophes.
// There's no need to escape them, and the C++ parser
// copes with a naked apostrophe.
switch c := s[i]; c {
case '\n':
_, err = w.w.Write(backslashN)
case '\r':
_, err = w.w.Write(backslashR)
case '\t':
_, err = w.w.Write(backslashT)
case '"':
_, err = w.w.Write(backslashDQ)
case '\\':
_, err = w.w.Write(backslashBS)
default:
if isprint(c) {
err = w.w.WriteByte(c)
} else {
_, err = fmt.Fprintf(w.w, "\\%03o", c)
}
}
if err != nil {
return err
}
}
return w.WriteByte('"')
}
func writeMessageSet(w *textWriter, ms *MessageSet) error {
for _, item := range ms.Item {
id := *item.TypeId
if msd, ok := messageSetMap[id]; ok {
// Known message set type.
if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil {
return err
}
w.indent()
pb := reflect.New(msd.t.Elem())
if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil {
if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil {
return err
}
} else {
if err := writeStruct(w, pb.Elem()); err != nil {
return err
}
}
} else {
// Unknown type.
if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil {
return err
}
w.indent()
if err := writeUnknownStruct(w, item.Message); err != nil {
return err
}
}
w.unindent()
if _, err := w.Write(gtNewline); err != nil {
return err
}
}
return nil
}
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
if !w.compact {
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
return err
}
}
b := NewBuffer(data)
for b.index < len(b.buf) {
x, err := b.DecodeVarint()
if err != nil {
_, err := fmt.Fprintf(w, "/* %v */\n", err)
return err
}
wire, tag := x&7, x>>3
if wire == WireEndGroup {
w.unindent()
if _, err := w.Write(endBraceNewline); err != nil {
return err
}
continue
}
if _, err := fmt.Fprint(w, tag); err != nil {
return err
}
if wire != WireStartGroup {
if err := w.WriteByte(':'); err != nil {
return err
}
}
if !w.compact || wire == WireStartGroup {
if err := w.WriteByte(' '); err != nil {
return err
}
}
switch wire {
case WireBytes:
buf, e := b.DecodeRawBytes(false)
if e == nil {
_, err = fmt.Fprintf(w, "%q", buf)
} else {
_, err = fmt.Fprintf(w, "/* %v */", e)
}
case WireFixed32:
x, err = b.DecodeFixed32()
err = writeUnknownInt(w, x, err)
case WireFixed64:
x, err = b.DecodeFixed64()
err = writeUnknownInt(w, x, err)
case WireStartGroup:
err = w.WriteByte('{')
w.indent()
case WireVarint:
x, err = b.DecodeVarint()
err = writeUnknownInt(w, x, err)
default:
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
}
if err != nil {
return err
}
if err = w.WriteByte('\n'); err != nil {
return err
}
}
return nil
}
func writeUnknownInt(w *textWriter, x uint64, err error) error {
if err == nil {
_, err = fmt.Fprint(w, x)
} else {
_, err = fmt.Fprintf(w, "/* %v */", err)
}
return err
}
type int32Slice []int32
func (s int32Slice) Len() int { return len(s) }
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func writeExtensions(w *textWriter, pv reflect.Value) error {
emap := extensionMaps[pv.Type().Elem()]
ep := pv.Interface().(extendableProto)
// Order the extensions by ID.
// This isn't strictly necessary, but it will give us
// canonical output, which will also make testing easier.
m := ep.ExtensionMap()
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids))
for _, extNum := range ids {
ext := m[extNum]
var desc *ExtensionDesc
if emap != nil {
desc = emap[extNum]
}
if desc == nil {
// Unknown extension.
if err := writeUnknownStruct(w, ext.enc); err != nil {
return err
}
continue
}
pb, err := GetExtension(ep, desc)
if err != nil {
if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil {
return err
}
continue
}
// Repeated extensions will appear as a slice.
if !desc.repeated() {
if err := writeExtension(w, desc.Name, pb); err != nil {
return err
}
} else {
v := reflect.ValueOf(pb)
for i := 0; i < v.Len(); i++ {
if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
return err
}
}
}
}
return nil
}
func writeExtension(w *textWriter, name string, pb interface{}) error {
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
return nil
}
func (w *textWriter) writeIndent() {
if !w.complete {
return
}
remain := w.ind * 2
for remain > 0 {
n := remain
if n > len(spaces) {
n = len(spaces)
}
w.w.Write(spaces[:n])
remain -= n
}
w.complete = false
}
func marshalText(w io.Writer, pb Message, compact bool) error {
val := reflect.ValueOf(pb)
if pb == nil || val.IsNil() {
w.Write([]byte("<nil>"))
return nil
}
var bw *bufio.Writer
ww, ok := w.(writer)
if !ok {
bw = bufio.NewWriter(w)
ww = bw
}
aw := &textWriter{
w: ww,
complete: true,
compact: compact,
}
if tm, ok := pb.(encoding.TextMarshaler); ok {
text, err := tm.MarshalText()
if err != nil {
return err
}
if _, err = aw.Write(text); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val)
if err := writeStruct(aw, v); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error {
return marshalText(w, pb, false)
}
// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string {
var buf bytes.Buffer
marshalText(&buf, pb, false)
return buf.String()
}
// CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string {
var buf bytes.Buffer
marshalText(&buf, pb, true)
return buf.String()
}
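
A minimal usage sketch for the exported writers above, not part of this vendored file: assuming a generated message type pb.MyMessage with optional count and name fields (as in the testdata proto earlier in this diff; the import path is hypothetical), MarshalText emits the multi-line text form and CompactTextString the one-line form.

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "example.com/testdata" // hypothetical generated package
)

func main() {
	msg := &pb.MyMessage{Count: proto.Int32(42), Name: proto.String("Dave")}

	var buf bytes.Buffer
	if err := proto.MarshalText(&buf, msg); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())                   // count: 42\nname: "Dave"\n
	fmt.Println(proto.CompactTextString(msg)) // count:42 name:"Dave"
}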

View File

@ -0,0 +1,757 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for parsing the Text protocol buffer format.
// TODO: message sets.
import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"
)
type ParseError struct {
Message string
Line int // 1-based line number
Offset int // 0-based byte offset from start of input
}
func (p *ParseError) Error() string {
if p.Line == 1 {
// show offset only for first line
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
}
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
}
type token struct {
value string
err *ParseError
line int // line number
offset int // byte number from start of input, not start of line
unquoted string // the unquoted version of value, if it was a quoted string
}
func (t *token) String() string {
if t.err == nil {
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
}
return fmt.Sprintf("parse error: %v", t.err)
}
type textParser struct {
s string // remaining input
done bool // whether the parsing is finished (success or error)
backed bool // whether back() was called
offset, line int
cur token
}
func newTextParser(s string) *textParser {
p := new(textParser)
p.s = s
p.line = 1
p.cur.line = 1
return p
}
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
p.cur.err = pe
p.done = true
return pe
}
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
func isIdentOrNumberChar(c byte) bool {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
return true
case '0' <= c && c <= '9':
return true
}
switch c {
case '-', '+', '.', '_':
return true
}
return false
}
func isWhitespace(c byte) bool {
switch c {
case ' ', '\t', '\n', '\r':
return true
}
return false
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
if p.s[i] == '#' {
// comment; skip to end of line or input
for i < len(p.s) && p.s[i] != '\n' {
i++
}
if i == len(p.s) {
break
}
}
if p.s[i] == '\n' {
p.line++
}
i++
}
p.offset += i
p.s = p.s[i:len(p.s)]
if len(p.s) == 0 {
p.done = true
}
}
func (p *textParser) advance() {
// Skip whitespace
p.skipWhitespace()
if p.done {
return
}
// Start of non-whitespace
p.cur.err = nil
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
// Quoted string
i := 1
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
if p.s[i] == '\\' && i+1 < len(p.s) {
// skip escaped char
i++
}
i++
}
if i >= len(p.s) || p.s[i] != p.s[0] {
p.errorf("unmatched quote")
return
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %v", p.s[0:i+1])
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
p.cur.unquoted = unq
default:
i := 0
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
i++
}
if i == 0 {
p.errorf("unexpected byte %#x", p.s[0])
return
}
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
}
p.offset += len(p.cur.value)
}
var (
errBadUTF8 = errors.New("proto: bad UTF-8")
errBadHex = errors.New("proto: bad hexadecimal")
)
func unquoteC(s string, quote rune) (string, error) {
// This is based on C++'s tokenizer.cc.
// Despite its name, this is *not* parsing C syntax.
// For instance, "\0" is an invalid quoted string.
// Avoid allocation in trivial cases.
simple := true
for _, r := range s {
if r == '\\' || r == quote {
simple = false
break
}
}
if simple {
return s, nil
}
buf := make([]byte, 0, 3*len(s)/2)
for len(s) > 0 {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", errBadUTF8
}
s = s[n:]
if r != '\\' {
if r < utf8.RuneSelf {
buf = append(buf, byte(r))
} else {
buf = append(buf, string(r)...)
}
continue
}
ch, tail, err := unescape(s)
if err != nil {
return "", err
}
buf = append(buf, ch...)
s = tail
}
return string(buf), nil
}
func unescape(s string) (ch string, tail string, err error) {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", "", errBadUTF8
}
s = s[n:]
switch r {
case 'a':
return "\a", s, nil
case 'b':
return "\b", s, nil
case 'f':
return "\f", s, nil
case 'n':
return "\n", s, nil
case 'r':
return "\r", s, nil
case 't':
return "\t", s, nil
case 'v':
return "\v", s, nil
case '?':
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
base := 8
ss := s[:2]
s = s[2:]
if r == 'x' || r == 'X' {
base = 16
} else {
ss = string(r) + ss
}
i, err := strconv.ParseUint(ss, base, 8)
if err != nil {
return "", "", err
}
return string([]byte{byte(i)}), s, nil
case 'u', 'U':
n := 4
if r == 'U' {
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
}
bs := make([]byte, n/2)
for i := 0; i < n; i += 2 {
a, ok1 := unhex(s[i])
b, ok2 := unhex(s[i+1])
if !ok1 || !ok2 {
return "", "", errBadHex
}
bs[i/2] = a<<4 | b
}
s = s[n:]
return string(bs), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
// Adapted from src/pkg/strconv/quote.go.
func unhex(b byte) (v byte, ok bool) {
switch {
case '0' <= b && b <= '9':
return b - '0', true
case 'a' <= b && b <= 'f':
return b - 'a' + 10, true
case 'A' <= b && b <= 'F':
return b - 'A' + 10, true
}
return 0, false
}
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
if p.backed || p.done {
p.backed = false
return &p.cur
}
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && p.cur.value[0] == '"' {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || p.s[0] != '"' {
break
}
p.advance()
if p.cur.err != nil {
return &p.cur
}
cat.value += " " + p.cur.value
cat.unquoted += p.cur.unquoted
}
p.done = false // parser may have seen EOF, but we want to return cat
p.cur = cat
}
return &p.cur
}
func (p *textParser) consumeToken(s string) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != s {
p.back()
return p.errorf("expected %q, found %q", s, tok.value)
}
return nil
}
// Return a RequiredNotSetError indicating which required field was not set.
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < st.NumField(); i++ {
if !isNil(sv.Field(i)) {
continue
}
props := sprops.Prop[i]
if props.Required {
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
}
}
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
}
// Returns the index in the struct for the named field, as well as the parsed tag properties.
func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) {
sprops := GetProperties(st)
i, ok := sprops.decoderOrigNames[name]
if ok {
return i, sprops.Prop[i], true
}
return -1, nil, false
}
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ":" {
// Colon is optional when the field is a group or message.
needColon := true
switch props.Wire {
case "group":
needColon = false
case "bytes":
// A "bytes" field is either a message, a string, or a repeated field;
// those three become *T, *string and []T respectively, so we can check for
// this field being a pointer to a non-string.
if typ.Kind() == reflect.Ptr {
// *T or *string
if typ.Elem().Kind() == reflect.String {
break
}
} else if typ.Kind() == reflect.Slice {
// []T or []*T
if typ.Elem().Kind() != reflect.Ptr {
break
}
} else if typ.Kind() == reflect.String {
// The proto3 exception is for a string field,
// which requires a colon.
break
}
needColon = false
}
if needColon {
return p.errorf("expected ':', found %q", tok.value)
}
p.back()
}
return nil
}
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
st := sv.Type()
reqCount := GetProperties(st).reqCount
var reqFieldErr error
fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]".
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
if tok.value == "[" {
// Looks like an extension.
//
// TODO: Check whether we need to handle
// namespace rooted names (e.g. ".something.Foo").
tok = p.next()
if tok.err != nil {
return tok.err
}
var desc *ExtensionDesc
// This could be faster, but it's functional.
// TODO: Do something smarter than a linear scan.
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
if d.Name == tok.value {
desc = d
break
}
}
if desc == nil {
return p.errorf("unrecognized extension %q", tok.value)
}
// Check the extension terminator.
tok = p.next()
if tok.err != nil {
return tok.err
}
if tok.value != "]" {
return p.errorf("unrecognized extension terminator %q", tok.value)
}
props := &Properties{}
props.Parse(desc.Tag)
typ := reflect.TypeOf(desc.ExtensionType)
if err := p.checkForColon(props, typ); err != nil {
return err
}
rep := desc.repeated()
// Read the extension structure, and set it in
// the value we're constructing.
var ext reflect.Value
if !rep {
ext = reflect.New(typ).Elem()
} else {
ext = reflect.New(typ.Elem()).Elem()
}
if err := p.readAny(ext, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
ep := sv.Addr().Interface().(extendableProto)
if !rep {
SetExtension(ep, desc, ext.Interface())
} else {
old, err := GetExtension(ep, desc)
var sl reflect.Value
if err == nil {
sl = reflect.ValueOf(old) // existing slice
} else {
sl = reflect.MakeSlice(typ, 0, 1)
}
sl = reflect.Append(sl, ext)
SetExtension(ep, desc, sl.Interface())
}
} else {
// This is a normal, non-extension field.
name := tok.value
fi, props, ok := structFieldByName(st, name)
if !ok {
return p.errorf("unknown field name %q in %v", name, st)
}
dst := sv.Field(fi)
if dst.Kind() == reflect.Map {
// Consume any colon.
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Construct the map if it doesn't already exist.
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
key := reflect.New(dst.Type().Key()).Elem()
val := reflect.New(dst.Type().Elem()).Elem()
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// Technically the "key" and "value" could come in any order,
// but in practice they won't.
tok := p.next()
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
if err := p.consumeToken("key"); err != nil {
return err
}
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
return err
}
if err := p.consumeToken("value"); err != nil {
return err
}
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
return err
}
if err := p.consumeToken(terminator); err != nil {
return err
}
dst.SetMapIndex(key, val)
continue
}
// Check that it's not already set if it's not a repeated field.
if !props.Repeated && fieldSet[name] {
return p.errorf("non-repeated field %q was repeated", name)
}
if err := p.checkForColon(props, st.Field(fi).Type); err != nil {
return err
}
// Parse into the field.
fieldSet[name] = true
if err := p.readAny(dst, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
} else if props.Required {
reqCount--
}
}
// For backward compatibility, permit a semicolon or comma after a field.
tok = p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
}
if reqCount > 0 {
return p.missingRequiredFieldError(sv)
}
return reqFieldErr
}
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "" {
return p.errorf("unexpected EOF")
}
switch fv := v; fv.Kind() {
case reflect.Slice:
at := v.Type()
if at.Elem().Kind() == reflect.Uint8 {
// Special case for []byte
if tok.value[0] != '"' && tok.value[0] != '\'' {
// Deliberately written out here, as the error after
// this switch statement would write "invalid []byte: ...",
// which is not as user-friendly.
return p.errorf("invalid string: %v", tok.value)
}
bytes := []byte(tok.unquoted)
fv.Set(reflect.ValueOf(bytes))
return nil
}
// Repeated field. May already exist.
flen := fv.Len()
if flen == fv.Cap() {
nav := reflect.MakeSlice(at, flen, 2*flen+1)
reflect.Copy(nav, fv)
fv.Set(nav)
}
fv.SetLen(flen + 1)
// Read one.
p.back()
return p.readAny(fv.Index(flen), props)
case reflect.Bool:
// Either "true", "false", 1 or 0.
switch tok.value {
case "true", "1":
fv.SetBool(true)
return nil
case "false", "0":
fv.SetBool(false)
return nil
}
case reflect.Float32, reflect.Float64:
v := tok.value
// Ignore 'f' for compatibility with output generated by C++, but don't
// remove 'f' when the value is "-inf" or "inf".
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
v = v[:len(v)-1]
}
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
fv.SetFloat(f)
return nil
}
case reflect.Int32:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
fv.SetInt(x)
return nil
}
if len(props.Enum) == 0 {
break
}
m, ok := enumValueMaps[props.Enum]
if !ok {
break
}
x, ok := m[tok.value]
if !ok {
break
}
fv.SetInt(int64(x))
return nil
case reflect.Int64:
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
fv.SetInt(x)
return nil
}
case reflect.Ptr:
// A basic field (indirected through pointer), or a repeated message/group
p.back()
fv.Set(reflect.New(fv.Type().Elem()))
return p.readAny(fv.Elem(), props)
case reflect.String:
if tok.value[0] == '"' || tok.value[0] == '\'' {
fv.SetString(tok.unquoted)
return nil
}
case reflect.Struct:
var terminator string
switch tok.value {
case "{":
terminator = "}"
case "<":
terminator = ">"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
fv.SetUint(uint64(x))
return nil
}
case reflect.Uint64:
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
fv.SetUint(x)
return nil
}
}
return p.errorf("invalid %v: %v", v.Type(), tok.value)
}
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
// If a required field is not set and no other error occurs,
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok {
err := um.UnmarshalText([]byte(s))
return err
}
pb.Reset()
v := reflect.ValueOf(pb)
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
return pe
}
return nil
}
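
A minimal parsing sketch for UnmarshalText, under the same assumption of a generated pb.MyMessage (repeated pet field and optional inner message, as in the testdata proto; the generated getters are assumptions): the destination is reset first, then filled from the text form, with *RequiredNotSetError reported if a required field is left unset.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "example.com/testdata" // hypothetical generated package
)

func main() {
	const in = `count: 42 pet: "bunny" pet: "kitty" inner: < host: "footrest.syd" port: 7001 >`

	msg := new(pb.MyMessage)
	if err := proto.UnmarshalText(in, msg); err != nil {
		panic(err)
	}
	fmt.Println(msg.GetCount(), msg.GetPet(), msg.GetInner().GetHost())
}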

View File

@ -0,0 +1,509 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"math"
"reflect"
"testing"
proto3pb "./proto3_proto"
. "./testdata"
. "github.com/golang/protobuf/proto"
)
type UnmarshalTextTest struct {
in string
err string // if "", no error expected
out *MyMessage
}
func buildExtStructTest(text string) UnmarshalTextTest {
msg := &MyMessage{
Count: Int32(42),
}
SetExtension(msg, E_Ext_More, &Ext{
Data: String("Hello, world!"),
})
return UnmarshalTextTest{in: text, out: msg}
}
func buildExtDataTest(text string) UnmarshalTextTest {
msg := &MyMessage{
Count: Int32(42),
}
SetExtension(msg, E_Ext_Text, String("Hello, world!"))
SetExtension(msg, E_Ext_Number, Int32(1729))
return UnmarshalTextTest{in: text, out: msg}
}
func buildExtRepStringTest(text string) UnmarshalTextTest {
msg := &MyMessage{
Count: Int32(42),
}
if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil {
panic(err)
}
return UnmarshalTextTest{in: text, out: msg}
}
var unMarshalTextTests = []UnmarshalTextTest{
// Basic
{
in: " count:42\n name:\"Dave\" ",
out: &MyMessage{
Count: Int32(42),
Name: String("Dave"),
},
},
// Empty quoted string
{
in: `count:42 name:""`,
out: &MyMessage{
Count: Int32(42),
Name: String(""),
},
},
// Quoted string concatenation
{
in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
out: &MyMessage{
Count: Int32(42),
Name: String("My name is elsewhere"),
},
},
// Quoted string with escaped apostrophe
{
in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
out: &MyMessage{
Count: Int32(42),
Name: String("HOLIDAY - New Year's Day"),
},
},
// Quoted string with single quote
{
in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
out: &MyMessage{
Count: Int32(42),
Name: String(`Roger "The Ramster" Ramjet`),
},
},
// Quoted string with all the accepted special characters from the C++ test
{
in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
out: &MyMessage{
Count: Int32(42),
Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
},
},
// Quoted string with quoted backslash
{
in: `count:42 name: "\\'xyz"`,
out: &MyMessage{
Count: Int32(42),
Name: String(`\'xyz`),
},
},
// Quoted string with UTF-8 bytes.
{
in: "count:42 name: '\303\277\302\201\xAB'",
out: &MyMessage{
Count: Int32(42),
Name: String("\303\277\302\201\xAB"),
},
},
// Bad quoted string
{
in: `inner: < host: "\0" >` + "\n",
err: `line 1.15: invalid quoted string "\0"`,
},
// Number too large for int64
{
in: "count: 1 others { key: 123456789012345678901 }",
err: "line 1.23: invalid int64: 123456789012345678901",
},
// Number too large for int32
{
in: "count: 1234567890123",
err: "line 1.7: invalid int32: 1234567890123",
},
// Number in hexadecimal
{
in: "count: 0x2beef",
out: &MyMessage{
Count: Int32(0x2beef),
},
},
// Number in octal
{
in: "count: 024601",
out: &MyMessage{
Count: Int32(024601),
},
},
// Floating point number with "f" suffix
{
in: "count: 4 others:< weight: 17.0f >",
out: &MyMessage{
Count: Int32(4),
Others: []*OtherMessage{
{
Weight: Float32(17),
},
},
},
},
// Floating point positive infinity
{
in: "count: 4 bigfloat: inf",
out: &MyMessage{
Count: Int32(4),
Bigfloat: Float64(math.Inf(1)),
},
},
// Floating point negative infinity
{
in: "count: 4 bigfloat: -inf",
out: &MyMessage{
Count: Int32(4),
Bigfloat: Float64(math.Inf(-1)),
},
},
// Number too large for float32
{
in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
},
// Number posing as a quoted string
{
in: `inner: < host: 12 >` + "\n",
err: `line 1.15: invalid string: 12`,
},
// Quoted string posing as int32
{
in: `count: "12"`,
err: `line 1.7: invalid int32: "12"`,
},
// Quoted string posing a float32
{
in: `others:< weight: "17.4" >`,
err: `line 1.17: invalid float32: "17.4"`,
},
// Enum
{
in: `count:42 bikeshed: BLUE`,
out: &MyMessage{
Count: Int32(42),
Bikeshed: MyMessage_BLUE.Enum(),
},
},
// Repeated field
{
in: `count:42 pet: "horsey" pet:"bunny"`,
out: &MyMessage{
Count: Int32(42),
Pet: []string{"horsey", "bunny"},
},
},
// Repeated message with/without colon and <>/{}
{
in: `count:42 others:{} others{} others:<> others:{}`,
out: &MyMessage{
Count: Int32(42),
Others: []*OtherMessage{
{},
{},
{},
{},
},
},
},
// Missing colon for inner message
{
in: `count:42 inner < host: "cauchy.syd" >`,
out: &MyMessage{
Count: Int32(42),
Inner: &InnerMessage{
Host: String("cauchy.syd"),
},
},
},
// Missing colon for string field
{
in: `name "Dave"`,
err: `line 1.5: expected ':', found "\"Dave\""`,
},
// Missing colon for int32 field
{
in: `count 42`,
err: `line 1.6: expected ':', found "42"`,
},
// Missing required field
{
in: `name: "Pawel"`,
err: `proto: required field "testdata.MyMessage.count" not set`,
out: &MyMessage{
Name: String("Pawel"),
},
},
// Repeated non-repeated field
{
in: `name: "Rob" name: "Russ"`,
err: `line 1.12: non-repeated field "name" was repeated`,
},
// Group
{
in: `count: 17 SomeGroup { group_field: 12 }`,
out: &MyMessage{
Count: Int32(17),
Somegroup: &MyMessage_SomeGroup{
GroupField: Int32(12),
},
},
},
// Semicolon between fields
{
in: `count:3;name:"Calvin"`,
out: &MyMessage{
Count: Int32(3),
Name: String("Calvin"),
},
},
// Comma between fields
{
in: `count:4,name:"Ezekiel"`,
out: &MyMessage{
Count: Int32(4),
Name: String("Ezekiel"),
},
},
// Extension
buildExtStructTest(`count: 42 [testdata.Ext.more]:<data:"Hello, world!" >`),
buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`),
buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`),
buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`),
// Big all-in-one
{
in: "count:42 # Meaning\n" +
`name:"Dave" ` +
`quote:"\"I didn't want to go.\"" ` +
`pet:"bunny" ` +
`pet:"kitty" ` +
`pet:"horsey" ` +
`inner:<` +
` host:"footrest.syd" ` +
` port:7001 ` +
` connected:true ` +
`> ` +
`others:<` +
` key:3735928559 ` +
` value:"\x01A\a\f" ` +
`> ` +
`others:<` +
" weight:58.9 # Atomic weight of Co\n" +
` inner:<` +
` host:"lesha.mtv" ` +
` port:8002 ` +
` >` +
`>`,
out: &MyMessage{
Count: Int32(42),
Name: String("Dave"),
Quote: String(`"I didn't want to go."`),
Pet: []string{"bunny", "kitty", "horsey"},
Inner: &InnerMessage{
Host: String("footrest.syd"),
Port: Int32(7001),
Connected: Bool(true),
},
Others: []*OtherMessage{
{
Key: Int64(3735928559),
Value: []byte{0x1, 'A', '\a', '\f'},
},
{
Weight: Float32(58.9),
Inner: &InnerMessage{
Host: String("lesha.mtv"),
Port: Int32(8002),
},
},
},
},
},
}
func TestUnmarshalText(t *testing.T) {
for i, test := range unMarshalTextTests {
pb := new(MyMessage)
err := UnmarshalText(test.in, pb)
if test.err == "" {
// We don't expect failure.
if err != nil {
t.Errorf("Test %d: Unexpected error: %v", i, err)
} else if !reflect.DeepEqual(pb, test.out) {
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
i, pb, test.out)
}
} else {
// We do expect failure.
if err == nil {
t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
} else if err.Error() != test.err {
t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
i, err.Error(), test.err)
} else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {
t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
i, pb, test.out)
}
}
}
}
func TestUnmarshalTextCustomMessage(t *testing.T) {
msg := &textMessage{}
if err := UnmarshalText("custom", msg); err != nil {
t.Errorf("Unexpected error from custom unmarshal: %v", err)
}
if UnmarshalText("not custom", msg) == nil {
t.Errorf("Didn't get expected error from custom unmarshal")
}
}
// Regression test; this caused a panic.
func TestRepeatedEnum(t *testing.T) {
pb := new(RepeatedEnum)
if err := UnmarshalText("color: RED", pb); err != nil {
t.Fatal(err)
}
exp := &RepeatedEnum{
Color: []RepeatedEnum_Color{RepeatedEnum_RED},
}
if !Equal(pb, exp) {
t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
}
}
func TestProto3TextParsing(t *testing.T) {
m := new(proto3pb.Message)
const in = `name: "Wallace" true_scotsman: true`
want := &proto3pb.Message{
Name: "Wallace",
TrueScotsman: true,
}
if err := UnmarshalText(in, m); err != nil {
t.Fatal(err)
}
if !Equal(m, want) {
t.Errorf("\n got %v\nwant %v", m, want)
}
}
func TestMapParsing(t *testing.T) {
m := new(MessageWithMap)
const in = `name_mapping:<key:1234 value:"Feist"> name_mapping:<key:1 value:"Beatles">` +
`msg_mapping:<key:-4 value:<f: 2.0>>` +
`byte_mapping:<key:true value:"so be it">`
want := &MessageWithMap{
NameMapping: map[int32]string{
1: "Beatles",
1234: "Feist",
},
MsgMapping: map[int64]*FloatingPoint{
-4: {F: Float64(2.0)},
},
ByteMapping: map[bool][]byte{
true: []byte("so be it"),
},
}
if err := UnmarshalText(in, m); err != nil {
t.Fatal(err)
}
if !Equal(m, want) {
t.Errorf("\n got %v\nwant %v", m, want)
}
}
var benchInput string
func init() {
benchInput = "count: 4\n"
for i := 0; i < 1000; i++ {
benchInput += "pet: \"fido\"\n"
}
// Check it is valid input.
pb := new(MyMessage)
err := UnmarshalText(benchInput, pb)
if err != nil {
panic("Bad benchmark input: " + err.Error())
}
}
func BenchmarkUnmarshalText(b *testing.B) {
pb := new(MyMessage)
for i := 0; i < b.N; i++ {
UnmarshalText(benchInput, pb)
}
b.SetBytes(int64(len(benchInput)))
}

View File

@ -0,0 +1,436 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto_test
import (
"bytes"
"errors"
"io/ioutil"
"math"
"strings"
"testing"
"github.com/golang/protobuf/proto"
proto3pb "./proto3_proto"
pb "./testdata"
)
// textMessage implements the methods that allow it to marshal and unmarshal
// itself as text.
type textMessage struct {
}
func (*textMessage) MarshalText() ([]byte, error) {
return []byte("custom"), nil
}
func (*textMessage) UnmarshalText(bytes []byte) error {
if string(bytes) != "custom" {
return errors.New("expected 'custom'")
}
return nil
}
func (*textMessage) Reset() {}
func (*textMessage) String() string { return "" }
func (*textMessage) ProtoMessage() {}
func newTestMessage() *pb.MyMessage {
msg := &pb.MyMessage{
Count: proto.Int32(42),
Name: proto.String("Dave"),
Quote: proto.String(`"I didn't want to go."`),
Pet: []string{"bunny", "kitty", "horsey"},
Inner: &pb.InnerMessage{
Host: proto.String("footrest.syd"),
Port: proto.Int32(7001),
Connected: proto.Bool(true),
},
Others: []*pb.OtherMessage{
{
Key: proto.Int64(0xdeadbeef),
Value: []byte{1, 65, 7, 12},
},
{
Weight: proto.Float32(6.022),
Inner: &pb.InnerMessage{
Host: proto.String("lesha.mtv"),
Port: proto.Int32(8002),
},
},
},
Bikeshed: pb.MyMessage_BLUE.Enum(),
Somegroup: &pb.MyMessage_SomeGroup{
GroupField: proto.Int32(8),
},
// One normally wouldn't do this.
// This is an undeclared tag 13, as a varint (wire type 0) with value 4.
XXX_unrecognized: []byte{13<<3 | 0, 4},
}
ext := &pb.Ext{
Data: proto.String("Big gobs for big rats"),
}
if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
panic(err)
}
greetings := []string{"adg", "easy", "cow"}
if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
panic(err)
}
// Add an unknown extension. We marshal a pb.Ext, and fake the ID.
b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
if err != nil {
panic(err)
}
b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
proto.SetRawExtension(msg, 201, b)
// Extensions can be plain fields, too, so let's test that.
b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
proto.SetRawExtension(msg, 202, b)
return msg
}
const text = `count: 42
name: "Dave"
quote: "\"I didn't want to go.\""
pet: "bunny"
pet: "kitty"
pet: "horsey"
inner: <
host: "footrest.syd"
port: 7001
connected: true
>
others: <
key: 3735928559
value: "\001A\007\014"
>
others: <
weight: 6.022
inner: <
host: "lesha.mtv"
port: 8002
>
>
bikeshed: BLUE
SomeGroup {
group_field: 8
}
/* 2 unknown bytes */
13: 4
[testdata.Ext.more]: <
data: "Big gobs for big rats"
>
[testdata.greeting]: "adg"
[testdata.greeting]: "easy"
[testdata.greeting]: "cow"
/* 13 unknown bytes */
201: "\t3G skiing"
/* 3 unknown bytes */
202: 19
`
func TestMarshalText(t *testing.T) {
buf := new(bytes.Buffer)
if err := proto.MarshalText(buf, newTestMessage()); err != nil {
t.Fatalf("proto.MarshalText: %v", err)
}
s := buf.String()
if s != text {
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
}
}
func TestMarshalTextCustomMessage(t *testing.T) {
buf := new(bytes.Buffer)
if err := proto.MarshalText(buf, &textMessage{}); err != nil {
t.Fatalf("proto.MarshalText: %v", err)
}
s := buf.String()
if s != "custom" {
t.Errorf("Got %q, expected %q", s, "custom")
}
}
func TestMarshalTextNil(t *testing.T) {
want := "<nil>"
tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
for i, test := range tests {
buf := new(bytes.Buffer)
if err := proto.MarshalText(buf, test); err != nil {
t.Fatal(err)
}
if got := buf.String(); got != want {
t.Errorf("%d: got %q want %q", i, got, want)
}
}
}
func TestMarshalTextUnknownEnum(t *testing.T) {
// The Color enum only specifies values 0-2.
m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
got := m.String()
const want = `bikeshed:3 `
if got != want {
t.Errorf("\n got %q\nwant %q", got, want)
}
}
func BenchmarkMarshalTextBuffered(b *testing.B) {
buf := new(bytes.Buffer)
m := newTestMessage()
for i := 0; i < b.N; i++ {
buf.Reset()
proto.MarshalText(buf, m)
}
}
func BenchmarkMarshalTextUnbuffered(b *testing.B) {
w := ioutil.Discard
m := newTestMessage()
for i := 0; i < b.N; i++ {
proto.MarshalText(w, m)
}
}
func compact(src string) string {
// s/[ \n]+/ /g; s/ $//;
dst := make([]byte, len(src))
space, comment := false, false
j := 0
for i := 0; i < len(src); i++ {
if strings.HasPrefix(src[i:], "/*") {
comment = true
i++
continue
}
if comment && strings.HasPrefix(src[i:], "*/") {
comment = false
i++
continue
}
if comment {
continue
}
c := src[i]
if c == ' ' || c == '\n' {
space = true
continue
}
if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
space = false
}
if c == '{' {
space = false
}
if space {
dst[j] = ' '
j++
space = false
}
dst[j] = c
j++
}
if space {
dst[j] = ' '
j++
}
return string(dst[0:j])
}
var compactText = compact(text)
func TestCompactText(t *testing.T) {
s := proto.CompactTextString(newTestMessage())
if s != compactText {
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
}
}
func TestStringEscaping(t *testing.T) {
testCases := []struct {
in *pb.Strings
out string
}{
{
// Test data from C++ test (TextFormatTest.StringEscape).
// Single divergence: we don't escape apostrophes.
&pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
"string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
},
{
// Test data from the same C++ test.
&pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
"string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
},
{
// Some UTF-8.
&pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
`string_field: "\000\001\377\201"` + "\n",
},
}
for i, tc := range testCases {
var buf bytes.Buffer
if err := proto.MarshalText(&buf, tc.in); err != nil {
t.Errorf("proto.MarsalText: %v", err)
continue
}
s := buf.String()
if s != tc.out {
t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
continue
}
// Check round-trip.
pb := new(pb.Strings)
if err := proto.UnmarshalText(s, pb); err != nil {
t.Errorf("#%d: UnmarshalText: %v", i, err)
continue
}
if !proto.Equal(pb, tc.in) {
t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
}
}
}
// A limitedWriter accepts some output before it fails.
// This is a proxy for something like a nearly-full or imminently-failing disk,
// or a network connection that is about to die.
type limitedWriter struct {
b bytes.Buffer
limit int
}
var outOfSpace = errors.New("proto: insufficient space")
func (w *limitedWriter) Write(p []byte) (n int, err error) {
var avail = w.limit - w.b.Len()
if avail <= 0 {
return 0, outOfSpace
}
if len(p) <= avail {
return w.b.Write(p)
}
n, _ = w.b.Write(p[:avail])
return n, outOfSpace
}
func TestMarshalTextFailing(t *testing.T) {
// Try lots of different sizes to exercise more error code-paths.
for lim := 0; lim < len(text); lim++ {
buf := new(limitedWriter)
buf.limit = lim
err := proto.MarshalText(buf, newTestMessage())
// We expect a certain error, but also some partial results in the buffer.
if err != outOfSpace {
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
}
s := buf.b.String()
x := text[:buf.limit]
if s != x {
t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
}
}
}
func TestFloats(t *testing.T) {
tests := []struct {
f float64
want string
}{
{0, "0"},
{4.7, "4.7"},
{math.Inf(1), "inf"},
{math.Inf(-1), "-inf"},
{math.NaN(), "nan"},
}
for _, test := range tests {
msg := &pb.FloatingPoint{F: &test.f}
got := strings.TrimSpace(msg.String())
want := `f:` + test.want
if got != want {
t.Errorf("f=%f: got %q, want %q", test.f, got, want)
}
}
}
func TestRepeatedNilText(t *testing.T) {
m := &pb.MessageList{
Message: []*pb.MessageList_Message{
nil,
&pb.MessageList_Message{
Name: proto.String("Horse"),
},
nil,
},
}
want := `Message <nil>
Message {
name: "Horse"
}
Message <nil>
`
if s := proto.MarshalTextString(m); s != want {
t.Errorf(" got: %s\nwant: %s", s, want)
}
}
func TestProto3Text(t *testing.T) {
tests := []struct {
m proto.Message
want string
}{
// zero message
{&proto3pb.Message{}, ``},
// zero message except for an empty byte slice
{&proto3pb.Message{Data: []byte{}}, ``},
// trivial case
{&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
// empty map
{&pb.MessageWithMap{}, ``},
// non-empty map; current map format is the same as a repeated struct
{
&pb.MessageWithMap{NameMapping: map[int32]string{1234: "Feist"}},
`name_mapping:<key:1234 value:"Feist" >`,
},
}
for _, test := range tests {
got := strings.TrimSpace(test.m.String())
if got != test.want {
t.Errorf("\n got %s\nwant %s", got, test.want)
}
}
}

View File

@ -1,575 +0,0 @@
// The elb package provides types and functions for interacting with the AWS
// Elastic Load Balancing (ELB) service.
package elb
import (
"encoding/xml"
"net/http"
"net/url"
"strconv"
"time"
"github.com/mitchellh/goamz/aws"
)
// The ELB type encapsulates operations with the AWS ELB endpoint.
type ELB struct {
aws.Auth
aws.Region
httpClient *http.Client
}
const APIVersion = "2012-06-01"
// New creates a new ELB instance.
func New(auth aws.Auth, region aws.Region) *ELB {
return NewWithClient(auth, region, aws.RetryingClient)
}
func NewWithClient(auth aws.Auth, region aws.Region, httpClient *http.Client) *ELB {
return &ELB{auth, region, httpClient}
}
func (elb *ELB) query(params map[string]string, resp interface{}) error {
params["Version"] = APIVersion
params["Timestamp"] = time.Now().In(time.UTC).Format(time.RFC3339)
endpoint, err := url.Parse(elb.Region.ELBEndpoint)
if err != nil {
return err
}
sign(elb.Auth, "GET", "/", params, endpoint.Host)
endpoint.RawQuery = multimap(params).Encode()
r, err := elb.httpClient.Get(endpoint.String())
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 200 {
return buildError(r)
}
decoder := xml.NewDecoder(r.Body)
return decoder.Decode(resp)
}
func buildError(r *http.Response) error {
var (
err Error
errors xmlErrors
)
xml.NewDecoder(r.Body).Decode(&errors)
if len(errors.Errors) > 0 {
err = errors.Errors[0]
}
err.StatusCode = r.StatusCode
if err.Message == "" {
err.Message = r.Status
}
return &err
}
func multimap(p map[string]string) url.Values {
q := make(url.Values, len(p))
for k, v := range p {
q[k] = []string{v}
}
return q
}
func makeParams(action string) map[string]string {
params := make(map[string]string)
params["Action"] = action
return params
}
// ----------------------------------------------------------------------------
// ELB objects
// A listener attaches to an elb
type Listener struct {
InstancePort int64 `xml:"Listener>InstancePort"`
InstanceProtocol string `xml:"Listener>InstanceProtocol"`
SSLCertificateId string `xml:"Listener>SSLCertificateId"`
LoadBalancerPort int64 `xml:"Listener>LoadBalancerPort"`
Protocol string `xml:"Listener>Protocol"`
}
// An Instance attaches to an elb
type Instance struct {
InstanceId string `xml:"InstanceId"`
}
// A tag attached to an elb
type Tag struct {
Key string `xml:"Key"`
Value string `xml:"Value"`
}
// An InstanceState from an elb health query
type InstanceState struct {
InstanceId string `xml:"InstanceId"`
Description string `xml:"Description"`
State string `xml:"State"`
ReasonCode string `xml:"ReasonCode"`
}
// ----------------------------------------------------------------------------
// AddTags
type AddTags struct {
LoadBalancerNames []string
Tags []Tag
}
type AddTagsResp struct {
RequestId string `xml:"ResponseMetadata>RequestId"`
}
func (elb *ELB) AddTags(options *AddTags) (resp *AddTagsResp, err error) {
params := makeParams("AddTags")
for i, v := range options.LoadBalancerNames {
params["LoadBalancerNames.member."+strconv.Itoa(i+1)] = v
}
for i, v := range options.Tags {
params["Tags.member."+strconv.Itoa(i+1)+".Key"] = v.Key
params["Tags.member."+strconv.Itoa(i+1)+".Value"] = v.Value
}
resp = &AddTagsResp{}
err = elb.query(params, resp)
return resp, err
}
// ----------------------------------------------------------------------------
// RemoveTags
type RemoveTags struct {
LoadBalancerNames []string
TagKeys []string
}
type RemoveTagsResp struct {
RequestId string `xml:"ResponseMetadata>RequestId"`
}
func (elb *ELB) RemoveTags(options *RemoveTags) (resp *RemoveTagsResp, err error) {
params := makeParams("RemoveTags")
for i, v := range options.LoadBalancerNames {
params["LoadBalancerNames.member."+strconv.Itoa(i+1)] = v
}
for i, v := range options.TagKeys {
params["Tags.member."+strconv.Itoa(i+1)+".Key"] = v
}
resp = &RemoveTagsResp{}
err = elb.query(params, resp)
return resp, err
}
// ----------------------------------------------------------------------------
// Create
// The CreateLoadBalancer request parameters
type CreateLoadBalancer struct {
AvailZone []string
Listeners []Listener
LoadBalancerName string
Internal bool // true for vpc elbs
SecurityGroups []string
Subnets []string
Tags []Tag
}
type CreateLoadBalancerResp struct {
DNSName string `xml:"CreateLoadBalancerResult>DNSName"`
RequestId string `xml:"ResponseMetadata>RequestId"`
}
func (elb *ELB) CreateLoadBalancer(options *CreateLoadBalancer) (resp *CreateLoadBalancerResp, err error) {
params := makeParams("CreateLoadBalancer")
params["LoadBalancerName"] = options.LoadBalancerName
for i, v := range options.AvailZone {
params["AvailabilityZones.member."+strconv.Itoa(i+1)] = v
}
for i, v := range options.SecurityGroups {
params["SecurityGroups.member."+strconv.Itoa(i+1)] = v
}
for i, v := range options.Subnets {
params["Subnets.member."+strconv.Itoa(i+1)] = v
}
for i, v := range options.Listeners {
params["Listeners.member."+strconv.Itoa(i+1)+".LoadBalancerPort"] = strconv.FormatInt(v.LoadBalancerPort, 10)
params["Listeners.member."+strconv.Itoa(i+1)+".InstancePort"] = strconv.FormatInt(v.InstancePort, 10)
params["Listeners.member."+strconv.Itoa(i+1)+".Protocol"] = v.Protocol
params["Listeners.member."+strconv.Itoa(i+1)+".InstanceProtocol"] = v.InstanceProtocol
params["Listeners.member."+strconv.Itoa(i+1)+".SSLCertificateId"] = v.SSLCertificateId
}
for i, v := range options.Tags {
params["Tags.member."+strconv.Itoa(i+1)+".Key"] = v.Key
params["Tags.member."+strconv.Itoa(i+1)+".Value"] = v.Value
}
if options.Internal {
params["Scheme"] = "internal"
}
resp = &CreateLoadBalancerResp{}
err = elb.query(params, resp)
if err != nil {
resp = nil
}
return
}
// ----------------------------------------------------------------------------
// Destroy
// The DeleteLoadBalancer request parameters
type DeleteLoadBalancer struct {
LoadBalancerName string
}
func (elb *ELB) DeleteLoadBalancer(options *DeleteLoadBalancer) (resp *SimpleResp, err error) {
params := makeParams("DeleteLoadBalancer")
params["LoadBalancerName"] = options.LoadBalancerName
resp = &SimpleResp{}
err = elb.query(params, resp)
if err != nil {
resp = nil
}
return
}
// ----------------------------------------------------------------------------
// Describe
// An individual load balancer
type LoadBalancer struct {
LoadBalancerName string `xml:"LoadBalancerName"`
Listeners []Listener `xml:"ListenerDescriptions>member"`
Instances []Instance `xml:"Instances>member"`
HealthCheck HealthCheck `xml:"HealthCheck"`
AvailabilityZones []string `xml:"AvailabilityZones>member"`
HostedZoneNameID string `xml:"CanonicalHostedZoneNameID"`
DNSName string `xml:"DNSName"`
SecurityGroups []string `xml:"SecurityGroups>member"`
Scheme string `xml:"Scheme"`
Subnets []string `xml:"Subnets>member"`
}
// DescribeLoadBalancer request params
type DescribeLoadBalancer struct {
Names []string
}
type DescribeLoadBalancersResp struct {
RequestId string `xml:"ResponseMetadata>RequestId"`
LoadBalancers []LoadBalancer `xml:"DescribeLoadBalancersResult>LoadBalancerDescriptions>member"`
}
func (elb *ELB) DescribeLoadBalancers(options *DescribeLoadBalancer) (resp *DescribeLoadBalancersResp, err error) {
params := makeParams("DescribeLoadBalancers")
for i, v := range options.Names {
params["LoadBalancerNames.member."+strconv.Itoa(i+1)] = v
}
resp = &DescribeLoadBalancersResp{}
err = elb.query(params, resp)
if err != nil {
resp = nil
}
return
}
// ----------------------------------------------------------------------------
// Attributes
type AccessLog struct {
EmitInterval int64
Enabled bool
S3BucketName string
S3BucketPrefix string
}
type ConnectionDraining struct {
Enabled bool
Timeout int64
}
type LoadBalancerAttributes struct {
CrossZoneLoadBalancingEnabled bool
ConnectionSettingsIdleTimeout int64
ConnectionDraining ConnectionDraining
AccessLog AccessLog
}
type ModifyLoadBalancerAttributes struct {
LoadBalancerName string
LoadBalancerAttributes LoadBalancerAttributes
}
func (elb *ELB) ModifyLoadBalancerAttributes(options *ModifyLoadBalancerAttributes) (resp *SimpleResp, err error) {
params := makeParams("ModifyLoadBalancerAttributes")
params["LoadBalancerName"] = options.LoadBalancerName
params["LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled"] = strconv.FormatBool(options.LoadBalancerAttributes.CrossZoneLoadBalancingEnabled)
if options.LoadBalancerAttributes.ConnectionSettingsIdleTimeout > 0 {
params["LoadBalancerAttributes.ConnectionSettings.IdleTimeout"] = strconv.Itoa(int(options.LoadBalancerAttributes.ConnectionSettingsIdleTimeout))
}
if options.LoadBalancerAttributes.ConnectionDraining.Timeout > 0 {
params["LoadBalancerAttributes.ConnectionDraining.Timeout"] = strconv.Itoa(int(options.LoadBalancerAttributes.ConnectionDraining.Timeout))
}
params["LoadBalancerAttributes.ConnectionDraining.Enabled"] = strconv.FormatBool(options.LoadBalancerAttributes.ConnectionDraining.Enabled)
params["LoadBalancerAttributes.AccessLog.Enabled"] = strconv.FormatBool(options.LoadBalancerAttributes.AccessLog.Enabled)
if options.LoadBalancerAttributes.AccessLog.Enabled {
params["LoadBalancerAttributes.AccessLog.EmitInterval"] = strconv.Itoa(int(options.LoadBalancerAttributes.AccessLog.EmitInterval))
params["LoadBalancerAttributes.AccessLog.S3BucketName"] = options.LoadBalancerAttributes.AccessLog.S3BucketName
params["LoadBalancerAttributes.AccessLog.S3BucketPrefix"] = options.LoadBalancerAttributes.AccessLog.S3BucketPrefix
}
resp = &SimpleResp{}
err = elb.query(params, resp)
if err != nil {
resp = nil
}
return
}
// ----------------------------------------------------------------------------
// Instance Registration / deregistration
// The RegisterInstancesWithLoadBalancer request parameters
type RegisterInstancesWithLoadBalancer struct {
LoadBalancerName string
Instances []string
}
type RegisterInstancesWithLoadBalancerResp struct {
Instances []Instance `xml:"RegisterInstancesWithLoadBalancerResult>Instances>member"`
RequestId string `xml:"ResponseMetadata>RequestId"`
}
func (elb *ELB) RegisterInstancesWithLoadBalancer(options *RegisterInstancesWithLoadBalancer) (resp *RegisterInstancesWithLoadBalancerResp, err error) {
params := makeParams("RegisterInstancesWithLoadBalancer")
params["LoadBalancerName"] = options.LoadBalancerName
for i, v := range options.Instances {
params["Instances.member."+strconv.Itoa(i+1)+".InstanceId"] = v
}
resp = &RegisterInstancesWithLoadBalancerResp{}
err = elb.query(params, resp)
if err != nil {
resp = nil
}
return
}
// The DeregisterInstancesFromLoadBalancer request parameters
type DeregisterInstancesFromLoadBalancer struct {
LoadBalancerName string
Instances []string
}
type DeregisterInstancesFromLoadBalancerResp struct {
Instances []Instance `xml:"DeregisterInstancesFromLoadBalancerResult>Instances>member"`
RequestId string `xml:"ResponseMetadata>RequestId"`
}
func (elb *ELB) DeregisterInstancesFromLoadBalancer(options *DeregisterInstancesFromLoadBalancer) (resp *DeregisterInstancesFromLoadBalancerResp, err error) {
params := makeParams("DeregisterInstancesFromLoadBalancer")
params["LoadBalancerName"] = options.LoadBalancerName
for i, v := range options.Instances {
params["Instances.member."+strconv.Itoa(i+1)+".InstanceId"] = v
}
resp = &DeregisterInstancesFromLoadBalancerResp{}
err = elb.query(params, resp)
if err != nil {
resp = nil
}
return
}
// ----------------------------------------------------------------------------
// DescribeTags
type DescribeTags struct {
LoadBalancerNames []string
}
type LoadBalancerTag struct {
Tags []Tag `xml:"Tags>member"`
LoadBalancerName string `xml:"LoadBalancerName"`
}
type DescribeTagsResp struct {
LoadBalancerTags []LoadBalancerTag `xml:"DescribeTagsResult>TagDescriptions>member"`
NextToken string `xml:"DescribeTagsResult>NextToken"`
RequestId string `xml:"ResponseMetadata>RequestId"`
}
func (elb *ELB) DescribeTags(options *DescribeTags) (resp *DescribeTagsResp, err error) {
params := makeParams("DescribeTags")
for i, v := range options.LoadBalancerNames {
params["LoadBalancerNames.member."+strconv.Itoa(i+1)] = v
}
resp = &DescribeTagsResp{}
err = elb.query(params, resp)
if err != nil {
resp = nil
}
return
}
// ----------------------------------------------------------------------------
// Health Checks
type HealthCheck struct {
HealthyThreshold int64 `xml:"HealthyThreshold"`
UnhealthyThreshold int64 `xml:"UnhealthyThreshold"`
Interval int64 `xml:"Interval"`
Target string `xml:"Target"`
Timeout int64 `xml:"Timeout"`
}
type ConfigureHealthCheck struct {
LoadBalancerName string
Check HealthCheck
}
type ConfigureHealthCheckResp struct {
Check HealthCheck `xml:"ConfigureHealthCheckResult>HealthCheck"`
RequestId string `xml:"ResponseMetadata>RequestId"`
}
func (elb *ELB) ConfigureHealthCheck(options *ConfigureHealthCheck) (resp *ConfigureHealthCheckResp, err error) {
params := makeParams("ConfigureHealthCheck")
params["LoadBalancerName"] = options.LoadBalancerName
params["HealthCheck.HealthyThreshold"] = strconv.Itoa(int(options.Check.HealthyThreshold))
params["HealthCheck.UnhealthyThreshold"] = strconv.Itoa(int(options.Check.UnhealthyThreshold))
params["HealthCheck.Interval"] = strconv.Itoa(int(options.Check.Interval))
params["HealthCheck.Target"] = options.Check.Target
params["HealthCheck.Timeout"] = strconv.Itoa(int(options.Check.Timeout))
resp = &ConfigureHealthCheckResp{}
err = elb.query(params, resp)
if err != nil {
resp = nil
}
return
}
// ----------------------------------------------------------------------------
// Instance Health
// The DescribeInstanceHealth request parameters
type DescribeInstanceHealth struct {
LoadBalancerName string
}
type DescribeInstanceHealthResp struct {
InstanceStates []InstanceState `xml:"DescribeInstanceHealthResult>InstanceStates>member"`
RequestId string `xml:"ResponseMetadata>RequestId"`
}
func (elb *ELB) DescribeInstanceHealth(options *DescribeInstanceHealth) (resp *DescribeInstanceHealthResp, err error) {
params := makeParams("DescribeInstanceHealth")
params["LoadBalancerName"] = options.LoadBalancerName
resp = &DescribeInstanceHealthResp{}
err = elb.query(params, resp)
if err != nil {
resp = nil
}
return
}
// Responses
type SimpleResp struct {
RequestId string `xml:"ResponseMetadata>RequestId"`
}
type xmlErrors struct {
Errors []Error `xml:"Error"`
}
// Error encapsulates an elb error.
type Error struct {
// HTTP status code of the error.
StatusCode int
// AWS code of the error.
Code string
// Message explaining the error.
Message string
}
func (e *Error) Error() string {
var prefix string
if e.Code != "" {
prefix = e.Code + ": "
}
if prefix == "" && e.StatusCode > 0 {
prefix = strconv.Itoa(e.StatusCode) + ": "
}
return prefix + e.Message
}
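For reference, a minimal sketch (not part of this repository) of how the now-removed goamz/elb package was typically driven, using only the identifiers shown above; the credentials, region endpoint, and load balancer name are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/goamz/aws"
	"github.com/mitchellh/goamz/elb"
)

func main() {
	// Placeholder credentials; real code would load them from the environment.
	auth := aws.Auth{"AKIDEXAMPLE", "SECRETEXAMPLE", ""}
	// Only the ELB endpoint matters to this package; the URL is a placeholder.
	region := aws.Region{ELBEndpoint: "https://elasticloadbalancing.us-east-1.amazonaws.com"}
	client := elb.New(auth, region)

	// Describe a load balancer by name and print its DNS name.
	resp, err := client.DescribeLoadBalancers(&elb.DescribeLoadBalancer{Names: []string{"my-elb"}})
	if err != nil {
		log.Fatal(err)
	}
	for _, lb := range resp.LoadBalancers {
		fmt.Println(lb.LoadBalancerName, lb.DNSName)
	}
}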

View File

@ -1,235 +0,0 @@
package elb_test
import (
"github.com/mitchellh/goamz/aws"
"github.com/mitchellh/goamz/elb"
"github.com/mitchellh/goamz/testutil"
. "github.com/motain/gocheck"
"testing"
)
func Test(t *testing.T) {
TestingT(t)
}
type S struct {
elb *elb.ELB
}
var _ = Suite(&S{})
var testServer = testutil.NewHTTPServer()
func (s *S) SetUpSuite(c *C) {
testServer.Start()
auth := aws.Auth{"abc", "123", ""}
s.elb = elb.NewWithClient(auth, aws.Region{ELBEndpoint: testServer.URL}, testutil.DefaultClient)
}
func (s *S) TearDownTest(c *C) {
testServer.Flush()
}
func (s *S) TestAddTags(c *C) {
testServer.Response(200, nil, AddTagsExample)
options := elb.AddTags{
LoadBalancerNames: []string{"foobar"},
Tags: []elb.Tag{
{
Key: "hello",
Value: "world",
},
},
}
resp, err := s.elb.AddTags(&options)
req := testServer.WaitRequest()
c.Assert(req.Form["Action"], DeepEquals, []string{"AddTags"})
c.Assert(req.Form["LoadBalancerNames.member.1"], DeepEquals, []string{"foobar"})
c.Assert(req.Form["Tags.member.1.Key"], DeepEquals, []string{"hello"})
c.Assert(req.Form["Tags.member.1.Value"], DeepEquals, []string{"world"})
c.Assert(err, IsNil)
c.Assert(resp.RequestId, Equals, "360e81f7-1100-11e4-b6ed-0f30EXAMPLE")
}
func (s *S) TestRemoveTags(c *C) {
testServer.Response(200, nil, RemoveTagsExample)
options := elb.RemoveTags{
LoadBalancerNames: []string{"foobar"},
TagKeys: []string{"hello"},
}
resp, err := s.elb.RemoveTags(&options)
req := testServer.WaitRequest()
c.Assert(req.Form["Action"], DeepEquals, []string{"RemoveTags"})
c.Assert(req.Form["LoadBalancerNames.member.1"], DeepEquals, []string{"foobar"})
c.Assert(req.Form["Tags.member.1.Key"], DeepEquals, []string{"hello"})
c.Assert(err, IsNil)
c.Assert(resp.RequestId, Equals, "83c88b9d-12b7-11e3-8b82-87b12EXAMPLE")
}
func (s *S) TestCreateLoadBalancer(c *C) {
testServer.Response(200, nil, CreateLoadBalancerExample)
options := elb.CreateLoadBalancer{
AvailZone: []string{"us-east-1a"},
Listeners: []elb.Listener{elb.Listener{
InstancePort: 80,
InstanceProtocol: "http",
SSLCertificateId: "needToAddASSLCertToYourAWSAccount",
LoadBalancerPort: 80,
Protocol: "http",
},
},
LoadBalancerName: "foobar",
Internal: false,
SecurityGroups: []string{"sg1"},
Subnets: []string{"sn1"},
}
resp, err := s.elb.CreateLoadBalancer(&options)
req := testServer.WaitRequest()
c.Assert(req.Form["Action"], DeepEquals, []string{"CreateLoadBalancer"})
c.Assert(req.Form["LoadBalancerName"], DeepEquals, []string{"foobar"})
c.Assert(err, IsNil)
c.Assert(resp.RequestId, Equals, "1549581b-12b7-11e3-895e-1334aEXAMPLE")
}
func (s *S) TestDeleteLoadBalancer(c *C) {
testServer.Response(200, nil, DeleteLoadBalancerExample)
options := elb.DeleteLoadBalancer{
LoadBalancerName: "foobar",
}
resp, err := s.elb.DeleteLoadBalancer(&options)
req := testServer.WaitRequest()
c.Assert(req.Form["Action"], DeepEquals, []string{"DeleteLoadBalancer"})
c.Assert(req.Form["LoadBalancerName"], DeepEquals, []string{"foobar"})
c.Assert(err, IsNil)
c.Assert(resp.RequestId, Equals, "1549581b-12b7-11e3-895e-1334aEXAMPLE")
}
func (s *S) TestDescribeLoadBalancers(c *C) {
testServer.Response(200, nil, DescribeLoadBalancersExample)
options := elb.DescribeLoadBalancer{
Names: []string{"foobar"},
}
resp, err := s.elb.DescribeLoadBalancers(&options)
req := testServer.WaitRequest()
c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeLoadBalancers"})
c.Assert(err, IsNil)
c.Assert(resp.RequestId, Equals, "83c88b9d-12b7-11e3-8b82-87b12EXAMPLE")
c.Assert(resp.LoadBalancers[0].LoadBalancerName, Equals, "MyLoadBalancer")
c.Assert(resp.LoadBalancers[0].Listeners[0].Protocol, Equals, "HTTP")
c.Assert(resp.LoadBalancers[0].Instances[0].InstanceId, Equals, "i-e4cbe38d")
c.Assert(resp.LoadBalancers[0].AvailabilityZones[0], Equals, "us-east-1a")
c.Assert(resp.LoadBalancers[0].Scheme, Equals, "internet-facing")
c.Assert(resp.LoadBalancers[0].DNSName, Equals, "MyLoadBalancer-123456789.us-east-1.elb.amazonaws.com")
c.Assert(resp.LoadBalancers[0].HealthCheck.HealthyThreshold, Equals, int64(2))
c.Assert(resp.LoadBalancers[0].HealthCheck.UnhealthyThreshold, Equals, int64(10))
c.Assert(resp.LoadBalancers[0].HealthCheck.Interval, Equals, int64(90))
c.Assert(resp.LoadBalancers[0].HealthCheck.Target, Equals, "HTTP:80/")
c.Assert(resp.LoadBalancers[0].HealthCheck.Timeout, Equals, int64(60))
}
func (s *S) TestRegisterInstancesWithLoadBalancer(c *C) {
testServer.Response(200, nil, RegisterInstancesWithLoadBalancerExample)
options := elb.RegisterInstancesWithLoadBalancer{
LoadBalancerName: "foobar",
Instances: []string{"instance-1", "instance-2"},
}
resp, err := s.elb.RegisterInstancesWithLoadBalancer(&options)
req := testServer.WaitRequest()
c.Assert(req.Form["Action"], DeepEquals, []string{"RegisterInstancesWithLoadBalancer"})
c.Assert(req.Form["LoadBalancerName"], DeepEquals, []string{"foobar"})
c.Assert(req.Form["Instances.member.1.InstanceId"], DeepEquals, []string{"instance-1"})
c.Assert(req.Form["Instances.member.2.InstanceId"], DeepEquals, []string{"instance-2"})
c.Assert(err, IsNil)
c.Assert(resp.Instances[0].InstanceId, Equals, "i-315b7e51")
c.Assert(resp.RequestId, Equals, "83c88b9d-12b7-11e3-8b82-87b12EXAMPLE")
}
func (s *S) TestDeregisterInstancesFromLoadBalancer(c *C) {
testServer.Response(200, nil, DeregisterInstancesFromLoadBalancerExample)
options := elb.DeregisterInstancesFromLoadBalancer{
LoadBalancerName: "foobar",
Instances: []string{"instance-1", "instance-2"},
}
resp, err := s.elb.DeregisterInstancesFromLoadBalancer(&options)
req := testServer.WaitRequest()
c.Assert(req.Form["Action"], DeepEquals, []string{"DeregisterInstancesFromLoadBalancer"})
c.Assert(req.Form["LoadBalancerName"], DeepEquals, []string{"foobar"})
c.Assert(req.Form["Instances.member.1.InstanceId"], DeepEquals, []string{"instance-1"})
c.Assert(req.Form["Instances.member.2.InstanceId"], DeepEquals, []string{"instance-2"})
c.Assert(err, IsNil)
c.Assert(resp.Instances[0].InstanceId, Equals, "i-6ec63d59")
c.Assert(resp.RequestId, Equals, "83c88b9d-12b7-11e3-8b82-87b12EXAMPLE")
}
func (s *S) TestConfigureHealthCheck(c *C) {
testServer.Response(200, nil, ConfigureHealthCheckExample)
options := elb.ConfigureHealthCheck{
LoadBalancerName: "foobar",
Check: elb.HealthCheck{
HealthyThreshold: 2,
UnhealthyThreshold: 2,
Interval: 30,
Target: "HTTP:80/ping",
Timeout: 3,
},
}
resp, err := s.elb.ConfigureHealthCheck(&options)
req := testServer.WaitRequest()
c.Assert(req.Form["Action"], DeepEquals, []string{"ConfigureHealthCheck"})
c.Assert(req.Form["LoadBalancerName"], DeepEquals, []string{"foobar"})
c.Assert(err, IsNil)
c.Assert(resp.Check.HealthyThreshold, Equals, int64(2))
c.Assert(resp.Check.UnhealthyThreshold, Equals, int64(2))
c.Assert(resp.Check.Interval, Equals, int64(30))
c.Assert(resp.Check.Target, Equals, "HTTP:80/ping")
c.Assert(resp.Check.Timeout, Equals, int64(3))
c.Assert(resp.RequestId, Equals, "83c88b9d-12b7-11e3-8b82-87b12EXAMPLE")
}
func (s *S) TestDescribeInstanceHealth(c *C) {
testServer.Response(200, nil, DescribeInstanceHealthExample)
options := elb.DescribeInstanceHealth{
LoadBalancerName: "foobar",
}
resp, err := s.elb.DescribeInstanceHealth(&options)
req := testServer.WaitRequest()
c.Assert(req.Form["Action"], DeepEquals, []string{"DescribeInstanceHealth"})
c.Assert(req.Form["LoadBalancerName"], DeepEquals, []string{"foobar"})
c.Assert(err, IsNil)
c.Assert(resp.InstanceStates[0].InstanceId, Equals, "i-90d8c2a5")
c.Assert(resp.InstanceStates[0].State, Equals, "InService")
c.Assert(resp.InstanceStates[1].InstanceId, Equals, "i-06ea3e60")
c.Assert(resp.InstanceStates[1].State, Equals, "OutOfService")
c.Assert(resp.RequestId, Equals, "1549581b-12b7-11e3-895e-1334aEXAMPLE")
}

View File

@ -1,182 +0,0 @@
package elb_test
var ErrorDump = `
<?xml version="1.0" encoding="UTF-8"?>
<Response><Errors><Error><Code>UnsupportedOperation</Code>
<Message></Message>
</Error></Errors><RequestID>0503f4e9-bbd6-483c-b54f-c4ae9f3b30f4</RequestID></Response>
`
// http://goo.gl/OkMdtJ
var AddTagsExample = `
<AddTagsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
<AddTagsResult/>
<ResponseMetadata>
<RequestId>360e81f7-1100-11e4-b6ed-0f30EXAMPLE</RequestId>
</ResponseMetadata>
</AddTagsResponse>
`
// http://goo.gl/nT2E89
var RemoveTagsExample = `
<RemoveTagsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
<RemoveTagsResult/>
<ResponseMetadata>
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId>
</ResponseMetadata>
</RemoveTagsResponse>
`
// http://goo.gl/gQRD2H
var CreateLoadBalancerExample = `
<CreateLoadBalancerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
<CreateLoadBalancerResult>
<DNSName>MyLoadBalancer-1234567890.us-east-1.elb.amazonaws.com</DNSName>
</CreateLoadBalancerResult>
<ResponseMetadata>
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId>
</ResponseMetadata>
</CreateLoadBalancerResponse>
`
// http://goo.gl/GLZeBN
var DeleteLoadBalancerExample = `
<DeleteLoadBalancerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
<ResponseMetadata>
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId>
</ResponseMetadata>
</DeleteLoadBalancerResponse>
`
// http://goo.gl/8UgpQ8
var DescribeLoadBalancersExample = `
<DescribeLoadBalancersResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
<DescribeLoadBalancersResult>
<LoadBalancerDescriptions>
<member>
<SecurityGroups/>
<LoadBalancerName>MyLoadBalancer</LoadBalancerName>
<CreatedTime>2013-05-24T21:15:31.280Z</CreatedTime>
<HealthCheck>
<Interval>90</Interval>
<Target>HTTP:80/</Target>
<HealthyThreshold>2</HealthyThreshold>
<Timeout>60</Timeout>
<UnhealthyThreshold>10</UnhealthyThreshold>
</HealthCheck>
<ListenerDescriptions>
<member>
<PolicyNames/>
<Listener>
<Protocol>HTTP</Protocol>
<LoadBalancerPort>80</LoadBalancerPort>
<InstanceProtocol>HTTP</InstanceProtocol>
<SSLCertificateId>needToAddASSLCertToYourAWSAccount</SSLCertificateId>
<InstancePort>80</InstancePort>
</Listener>
</member>
</ListenerDescriptions>
<Instances>
<member>
<InstanceId>i-e4cbe38d</InstanceId>
</member>
</Instances>
<Policies>
<AppCookieStickinessPolicies/>
<OtherPolicies/>
<LBCookieStickinessPolicies/>
</Policies>
<AvailabilityZones>
<member>us-east-1a</member>
</AvailabilityZones>
<CanonicalHostedZoneNameID>ZZZZZZZZZZZ123X</CanonicalHostedZoneNameID>
<CanonicalHostedZoneName>MyLoadBalancer-123456789.us-east-1.elb.amazonaws.com</CanonicalHostedZoneName>
<Scheme>internet-facing</Scheme>
<SourceSecurityGroup>
<OwnerAlias>amazon-elb</OwnerAlias>
<GroupName>amazon-elb-sg</GroupName>
</SourceSecurityGroup>
<DNSName>MyLoadBalancer-123456789.us-east-1.elb.amazonaws.com</DNSName>
<BackendServerDescriptions/>
<Subnets/>
</member>
</LoadBalancerDescriptions>
</DescribeLoadBalancersResult>
<ResponseMetadata>
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId>
</ResponseMetadata>
</DescribeLoadBalancersResponse>
`
// http://goo.gl/Uz1N66
var RegisterInstancesWithLoadBalancerExample = `
<RegisterInstancesWithLoadBalancerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
<RegisterInstancesWithLoadBalancerResult>
<Instances>
<member>
<InstanceId>i-315b7e51</InstanceId>
</member>
</Instances>
</RegisterInstancesWithLoadBalancerResult>
<ResponseMetadata>
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId>
</ResponseMetadata>
</RegisterInstancesWithLoadBalancerResponse>
`
// http://goo.gl/5OMv62
var DeregisterInstancesFromLoadBalancerExample = `
<DeregisterInstancesFromLoadBalancerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
<DeregisterInstancesFromLoadBalancerResult>
<Instances>
<member>
<InstanceId>i-6ec63d59</InstanceId>
</member>
</Instances>
</DeregisterInstancesFromLoadBalancerResult>
<ResponseMetadata>
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId>
</ResponseMetadata>
</DeregisterInstancesFromLoadBalancerResponse>
`
// http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_ConfigureHealthCheck.html
var ConfigureHealthCheckExample = `
<ConfigureHealthCheckResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
<ConfigureHealthCheckResult>
<HealthCheck>
<Interval>30</Interval>
<Target>HTTP:80/ping</Target>
<HealthyThreshold>2</HealthyThreshold>
<Timeout>3</Timeout>
<UnhealthyThreshold>2</UnhealthyThreshold>
</HealthCheck>
</ConfigureHealthCheckResult>
<ResponseMetadata>
<RequestId>83c88b9d-12b7-11e3-8b82-87b12EXAMPLE</RequestId>
</ResponseMetadata>
</ConfigureHealthCheckResponse>`
// http://goo.gl/cGNxfj
var DescribeInstanceHealthExample = `
<DescribeInstanceHealthResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2012-06-01/">
<DescribeInstanceHealthResult>
<InstanceStates>
<member>
<Description>N/A</Description>
<InstanceId>i-90d8c2a5</InstanceId>
<State>InService</State>
<ReasonCode>N/A</ReasonCode>
</member>
<member>
<Description>N/A</Description>
<InstanceId>i-06ea3e60</InstanceId>
<State>OutOfService</State>
<ReasonCode>N/A</ReasonCode>
</member>
</InstanceStates>
</DescribeInstanceHealthResult>
<ResponseMetadata>
<RequestId>1549581b-12b7-11e3-895e-1334aEXAMPLE</RequestId>
</ResponseMetadata>
</DescribeInstanceHealthResponse>`

View File

@ -1,38 +0,0 @@
package elb
import (
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"github.com/mitchellh/goamz/aws"
"sort"
"strings"
)
// ----------------------------------------------------------------------------
// Version 2 signing (http://goo.gl/RSRp5)
var b64 = base64.StdEncoding
func sign(auth aws.Auth, method, path string, params map[string]string, host string) {
params["AWSAccessKeyId"] = auth.AccessKey
params["SignatureVersion"] = "2"
params["SignatureMethod"] = "HmacSHA256"
if auth.Token != "" {
params["SecurityToken"] = auth.Token
}
var sarray []string
for k, v := range params {
sarray = append(sarray, aws.Encode(k)+"="+aws.Encode(v))
}
sort.StringSlice(sarray).Sort()
joined := strings.Join(sarray, "&")
payload := method + "\n" + host + "\n" + path + "\n" + joined
hash := hmac.New(sha256.New, []byte(auth.SecretKey))
hash.Write([]byte(payload))
signature := make([]byte, b64.EncodedLen(hash.Size()))
b64.Encode(signature, hash.Sum(nil))
params["Signature"] = string(signature)
}

14
Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,14 @@
language: go
go:
- 1.3
- 1.4
install:
- export GOPATH="$HOME/gopath"
- mkdir -p "$GOPATH/src/golang.org/x"
- mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
- go get -v -t -d -tags='appengine appenginevm' golang.org/x/oauth2/...
script:
- go test -v -tags='appengine appenginevm' golang.org/x/oauth2/...

3
Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

View File

@ -0,0 +1,25 @@
# Contributing
We don't use GitHub pull requests but use Gerrit for code reviews,
similar to the Go project.
1. Sign one of the contributor license agreements below.
2. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
3. Get the package by running `go get -d golang.org/x/oauth2`.
Make changes and create a change by running `git codereview change <name>`, provide a commit message, and use `git codereview mail` to create a Gerrit CL.
Keep amending the change and mailing it as you receive feedback.
For more information about the workflow, see Go's [Contribution Guidelines](https://golang.org/doc/contribute.html).
Before we can accept any pull requests
we have to jump through a couple of legal hurdles,
primarily a Contributor License Agreement (CLA):
- **If you are an individual writing original source code**
and you're sure you own the intellectual property,
then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html).
- **If you work for a company that wants to allow you to contribute your work**,
then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html).
You can sign these electronically (just scroll to the bottom).
After that, we'll be able to accept your pull requests.

View File

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

27
Godeps/_workspace/src/golang.org/x/oauth2/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2009 The oauth2 Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

18
Godeps/_workspace/src/golang.org/x/oauth2/README.md generated vendored Normal file
View File

@ -0,0 +1,18 @@
# OAuth2 for Go
[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
The oauth2 package contains a client implementation for the OAuth 2.0 spec.
## Installation
~~~~
go get golang.org/x/oauth2
~~~~
See godoc for further documentation and examples.
* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
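For orientation, a minimal sketch (not part of this repository) of typical use on Google Compute Engine via the google subpackage; the API URL and project name are placeholders:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// Build an *http.Client whose requests carry access tokens fetched
	// from the GCE metadata server for the "default" service account.
	client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource(""))

	// Placeholder API call; any Google API the service account can
	// access would work here.
	resp, err := client.Get("https://www.googleapis.com/compute/v1/projects/my-project/zones")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}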

View File

@ -0,0 +1,39 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine,!appenginevm
// App Engine hooks.
package oauth2
import (
"log"
"net/http"
"sync"
"appengine"
"appengine/urlfetch"
)
var warnOnce sync.Once
func init() {
registerContextClientFunc(contextClientAppEngine)
}
func contextClientAppEngine(ctx Context) (*http.Client, error) {
if actx, ok := ctx.(appengine.Context); ok {
return urlfetch.Client(actx), nil
}
// The user did it wrong. We'll log once (and hope they see it
// in dev_appserver), but still return (nil, nil) in case some
// other contextClientFunc hook finds a way to proceed.
warnOnce.Do(gaeDoingItWrongHelp)
return nil, nil
}
func gaeDoingItWrongHelp() {
log.Printf("WARNING: you attempted to use the oauth2 package without passing a valid appengine.Context or *http.Request as the oauth2.Context. App Engine requires that all service RPCs (including urlfetch) be associated with an *http.Request/appengine.Context.")
}

View File

@ -0,0 +1,50 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package oauth2_test
import (
"fmt"
"log"
"testing"
"golang.org/x/oauth2"
)
// TODO(jbd): Remove after Go 1.4.
// Related to https://codereview.appspot.com/107320046
func TestA(t *testing.T) {}
func ExampleConfig() {
conf := &oauth2.Config{
ClientID: "YOUR_CLIENT_ID",
ClientSecret: "YOUR_CLIENT_SECRET",
Scopes: []string{"SCOPE1", "SCOPE2"},
Endpoint: oauth2.Endpoint{
AuthURL: "https://provider.com/o/oauth2/auth",
TokenURL: "https://provider.com/o/oauth2/token",
},
}
// Redirect user to consent page to ask for permission
// for the scopes specified above.
url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
fmt.Printf("Visit the URL for the auth dialog: %v", url)
// Use the authorization code that is pushed to the redirect URL.
// Exchange will do the handshake to retrieve an access token;
// Client then wraps it in an *http.Client that is authorized and
// authenticated with the retrieved token.
var code string
if _, err := fmt.Scan(&code); err != nil {
log.Fatal(err)
}
tok, err := conf.Exchange(oauth2.NoContext, code)
if err != nil {
log.Fatal(err)
}
client := conf.Client(oauth2.NoContext, tok)
client.Get("...")
}

View File

@ -0,0 +1,16 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package github provides constants for using OAuth2 to access Github.
package github
import (
"golang.org/x/oauth2"
)
// Endpoint is Github's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://github.com/login/oauth/authorize",
TokenURL: "https://github.com/login/oauth/access_token",
}

View File

@ -0,0 +1,37 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine,!appenginevm
package google
import (
"time"
"appengine"
"golang.org/x/oauth2"
)
// AppEngineTokenSource returns a token source that fetches tokens
// issued to the current App Engine application's service account.
// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
// that involves user accounts, see oauth2.Config instead.
//
// You are required to provide a valid appengine.Context as context.
func AppEngineTokenSource(ctx appengine.Context, scope ...string) oauth2.TokenSource {
return &appEngineTokenSource{
ctx: ctx,
scopes: scope,
fetcherFunc: aeFetcherFunc,
}
}
var aeFetcherFunc = func(ctx oauth2.Context, scope ...string) (string, time.Time, error) {
c, ok := ctx.(appengine.Context)
if !ok {
return "", time.Time{}, errInvalidContext
}
return appengine.AccessToken(c, scope...)
}

View File

@ -0,0 +1,36 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appenginevm !appengine
package google
import (
"time"
"golang.org/x/oauth2"
"google.golang.org/appengine"
)
// AppEngineTokenSource returns a token source that fetches tokens
// issued to the current App Engine application's service account.
// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
// that involves user accounts, see oauth2.Config instead.
//
// You are required to provide a valid appengine.Context as context.
func AppEngineTokenSource(ctx appengine.Context, scope ...string) oauth2.TokenSource {
return &appEngineTokenSource{
ctx: ctx,
scopes: scope,
fetcherFunc: aeVMFetcherFunc,
}
}
var aeVMFetcherFunc = func(ctx oauth2.Context, scope ...string) (string, time.Time, error) {
c, ok := ctx.(appengine.Context)
if !ok {
return "", time.Time{}, errInvalidContext
}
return appengine.AccessToken(c, scope...)
}

View File

@ -0,0 +1,133 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appenginevm !appengine
package google_test
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"testing"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"golang.org/x/oauth2/jwt"
"google.golang.org/appengine"
"google.golang.org/appengine/urlfetch"
)
// Remove after Go 1.4.
// Related to https://codereview.appspot.com/107320046
func TestA(t *testing.T) {}
func Example_webServer() {
// Your credentials should be obtained from the Google
// Developer Console (https://console.developers.google.com).
conf := &oauth2.Config{
ClientID: "YOUR_CLIENT_ID",
ClientSecret: "YOUR_CLIENT_SECRET",
RedirectURL: "YOUR_REDIRECT_URL",
Scopes: []string{
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/blogger",
},
Endpoint: google.Endpoint,
}
// Redirect user to Google's consent page to ask for permission
// for the scopes specified above.
url := conf.AuthCodeURL("state")
fmt.Printf("Visit the URL for the auth dialog: %v", url)
// Handle the exchange code to initiate a transport.
tok, err := conf.Exchange(oauth2.NoContext, "authorization-code")
if err != nil {
log.Fatal(err)
}
client := conf.Client(oauth2.NoContext, tok)
client.Get("...")
}
func ExampleJWTConfigFromJSON() {
// Your credentials should be obtained from the Google
// Developer Console (https://console.developers.google.com).
// Navigate to your project, then see the "Credentials" page
// under "APIs & Auth".
// To create a service account client, click "Create new Client ID",
// select "Service Account", and click "Create Client ID". A JSON
// key file will then be downloaded to your computer.
data, err := ioutil.ReadFile("/path/to/your-project-key.json")
if err != nil {
log.Fatal(err)
}
conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/bigquery")
if err != nil {
log.Fatal(err)
}
// Create an http.Client. The following GET request will be
// authorized and authenticated on behalf of
// your service account.
client := conf.Client(oauth2.NoContext)
client.Get("...")
}
func Example_serviceAccount() {
// Your credentials should be obtained from the Google
// Developer Console (https://console.developers.google.com).
conf := &jwt.Config{
Email: "xxx@developer.gserviceaccount.com",
// The contents of your RSA private key or your PEM file
// that contains a private key.
// If you have a p12 file instead, you
// can use `openssl` to export the private key into a pem file.
//
// $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
//
// The field only supports PEM containers with no passphrase.
// The openssl command will convert p12 keys to passphrase-less PEM containers.
PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
Scopes: []string{
"https://www.googleapis.com/auth/bigquery",
"https://www.googleapis.com/auth/blogger",
},
TokenURL: google.JWTTokenURL,
// If you would like to impersonate a user, you can
// create a transport with a subject. The following GET
// request will be made on behalf of user@example.com.
// Optional.
Subject: "user@example.com",
}
// Create an http.Client; the following GET request will be
// authorized and authenticated on behalf of user@example.com.
client := conf.Client(oauth2.NoContext)
client.Get("...")
}
func ExampleAppEngineTokenSource() {
var req *http.Request // from the ServeHTTP handler
ctx := appengine.NewContext(req)
client := &http.Client{
Transport: &oauth2.Transport{
Source: google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/bigquery"),
Base: &urlfetch.Transport{
Context: ctx,
},
},
}
client.Get("...")
}
func ExampleComputeTokenSource() {
client := &http.Client{
Transport: &oauth2.Transport{
// Fetch from Google Compute Engine's metadata server to retrieve
// an access token for the provided account.
// If no account is specified, "default" is used.
Source: google.ComputeTokenSource(""),
},
}
client.Get("...")
}

View File

@ -0,0 +1,103 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package google provides support for making
// OAuth2 authorized and authenticated HTTP requests
// to Google APIs. It supports the web server, client-side,
// service account, Google Compute Engine service account,
// and Google App Engine service account authorization
// and authentication flows.
//
// For more information, please read
// https://developers.google.com/accounts/docs/OAuth2.
package google
import (
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jwt"
"google.golang.org/cloud/compute/metadata"
)
// TODO(bradfitz,jbd): import "google.golang.org/cloud/compute/metadata" instead of
// the metaClient and metadata.google.internal stuff below.
// Endpoint is Google's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://accounts.google.com/o/oauth2/auth",
TokenURL: "https://accounts.google.com/o/oauth2/token",
}
// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
// the credentials that authorize and authenticate the requests.
// Create a service account on "Credentials" page under "APIs & Auth" for your
// project at https://console.developers.google.com to download a JSON key file.
func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
var key struct {
Email string `json:"client_email"`
PrivateKey string `json:"private_key"`
}
if err := json.Unmarshal(jsonKey, &key); err != nil {
return nil, err
}
return &jwt.Config{
Email: key.Email,
PrivateKey: []byte(key.PrivateKey),
Scopes: scope,
TokenURL: JWTTokenURL,
}, nil
}
// ComputeTokenSource returns a token source that fetches access tokens
// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
// this token source if your program is running on a GCE instance.
// If no account is specified, "default" is used.
// Further information about retrieving access tokens from the GCE metadata
// server can be found at https://cloud.google.com/compute/docs/authentication.
func ComputeTokenSource(account string) oauth2.TokenSource {
return oauth2.ReuseTokenSource(nil, computeSource{account: account})
}
type computeSource struct {
account string
}
func (cs computeSource) Token() (*oauth2.Token, error) {
if !metadata.OnGCE() {
return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
}
acct := cs.account
if acct == "" {
acct = "default"
}
tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
if err != nil {
return nil, err
}
var res struct {
AccessToken string `json:"access_token"`
ExpiresInSec int `json:"expires_in"`
TokenType string `json:"token_type"`
}
err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
if err != nil {
return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
}
if res.ExpiresInSec == 0 || res.AccessToken == "" {
return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
}
return &oauth2.Token{
AccessToken: res.AccessToken,
TokenType: res.TokenType,
Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
}, nil
}

View File

@ -0,0 +1,71 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"errors"
"sort"
"strings"
"sync"
"time"
"golang.org/x/oauth2"
)
var (
aeTokensMu sync.Mutex // guards aeTokens and appEngineTokenSource.key
// aeTokens caches fetched tokens so they can be reused until they expire.
aeTokens = make(map[string]*tokenLock) // key is '\0'-separated scopes
)
var errInvalidContext = errors.New("oauth2: a valid appengine.Context is required")
type tokenLock struct {
mu sync.Mutex // guards t; held while updating t
t *oauth2.Token
}
type appEngineTokenSource struct {
ctx oauth2.Context
// fetcherFunc makes the actual RPC to fetch a new access
// token with an expiry time. The provider of this function is
// responsible for asserting that the given context is valid.
fetcherFunc func(ctx oauth2.Context, scope ...string) (accessToken string, expiry time.Time, err error)
// scopes and key are guarded by the package-level mutex aeTokensMu
scopes []string
key string
}
func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
aeTokensMu.Lock()
if ts.key == "" {
sort.Sort(sort.StringSlice(ts.scopes))
ts.key = strings.Join(ts.scopes, string(0))
}
tok, ok := aeTokens[ts.key]
if !ok {
tok = &tokenLock{}
aeTokens[ts.key] = tok
}
aeTokensMu.Unlock()
tok.mu.Lock()
defer tok.mu.Unlock()
if tok.t.Valid() {
return tok.t, nil
}
access, exp, err := ts.fetcherFunc(ts.ctx, ts.scopes...)
if err != nil {
return nil, err
}
tok.t = &oauth2.Token{
AccessToken: access,
Expiry: exp,
}
return tok.t, nil
}

View File

@ -0,0 +1,37 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package internal contains support packages for oauth2 package.
package internal
import (
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
)
// ParseKey converts the binary contents of a private key file
// to an *rsa.PrivateKey. It detects whether the private key is in a
// PEM container or not. If so, it extracts the private key
// from the PEM container before conversion. It only supports PEM
// containers with no passphrase.
func ParseKey(key []byte) (*rsa.PrivateKey, error) {
block, _ := pem.Decode(key)
if block != nil {
key = block.Bytes
}
parsedKey, err := x509.ParsePKCS8PrivateKey(key)
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
if err != nil {
return nil, err
}
}
parsed, ok := parsedKey.(*rsa.PrivateKey)
if !ok {
return nil, errors.New("oauth2: private key is invalid")
}
return parsed, nil
}

160
Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go generated vendored Normal file
View File

@ -0,0 +1,160 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package jws provides encoding and decoding utilities for
// signed JWS messages.
package jws
import (
"bytes"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
)
// ClaimSet contains information about the JWT signature including the
// permissions being requested (scopes), the target of the token, the issuer,
// the time the token was issued, and the lifetime of the token.
type ClaimSet struct {
Iss string `json:"iss"` // email address of the client_id of the application making the access token request
Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional).
Exp int64 `json:"exp"` // the expiration time of the assertion
Iat int64 `json:"iat"` // the time the assertion was issued.
Typ string `json:"typ,omitempty"` // token type (Optional).
// Email for which the application is requesting delegated access (Optional).
Sub string `json:"sub,omitempty"`
// Prn is the old name of Sub. The client keeps setting Prn to stay
// compliant with legacy OAuth 2.0 providers. (Optional)
Prn string `json:"prn,omitempty"`
// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
// This map is marshalled using custom code (see (c *ClaimSet) encode()).
PrivateClaims map[string]interface{} `json:"-"`
exp time.Time
iat time.Time
}
func (c *ClaimSet) encode() (string, error) {
if c.exp.IsZero() || c.iat.IsZero() {
// Reverting time back for machines whose time is not perfectly in sync.
// If the client machine's time is in the future according
// to Google servers, an access token will not be issued.
now := time.Now().Add(-10 * time.Second)
c.iat = now
c.exp = now.Add(time.Hour)
}
c.Exp = c.exp.Unix()
c.Iat = c.iat.Unix()
b, err := json.Marshal(c)
if err != nil {
return "", err
}
if len(c.PrivateClaims) == 0 {
return base64Encode(b), nil
}
// Marshal private claim set and then append it to b.
prv, err := json.Marshal(c.PrivateClaims)
if err != nil {
return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
}
// Concatenate public and private claim JSON objects.
if !bytes.HasSuffix(b, []byte{'}'}) {
return "", fmt.Errorf("jws: invalid JSON %s", b)
}
if !bytes.HasPrefix(prv, []byte{'{'}) {
return "", fmt.Errorf("jws: invalid JSON %s", prv)
}
b[len(b)-1] = ',' // Replace closing curly brace with a comma.
b = append(b, prv[1:]...) // Append private claims.
return base64Encode(b), nil
}
// Header represents the header for the signed JWS payloads.
type Header struct {
// The algorithm used for signature.
Algorithm string `json:"alg"`
// Represents the token type.
Typ string `json:"typ"`
}
func (h *Header) encode() (string, error) {
b, err := json.Marshal(h)
if err != nil {
return "", err
}
return base64Encode(b), nil
}
// Decode decodes a claim set from a JWS payload.
func Decode(payload string) (*ClaimSet, error) {
// decode returned id token to get expiry
s := strings.Split(payload, ".")
if len(s) < 2 {
// TODO(jbd): Provide more context about the error.
return nil, errors.New("jws: invalid token received")
}
decoded, err := base64Decode(s[1])
if err != nil {
return nil, err
}
c := &ClaimSet{}
err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
return c, err
}
// Encode encodes a signed JWS with provided header and claim set.
func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) {
head, err := header.encode()
if err != nil {
return "", err
}
cs, err := c.encode()
if err != nil {
return "", err
}
ss := fmt.Sprintf("%s.%s", head, cs)
h := sha256.New()
h.Write([]byte(ss))
b, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil))
if err != nil {
return "", err
}
sig := base64Encode(b)
return fmt.Sprintf("%s.%s", ss, sig), nil
}
// base64Encode returns a Base64url-encoded version of the input with any
// trailing "=" stripped.
func base64Encode(b []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
// base64Decode decodes the Base64url encoded string
func base64Decode(s string) ([]byte, error) {
// add back missing padding
switch len(s) % 4 {
case 2:
s += "=="
case 3:
s += "="
}
return base64.URLEncoding.DecodeString(s)
}
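For orientation, a minimal sketch (not part of this repository) showing how jws.Encode and jws.Decode fit together; the RSA key is freshly generated and the claim values are placeholders. Note that Decode only parses the claim set back out and does not verify the signature:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/oauth2/jws"
)

func main() {
	// Generate a throwaway RSA key; a real caller would load a service-account key.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}
	claims := &jws.ClaimSet{
		Iss:   "service-account@example.com",
		Scope: "https://www.googleapis.com/auth/devstorage.read_only",
		Aud:   "https://accounts.google.com/o/oauth2/token",
	}
	// Encode signs header.claims with the RSA key, producing a compact JWS.
	token, err := jws.Encode(header, claims, key)
	if err != nil {
		log.Fatal(err)
	}
	// Decode parses the claim set from the payload (no signature check).
	got, err := jws.Decode(token)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(got.Iss)
}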

View File

@ -0,0 +1,31 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jwt_test
import (
"golang.org/x/oauth2"
"golang.org/x/oauth2/jwt"
)
func ExampleJWTConfig() {
conf := &jwt.Config{
Email: "xxx@developer.com",
// The contents of your RSA private key or your PEM file
// that contains a private key.
// If you have a p12 file instead, you
// can use `openssl` to export the private key into a pem file.
//
// $ openssl pkcs12 -in key.p12 -out key.pem -nodes
//
// It only supports PEM containers with no passphrase.
PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
Subject: "user@example.com",
TokenURL: "https://provider.com/o/oauth2/token",
}
// Create an http.Client; the following GET request will be
// authorized and authenticated on behalf of user@example.com.
client := conf.Client(oauth2.NoContext)
client.Get("...")
}

146
Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go generated vendored Normal file
View File

@ -0,0 +1,146 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
// known as "two-legged OAuth 2.0".
//
// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
package jwt
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/internal"
"golang.org/x/oauth2/jws"
)
var (
defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
)
// Config is the configuration for using JWT to fetch tokens,
// commonly known as "two-legged OAuth 2.0".
type Config struct {
// Email is the OAuth client identifier used when communicating with
// the configured OAuth provider.
Email string
// PrivateKey contains the contents of an RSA private key or the
// contents of a PEM file that contains a private key. The provided
// private key is used to sign JWT payloads.
// PEM containers with a passphrase are not supported.
// Use the following command to convert a PKCS 12 file into a PEM.
//
// $ openssl pkcs12 -in key.p12 -out key.pem -nodes
//
PrivateKey []byte
// Subject is the optional user to impersonate.
Subject string
// Scopes optionally specifies a list of requested permission scopes.
Scopes []string
// TokenURL is the endpoint required to complete the 2-legged JWT flow.
TokenURL string
}
// TokenSource returns a JWT TokenSource using the configuration
// in c and the HTTP client from the provided context.
func (c *Config) TokenSource(ctx oauth2.Context) oauth2.TokenSource {
return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
}
// Client returns an HTTP client wrapping the context's
// HTTP transport and adding Authorization headers with tokens
// obtained from c.
//
// The returned client and its Transport should not be modified.
func (c *Config) Client(ctx oauth2.Context) *http.Client {
return oauth2.NewClient(ctx, c.TokenSource(ctx))
}
// jwtSource is a source that always does a signed JWT request for a token.
// It should typically be wrapped with a reuseTokenSource.
type jwtSource struct {
ctx oauth2.Context
conf *Config
}
func (js jwtSource) Token() (*oauth2.Token, error) {
pk, err := internal.ParseKey(js.conf.PrivateKey)
if err != nil {
return nil, err
}
hc := oauth2.NewClient(js.ctx, nil)
claimSet := &jws.ClaimSet{
Iss: js.conf.Email,
Scope: strings.Join(js.conf.Scopes, " "),
Aud: js.conf.TokenURL,
}
if subject := js.conf.Subject; subject != "" {
claimSet.Sub = subject
// prn is the old name of sub. Keep setting it
// to be compatible with legacy OAuth 2.0 providers.
claimSet.Prn = subject
}
payload, err := jws.Encode(defaultHeader, claimSet, pk)
if err != nil {
return nil, err
}
v := url.Values{}
v.Set("grant_type", defaultGrantType)
v.Set("assertion", payload)
resp, err := hc.PostForm(js.conf.TokenURL, v)
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
if c := resp.StatusCode; c < 200 || c > 299 {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
}
// tokenRes is the JSON response body.
var tokenRes struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
ExpiresIn int64 `json:"expires_in"` // relative seconds from now
}
if err := json.Unmarshal(body, &tokenRes); err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
token := &oauth2.Token{
AccessToken: tokenRes.AccessToken,
TokenType: tokenRes.TokenType,
}
raw := make(map[string]interface{})
json.Unmarshal(body, &raw) // no error checks for optional fields
token = token.WithExtra(raw)
if secs := tokenRes.ExpiresIn; secs > 0 {
token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
}
if v := tokenRes.IDToken; v != "" {
// decode returned id token to get expiry
claimSet, err := jws.Decode(v)
if err != nil {
return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
}
token.Expiry = time.Unix(claimSet.Exp, 0)
}
return token, nil
}

View File

@ -0,0 +1,134 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jwt
import (
"net/http"
"net/http/httptest"
"testing"
"golang.org/x/oauth2"
)
var dummyPrivateKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE
DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY
fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK
1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr
k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9
/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt
3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn
2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3
nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK
6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf
5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e
DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1
M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g
z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y
1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK
J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U
f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx
QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA
cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr
Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw
5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg
KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84
OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd
mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ
5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg==
-----END RSA PRIVATE KEY-----`)
func TestJWTFetch_JSONResponse(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{
"access_token": "90d64460d14870c08c81352a05dedd3465940a7c",
"scope": "user",
"token_type": "bearer",
"expires_in": 3600
}`))
}))
defer ts.Close()
conf := &Config{
Email: "aaa@xxx.com",
PrivateKey: dummyPrivateKey,
TokenURL: ts.URL,
}
tok, err := conf.TokenSource(oauth2.NoContext).Token()
if err != nil {
t.Fatal(err)
}
if !tok.Valid() {
t.Errorf("Token invalid")
}
if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
t.Errorf("Unexpected access token, %#v", tok.AccessToken)
}
if tok.TokenType != "bearer" {
t.Errorf("Unexpected token type, %#v", tok.TokenType)
}
if tok.Expiry.IsZero() {
t.Errorf("Unexpected token expiry, %#v", tok.Expiry)
}
scope := tok.Extra("scope")
if scope != "user" {
t.Errorf("Unexpected value for scope: %v", scope)
}
}
func TestJWTFetch_BadResponse(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
}))
defer ts.Close()
conf := &Config{
Email: "aaa@xxx.com",
PrivateKey: dummyPrivateKey,
TokenURL: ts.URL,
}
tok, err := conf.TokenSource(oauth2.NoContext).Token()
if err != nil {
t.Fatal(err)
}
if tok == nil {
t.Fatalf("token is nil")
}
if tok.Valid() {
t.Errorf("token is valid. want invalid.")
}
if tok.AccessToken != "" {
t.Errorf("Unexpected non-empty access token %q.", tok.AccessToken)
}
if want := "bearer"; tok.TokenType != want {
t.Errorf("TokenType = %q; want %q", tok.TokenType, want)
}
scope := tok.Extra("scope")
if want := "user"; scope != want {
t.Errorf("token scope = %q; want %q", scope, want)
}
}
func TestJWTFetch_BadResponseType(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`))
}))
defer ts.Close()
conf := &Config{
Email: "aaa@xxx.com",
PrivateKey: dummyPrivateKey,
TokenURL: ts.URL,
}
tok, err := conf.TokenSource(oauth2.NoContext).Token()
if err == nil {
t.Error("got a token; expected error")
if tok.AccessToken != "" {
t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
}
}
}

462
Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go generated vendored Normal file
View File

@ -0,0 +1,462 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package oauth2 provides support for making
// OAuth2 authorized and authenticated HTTP requests.
// It can additionally grant authorization with Bearer JWT.
package oauth2
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/net/context"
)
// Context can be a golang.org/x/net.Context, or an App Engine Context.
// If you don't care and aren't running on App Engine, you may use NoContext.
type Context interface{}
// NoContext is the default context. If you're not running this code
// on App Engine or not using golang.org/x/net.Context to provide a custom
// HTTP client, you should use NoContext.
var NoContext Context = nil
// Config describes a typical 3-legged OAuth2 flow, with both the
// client application information and the server's endpoint URLs.
type Config struct {
// ClientID is the application's ID.
ClientID string
// ClientSecret is the application's secret.
ClientSecret string
// Endpoint contains the resource server's token endpoint
// URLs. These are constants specific to each server and are
// often available via site-specific packages, such as
// google.Endpoint or github.Endpoint.
Endpoint Endpoint
// RedirectURL is the URL to which users are redirected after they
// complete the OAuth flow on the provider's authorization pages.
RedirectURL string
// Scopes specifies optional requested permissions.
Scopes []string
}
// A TokenSource is anything that can return a token.
type TokenSource interface {
// Token returns a token or an error.
// Token must be safe for concurrent use by multiple goroutines.
// The returned Token must not be modified.
Token() (*Token, error)
}
// Endpoint contains the OAuth 2.0 provider's authorization and token
// endpoint URLs.
type Endpoint struct {
AuthURL string
TokenURL string
}
var (
// AccessTypeOnline and AccessTypeOffline are options passed
// to the Config.AuthCodeURL method. They modify the
// "access_type" field that gets sent in the URL returned by
// AuthCodeURL.
//
// Online is the default if neither is specified.
// If your application needs to refresh access tokens when the
// user is not present at the browser, then use offline. This
// will result in your application obtaining a refresh token
// the first time your application exchanges an authorization
// code for a user.
AccessTypeOnline AuthCodeOption = setParam{"access_type", "online"}
AccessTypeOffline AuthCodeOption = setParam{"access_type", "offline"}
// ApprovalForce forces the users to view the consent dialog
// and confirm the permissions request at the URL returned
// from AuthCodeURL, even if they've already done so.
ApprovalForce AuthCodeOption = setParam{"approval_prompt", "force"}
)
type setParam struct{ k, v string }
func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
// An AuthCodeOption is passed to Config.AuthCodeURL.
type AuthCodeOption interface {
setValue(url.Values)
}
// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
// that asks for permissions for the required scopes explicitly.
//
// State is a token to protect the user from CSRF attacks. You must
// always provide a non-zero string and validate that it matches the
// state query parameter on your redirect callback.
// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
//
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
// as ApprovalForce.
func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
var buf bytes.Buffer
buf.WriteString(c.Endpoint.AuthURL)
v := url.Values{
"response_type": {"code"},
"client_id": {c.ClientID},
"redirect_uri": condVal(c.RedirectURL),
"scope": condVal(strings.Join(c.Scopes, " ")),
"state": condVal(state),
}
for _, opt := range opts {
opt.setValue(v)
}
if strings.Contains(c.Endpoint.AuthURL, "?") {
buf.WriteByte('&')
} else {
buf.WriteByte('?')
}
buf.WriteString(v.Encode())
return buf.String()
}
// Exchange converts an authorization code into a token.
//
// It is used after a resource provider redirects the user back
// to the Redirect URI (the URL obtained from AuthCodeURL).
//
// The HTTP client to use is derived from the context. If nil,
// http.DefaultClient is used. See the Context type's documentation.
//
// The code will be in the *http.Request.FormValue("code"). Before
// calling Exchange, be sure to validate FormValue("state").
func (c *Config) Exchange(ctx Context, code string) (*Token, error) {
return retrieveToken(ctx, c, url.Values{
"grant_type": {"authorization_code"},
"code": {code},
"redirect_uri": condVal(c.RedirectURL),
"scope": condVal(strings.Join(c.Scopes, " ")),
})
}
// contextClientFunc is a func which tries to return an *http.Client
// given a Context value. If it returns an error, the search stops
// with that error. If it returns (nil, nil), the search continues
// down the list of registered funcs.
type contextClientFunc func(Context) (*http.Client, error)
var contextClientFuncs []contextClientFunc
func registerContextClientFunc(fn contextClientFunc) {
contextClientFuncs = append(contextClientFuncs, fn)
}
func contextClient(ctx Context) (*http.Client, error) {
for _, fn := range contextClientFuncs {
c, err := fn(ctx)
if err != nil {
return nil, err
}
if c != nil {
return c, nil
}
}
if xc, ok := ctx.(context.Context); ok {
if hc, ok := xc.Value(HTTPClient).(*http.Client); ok {
return hc, nil
}
}
return http.DefaultClient, nil
}
func contextTransport(ctx Context) http.RoundTripper {
hc, err := contextClient(ctx)
if err != nil {
// This is a rare error case (somebody using nil on App Engine),
// so I'd rather not everybody do an error check on this Client
// method. They can get the error that they're doing it wrong
// later, at client.Get/PostForm time.
return errorTransport{err}
}
return hc.Transport
}
// Client returns an HTTP client using the provided token.
// The token will auto-refresh as necessary. The underlying
// HTTP transport will be obtained using the provided context.
// The returned client and its Transport should not be modified.
func (c *Config) Client(ctx Context, t *Token) *http.Client {
return NewClient(ctx, c.TokenSource(ctx, t))
}
// TokenSource returns a TokenSource that returns t until t expires,
// automatically refreshing it as necessary using the provided context.
// See the Context documentation.
//
// Most users will use Config.Client instead.
func (c *Config) TokenSource(ctx Context, t *Token) TokenSource {
nwn := &reuseTokenSource{t: t}
nwn.new = tokenRefresher{
ctx: ctx,
conf: c,
oldToken: &nwn.t,
}
return nwn
}
// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
// HTTP requests to renew a token using a RefreshToken.
type tokenRefresher struct {
ctx Context // used to get HTTP requests
conf *Config
oldToken **Token // pointer to old *Token w/ RefreshToken
}
func (tf tokenRefresher) Token() (*Token, error) {
t := *tf.oldToken
if t == nil {
return nil, errors.New("oauth2: attempted use of nil Token")
}
if t.RefreshToken == "" {
return nil, errors.New("oauth2: token expired and refresh token is not set")
}
return retrieveToken(tf.ctx, tf.conf, url.Values{
"grant_type": {"refresh_token"},
"refresh_token": {t.RefreshToken},
})
}
// reuseTokenSource is a TokenSource that holds a single token in memory
// and validates its expiry before each call to retrieve it with
// Token. If it's expired, it will be auto-refreshed using the
// new TokenSource.
type reuseTokenSource struct {
new TokenSource // called when t is expired.
mu sync.Mutex // guards t
t *Token
}
// Token returns the current token if it's still valid, else it
// refreshes the token using the wrapped TokenSource and returns
// the new one.
func (s *reuseTokenSource) Token() (*Token, error) {
s.mu.Lock()
defer s.mu.Unlock()
if s.t.Valid() {
return s.t, nil
}
t, err := s.new.Token()
if err != nil {
return nil, err
}
s.t = t
return t, nil
}
func retrieveToken(ctx Context, c *Config, v url.Values) (*Token, error) {
hc, err := contextClient(ctx)
if err != nil {
return nil, err
}
v.Set("client_id", c.ClientID)
bustedAuth := !providerAuthHeaderWorks(c.Endpoint.TokenURL)
if bustedAuth && c.ClientSecret != "" {
v.Set("client_secret", c.ClientSecret)
}
req, err := http.NewRequest("POST", c.Endpoint.TokenURL, strings.NewReader(v.Encode()))
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
if !bustedAuth && c.ClientSecret != "" {
req.SetBasicAuth(c.ClientID, c.ClientSecret)
}
r, err := hc.Do(req)
if err != nil {
return nil, err
}
defer r.Body.Close()
body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
if err != nil {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
}
if code := r.StatusCode; code < 200 || code > 299 {
return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
}
var token *Token
content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
switch content {
case "application/x-www-form-urlencoded", "text/plain":
vals, err := url.ParseQuery(string(body))
if err != nil {
return nil, err
}
token = &Token{
AccessToken: vals.Get("access_token"),
TokenType: vals.Get("token_type"),
RefreshToken: vals.Get("refresh_token"),
raw: vals,
}
e := vals.Get("expires_in")
if e == "" {
// TODO(jbd): Facebook's OAuth2 implementation is broken and
// returns expires_in field in expires. Remove the fallback to expires,
// when Facebook fixes their implementation.
e = vals.Get("expires")
}
expires, _ := strconv.Atoi(e)
if expires != 0 {
token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
}
default:
var tj tokenJSON
if err = json.Unmarshal(body, &tj); err != nil {
return nil, err
}
token = &Token{
AccessToken: tj.AccessToken,
TokenType: tj.TokenType,
RefreshToken: tj.RefreshToken,
Expiry: tj.expiry(),
raw: make(map[string]interface{}),
}
json.Unmarshal(body, &token.raw) // no error checks for optional fields
}
// Don't overwrite `RefreshToken` with an empty value
// if this was a token refreshing request.
if token.RefreshToken == "" {
token.RefreshToken = v.Get("refresh_token")
}
return token, nil
}
// tokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type tokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"`
Expires int32 `json:"expires"` // broken Facebook spelling of expires_in
}
func (e *tokenJSON) expiry() (t time.Time) {
if v := e.ExpiresIn; v != 0 {
return time.Now().Add(time.Duration(v) * time.Second)
}
if v := e.Expires; v != 0 {
return time.Now().Add(time.Duration(v) * time.Second)
}
return
}
func condVal(v string) []string {
if v == "" {
return nil
}
return []string{v}
}
// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
// implements the OAuth2 spec correctly.
// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
// In summary:
// - Reddit only accepts the client secret in the Authorization header.
// - Dropbox accepts it either in the URL param or the Auth header, but not both.
// - Google only accepts the URL param (not spec compliant?), not the Auth header.
func providerAuthHeaderWorks(tokenURL string) bool {
if strings.HasPrefix(tokenURL, "https://accounts.google.com/") ||
strings.HasPrefix(tokenURL, "https://github.com/") ||
strings.HasPrefix(tokenURL, "https://api.instagram.com/") ||
strings.HasPrefix(tokenURL, "https://www.douban.com/") ||
strings.HasPrefix(tokenURL, "https://api.dropbox.com/") ||
strings.HasPrefix(tokenURL, "https://api.soundcloud.com/") ||
strings.HasPrefix(tokenURL, "https://www.linkedin.com/") {
// Some sites fail to implement the OAuth2 spec fully.
return false
}
// Assume the provider implements the spec properly
// otherwise. We can add more exceptions as they're
// discovered. We will _not_ be adding configurable hooks
// to this package to let users select server bugs.
return true
}
// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
var HTTPClient contextKey
// contextKey is just an empty struct. It exists so HTTPClient can be
// an immutable public variable with a unique type. It's immutable
// because nobody else can create a contextKey, being unexported.
type contextKey struct{}
// NewClient creates an *http.Client from a Context and TokenSource.
// The returned client is not valid beyond the lifetime of the context.
//
// As a special case, if src is nil, a non-OAuth2 client is returned
// using the provided context. This exists to support related OAuth2
// packages.
func NewClient(ctx Context, src TokenSource) *http.Client {
if src == nil {
c, err := contextClient(ctx)
if err != nil {
return &http.Client{Transport: errorTransport{err}}
}
return c
}
return &http.Client{
Transport: &Transport{
Base: contextTransport(ctx),
Source: ReuseTokenSource(nil, src),
},
}
}
// ReuseTokenSource returns a TokenSource which repeatedly returns the
// same token as long as it's valid, starting with t.
// When its cached token is invalid, a new token is obtained from src.
//
// ReuseTokenSource is typically used to reuse tokens from a cache
// (such as a file on disk) between runs of a program, rather than
// obtaining new tokens unnecessarily.
//
// The initial token t may be nil, in which case the TokenSource is
// wrapped in a caching version if it isn't one already. This also
// means it's always safe to wrap ReuseTokenSource around any other
// TokenSource without adverse effects.
func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
// Don't wrap a reuseTokenSource in itself. That would work,
// but cause an unnecessary number of mutex operations.
// Just build the equivalent one.
if rt, ok := src.(*reuseTokenSource); ok {
if t == nil {
// Just use it directly.
return rt
}
src = rt.new
}
return &reuseTokenSource{
t: t,
new: src,
}
}
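As a usage note, the 3-legged flow described by the Config, AuthCodeURL, Exchange, and Client doc comments above fits together roughly as in the sketch below. The endpoint URLs, client credentials, redirect URL, and authorization code are placeholders; in a real handler the code would come from the callback request after the state parameter has been verified.

```
package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		RedirectURL:  "https://example.com/oauth2/callback",
		Scopes:       []string{"scope1", "scope2"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/o/oauth2/auth",
			TokenURL: "https://provider.example.com/o/oauth2/token",
		},
	}

	// 1. Send the user to the consent page; "state" should be a random
	//    value that is checked again on the redirect callback.
	url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
	fmt.Println("Visit:", url)

	// 2. The provider redirects back with ?code=...; exchange it for a token.
	tok, err := conf.Exchange(oauth2.NoContext, "authorization-code-from-callback")
	if err != nil {
		log.Fatal(err)
	}

	// 3. Use the token; the returned client refreshes it automatically.
	client := conf.Client(oauth2.NoContext, tok)
	resp, err := client.Get("https://provider.example.com/api/resource")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```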

View File

@ -0,0 +1,260 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package oauth2
import (
"errors"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"golang.org/x/net/context"
)
type mockTransport struct {
rt func(req *http.Request) (resp *http.Response, err error)
}
func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
return t.rt(req)
}
type mockCache struct {
token *Token
readErr error
}
func (c *mockCache) ReadToken() (*Token, error) {
return c.token, c.readErr
}
func (c *mockCache) WriteToken(*Token) {
// do nothing
}
func newConf(url string) *Config {
return &Config{
ClientID: "CLIENT_ID",
ClientSecret: "CLIENT_SECRET",
RedirectURL: "REDIRECT_URL",
Scopes: []string{"scope1", "scope2"},
Endpoint: Endpoint{
AuthURL: url + "/auth",
TokenURL: url + "/token",
},
}
}
func TestAuthCodeURL(t *testing.T) {
conf := newConf("server")
url := conf.AuthCodeURL("foo", AccessTypeOffline, ApprovalForce)
if url != "server/auth?access_type=offline&approval_prompt=force&client_id=CLIENT_ID&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=foo" {
t.Errorf("Auth code URL doesn't match the expected, found: %v", url)
}
}
func TestAuthCodeURL_Optional(t *testing.T) {
conf := &Config{
ClientID: "CLIENT_ID",
Endpoint: Endpoint{
AuthURL: "/auth-url",
TokenURL: "/token-url",
},
}
url := conf.AuthCodeURL("")
if url != "/auth-url?client_id=CLIENT_ID&response_type=code" {
t.Fatalf("Auth code URL doesn't match the expected, found: %v", url)
}
}
func TestExchangeRequest(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.String() != "/token" {
t.Errorf("Unexpected exchange request URL, %v is found.", r.URL)
}
headerAuth := r.Header.Get("Authorization")
if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
}
headerContentType := r.Header.Get("Content-Type")
if headerContentType != "application/x-www-form-urlencoded" {
t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
}
body, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Errorf("Failed reading request body: %s.", err)
}
if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" {
t.Errorf("Unexpected exchange payload, %v is found.", string(body))
}
w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
}))
defer ts.Close()
conf := newConf(ts.URL)
tok, err := conf.Exchange(NoContext, "exchange-code")
if err != nil {
t.Error(err)
}
if !tok.Valid() {
t.Fatalf("Token invalid. Got: %#v", tok)
}
if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
}
if tok.TokenType != "bearer" {
t.Errorf("Unexpected token type, %#v.", tok.TokenType)
}
scope := tok.Extra("scope")
if scope != "user" {
t.Errorf("Unexpected value for scope: %v", scope)
}
}
func TestExchangeRequest_JSONResponse(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.String() != "/token" {
t.Errorf("Unexpected exchange request URL, %v is found.", r.URL)
}
headerAuth := r.Header.Get("Authorization")
if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
}
headerContentType := r.Header.Get("Content-Type")
if headerContentType != "application/x-www-form-urlencoded" {
t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
}
body, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Errorf("Failed reading request body: %s.", err)
}
if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" {
t.Errorf("Unexpected exchange payload, %v is found.", string(body))
}
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"access_token": "90d64460d14870c08c81352a05dedd3465940a7c", "scope": "user", "token_type": "bearer", "expires_in": 86400}`))
}))
defer ts.Close()
conf := newConf(ts.URL)
tok, err := conf.Exchange(NoContext, "exchange-code")
if err != nil {
t.Error(err)
}
if !tok.Valid() {
t.Fatalf("Token invalid. Got: %#v", tok)
}
if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
}
if tok.TokenType != "bearer" {
t.Errorf("Unexpected token type, %#v.", tok.TokenType)
}
scope := tok.Extra("scope")
if scope != "user" {
t.Errorf("Unexpected value for scope: %v", scope)
}
}
func TestExchangeRequest_BadResponse(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
}))
defer ts.Close()
conf := newConf(ts.URL)
tok, err := conf.Exchange(NoContext, "code")
if err != nil {
t.Fatal(err)
}
if tok.AccessToken != "" {
t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
}
}
func TestExchangeRequest_BadResponseType(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`))
}))
defer ts.Close()
conf := newConf(ts.URL)
_, err := conf.Exchange(NoContext, "exchange-code")
if err == nil {
t.Error("expected error from invalid access_token type")
}
}
func TestExchangeRequest_NonBasicAuth(t *testing.T) {
tr := &mockTransport{
rt: func(r *http.Request) (w *http.Response, err error) {
headerAuth := r.Header.Get("Authorization")
if headerAuth != "" {
t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
}
return nil, errors.New("no response")
},
}
c := &http.Client{Transport: tr}
conf := &Config{
ClientID: "CLIENT_ID",
Endpoint: Endpoint{
AuthURL: "https://accounts.google.com/auth",
TokenURL: "https://accounts.google.com/token",
},
}
ctx := context.WithValue(context.Background(), HTTPClient, c)
conf.Exchange(ctx, "code")
}
func TestTokenRefreshRequest(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.String() == "/somethingelse" {
return
}
if r.URL.String() != "/token" {
t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
}
headerContentType := r.Header.Get("Content-Type")
if headerContentType != "application/x-www-form-urlencoded" {
t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
}
body, _ := ioutil.ReadAll(r.Body)
if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" {
t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
}
}))
defer ts.Close()
conf := newConf(ts.URL)
c := conf.Client(NoContext, &Token{RefreshToken: "REFRESH_TOKEN"})
c.Get(ts.URL + "/somethingelse")
}
func TestFetchWithNoRefreshToken(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.String() == "/somethingelse" {
return
}
if r.URL.String() != "/token" {
t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
}
headerContentType := r.Header.Get("Content-Type")
if headerContentType != "application/x-www-form-urlencoded" {
t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
}
body, _ := ioutil.ReadAll(r.Body)
if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" {
t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
}
}))
defer ts.Close()
conf := newConf(ts.URL)
c := conf.Client(NoContext, nil)
_, err := c.Get(ts.URL + "/somethingelse")
if err == nil {
t.Errorf("Fetch should return an error if no refresh token is set")
}
}

99
Godeps/_workspace/src/golang.org/x/oauth2/token.go generated vendored Normal file
View File

@ -0,0 +1,99 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package oauth2
import (
"net/http"
"net/url"
"time"
)
// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
// Most users of this package should not access fields of Token
// directly. They're exported mostly for use by related packages
// implementing derivative OAuth2 flows.
type Token struct {
// AccessToken is the token that authorizes and authenticates
// the requests.
AccessToken string `json:"access_token"`
// TokenType is the type of token.
// The Type method returns either this or "Bearer", the default.
TokenType string `json:"token_type,omitempty"`
// RefreshToken is a token that's used by the application
// (as opposed to the user) to refresh the access token
// if it expires.
RefreshToken string `json:"refresh_token,omitempty"`
// Expiry is the optional expiration time of the access token.
//
// If zero, TokenSource implementations will reuse the same
// token forever and RefreshToken or equivalent
// mechanisms for that TokenSource will not be used.
Expiry time.Time `json:"expiry,omitempty"`
// raw optionally contains extra metadata from the server
// when updating a token.
raw interface{}
}
// Type returns t.TokenType if non-empty, else "Bearer".
func (t *Token) Type() string {
if t.TokenType != "" {
return t.TokenType
}
return "Bearer"
}
// SetAuthHeader sets the Authorization header to r using the access
// token in t.
//
// This method is unnecessary when using Transport or an HTTP Client
// returned by this package.
func (t *Token) SetAuthHeader(r *http.Request) {
r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
}
// WithExtra returns a new Token that's a clone of t, but using the
// provided raw extra map. This is only intended for use by packages
// implementing derivative OAuth2 flows.
func (t *Token) WithExtra(extra interface{}) *Token {
t2 := new(Token)
*t2 = *t
t2.raw = extra
return t2
}
// Extra returns an extra field.
// Extra fields are key-value pairs returned by the server as a
// part of the token retrieval response.
func (t *Token) Extra(key string) interface{} {
if vals, ok := t.raw.(url.Values); ok {
// TODO(jbd): Cast numeric values to int64 or float64.
return vals.Get(key)
}
if raw, ok := t.raw.(map[string]interface{}); ok {
return raw[key]
}
return nil
}
// expired reports whether the token is expired.
// t must be non-nil.
func (t *Token) expired() bool {
if t.Expiry.IsZero() {
return false
}
return t.Expiry.Before(time.Now())
}
// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
func (t *Token) Valid() bool {
return t != nil && t.AccessToken != "" && !t.expired()
}
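As a small illustration of SetAuthHeader and Type, a hedged sketch of attaching a token to a request by hand, bypassing Transport. The token value and target URL are placeholders.

```
package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	// A token obtained elsewhere (value is a placeholder).
	tok := &oauth2.Token{AccessToken: "ya29.placeholder"}

	req, err := http.NewRequest("GET", "https://api.example.com/resource", nil)
	if err != nil {
		log.Fatal(err)
	}
	// SetAuthHeader writes "Authorization: Bearer ya29.placeholder",
	// using Type(), which defaults to "Bearer".
	tok.SetAuthHeader(req)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```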

View File

@ -0,0 +1,30 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package oauth2
import "testing"
func TestTokenExtra(t *testing.T) {
type testCase struct {
key string
val interface{}
want interface{}
}
const key = "extra-key"
cases := []testCase{
{key: key, val: "abc", want: "abc"},
{key: key, val: 123, want: 123},
{key: key, val: "", want: ""},
{key: "other-key", val: "def", want: nil},
}
for _, tc := range cases {
extra := make(map[string]interface{})
extra[tc.key] = tc.val
tok := &Token{raw: extra}
if got, want := tok.Extra(key), tc.want; got != want {
t.Errorf("Extra(%q) = %q; want %q", key, got, want)
}
}
}

138
Godeps/_workspace/src/golang.org/x/oauth2/transport.go generated vendored Normal file
View File

@ -0,0 +1,138 @@
// Copyright 2014 The oauth2 Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package oauth2
import (
"errors"
"io"
"net/http"
"sync"
)
// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
// wrapping a base RoundTripper and adding an Authorization header
// with a token from the supplied Source.
//
// Transport is a low-level mechanism. Most code will use the
// higher-level Config.Client method instead.
type Transport struct {
// Source supplies the token to add to outgoing requests'
// Authorization headers.
Source TokenSource
// Base is the base RoundTripper used to make HTTP requests.
// If nil, http.DefaultTransport is used.
Base http.RoundTripper
mu sync.Mutex // guards modReq
modReq map[*http.Request]*http.Request // original -> modified
}
// RoundTrip authorizes and authenticates the request with an
// access token. If no token exists or the token is expired,
// it tries to refresh or fetch a new token.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
if t.Source == nil {
return nil, errors.New("oauth2: Transport's Source is nil")
}
token, err := t.Source.Token()
if err != nil {
return nil, err
}
req2 := cloneRequest(req) // per RoundTripper contract
token.SetAuthHeader(req2)
t.setModReq(req, req2)
res, err := t.base().RoundTrip(req2)
if err != nil {
t.setModReq(req, nil)
return nil, err
}
res.Body = &onEOFReader{
rc: res.Body,
fn: func() { t.setModReq(req, nil) },
}
return res, nil
}
// CancelRequest cancels an in-flight request by closing its connection.
func (t *Transport) CancelRequest(req *http.Request) {
type canceler interface {
CancelRequest(*http.Request)
}
if cr, ok := t.base().(canceler); ok {
t.mu.Lock()
modReq := t.modReq[req]
delete(t.modReq, req)
t.mu.Unlock()
cr.CancelRequest(modReq)
}
}
func (t *Transport) base() http.RoundTripper {
if t.Base != nil {
return t.Base
}
return http.DefaultTransport
}
func (t *Transport) setModReq(orig, mod *http.Request) {
t.mu.Lock()
defer t.mu.Unlock()
if t.modReq == nil {
t.modReq = make(map[*http.Request]*http.Request)
}
if mod == nil {
delete(t.modReq, orig)
} else {
t.modReq[orig] = mod
}
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = append([]string(nil), s...)
}
return r2
}
type onEOFReader struct {
rc io.ReadCloser
fn func()
}
func (r *onEOFReader) Read(p []byte) (n int, err error) {
n, err = r.rc.Read(p)
if err == io.EOF {
r.runFunc()
}
return
}
func (r *onEOFReader) Close() error {
err := r.rc.Close()
r.runFunc()
return err
}
func (r *onEOFReader) runFunc() {
if fn := r.fn; fn != nil {
fn()
r.fn = nil
}
}
type errorTransport struct{ err error }
func (t errorTransport) RoundTrip(*http.Request) (*http.Response, error) {
return nil, t.err
}
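Mirroring the test file that follows, here is a hedged sketch of wiring Transport directly with a trivial TokenSource. The staticSource type is illustrative and not part of this package; the oauth2 package at this revision does not appear to export an equivalent helper.

```
package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/oauth2"
)

// staticSource is an illustrative TokenSource that always returns
// the same token; it is not part of the oauth2 package.
type staticSource struct{ t *oauth2.Token }

func (s staticSource) Token() (*oauth2.Token, error) { return s.t, nil }

func main() {
	src := staticSource{t: &oauth2.Token{AccessToken: "abc"}}
	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: src, // adds "Authorization: Bearer abc" to each request
			// Base is nil, so http.DefaultTransport is used.
		},
	}
	resp, err := client.Get("https://api.example.com/resource")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```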

View File

@ -0,0 +1,53 @@
package oauth2
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
type tokenSource struct{ token *Token }
func (t *tokenSource) Token() (*Token, error) {
return t.token, nil
}
func TestTransportTokenSource(t *testing.T) {
ts := &tokenSource{
token: &Token{
AccessToken: "abc",
},
}
tr := &Transport{
Source: ts,
}
server := newMockServer(func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Authorization") != "Bearer abc" {
t.Errorf("Transport doesn't set the Authorization header from the fetched token")
}
})
defer server.Close()
client := http.Client{Transport: tr}
client.Get(server.URL)
}
func TestTokenValidNoAccessToken(t *testing.T) {
token := &Token{}
if token.Valid() {
t.Errorf("Token should not be valid with no access token")
}
}
func TestExpiredWithExpiry(t *testing.T) {
token := &Token{
Expiry: time.Now().Add(-5 * time.Hour),
}
if token.Valid() {
t.Errorf("Token should not be valid if it expired in the past")
}
}
func newMockServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(handler))
}

View File

@ -0,0 +1,14 @@
language: go
go:
- 1.4
install:
- export GOPATH="$HOME/gopath"
- mkdir -p "$GOPATH/src/google.golang.org"
- mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/appengine"
- go get -v -t -d google.golang.org/appengine/...
script:
- go test -v google.golang.org/appengine/...
- go test -v -race google.golang.org/appengine/...

View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,65 @@
# Go App Engine for Managed VMs
[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
This repository supports the Go runtime for Managed VMs on App Engine.
It provides APIs for interacting with App Engine services.
Its canonical import path is `google.golang.org/appengine`.
See https://cloud.google.com/appengine/docs/go/managed-vms/
for more information.
## Directory structure
The top level directory of this repository is the `appengine` package. It
contains the
basic types (e.g. `appengine.Context`) that are used across APIs. Specific API
packages are in subdirectories (e.g. `datastore`).
There is an `internal` subdirectory that contains service protocol buffers,
plus packages required for connectivity to make API calls. App Engine apps
should not directly import any package under `internal`.
## Updating a Go App Engine app
This section describes how to update a traditional Go App Engine app to run on Managed VMs.
### 1. Update YAML files
The `app.yaml` file (and YAML files for modules) should have these new lines added:
```
vm: true
manual_scaling:
instances: 1
```
See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details.
### 2. Update import paths
The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
You will need to update your code to use import paths starting with that; for instance,
code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
You can do that manually, or by running this command to recursively update all Go source files in the current directory:
(may require GNU sed)
```
sed -i '/"appengine/{s,"appengine,"google.golang.org/appengine,;s,appengine_,appengine/,}' \
$(find . -name '*.go')
```
### 3. Update code using deprecated, removed or modified APIs
Most App Engine services are available with exactly the same API.
A few APIs were cleaned up, and some are not available yet.
This list summarises the differences:
* `appengine.Datacenter` now takes an `appengine.Context` argument.
* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
* `search.FieldLoadSaver` now handles document metadata.
* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
deprecated and unused for a long time.
* `appengine/aetest`, `appengine/blobstore`, `appengine/cloudsql`
and `appengine/runtime` have not been ported yet.
* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
* `appengine.IsCapabilityDisabled` and `appengine/capability` are obsolete.
* Most of `appengine/file` is deprecated. Use [Google Cloud Storage](https://godoc.org/google.golang.org/cloud/storage) instead.
* `appengine/socket` is deprecated. Use the standard `net` package instead.

View File

@ -0,0 +1,78 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// Package appengine provides basic functionality for Google App Engine.
//
// For more information on how to write Go apps for Google App Engine, see:
// https://cloud.google.com/appengine/docs/go/
package appengine
import (
"net/http"
"github.com/golang/protobuf/proto"
"google.golang.org/appengine/internal"
)
// IsDevAppServer reports whether the App Engine app is running in the
// development App Server.
func IsDevAppServer() bool {
// TODO(dsymonds): Detect this.
return false
}
// Context represents the context of an in-flight HTTP request.
type Context interface {
// Debugf formats its arguments according to the format, analogous to fmt.Printf,
// and records the text as a log message at Debug level.
Debugf(format string, args ...interface{})
// Infof is like Debugf, but at Info level.
Infof(format string, args ...interface{})
// Warningf is like Debugf, but at Warning level.
Warningf(format string, args ...interface{})
// Errorf is like Debugf, but at Error level.
Errorf(format string, args ...interface{})
// Criticalf is like Debugf, but at Critical level.
Criticalf(format string, args ...interface{})
// The remaining methods are for internal use only.
// Developer-facing APIs wrap these methods to provide a more friendly API.
// Internal use only.
Call(service, method string, in, out proto.Message, opts *internal.CallOptions) error
// Internal use only. Use AppID instead.
FullyQualifiedAppID() string
// Internal use only.
Request() interface{}
}
// NewContext returns a context for an in-flight HTTP request.
// Repeated calls will return the same value.
func NewContext(req *http.Request) Context {
return internal.NewContext(req)
}
// TODO(dsymonds): Add BackgroundContext function?
// BlobKey is a key for a blobstore blob.
//
// Conceptually, this type belongs in the blobstore package, but it lives in
// the appengine package to avoid a circular dependency: blobstore depends on
// datastore, and datastore needs to refer to the BlobKey type.
type BlobKey string
// GeoPoint represents a location as latitude/longitude in degrees.
type GeoPoint struct {
Lat, Lng float64
}
// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
func (g GeoPoint) Valid() bool {
return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
}
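For context, a hedged sketch of how a Managed VMs handler might obtain and use this Context. The handler body, log message, and listen address are illustrative assumptions rather than part of this package.

```
package main

import (
	"fmt"
	"log"
	"net/http"

	"google.golang.org/appengine"
)

func handle(w http.ResponseWriter, r *http.Request) {
	// NewContext returns the per-request context; repeated calls with
	// the same request return the same value.
	c := appengine.NewContext(r)
	c.Infof("serving %s", r.URL.Path)
	fmt.Fprintln(w, "hello")
}

func main() {
	http.HandleFunc("/", handle)
	// Managed VM apps of this era typically serve HTTP themselves on :8080.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```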

View File

@ -0,0 +1,45 @@
package appengine
import (
"testing"
)
func TestValidGeoPoint(t *testing.T) {
testCases := []struct {
desc string
pt GeoPoint
want bool
}{
{
"valid",
GeoPoint{67.21, 13.37},
true,
},
{
"high lat",
GeoPoint{-90.01, 13.37},
false,
},
{
"low lat",
GeoPoint{90.01, 13.37},
false,
},
{
"high lng",
GeoPoint{67.21, 182},
false,
},
{
"low lng",
GeoPoint{67.21, -181},
false,
},
}
for _, tc := range testCases {
if got := tc.pt.Valid(); got != tc.want {
t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want)
}
}
}

View File

@ -0,0 +1,81 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
/*
Package channel implements the server side of App Engine's Channel API.
Create creates a new channel associated with the given clientID,
which must be unique to the client that will use the returned token.
token, err := channel.Create(c, "player1")
if err != nil {
// handle error
}
// return token to the client in an HTTP response
Send sends a message to the client over the channel identified by clientID.
channel.Send(c, "player1", "Game over!")
*/
package channel
import (
"encoding/json"
"google.golang.org/appengine"
"google.golang.org/appengine/internal"
basepb "google.golang.org/appengine/internal/base"
pb "google.golang.org/appengine/internal/channel"
)
// Create creates a channel and returns a token for use by the client.
// The clientID is an application-provided string used to identify the client.
func Create(c appengine.Context, clientID string) (token string, err error) {
req := &pb.CreateChannelRequest{
ApplicationKey: &clientID,
}
resp := &pb.CreateChannelResponse{}
err = c.Call(service, "CreateChannel", req, resp, nil)
token = resp.GetToken()
return token, remapError(err)
}
// Send sends a message on the channel associated with clientID.
func Send(c appengine.Context, clientID, message string) error {
req := &pb.SendMessageRequest{
ApplicationKey: &clientID,
Message: &message,
}
resp := &basepb.VoidProto{}
return remapError(c.Call(service, "SendChannelMessage", req, resp, nil))
}
// SendJSON is a helper function that sends a JSON-encoded value
// on the channel associated with clientID.
func SendJSON(c appengine.Context, clientID string, value interface{}) error {
m, err := json.Marshal(value)
if err != nil {
return err
}
return Send(c, clientID, string(m))
}
// remapError fixes any APIError referencing "xmpp" into one referencing "channel".
func remapError(err error) error {
if e, ok := err.(*internal.APIError); ok {
if e.Service == "xmpp" {
e.Service = "channel"
}
}
return err
}
var service = "xmpp" // prod
func init() {
if appengine.IsDevAppServer() {
service = "channel" // dev
}
internal.RegisterErrorCodeMap("channel", pb.ChannelServiceError_ErrorCode_name)
}

View File

@ -0,0 +1,17 @@
package channel
import (
"testing"
"google.golang.org/appengine/internal"
)
func TestRemapError(t *testing.T) {
err := &internal.APIError{
Service: "xmpp",
}
err = remapError(err).(*internal.APIError)
if err.Service != "channel" {
t.Errorf("err.Service = %q, want %q", err.Service, "channel")
}
}

View File

@ -0,0 +1,405 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"errors"
"fmt"
"reflect"
"github.com/golang/protobuf/proto"
"google.golang.org/appengine"
"google.golang.org/appengine/internal"
pb "google.golang.org/appengine/internal/datastore"
)
var (
// ErrInvalidEntityType is returned when functions like Get or Next are
// passed a dst or src argument of invalid type.
ErrInvalidEntityType = errors.New("datastore: invalid entity type")
// ErrInvalidKey is returned when an invalid key is presented.
ErrInvalidKey = errors.New("datastore: invalid key")
// ErrNoSuchEntity is returned when no entity was found for a given key.
ErrNoSuchEntity = errors.New("datastore: no such entity")
)
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct.
// StructType is the type of the struct pointed to by the destination argument
// passed to Get or to Iterator.Next.
type ErrFieldMismatch struct {
StructType reflect.Type
FieldName string
Reason string
}
func (e *ErrFieldMismatch) Error() string {
return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
e.FieldName, e.StructType, e.Reason)
}
// protoToKey converts a Reference proto to a *Key.
func protoToKey(r *pb.Reference) (k *Key, err error) {
appID := r.GetApp()
namespace := r.GetNameSpace()
for _, e := range r.Path.Element {
k = &Key{
kind: e.GetType(),
stringID: e.GetName(),
intID: e.GetId(),
parent: k,
appID: appID,
namespace: namespace,
}
if !k.valid() {
return nil, ErrInvalidKey
}
}
return
}
// keyToProto converts a *Key to a Reference proto.
func keyToProto(defaultAppID string, k *Key) *pb.Reference {
appID := k.appID
if appID == "" {
appID = defaultAppID
}
n := 0
for i := k; i != nil; i = i.parent {
n++
}
e := make([]*pb.Path_Element, n)
for i := k; i != nil; i = i.parent {
n--
e[n] = &pb.Path_Element{
Type: &i.kind,
}
// At most one of {Name,Id} should be set.
// Neither will be set for incomplete keys.
if i.stringID != "" {
e[n].Name = &i.stringID
} else if i.intID != 0 {
e[n].Id = &i.intID
}
}
var namespace *string
if k.namespace != "" {
namespace = proto.String(k.namespace)
}
return &pb.Reference{
App: proto.String(appID),
NameSpace: namespace,
Path: &pb.Path{
Element: e,
},
}
}
// multiKeyToProto is a batch version of keyToProto.
func multiKeyToProto(appID string, key []*Key) []*pb.Reference {
ret := make([]*pb.Reference, len(key))
for i, k := range key {
ret[i] = keyToProto(appID, k)
}
return ret
}
// multiValid is a batch version of Key.valid. It returns an error, not a
// []bool.
func multiValid(key []*Key) error {
invalid := false
for _, k := range key {
if !k.valid() {
invalid = true
break
}
}
if !invalid {
return nil
}
err := make(appengine.MultiError, len(key))
for i, k := range key {
if !k.valid() {
err[i] = ErrInvalidKey
}
}
return err
}
// It's unfortunate that the two semantically equivalent concepts pb.Reference
// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the
// two have different protobuf field numbers.
// referenceValueToKey is the same as protoToKey except the input is a
// PropertyValue_ReferenceValue instead of a Reference.
func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) {
appID := r.GetApp()
namespace := r.GetNameSpace()
for _, e := range r.Pathelement {
k = &Key{
kind: e.GetType(),
stringID: e.GetName(),
intID: e.GetId(),
parent: k,
appID: appID,
namespace: namespace,
}
if !k.valid() {
return nil, ErrInvalidKey
}
}
return
}
// keyToReferenceValue is the same as keyToProto except the output is a
// PropertyValue_ReferenceValue instead of a Reference.
func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue {
ref := keyToProto(defaultAppID, k)
pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element))
for i, e := range ref.Path.Element {
pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{
Type: e.Type,
Id: e.Id,
Name: e.Name,
}
}
return &pb.PropertyValue_ReferenceValue{
App: ref.App,
NameSpace: ref.NameSpace,
Pathelement: pe,
}
}
type multiArgType int
const (
multiArgTypeInvalid multiArgType = iota
multiArgTypePropertyLoadSaver
multiArgTypeStruct
multiArgTypeStructPtr
multiArgTypeInterface
)
// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
// type S, for some interface type I, or some non-interface non-pointer type P
// such that P or *P implements PropertyLoadSaver.
//
// It returns what category the slice's elements are, and the reflect.Type
// that represents S, I or P.
//
// As a special case, PropertyList is an invalid type for v.
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
if v.Kind() != reflect.Slice {
return multiArgTypeInvalid, nil
}
if v.Type() == typeOfPropertyList {
return multiArgTypeInvalid, nil
}
elemType = v.Type().Elem()
if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
return multiArgTypePropertyLoadSaver, elemType
}
switch elemType.Kind() {
case reflect.Struct:
return multiArgTypeStruct, elemType
case reflect.Interface:
return multiArgTypeInterface, elemType
case reflect.Ptr:
elemType = elemType.Elem()
if elemType.Kind() == reflect.Struct {
return multiArgTypeStructPtr, elemType
}
}
return multiArgTypeInvalid, nil
}
// Get loads the entity stored for k into dst, which must be a struct pointer
// or implement PropertyLoadSaver. If there is no such entity for the key, Get
// returns ErrNoSuchEntity.
//
// The values of dst's unmatched struct fields are not modified, and matching
// slice-typed fields are not reset before appending to them. In particular, it
// is recommended to pass a pointer to a zero valued struct on each Get call.
//
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct. ErrFieldMismatch is only returned if
// dst is a struct pointer.
func Get(c appengine.Context, key *Key, dst interface{}) error {
if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here
return ErrInvalidEntityType
}
err := GetMulti(c, []*Key{key}, []interface{}{dst})
if me, ok := err.(appengine.MultiError); ok {
return me[0]
}
return err
}
// GetMulti is a batch version of Get.
//
// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
// type I, or some non-interface non-pointer type P such that P or *P
// implements PropertyLoadSaver. If an []I, each element must be a valid dst
// for Get: it must be a struct pointer or implement PropertyLoadSaver.
//
// As a special case, PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when []PropertyList was intended.
func GetMulti(c appengine.Context, key []*Key, dst interface{}) error {
v := reflect.ValueOf(dst)
multiArgType, _ := checkMultiArg(v)
if multiArgType == multiArgTypeInvalid {
return errors.New("datastore: dst has invalid type")
}
if len(key) != v.Len() {
return errors.New("datastore: key and dst slices have different length")
}
if len(key) == 0 {
return nil
}
if err := multiValid(key); err != nil {
return err
}
req := &pb.GetRequest{
Key: multiKeyToProto(c.FullyQualifiedAppID(), key),
}
res := &pb.GetResponse{}
if err := c.Call("datastore_v3", "Get", req, res, nil); err != nil {
return err
}
if len(key) != len(res.Entity) {
return errors.New("datastore: internal error: server returned the wrong number of entities")
}
multiErr, any := make(appengine.MultiError, len(key)), false
for i, e := range res.Entity {
if e.Entity == nil {
multiErr[i] = ErrNoSuchEntity
} else {
elem := v.Index(i)
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
elem.Set(reflect.New(elem.Type().Elem()))
}
multiErr[i] = loadEntity(elem.Interface(), e.Entity)
}
if multiErr[i] != nil {
any = true
}
}
if any {
return multiErr
}
return nil
}
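// Example use of GetMulti with partial-failure handling (a minimal sketch,
// assuming an appengine.Context c, a []*datastore.Key named keys, and a
// Widget struct type as in the package documentation):
//
//	widgets := make([]Widget, len(keys))
//	err := datastore.GetMulti(c, keys, widgets)
//	if me, ok := err.(appengine.MultiError); ok {
//		for i, e := range me {
//			if e == datastore.ErrNoSuchEntity {
//				// keys[i] has no stored entity; widgets[i] is left as the zero value.
//			}
//		}
//	} else if err != nil {
//		// An error that affected the whole batch.
//	}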
// Put saves the entity src into the datastore with key k. src must be a struct
// pointer or implement PropertyLoadSaver; if a struct pointer then any
// unexported fields of that struct will be skipped. If k is an incomplete key,
// the returned key will be a unique key generated by the datastore.
func Put(c appengine.Context, key *Key, src interface{}) (*Key, error) {
k, err := PutMulti(c, []*Key{key}, []interface{}{src})
if err != nil {
if me, ok := err.(appengine.MultiError); ok {
return nil, me[0]
}
return nil, err
}
return k[0], nil
}
// PutMulti is a batch version of Put.
//
// src must satisfy the same conditions as the dst argument to GetMulti.
func PutMulti(c appengine.Context, key []*Key, src interface{}) ([]*Key, error) {
v := reflect.ValueOf(src)
multiArgType, _ := checkMultiArg(v)
if multiArgType == multiArgTypeInvalid {
return nil, errors.New("datastore: src has invalid type")
}
if len(key) != v.Len() {
return nil, errors.New("datastore: key and src slices have different length")
}
if len(key) == 0 {
return nil, nil
}
appID := c.FullyQualifiedAppID()
if err := multiValid(key); err != nil {
return nil, err
}
req := &pb.PutRequest{}
for i := range key {
elem := v.Index(i)
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
sProto, err := saveEntity(appID, key[i], elem.Interface())
if err != nil {
return nil, err
}
req.Entity = append(req.Entity, sProto)
}
res := &pb.PutResponse{}
if err := c.Call("datastore_v3", "Put", req, res, nil); err != nil {
return nil, err
}
if len(key) != len(res.Key) {
return nil, errors.New("datastore: internal error: server returned the wrong number of keys")
}
ret := make([]*Key, len(key))
for i := range ret {
var err error
ret[i], err = protoToKey(res.Key[i])
if err != nil || ret[i].Incomplete() {
return nil, errors.New("datastore: internal error: server returned an invalid key")
}
}
return ret, nil
}
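// Example use of PutMulti with incomplete keys (a minimal sketch, assuming an
// appengine.Context c and the same Widget struct type):
//
//	keys := []*datastore.Key{
//		datastore.NewIncompleteKey(c, "Widget", nil),
//		datastore.NewIncompleteKey(c, "Widget", nil),
//	}
//	widgets := []Widget{
//		{Description: "wrench", Price: 10},
//		{Description: "sprocket", Price: 5},
//	}
//	completeKeys, err := datastore.PutMulti(c, keys, widgets)
//	if err != nil {
//		// handle err
//	}
//	// completeKeys now holds the datastore-assigned IntIDs.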
// Delete deletes the entity for the given key.
func Delete(c appengine.Context, key *Key) error {
err := DeleteMulti(c, []*Key{key})
if me, ok := err.(appengine.MultiError); ok {
return me[0]
}
return err
}
// DeleteMulti is a batch version of Delete.
func DeleteMulti(c appengine.Context, key []*Key) error {
if len(key) == 0 {
return nil
}
if err := multiValid(key); err != nil {
return err
}
req := &pb.DeleteRequest{
Key: multiKeyToProto(c.FullyQualifiedAppID(), key),
}
res := &pb.DeleteResponse{}
return c.Call("datastore_v3", "Delete", req, res, nil)
}
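// Example use of Delete and DeleteMulti (a minimal sketch, assuming an
// appengine.Context c, a *datastore.Key k and a []*datastore.Key keys):
//
//	if err := datastore.Delete(c, k); err != nil {
//		// handle err
//	}
//	if err := datastore.DeleteMulti(c, keys); err != nil {
//		// err may be an appengine.MultiError describing partial failure.
//	}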
func namespaceMod(m proto.Message, namespace string) {
// pb.Query is the only type that has a name_space field.
// All other namespace support in datastore is in the keys.
switch m := m.(type) {
case *pb.Query:
if m.NameSpace == nil {
m.NameSpace = &namespace
}
}
}
func init() {
internal.NamespaceMods["datastore_v3"] = namespaceMod
internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name)
internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT))
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,316 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
/*
Package datastore provides a client for App Engine's datastore service.
Basic Operations
Entities are the unit of storage and are associated with a key. A key
consists of an optional parent key, a string application ID, a string kind
(also known as an entity type), and either a StringID or an IntID. A
StringID is also known as an entity name or key name.
It is valid to create a key with a zero StringID and a zero IntID; this is
called an incomplete key, and does not refer to any saved entity. Putting an
entity into the datastore under an incomplete key will cause a unique key
to be generated for that entity, with a non-zero IntID.
An entity's contents are a mapping from case-sensitive field names to values.
Valid value types are:
- signed integers (int, int8, int16, int32 and int64),
- bool,
- string,
- float32 and float64,
- []byte (up to 1 megabyte in length),
- any type whose underlying type is one of the above predeclared types,
- ByteString,
- *Key,
- time.Time (stored with microsecond precision),
- appengine.BlobKey,
- appengine.GeoPoint,
- structs whose fields are all valid value types,
- slices of any of the above.
Slices of structs are valid, as are structs that contain slices. However, if
one struct contains another, then at most one of those can be repeated. This
disqualifies recursively defined struct types: any struct T that (directly or
indirectly) contains a []T.
The Get and Put functions load and save an entity's contents. An entity's
contents are typically represented by a struct pointer.
Example code:
type Entity struct {
Value string
}
func handle(w http.ResponseWriter, r *http.Request) {
c := appengine.NewContext(r)
k := datastore.NewKey(c, "Entity", "stringID", 0, nil)
e := new(Entity)
if err := datastore.Get(c, k, e); err != nil {
http.Error(w, err.Error(), 500)
return
}
old := e.Value
e.Value = r.URL.Path
if _, err := datastore.Put(c, k, e); err != nil {
http.Error(w, err.Error(), 500)
return
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value)
}
GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
Delete functions. They take a []*Key instead of a *Key, and may return an
appengine.MultiError when encountering partial failure.
Properties
An entity's contents can be represented by a variety of types. These are
typically struct pointers, but can also be any type that implements the
PropertyLoadSaver interface. If using a struct pointer, you do not have to
explicitly implement the PropertyLoadSaver interface; the datastore will
automatically convert via reflection. If a struct pointer does implement that
interface then those methods will be used in preference to the default
behavior for struct pointers. Struct pointers are more strongly typed and are
easier to use; PropertyLoadSavers are more flexible.
The actual types passed do not have to match between Get and Put calls or even
across different App Engine requests. It is valid to put a *PropertyList and
get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
Conceptually, any entity is saved as a sequence of properties, and is loaded
into the destination value on a property-by-property basis. When loading into
a struct pointer, an entity that cannot be completely represented (such as a
missing field) will result in an ErrFieldMismatch error but it is up to the
caller whether this error is fatal, recoverable or ignorable.
By default, for struct pointers, all properties are potentially indexed, and
the property name is the same as the field name (and hence must start with an
upper case letter). Fields may have a `datastore:"name,options"` tag. The tag
name is the property name, which must be one or more valid Go identifiers
joined by ".", but may start with a lower case letter. An empty tag name means
to just use the field name. A "-" tag name means that the datastore will
ignore that field. If options is "noindex" then the field will not be indexed.
If options is "" then the comma may be omitted. There are no other
recognized options.
Fields (except for []byte) are indexed by default. Strings longer than 500
characters cannot be indexed; fields used to store long strings should be
tagged with "noindex". Similarly, ByteStrings longer than 500 bytes cannot be
indexed.
Example code:
// A and B are renamed to a and b.
// A, C and J are not indexed.
// D's tag is equivalent to having no tag at all (E).
// I is ignored entirely by the datastore.
// J has tag information for both the datastore and json packages.
type TaggedStruct struct {
A int `datastore:"a,noindex"`
B int `datastore:"b"`
C int `datastore:",noindex"`
D int `datastore:""`
E int
I int `datastore:"-"`
J int `datastore:",noindex" json:"j"`
}
Structured Properties
If the struct pointed to contains other structs, then the nested or embedded
structs are flattened. For example, given these definitions:
type Inner1 struct {
W int32
X string
}
type Inner2 struct {
Y float64
}
type Inner3 struct {
Z bool
}
type Outer struct {
A int16
I []Inner1
J Inner2
Inner3
}
then an Outer's properties would be equivalent to those of:
type OuterEquivalent struct {
A int16
IDotW []int32 `datastore:"I.W"`
IDotX []string `datastore:"I.X"`
JDotY float64 `datastore:"J.Y"`
Z bool
}
If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.
If an outer struct is tagged "noindex" then all of its implicit flattened
fields are effectively "noindex".
The PropertyLoadSaver Interface
An entity's contents can also be represented by any type that implements the
PropertyLoadSaver interface. This type may be a struct pointer, but it does
not have to be. The datastore package will call Load when getting the entity's
contents, and Save when putting the entity's contents.
Possible uses include deriving non-stored fields, verifying fields, or indexing
a field only if its value is positive.
Example code:
type CustomPropsExample struct {
I, J int
// Sum is not stored, but should always be equal to I + J.
Sum int `datastore:"-"`
}
func (x *CustomPropsExample) Load(ps []Property) error {
// Load I and J as usual.
if err := datastore.LoadStruct(x, ps); err != nil {
return err
}
// Derive the Sum field.
x.Sum = x.I + x.J
return nil
}
func (x *CustomPropsExample) Save() ([]Property, error) {
// Validate the Sum field.
if x.Sum != x.I + x.J {
return nil, errors.New("CustomPropsExample has inconsistent sum")
}
// Save I and J as usual. The code below is equivalent to calling
// "return datastore.SaveStruct(x)", but is done manually for
// demonstration purposes.
return []Property{
{Name: "I", Value: int64(x.I)},
{Name: "J", Value: int64(x.J)},
}, nil
}
The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
arbitrary entity's contents.
Queries
Queries retrieve entities based on their properties or key's ancestry. Running
a query yields an iterator of results: either keys or (key, entity) pairs.
Queries are re-usable and it is safe to call Query.Run from concurrent
goroutines. Iterators are not safe for concurrent use.
Queries are immutable, and are either created by calling NewQuery, or derived
from an existing query by calling a method like Filter or Order that returns a
new query value. A query is typically constructed by calling NewQuery followed
by a chain of zero or more such methods. These methods are:
- Ancestor and Filter constrain the entities returned by running a query.
- Order affects the order in which they are returned.
- Project constrains the fields returned.
- Distinct de-duplicates projected entities.
- KeysOnly makes the iterator return only keys, not (key, entity) pairs.
- Start, End, Offset and Limit define which sub-sequence of matching entities
to return. Start and End take cursors, Offset and Limit take integers. Start
and Offset affect the first result, End and Limit affect the last result.
If both Start and Offset are set, then the offset is relative to Start.
If both End and Limit are set, then the earliest constraint wins. Limit is
relative to Start+Offset, not relative to End. As a special case, a
negative limit means unlimited.
Example code:
type Widget struct {
Description string
Price int
}
func handle(w http.ResponseWriter, r *http.Request) {
c := appengine.NewContext(r)
q := datastore.NewQuery("Widget").
Filter("Price <", 1000).
Order("-Price")
b := new(bytes.Buffer)
for t := q.Run(c); ; {
var x Widget
key, err := t.Next(&x)
if err == datastore.Done {
break
}
if err != nil {
serveError(c, w, err)
return
}
fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x)
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
io.Copy(w, b)
}
Transactions
RunInTransaction runs a function in a transaction.
Example code:
type Counter struct {
Count int
}
func inc(c appengine.Context, key *datastore.Key) (int, error) {
var x Counter
if err := datastore.Get(c, key, &x); err != nil && err != datastore.ErrNoSuchEntity {
return 0, err
}
x.Count++
if _, err := datastore.Put(c, key, &x); err != nil {
return 0, err
}
return x.Count, nil
}
func handle(w http.ResponseWriter, r *http.Request) {
c := appengine.NewContext(r)
var count int
err := datastore.RunInTransaction(c, func(c appengine.Context) error {
var err1 error
count, err1 = inc(c, datastore.NewKey(c, "Counter", "singleton", 0, nil))
return err1
}, nil)
if err != nil {
serveError(c, w, err)
return
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
fmt.Fprintf(w, "Count=%d", count)
}
*/
package datastore

View File

@@ -0,0 +1,309 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"bytes"
"encoding/base64"
"encoding/gob"
"errors"
"fmt"
"strconv"
"strings"
"github.com/golang/protobuf/proto"
"google.golang.org/appengine"
"google.golang.org/appengine/internal"
pb "google.golang.org/appengine/internal/datastore"
)
// Key represents the datastore key for a stored entity, and is immutable.
type Key struct {
kind string
stringID string
intID int64
parent *Key
appID string
namespace string
}
// Kind returns the key's kind (also known as entity type).
func (k *Key) Kind() string {
return k.kind
}
// StringID returns the key's string ID (also known as an entity name or key
// name), which may be "".
func (k *Key) StringID() string {
return k.stringID
}
// IntID returns the key's integer ID, which may be 0.
func (k *Key) IntID() int64 {
return k.intID
}
// Parent returns the key's parent key, which may be nil.
func (k *Key) Parent() *Key {
return k.parent
}
// AppID returns the key's application ID.
func (k *Key) AppID() string {
return k.appID
}
// Namespace returns the key's namespace.
func (k *Key) Namespace() string {
return k.namespace
}
// Incomplete returns whether the key does not refer to a stored entity.
// In particular, whether the key has a zero StringID and a zero IntID.
func (k *Key) Incomplete() bool {
return k.stringID == "" && k.intID == 0
}
// valid returns whether the key is valid.
func (k *Key) valid() bool {
if k == nil {
return false
}
for ; k != nil; k = k.parent {
if k.kind == "" || k.appID == "" {
return false
}
if k.stringID != "" && k.intID != 0 {
return false
}
if k.parent != nil {
if k.parent.Incomplete() {
return false
}
if k.parent.appID != k.appID || k.parent.namespace != k.namespace {
return false
}
}
}
return true
}
// Equal returns whether two keys are equal.
func (k *Key) Equal(o *Key) bool {
for k != nil && o != nil {
if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace {
return false
}
k, o = k.parent, o.parent
}
return k == o
}
// root returns the furthest ancestor of a key, which may be itself.
func (k *Key) root() *Key {
for k.parent != nil {
k = k.parent
}
return k
}
// marshal marshals the key's string representation to the buffer.
func (k *Key) marshal(b *bytes.Buffer) {
if k.parent != nil {
k.parent.marshal(b)
}
b.WriteByte('/')
b.WriteString(k.kind)
b.WriteByte(',')
if k.stringID != "" {
b.WriteString(k.stringID)
} else {
b.WriteString(strconv.FormatInt(k.intID, 10))
}
}
// String returns a string representation of the key.
func (k *Key) String() string {
if k == nil {
return ""
}
b := bytes.NewBuffer(make([]byte, 0, 512))
k.marshal(b)
return b.String()
}
type gobKey struct {
Kind string
StringID string
IntID int64
Parent *gobKey
AppID string
Namespace string
}
func keyToGobKey(k *Key) *gobKey {
if k == nil {
return nil
}
return &gobKey{
Kind: k.kind,
StringID: k.stringID,
IntID: k.intID,
Parent: keyToGobKey(k.parent),
AppID: k.appID,
Namespace: k.namespace,
}
}
func gobKeyToKey(gk *gobKey) *Key {
if gk == nil {
return nil
}
return &Key{
kind: gk.Kind,
stringID: gk.StringID,
intID: gk.IntID,
parent: gobKeyToKey(gk.Parent),
appID: gk.AppID,
namespace: gk.Namespace,
}
}
func (k *Key) GobEncode() ([]byte, error) {
buf := new(bytes.Buffer)
if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func (k *Key) GobDecode(buf []byte) error {
gk := new(gobKey)
if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
return err
}
*k = *gobKeyToKey(gk)
return nil
}
func (k *Key) MarshalJSON() ([]byte, error) {
return []byte(`"` + k.Encode() + `"`), nil
}
func (k *Key) UnmarshalJSON(buf []byte) error {
if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
return errors.New("datastore: bad JSON key")
}
k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
if err != nil {
return err
}
*k = *k2
return nil
}
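// Example of a *Key used directly as a JSON-encoded struct field; the key is
// marshalled as the opaque string produced by Encode (a minimal sketch,
// assuming a complete key k and the encoding/json package):
//
//	type Bookmark struct {
//		Target *datastore.Key `json:"target"`
//	}
//	b, err := json.Marshal(Bookmark{Target: k})
//	if err != nil {
//		// handle err
//	}
//	// b is of the form {"target":"<opaque key string>"}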
// Encode returns an opaque representation of the key
// suitable for use in HTML and URLs.
// This is compatible with the Python and Java runtimes.
func (k *Key) Encode() string {
ref := keyToProto("", k)
b, err := proto.Marshal(ref)
if err != nil {
panic(err)
}
// Trailing padding is stripped.
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
// DecodeKey decodes a key from the opaque representation returned by Encode.
func DecodeKey(encoded string) (*Key, error) {
// Re-add padding.
if m := len(encoded) % 4; m != 0 {
encoded += strings.Repeat("=", 4-m)
}
b, err := base64.URLEncoding.DecodeString(encoded)
if err != nil {
return nil, err
}
ref := new(pb.Reference)
if err := proto.Unmarshal(b, ref); err != nil {
return nil, err
}
return protoToKey(ref)
}
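// Example round trip through Encode and DecodeKey, e.g. for passing a key in
// an HTML form or URL (a minimal sketch, assuming an appengine.Context c):
//
//	k := datastore.NewKey(c, "Entity", "stringID", 0, nil)
//	s := k.Encode() // opaque, URL-safe string
//	k2, err := datastore.DecodeKey(s)
//	if err != nil {
//		// handle err
//	}
//	// k2.Equal(k) reports true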
// NewIncompleteKey creates a new incomplete key.
// kind cannot be empty.
func NewIncompleteKey(c appengine.Context, kind string, parent *Key) *Key {
return NewKey(c, kind, "", 0, parent)
}
// NewKey creates a new key.
// kind cannot be empty.
// Either one or both of stringID and intID must be zero. If both are zero,
// the key returned is incomplete.
// parent must either be a complete key or nil.
func NewKey(c appengine.Context, kind, stringID string, intID int64, parent *Key) *Key {
// If there's a parent key, use its namespace.
// Otherwise, do a fake RPC to try to get a namespace if c is a namespacedContext (or wraps one).
var namespace string
if parent != nil {
namespace = parent.namespace
} else {
namespace = internal.VirtAPI(c, "GetNamespace")
}
return &Key{
kind: kind,
stringID: stringID,
intID: intID,
parent: parent,
appID: c.FullyQualifiedAppID(),
namespace: namespace,
}
}
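// Example key construction (a minimal sketch, assuming an appengine.Context c):
//
//	// A complete key with a string ID (key name) and no parent.
//	folder := datastore.NewKey(c, "Folder", "inbox", 0, nil)
//	// A complete key with an integer ID and folder as its parent.
//	msg := datastore.NewKey(c, "Message", "", 42, folder)
//	// An incomplete key; Put assigns a unique IntID when storing the entity.
//	draft := datastore.NewIncompleteKey(c, "Message", folder)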
// AllocateIDs returns a range of n integer IDs with the given kind and parent
// combination. kind cannot be empty; parent may be nil. The IDs in the range
// returned will not be used by the datastore's automatic ID sequence generator
// and may be used with NewKey without conflict.
//
// The range is inclusive at the low end and exclusive at the high end. In
// other words, valid intIDs x satisfy low <= x && x < high.
//
// If no error is returned, low + n == high.
func AllocateIDs(c appengine.Context, kind string, parent *Key, n int) (low, high int64, err error) {
if kind == "" {
return 0, 0, errors.New("datastore: AllocateIDs given an empty kind")
}
if n < 0 {
return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n)
}
if n == 0 {
return 0, 0, nil
}
req := &pb.AllocateIdsRequest{
ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)),
Size: proto.Int64(int64(n)),
}
res := &pb.AllocateIdsResponse{}
if err := c.Call("datastore_v3", "AllocateIds", req, res, nil); err != nil {
return 0, 0, err
}
// The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops)
// is inclusive at the low end and exclusive at the high end, so we add 1.
low = res.GetStart()
high = res.GetEnd() + 1
if low+int64(n) != high {
return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n)
}
return low, high, nil
}
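// Example use of AllocateIDs to reserve a range of IDs ahead of time and build
// keys with NewKey (a minimal sketch, assuming an appengine.Context c):
//
//	low, high, err := datastore.AllocateIDs(c, "Widget", nil, 10)
//	if err != nil {
//		// handle err
//	}
//	keys := make([]*datastore.Key, 0, int(high-low))
//	for id := low; id < high; id++ {
//		keys = append(keys, datastore.NewKey(c, "Widget", "", id, nil))
//	}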

View File

@@ -0,0 +1,214 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"bytes"
"encoding/gob"
"encoding/json"
"testing"
"github.com/golang/protobuf/proto"
"google.golang.org/appengine"
"google.golang.org/appengine/internal"
)
func TestKeyEncoding(t *testing.T) {
testCases := []struct {
desc string
key *Key
exp string
}{
{
desc: "A simple key with an int ID",
key: &Key{
kind: "Person",
intID: 1,
appID: "glibrary",
},
exp: "aghnbGlicmFyeXIMCxIGUGVyc29uGAEM",
},
{
desc: "A simple key with a string ID",
key: &Key{
kind: "Graph",
stringID: "graph:7-day-active",
appID: "glibrary",
},
exp: "aghnbGlicmFyeXIdCxIFR3JhcGgiEmdyYXBoOjctZGF5LWFjdGl2ZQw",
},
{
desc: "A key with a parent",
key: &Key{
kind: "WordIndex",
intID: 1033,
parent: &Key{
kind: "WordIndex",
intID: 1020032,
appID: "glibrary",
},
appID: "glibrary",
},
exp: "aghnbGlicmFyeXIhCxIJV29yZEluZGV4GIChPgwLEglXb3JkSW5kZXgYiQgM",
},
}
for _, tc := range testCases {
enc := tc.key.Encode()
if enc != tc.exp {
t.Errorf("%s: got %q, want %q", tc.desc, enc, tc.exp)
}
key, err := DecodeKey(tc.exp)
if err != nil {
t.Errorf("%s: failed decoding key: %v", tc.desc, err)
continue
}
if !key.Equal(tc.key) {
t.Errorf("%s: decoded key %v, want %v", tc.desc, key, tc.key)
}
}
}
func TestKeyGob(t *testing.T) {
k := &Key{
kind: "Gopher",
intID: 3,
parent: &Key{
kind: "Mom",
stringID: "narwhal",
appID: "gopher-con",
},
appID: "gopher-con",
}
buf := new(bytes.Buffer)
if err := gob.NewEncoder(buf).Encode(k); err != nil {
t.Fatalf("gob encode failed: %v", err)
}
k2 := new(Key)
if err := gob.NewDecoder(buf).Decode(k2); err != nil {
t.Fatalf("gob decode failed: %v", err)
}
if !k2.Equal(k) {
t.Errorf("gob round trip of %v produced %v", k, k2)
}
}
func TestNilKeyGob(t *testing.T) {
type S struct {
Key *Key
}
s1 := new(S)
buf := new(bytes.Buffer)
if err := gob.NewEncoder(buf).Encode(s1); err != nil {
t.Fatalf("gob encode failed: %v", err)
}
s2 := new(S)
if err := gob.NewDecoder(buf).Decode(s2); err != nil {
t.Fatalf("gob decode failed: %v", err)
}
if s2.Key != nil {
t.Errorf("gob round trip of nil key produced %v", s2.Key)
}
}
func TestKeyJSON(t *testing.T) {
k := &Key{
kind: "Gopher",
intID: 2,
parent: &Key{
kind: "Mom",
stringID: "narwhal",
appID: "gopher-con",
},
appID: "gopher-con",
}
exp := `"` + k.Encode() + `"`
buf, err := json.Marshal(k)
if err != nil {
t.Fatalf("json.Marshal failed: %v", err)
}
if s := string(buf); s != exp {
t.Errorf("JSON encoding of key %v: got %q, want %q", k, s, exp)
}
k2 := new(Key)
if err := json.Unmarshal(buf, k2); err != nil {
t.Fatalf("json.Unmarshal failed: %v", err)
}
if !k2.Equal(k) {
t.Errorf("JSON round trip of %v produced %v", k, k2)
}
}
func TestNilKeyJSON(t *testing.T) {
type S struct {
Key *Key
}
s1 := new(S)
buf, err := json.Marshal(s1)
if err != nil {
t.Fatalf("json.Marshal failed: %v", err)
}
s2 := new(S)
if err := json.Unmarshal(buf, s2); err != nil {
t.Fatalf("json.Unmarshal failed: %v", err)
}
if s2.Key != nil {
t.Errorf("JSON round trip of nil key produced %v", s2.Key)
}
}
type fakeKeyer struct {
appengine.Context
}
func (fakeKeyer) FullyQualifiedAppID() string { return "s~some-app" }
func (fakeKeyer) Call(service, method string, in, out proto.Message, opts *internal.CallOptions) error {
return nil
}
func TestIncompleteKeyWithParent(t *testing.T) {
var c appengine.Context = fakeKeyer{}
// fadduh is a complete key.
fadduh := NewKey(c, "Person", "", 1, nil)
if fadduh.Incomplete() {
t.Fatalf("fadduh is incomplete")
}
// robert is an incomplete key with fadduh as a parent.
robert := NewIncompleteKey(c, "Person", fadduh)
if !robert.Incomplete() {
t.Fatalf("robert is complete")
}
// Both should be valid keys.
if !fadduh.valid() {
t.Errorf("fadduh is invalid: %v", fadduh)
}
if !robert.valid() {
t.Errorf("robert is invalid: %v", robert)
}
}
func TestNamespace(t *testing.T) {
key := &Key{
kind: "Person",
intID: 1,
appID: "s~some-app",
namespace: "mynamespace",
}
if g, w := key.Namespace(), "mynamespace"; g != w {
t.Errorf("key.Namespace() = %q, want %q", g, w)
}
}

View File

@@ -0,0 +1,334 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"fmt"
"reflect"
"time"
"google.golang.org/appengine"
pb "google.golang.org/appengine/internal/datastore"
)
var (
typeOfBlobKey = reflect.TypeOf(appengine.BlobKey(""))
typeOfByteSlice = reflect.TypeOf([]byte(nil))
typeOfByteString = reflect.TypeOf(ByteString(nil))
typeOfGeoPoint = reflect.TypeOf(appengine.GeoPoint{})
typeOfTime = reflect.TypeOf(time.Time{})
)
// typeMismatchReason returns a string explaining why the property p could not
// be stored in an entity field of type v.Type().
func typeMismatchReason(p Property, v reflect.Value) string {
entityType := "empty"
switch p.Value.(type) {
case int64:
entityType = "int"
case bool:
entityType = "bool"
case string:
entityType = "string"
case float64:
entityType = "float"
case *Key:
entityType = "*datastore.Key"
case time.Time:
entityType = "time.Time"
case appengine.BlobKey:
entityType = "appengine.BlobKey"
case appengine.GeoPoint:
entityType = "appengine.GeoPoint"
case ByteString:
entityType = "datastore.ByteString"
case []byte:
entityType = "[]byte"
}
return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
}
type propertyLoader struct {
// m holds the number of times a substruct field like "Foo.Bar.Baz" has
// been seen so far. The map is constructed lazily.
m map[string]int
}
func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string {
var v reflect.Value
// Traverse a struct's struct-typed fields.
for name := p.Name; ; {
decoder, ok := codec.byName[name]
if !ok {
return "no such struct field"
}
v = structValue.Field(decoder.index)
if !v.IsValid() {
return "no such struct field"
}
if !v.CanSet() {
return "cannot set struct field"
}
if decoder.substructCodec == nil {
break
}
if v.Kind() == reflect.Slice {
if l.m == nil {
l.m = make(map[string]int)
}
index := l.m[p.Name]
l.m[p.Name] = index + 1
for v.Len() <= index {
v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
}
structValue = v.Index(index)
requireSlice = false
} else {
structValue = v
}
// Strip the "I." from "I.X".
name = name[len(codec.byIndex[decoder.index].name):]
codec = decoder.substructCodec
}
var slice reflect.Value
if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
slice = v
v = reflect.New(v.Type().Elem()).Elem()
} else if requireSlice {
return "multiple-valued property requires a slice field type"
}
// Convert indexValues to a Go value with a meaning derived from the
// destination type.
pValue := p.Value
if iv, ok := pValue.(indexValue); ok {
meaning := pb.Property_NO_MEANING
switch v.Type() {
case typeOfBlobKey:
meaning = pb.Property_BLOBKEY
case typeOfByteSlice:
meaning = pb.Property_BLOB
case typeOfByteString:
meaning = pb.Property_BYTESTRING
case typeOfGeoPoint:
meaning = pb.Property_GEORSS_POINT
case typeOfTime:
meaning = pb.Property_GD_WHEN
}
var err error
pValue, err = propValue(iv.value, meaning)
if err != nil {
return err.Error()
}
}
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
x, ok := pValue.(int64)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
if v.OverflowInt(x) {
return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
}
v.SetInt(x)
case reflect.Bool:
x, ok := pValue.(bool)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
v.SetBool(x)
case reflect.String:
switch x := pValue.(type) {
case appengine.BlobKey:
v.SetString(string(x))
case ByteString:
v.SetString(string(x))
case string:
v.SetString(x)
default:
if pValue != nil {
return typeMismatchReason(p, v)
}
}
case reflect.Float32, reflect.Float64:
x, ok := pValue.(float64)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
if v.OverflowFloat(x) {
return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
}
v.SetFloat(x)
case reflect.Ptr:
x, ok := pValue.(*Key)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
if _, ok := v.Interface().(*Key); !ok {
return typeMismatchReason(p, v)
}
v.Set(reflect.ValueOf(x))
case reflect.Struct:
switch v.Type() {
case typeOfTime:
x, ok := pValue.(time.Time)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
v.Set(reflect.ValueOf(x))
case typeOfGeoPoint:
x, ok := pValue.(appengine.GeoPoint)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
v.Set(reflect.ValueOf(x))
default:
return typeMismatchReason(p, v)
}
case reflect.Slice:
x, ok := pValue.([]byte)
if !ok {
if y, yok := pValue.(ByteString); yok {
x, ok = []byte(y), true
}
}
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
if v.Type().Elem().Kind() != reflect.Uint8 {
return typeMismatchReason(p, v)
}
v.SetBytes(x)
default:
return typeMismatchReason(p, v)
}
if slice.IsValid() {
slice.Set(reflect.Append(slice, v))
}
return ""
}
// loadEntity loads an EntityProto into a PropertyLoadSaver or struct pointer.
func loadEntity(dst interface{}, src *pb.EntityProto) (err error) {
props, err := protoToProperties(src)
if err != nil {
return err
}
if e, ok := dst.(PropertyLoadSaver); ok {
return e.Load(props)
}
return LoadStruct(dst, props)
}
func (s structPLS) Load(props []Property) error {
var fieldName, reason string
var l propertyLoader
for _, p := range props {
if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" {
// We don't return early, as we try to load as many properties as possible.
// It is valid to load an entity into a struct that cannot fully represent it.
// That case returns an error, but the caller is free to ignore it.
fieldName, reason = p.Name, errStr
}
}
if reason != "" {
return &ErrFieldMismatch{
StructType: s.v.Type(),
FieldName: fieldName,
Reason: reason,
}
}
return nil
}
func protoToProperties(src *pb.EntityProto) ([]Property, error) {
props, rawProps := src.Property, src.RawProperty
out := make([]Property, 0, len(props)+len(rawProps))
for {
var (
x *pb.Property
noIndex bool
)
if len(props) > 0 {
x, props = props[0], props[1:]
} else if len(rawProps) > 0 {
x, rawProps = rawProps[0], rawProps[1:]
noIndex = true
} else {
break
}
var value interface{}
if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE {
value = indexValue{x.Value}
} else {
var err error
value, err = propValue(x.Value, x.GetMeaning())
if err != nil {
return nil, err
}
}
out = append(out, Property{
Name: x.GetName(),
Value: value,
NoIndex: noIndex,
Multiple: x.GetMultiple(),
})
}
return out, nil
}
// propValue returns a Go value that combines the raw PropertyValue with a
// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time.
func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) {
switch {
case v.Int64Value != nil:
if m == pb.Property_GD_WHEN {
return fromUnixMicro(*v.Int64Value), nil
} else {
return *v.Int64Value, nil
}
case v.BooleanValue != nil:
return *v.BooleanValue, nil
case v.StringValue != nil:
if m == pb.Property_BLOB {
return []byte(*v.StringValue), nil
} else if m == pb.Property_BLOBKEY {
return appengine.BlobKey(*v.StringValue), nil
} else if m == pb.Property_BYTESTRING {
return ByteString(*v.StringValue), nil
} else {
return *v.StringValue, nil
}
case v.DoubleValue != nil:
return *v.DoubleValue, nil
case v.Referencevalue != nil:
key, err := referenceValueToKey(v.Referencevalue)
if err != nil {
return nil, err
}
return key, nil
case v.Pointvalue != nil:
// NOTE: Strangely, latitude maps to X, longitude to Y.
return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil
}
return nil, nil
}
// indexValue is a Property value that is created when entities are loaded from
// an index, such as from a projection query.
//
// Such Property values do not contain all of the metadata required to be
// faithfully represented as a Go value, and are instead represented as an
// opaque indexValue. Load the properties into a concrete struct type (e.g. by
// passing a struct pointer to Iterator.Next) to reconstruct actual Go values
// of type int, string, time.Time, etc.
type indexValue struct {
value *pb.PropertyValue
}

View File

@@ -0,0 +1,294 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"fmt"
"reflect"
"strings"
"sync"
"unicode"
)
// Entities with more than this many indexed properties will not be saved.
const maxIndexedProperties = 5000
// []byte fields more than 1 megabyte long will not be loaded or saved.
const maxBlobLen = 1 << 20
// Property is a name/value pair plus some metadata. A datastore entity's
// contents are loaded and saved as a sequence of Properties. An entity can
// have multiple Properties with the same name, provided that p.Multiple is
// true on all of that entity's Properties with that name.
type Property struct {
// Name is the property name.
Name string
// Value is the property value. The valid types are:
// - int64
// - bool
// - string
// - float64
// - ByteString
// - *Key
// - time.Time
// - appengine.BlobKey
// - appengine.GeoPoint
// - []byte (up to 1 megabyte in length)
// This set is smaller than the set of valid struct field types that the
// datastore can load and save. A Property Value cannot be a slice (apart
// from []byte); use multiple Properties instead. Also, a Value's type
// must be explicitly on the list above; it is not sufficient for the
// underlying type to be on that list. For example, a Value of "type
// myInt64 int64" is invalid. Smaller-width integers and floats are also
// invalid. Again, this is more restrictive than the set of valid struct
// field types.
//
// A Value will have an opaque type when loading entities from an index,
// such as via a projection query. Load entities into a struct instead
// of a PropertyLoadSaver when using a projection query.
//
// A Value may also be the nil interface value; this is equivalent to
// Python's None but not directly representable by a Go struct. Loading
// a nil-valued property into a struct will set that field to the zero
// value.
Value interface{}
// NoIndex is whether the datastore cannot index this property.
NoIndex bool
// Multiple is whether the entity can have multiple properties with
// the same name. Even if a particular instance only has one property with
// a certain name, Multiple should be true if a struct would best represent
// it as a field of type []T instead of type T.
Multiple bool
}
// ByteString is a short byte slice (up to 500 bytes) that can be indexed.
type ByteString []byte
// PropertyLoadSaver can be converted from and to a slice of Properties.
type PropertyLoadSaver interface {
Load([]Property) error
Save() ([]Property, error)
}
// PropertyList converts a []Property to implement PropertyLoadSaver.
type PropertyList []Property
var (
typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
typeOfPropertyList = reflect.TypeOf(PropertyList(nil))
)
// Load loads all of the provided properties into l.
// It does not first reset *l to an empty slice.
func (l *PropertyList) Load(p []Property) error {
*l = append(*l, p...)
return nil
}
// Save saves all of l's properties as a slice of Properties.
func (l *PropertyList) Save() ([]Property, error) {
return *l, nil
}
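// Example use of PropertyList to load an entity of unknown structure
// (a minimal sketch, assuming an appengine.Context c and a *datastore.Key k):
//
//	var props datastore.PropertyList
//	if err := datastore.Get(c, k, &props); err != nil {
//		// handle err
//	}
//	for _, p := range props {
//		// p.Name, p.Value, p.NoIndex and p.Multiple describe one stored property.
//	}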
// validPropertyName returns whether name consists of one or more valid Go
// identifiers joined by ".".
func validPropertyName(name string) bool {
if name == "" {
return false
}
for _, s := range strings.Split(name, ".") {
if s == "" {
return false
}
first := true
for _, c := range s {
if first {
first = false
if c != '_' && !unicode.IsLetter(c) {
return false
}
} else {
if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
}
return true
}
// structTag is the parsed `datastore:"name,options"` tag of a struct field.
// If a field has no tag, or the tag has an empty name, then the structTag's
// name is just the field name. A "-" name means that the datastore ignores
// that field.
type structTag struct {
name string
noIndex bool
}
// structCodec describes how to convert a struct to and from a sequence of
// properties.
type structCodec struct {
// byIndex gives the structTag for the i'th field.
byIndex []structTag
// byName gives the field codec for the structTag with the given name.
byName map[string]fieldCodec
// hasSlice is whether a struct or any of its nested or embedded structs
// has a slice-typed field (other than []byte).
hasSlice bool
// complete is whether the structCodec is complete. An incomplete
// structCodec may be encountered when walking a recursive struct.
complete bool
}
// fieldCodec is a struct field's index and, if that struct field's type is
// itself a struct, that substruct's structCodec.
type fieldCodec struct {
index int
substructCodec *structCodec
}
// structCodecs collects the structCodecs that have already been calculated.
var (
structCodecsMutex sync.Mutex
structCodecs = make(map[reflect.Type]*structCodec)
)
// getStructCodec returns the structCodec for the given struct type.
func getStructCodec(t reflect.Type) (*structCodec, error) {
structCodecsMutex.Lock()
defer structCodecsMutex.Unlock()
return getStructCodecLocked(t)
}
// getStructCodecLocked implements getStructCodec. The structCodecsMutex must
// be held when calling this function.
func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) {
c, ok := structCodecs[t]
if ok {
return c, nil
}
c = &structCodec{
byIndex: make([]structTag, t.NumField()),
byName: make(map[string]fieldCodec),
}
// Add c to the structCodecs map before we are sure it is good. If t is
// a recursive type, it needs to find the incomplete entry for itself in
// the map.
structCodecs[t] = c
defer func() {
if retErr != nil {
delete(structCodecs, t)
}
}()
for i := range c.byIndex {
f := t.Field(i)
name, opts := f.Tag.Get("datastore"), ""
if i := strings.Index(name, ","); i != -1 {
name, opts = name[:i], name[i+1:]
}
if name == "" {
if !f.Anonymous {
name = f.Name
}
} else if name == "-" {
c.byIndex[i] = structTag{name: name}
continue
} else if !validPropertyName(name) {
return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name)
}
substructType, fIsSlice := reflect.Type(nil), false
switch f.Type.Kind() {
case reflect.Struct:
substructType = f.Type
case reflect.Slice:
if f.Type.Elem().Kind() == reflect.Struct {
substructType = f.Type.Elem()
}
fIsSlice = f.Type != typeOfByteSlice
c.hasSlice = c.hasSlice || fIsSlice
}
if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint {
if name != "" {
name = name + "."
}
sub, err := getStructCodecLocked(substructType)
if err != nil {
return nil, err
}
if !sub.complete {
return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name)
}
if fIsSlice && sub.hasSlice {
return nil, fmt.Errorf(
"datastore: flattening nested structs leads to a slice of slices: field %q", f.Name)
}
c.hasSlice = c.hasSlice || sub.hasSlice
for relName := range sub.byName {
absName := name + relName
if _, ok := c.byName[absName]; ok {
return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", absName)
}
c.byName[absName] = fieldCodec{index: i, substructCodec: sub}
}
} else {
if _, ok := c.byName[name]; ok {
return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name)
}
c.byName[name] = fieldCodec{index: i}
}
c.byIndex[i] = structTag{
name: name,
noIndex: opts == "noindex",
}
}
c.complete = true
return c, nil
}
// structPLS adapts a struct to be a PropertyLoadSaver.
type structPLS struct {
v reflect.Value
codec *structCodec
}
// newStructPLS returns a PropertyLoadSaver for the struct pointer p.
func newStructPLS(p interface{}) (PropertyLoadSaver, error) {
v := reflect.ValueOf(p)
if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
return nil, ErrInvalidEntityType
}
v = v.Elem()
codec, err := getStructCodec(v.Type())
if err != nil {
return nil, err
}
return structPLS{v, codec}, nil
}
// LoadStruct loads the properties from p to dst.
// dst must be a struct pointer.
func LoadStruct(dst interface{}, p []Property) error {
x, err := newStructPLS(dst)
if err != nil {
return err
}
return x.Load(p)
}
// SaveStruct returns the properties from src as a slice of Properties.
// src must be a struct pointer.
func SaveStruct(src interface{}) ([]Property, error) {
x, err := newStructPLS(src)
if err != nil {
return nil, err
}
return x.Save()
}

View File

@@ -0,0 +1,559 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"reflect"
"testing"
"time"
"google.golang.org/appengine"
)
func TestValidPropertyName(t *testing.T) {
testCases := []struct {
name string
want bool
}{
// Invalid names.
{"", false},
{"'", false},
{".", false},
{"..", false},
{".foo", false},
{"0", false},
{"00", false},
{"X.X.4.X.X", false},
{"\n", false},
{"\x00", false},
{"abc\xffz", false},
{"foo.", false},
{"foo..", false},
{"foo..bar", false},
{"☃", false},
{`"`, false},
// Valid names.
{"AB", true},
{"Abc", true},
{"X.X.X.X.X", true},
{"_", true},
{"_0", true},
{"a", true},
{"a_B", true},
{"f00", true},
{"f0o", true},
{"fo0", true},
{"foo", true},
{"foo.bar", true},
{"foo.bar.baz", true},
{"世界", true},
}
for _, tc := range testCases {
got := validPropertyName(tc.name)
if got != tc.want {
t.Errorf("%q: got %v, want %v", tc.name, got, tc.want)
}
}
}
func TestStructCodec(t *testing.T) {
type oStruct struct {
O int
}
type pStruct struct {
P int
Q int
}
type rStruct struct {
R int
S pStruct
T oStruct
oStruct
}
type uStruct struct {
U int
v int
}
oStructCodec := &structCodec{
byIndex: []structTag{
{name: "O"},
},
byName: map[string]fieldCodec{
"O": {index: 0},
},
complete: true,
}
pStructCodec := &structCodec{
byIndex: []structTag{
{name: "P"},
{name: "Q"},
},
byName: map[string]fieldCodec{
"P": {index: 0},
"Q": {index: 1},
},
complete: true,
}
rStructCodec := &structCodec{
byIndex: []structTag{
{name: "R"},
{name: "S."},
{name: "T."},
{name: ""},
},
byName: map[string]fieldCodec{
"R": {index: 0},
"S.P": {index: 1, substructCodec: pStructCodec},
"S.Q": {index: 1, substructCodec: pStructCodec},
"T.O": {index: 2, substructCodec: oStructCodec},
"O": {index: 3, substructCodec: oStructCodec},
},
complete: true,
}
uStructCodec := &structCodec{
byIndex: []structTag{
{name: "U"},
{name: "v"},
},
byName: map[string]fieldCodec{
"U": {index: 0},
"v": {index: 1},
},
complete: true,
}
testCases := []struct {
desc string
structValue interface{}
want *structCodec
}{
{
"oStruct",
oStruct{},
oStructCodec,
},
{
"pStruct",
pStruct{},
pStructCodec,
},
{
"rStruct",
rStruct{},
rStructCodec,
},
{
"uStruct",
uStruct{},
uStructCodec,
},
{
"non-basic fields",
struct {
B appengine.BlobKey
K *Key
T time.Time
}{},
&structCodec{
byIndex: []structTag{
{name: "B"},
{name: "K"},
{name: "T"},
},
byName: map[string]fieldCodec{
"B": {index: 0},
"K": {index: 1},
"T": {index: 2},
},
complete: true,
},
},
{
"struct tags with ignored embed",
struct {
A int `datastore:"a,noindex"`
B int `datastore:"b"`
C int `datastore:",noindex"`
D int `datastore:""`
E int
I int `datastore:"-"`
J int `datastore:",noindex" json:"j"`
oStruct `datastore:"-"`
}{},
&structCodec{
byIndex: []structTag{
{name: "a", noIndex: true},
{name: "b", noIndex: false},
{name: "C", noIndex: true},
{name: "D", noIndex: false},
{name: "E", noIndex: false},
{name: "-", noIndex: false},
{name: "J", noIndex: true},
{name: "-", noIndex: false},
},
byName: map[string]fieldCodec{
"a": {index: 0},
"b": {index: 1},
"C": {index: 2},
"D": {index: 3},
"E": {index: 4},
"J": {index: 6},
},
complete: true,
},
},
{
"unexported fields",
struct {
A int
b int
C int `datastore:"x"`
d int `datastore:"Y"`
}{},
&structCodec{
byIndex: []structTag{
{name: "A"},
{name: "b"},
{name: "x"},
{name: "Y"},
},
byName: map[string]fieldCodec{
"A": {index: 0},
"b": {index: 1},
"x": {index: 2},
"Y": {index: 3},
},
complete: true,
},
},
{
"nested and embedded structs",
struct {
A int
B int
CC oStruct
DDD rStruct
oStruct
}{},
&structCodec{
byIndex: []structTag{
{name: "A"},
{name: "B"},
{name: "CC."},
{name: "DDD."},
{name: ""},
},
byName: map[string]fieldCodec{
"A": {index: 0},
"B": {index: 1},
"CC.O": {index: 2, substructCodec: oStructCodec},
"DDD.R": {index: 3, substructCodec: rStructCodec},
"DDD.S.P": {index: 3, substructCodec: rStructCodec},
"DDD.S.Q": {index: 3, substructCodec: rStructCodec},
"DDD.T.O": {index: 3, substructCodec: rStructCodec},
"DDD.O": {index: 3, substructCodec: rStructCodec},
"O": {index: 4, substructCodec: oStructCodec},
},
complete: true,
},
},
{
"struct tags with nested and embedded structs",
struct {
A int `datastore:"-"`
B int `datastore:"w"`
C oStruct `datastore:"xx"`
D rStruct `datastore:"y"`
oStruct `datastore:"z"`
}{},
&structCodec{
byIndex: []structTag{
{name: "-"},
{name: "w"},
{name: "xx."},
{name: "y."},
{name: "z."},
},
byName: map[string]fieldCodec{
"w": {index: 1},
"xx.O": {index: 2, substructCodec: oStructCodec},
"y.R": {index: 3, substructCodec: rStructCodec},
"y.S.P": {index: 3, substructCodec: rStructCodec},
"y.S.Q": {index: 3, substructCodec: rStructCodec},
"y.T.O": {index: 3, substructCodec: rStructCodec},
"y.O": {index: 3, substructCodec: rStructCodec},
"z.O": {index: 4, substructCodec: oStructCodec},
},
complete: true,
},
},
{
"unexported nested and embedded structs",
struct {
a int
B int
c uStruct
D uStruct
uStruct
}{},
&structCodec{
byIndex: []structTag{
{name: "a"},
{name: "B"},
{name: "c."},
{name: "D."},
{name: ""},
},
byName: map[string]fieldCodec{
"a": {index: 0},
"B": {index: 1},
"c.U": {index: 2, substructCodec: uStructCodec},
"c.v": {index: 2, substructCodec: uStructCodec},
"D.U": {index: 3, substructCodec: uStructCodec},
"D.v": {index: 3, substructCodec: uStructCodec},
"U": {index: 4, substructCodec: uStructCodec},
"v": {index: 4, substructCodec: uStructCodec},
},
complete: true,
},
},
{
"noindex nested struct",
struct {
A oStruct `datastore:",noindex"`
}{},
&structCodec{
byIndex: []structTag{
{name: "A.", noIndex: true},
},
byName: map[string]fieldCodec{
"A.O": {index: 0, substructCodec: oStructCodec},
},
complete: true,
},
},
}
for _, tc := range testCases {
got, err := getStructCodec(reflect.TypeOf(tc.structValue))
if err != nil {
t.Errorf("%s: getStructCodec: %v", tc.desc, err)
continue
}
if !reflect.DeepEqual(got, tc.want) {
t.Errorf("%s\ngot %v\nwant %v\n", tc.desc, got, tc.want)
continue
}
}
}
func TestRepeatedPropertyName(t *testing.T) {
good := []interface{}{
struct {
A int `datastore:"-"`
}{},
struct {
A int `datastore:"b"`
B int
}{},
struct {
A int
B int `datastore:"B"`
}{},
struct {
A int `datastore:"B"`
B int `datastore:"-"`
}{},
struct {
A int `datastore:"-"`
B int `datastore:"A"`
}{},
struct {
A int `datastore:"B"`
B int `datastore:"A"`
}{},
struct {
A int `datastore:"B"`
B int `datastore:"C"`
C int `datastore:"A"`
}{},
struct {
A int `datastore:"B"`
B int `datastore:"C"`
C int `datastore:"D"`
}{},
}
bad := []interface{}{
struct {
A int `datastore:"B"`
B int
}{},
struct {
A int
B int `datastore:"A"`
}{},
struct {
A int `datastore:"C"`
B int `datastore:"C"`
}{},
struct {
A int `datastore:"B"`
B int `datastore:"C"`
C int `datastore:"B"`
}{},
}
testGetStructCodec(t, good, bad)
}
func TestFlatteningNestedStructs(t *testing.T) {
type deepGood struct {
A struct {
B []struct {
C struct {
D int
}
}
}
}
type deepBad struct {
A struct {
B []struct {
C struct {
D []int
}
}
}
}
type iSay struct {
Tomato int
}
type youSay struct {
Tomato int
}
type tweedledee struct {
Dee int `datastore:"D"`
}
type tweedledum struct {
Dum int `datastore:"D"`
}
good := []interface{}{
struct {
X []struct {
Y string
}
}{},
struct {
X []struct {
Y []byte
}
}{},
struct {
P []int
X struct {
Y []int
}
}{},
struct {
X struct {
Y []int
}
Q []int
}{},
struct {
P []int
X struct {
Y []int
}
Q []int
}{},
struct {
deepGood
}{},
struct {
DG deepGood
}{},
struct {
Foo struct {
Z int `datastore:"X"`
} `datastore:"A"`
Bar struct {
Z int `datastore:"Y"`
} `datastore:"A"`
}{},
}
bad := []interface{}{
struct {
X []struct {
Y []string
}
}{},
struct {
X []struct {
Y []int
}
}{},
struct {
deepBad
}{},
struct {
DB deepBad
}{},
struct {
iSay
youSay
}{},
struct {
tweedledee
tweedledum
}{},
struct {
Foo struct {
Z int
} `datastore:"A"`
Bar struct {
Z int
} `datastore:"A"`
}{},
}
testGetStructCodec(t, good, bad)
}
func testGetStructCodec(t *testing.T, good []interface{}, bad []interface{}) {
for _, x := range good {
if _, err := getStructCodec(reflect.TypeOf(x)); err != nil {
t.Errorf("type %T: got non-nil error (%s), want nil", x, err)
}
}
for _, x := range bad {
if _, err := getStructCodec(reflect.TypeOf(x)); err == nil {
t.Errorf("type %T: got nil error, want non-nil", x)
}
}
}
func TestNilKeyIsStored(t *testing.T) {
x := struct {
K *Key
I int
}{}
p := PropertyList{}
// Save x as properties.
p1, _ := SaveStruct(&x)
p.Load(p1)
// Set x's fields to non-zero.
x.K = &Key{}
x.I = 2
// Load x from properties.
p2, _ := p.Save()
LoadStruct(&x, p2)
// Check that x's fields were set to zero.
if x.K != nil {
t.Errorf("K field was not zero")
}
if x.I != 0 {
t.Errorf("I field was not zero")
}
}

View File

@@ -0,0 +1,712 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"encoding/base64"
"errors"
"fmt"
"math"
"reflect"
"strings"
"github.com/golang/protobuf/proto"
"google.golang.org/appengine"
pb "google.golang.org/appengine/internal/datastore"
)
type operator int
const (
lessThan operator = iota
lessEq
equal
greaterEq
greaterThan
)
var operatorToProto = map[operator]*pb.Query_Filter_Operator{
lessThan: pb.Query_Filter_LESS_THAN.Enum(),
lessEq: pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(),
equal: pb.Query_Filter_EQUAL.Enum(),
greaterEq: pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(),
greaterThan: pb.Query_Filter_GREATER_THAN.Enum(),
}
// filter is a conditional filter on query results.
type filter struct {
FieldName string
Op operator
Value interface{}
}
type sortDirection int
const (
ascending sortDirection = iota
descending
)
var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{
ascending: pb.Query_Order_ASCENDING.Enum(),
descending: pb.Query_Order_DESCENDING.Enum(),
}
// order is a sort order on query results.
type order struct {
FieldName string
Direction sortDirection
}
// NewQuery creates a new Query for a specific entity kind.
//
// An empty kind means to return all entities, including entities created and
// managed by other App Engine features, and is called a kindless query.
// Kindless queries cannot include filters or sort orders on property values.
func NewQuery(kind string) *Query {
return &Query{
kind: kind,
limit: -1,
}
}
// Query represents a datastore query.
type Query struct {
kind string
ancestor *Key
filter []filter
order []order
projection []string
distinct bool
keysOnly bool
eventual bool
limit int32
offset int32
start *pb.CompiledCursor
end *pb.CompiledCursor
err error
}
func (q *Query) clone() *Query {
x := *q
// Copy the contents of the slice-typed fields to a new backing store.
if len(q.filter) > 0 {
x.filter = make([]filter, len(q.filter))
copy(x.filter, q.filter)
}
if len(q.order) > 0 {
x.order = make([]order, len(q.order))
copy(x.order, q.order)
}
return &x
}
// Ancestor returns a derivative query with an ancestor filter.
// The ancestor should not be nil.
func (q *Query) Ancestor(ancestor *Key) *Query {
q = q.clone()
if ancestor == nil {
q.err = errors.New("datastore: nil query ancestor")
return q
}
q.ancestor = ancestor
return q
}
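// Example of a kindless query restricted to an ancestor (a minimal sketch,
// assuming a complete *datastore.Key named parent):
//
//	q := datastore.NewQuery("").Ancestor(parent)
//	// q matches every entity whose key has parent as an ancestor.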
// EventualConsistency returns a derivative query that returns eventually
// consistent results.
// It only has an effect on ancestor queries.
func (q *Query) EventualConsistency() *Query {
q = q.clone()
q.eventual = true
return q
}
// Filter returns a derivative query with a field-based filter.
// The filterStr argument must be a field name followed by optional space,
// followed by an operator, one of ">", "<", ">=", "<=", or "=".
// Fields are compared against the provided value using the operator.
// Multiple filters are AND'ed together.
func (q *Query) Filter(filterStr string, value interface{}) *Query {
q = q.clone()
filterStr = strings.TrimSpace(filterStr)
if len(filterStr) < 1 {
q.err = errors.New("datastore: invalid filter: " + filterStr)
return q
}
f := filter{
FieldName: strings.TrimRight(filterStr, " ><=!"),
Value: value,
}
switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {
case "<=":
f.Op = lessEq
case ">=":
f.Op = greaterEq
case "<":
f.Op = lessThan
case ">":
f.Op = greaterThan
case "=":
f.Op = equal
default:
q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
return q
}
q.filter = append(q.filter, f)
return q
}
// Order returns a derivative query with a field-based sort order. Orders are
// applied in the order they are added. The default order is ascending; to sort
// in descending order prefix the fieldName with a minus sign (-).
func (q *Query) Order(fieldName string) *Query {
q = q.clone()
fieldName = strings.TrimSpace(fieldName)
o := order{
Direction: ascending,
FieldName: fieldName,
}
if strings.HasPrefix(fieldName, "-") {
o.Direction = descending
o.FieldName = strings.TrimSpace(fieldName[1:])
} else if strings.HasPrefix(fieldName, "+") {
q.err = fmt.Errorf("datastore: invalid order: %q", fieldName)
return q
}
if len(o.FieldName) == 0 {
q.err = errors.New("datastore: empty order")
return q
}
q.order = append(q.order, o)
return q
}
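// exampleFilterOrder is an illustrative sketch added for this edit, not part
// of the vendored file. It shows the "field op" filter-string grammar and the
// "-field" descending prefix documented above; the "Employee" kind and its
// field names are hypothetical.
func exampleFilterOrder() *Query {
	return NewQuery("Employee").
		Filter("Age >=", 21).          // field name, optional space, operator
		Filter("Department =", "Eng"). // multiple filters are AND'ed together
		Order("-Salary").              // '-' prefix sorts descending
		Order("Name")                  // default direction is ascending
}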
// Project returns a derivative query that yields only the given fields. It
// cannot be used with KeysOnly.
func (q *Query) Project(fieldNames ...string) *Query {
q = q.clone()
q.projection = append([]string(nil), fieldNames...)
return q
}
// Distinct returns a derivative query that yields de-duplicated entities with
// respect to the set of projected fields. It is only used for projection
// queries.
func (q *Query) Distinct() *Query {
q = q.clone()
q.distinct = true
return q
}
// KeysOnly returns a derivative query that yields only keys, not keys and
// entities. It cannot be used with projection queries.
func (q *Query) KeysOnly() *Query {
q = q.clone()
q.keysOnly = true
return q
}
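// exampleProjection is an illustrative sketch added for this edit, not part of
// the vendored file. It shows a distinct projection query and a keys-only
// query; the kind and field names are hypothetical. The two forms cannot be
// combined, which toProto enforces below.
func exampleProjection() (*Query, *Query) {
	departments := NewQuery("Employee").Project("Department").Distinct()
	justKeys := NewQuery("Employee").KeysOnly()
	return departments, justKeys
}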
// Limit returns a derivative query that has a limit on the number of results
// returned. A negative value means unlimited.
func (q *Query) Limit(limit int) *Query {
q = q.clone()
if limit < math.MinInt32 || limit > math.MaxInt32 {
q.err = errors.New("datastore: query limit overflow")
return q
}
q.limit = int32(limit)
return q
}
// Offset returns a derivative query that has an offset of how many keys to
// skip over before returning results. A negative value is invalid.
func (q *Query) Offset(offset int) *Query {
q = q.clone()
if offset < 0 {
q.err = errors.New("datastore: negative query offset")
return q
}
if offset > math.MaxInt32 {
q.err = errors.New("datastore: query offset overflow")
return q
}
q.offset = int32(offset)
return q
}
// Start returns a derivative query with the given start point.
func (q *Query) Start(c Cursor) *Query {
q = q.clone()
if c.cc == nil {
q.err = errors.New("datastore: invalid cursor")
return q
}
q.start = c.cc
return q
}
// End returns a derivative query with the given end point.
func (q *Query) End(c Cursor) *Query {
q = q.clone()
if c.cc == nil {
q.err = errors.New("datastore: invalid cursor")
return q
}
q.end = c.cc
return q
}
// toProto converts the query to a protocol buffer.
func (q *Query) toProto(dst *pb.Query, appID string) error {
if len(q.projection) != 0 && q.keysOnly {
return errors.New("datastore: query cannot both project and be keys-only")
}
dst.Reset()
dst.App = proto.String(appID)
if q.kind != "" {
dst.Kind = proto.String(q.kind)
}
if q.ancestor != nil {
dst.Ancestor = keyToProto(appID, q.ancestor)
if q.eventual {
dst.Strong = proto.Bool(false)
}
}
if q.projection != nil {
dst.PropertyName = q.projection
if q.distinct {
dst.GroupByPropertyName = q.projection
}
}
if q.keysOnly {
dst.KeysOnly = proto.Bool(true)
dst.RequirePerfectPlan = proto.Bool(true)
}
for _, qf := range q.filter {
if qf.FieldName == "" {
return errors.New("datastore: empty query filter field name")
}
p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false)
if errStr != "" {
return errors.New("datastore: bad query filter value type: " + errStr)
}
xf := &pb.Query_Filter{
Op: operatorToProto[qf.Op],
Property: []*pb.Property{p},
}
if xf.Op == nil {
return errors.New("datastore: unknown query filter operator")
}
dst.Filter = append(dst.Filter, xf)
}
for _, qo := range q.order {
if qo.FieldName == "" {
return errors.New("datastore: empty query order field name")
}
xo := &pb.Query_Order{
Property: proto.String(qo.FieldName),
Direction: sortDirectionToProto[qo.Direction],
}
if xo.Direction == nil {
return errors.New("datastore: unknown query order direction")
}
dst.Order = append(dst.Order, xo)
}
if q.limit >= 0 {
dst.Limit = proto.Int32(q.limit)
}
if q.offset != 0 {
dst.Offset = proto.Int32(q.offset)
}
dst.CompiledCursor = q.start
dst.EndCompiledCursor = q.end
dst.Compile = proto.Bool(true)
return nil
}
// Count returns the number of results for the query.
func (q *Query) Count(c appengine.Context) (int, error) {
// Check that the query is well-formed.
if q.err != nil {
return 0, q.err
}
// Run a copy of the query, with keysOnly true (if we're not a projection,
// since the two are incompatible), and an adjusted offset. We also set the
// limit to zero, as we don't want any actual entity data, just the number
// of skipped results.
newQ := q.clone()
newQ.keysOnly = len(newQ.projection) == 0
newQ.limit = 0
if q.limit < 0 {
// If the original query was unlimited, set the new query's offset to maximum.
newQ.offset = math.MaxInt32
} else {
newQ.offset = q.offset + q.limit
if newQ.offset < 0 {
// Do the best we can, in the presence of overflow.
newQ.offset = math.MaxInt32
}
}
req := &pb.Query{}
if err := newQ.toProto(req, c.FullyQualifiedAppID()); err != nil {
return 0, err
}
res := &pb.QueryResult{}
if err := c.Call("datastore_v3", "RunQuery", req, res, nil); err != nil {
return 0, err
}
// n is the count we will return. For example, suppose that our original
// query had an offset of 4 and a limit of 2008: the count will be 2008,
// provided that there are at least 2012 matching entities. However, the
// RPCs will only skip 1000 results at a time. The RPC sequence is:
// call RunQuery with (offset, limit) = (2012, 0) // 2012 == newQ.offset
// response has (skippedResults, moreResults) = (1000, true)
// n += 1000 // n == 1000
// call Next with (offset, limit) = (1012, 0) // 1012 == newQ.offset - n
// response has (skippedResults, moreResults) = (1000, true)
// n += 1000 // n == 2000
// call Next with (offset, limit) = (12, 0) // 12 == newQ.offset - n
// response has (skippedResults, moreResults) = (12, false)
// n += 12 // n == 2012
// // exit the loop
// n -= 4 // n == 2008
var n int32
for {
// The QueryResult should have no actual entity data, just skipped results.
if len(res.Result) != 0 {
return 0, errors.New("datastore: internal error: Count request returned too much data")
}
n += res.GetSkippedResults()
if !res.GetMoreResults() {
break
}
if err := callNext(c, res, newQ.offset-n, 0); err != nil {
return 0, err
}
}
n -= q.offset
if n < 0 {
// If the offset was greater than the number of matching entities,
// return 0 instead of negative.
n = 0
}
return int(n), nil
}
// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that
// returned by a query with more results.
func callNext(c appengine.Context, res *pb.QueryResult, offset, limit int32) error {
if res.Cursor == nil {
return errors.New("datastore: internal error: server did not return a cursor")
}
req := &pb.NextRequest{
Cursor: res.Cursor,
}
if limit >= 0 {
req.Count = proto.Int32(limit)
}
if offset != 0 {
req.Offset = proto.Int32(offset)
}
if res.CompiledCursor != nil {
req.Compile = proto.Bool(true)
}
res.Reset()
return c.Call("datastore_v3", "Next", req, res, nil)
}
// GetAll runs the query in the given context and returns all keys that match
// that query, as well as appending the values to dst.
//
// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
//
// As a special case, *PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when *[]PropertyList was intended.
//
// The keys returned by GetAll will be in a 1-1 correspondence with the entities
// added to dst.
//
// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
func (q *Query) GetAll(c appengine.Context, dst interface{}) ([]*Key, error) {
var (
dv reflect.Value
mat multiArgType
elemType reflect.Type
errFieldMismatch error
)
if !q.keysOnly {
dv = reflect.ValueOf(dst)
if dv.Kind() != reflect.Ptr || dv.IsNil() {
return nil, ErrInvalidEntityType
}
dv = dv.Elem()
mat, elemType = checkMultiArg(dv)
if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
return nil, ErrInvalidEntityType
}
}
var keys []*Key
for t := q.Run(c); ; {
k, e, err := t.next()
if err == Done {
break
}
if err != nil {
return keys, err
}
if !q.keysOnly {
ev := reflect.New(elemType)
if elemType.Kind() == reflect.Map {
// This is a special case. The zero values of a map type are
// not immediately useful; they have to be make'd.
//
// Funcs and channels are similar, in that a zero value is not useful,
// but even a freshly make'd channel isn't useful: there's no fixed
// channel buffer size that is always going to be large enough, and
// there's no goroutine to drain the other end. Theoretically, these
// types could be supported, for example by sniffing for a constructor
// method or requiring prior registration, but for now it's not a
// frequent enough concern to be worth it. Programmers can work around
// it by explicitly using Iterator.Next instead of the Query.GetAll
// convenience method.
x := reflect.MakeMap(elemType)
ev.Elem().Set(x)
}
if err = loadEntity(ev.Interface(), e); err != nil {
if _, ok := err.(*ErrFieldMismatch); ok {
// We continue loading entities even in the face of field mismatch errors.
// If we encounter any other error, that other error is returned. Otherwise,
// an ErrFieldMismatch is returned.
errFieldMismatch = err
} else {
return keys, err
}
}
if mat != multiArgTypeStructPtr {
ev = ev.Elem()
}
dv.Set(reflect.Append(dv, ev))
}
keys = append(keys, k)
}
return keys, errFieldMismatch
}
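// exampleGetAll is an illustrative sketch added for this edit, not part of the
// vendored file. It shows the *[]S destination form described in the GetAll
// comment above; the "Gopher" kind mirrors the struct used in the tests and
// the context is assumed to come from an incoming request.
func exampleGetAll(c appengine.Context) error {
	type gopher struct {
		Name   string
		Height int
	}
	var gophers []gopher
	keys, err := NewQuery("Gopher").
		Filter("Height >", 10).
		Order("-Height").
		GetAll(c, &gophers)
	if err != nil {
		return err
	}
	// keys and gophers are in 1-1 correspondence.
	for i, k := range keys {
		fmt.Printf("%v: %+v\n", k, gophers[i])
	}
	return nil
}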
// Run runs the query in the given context.
func (q *Query) Run(c appengine.Context) *Iterator {
if q.err != nil {
return &Iterator{err: q.err}
}
t := &Iterator{
c: c,
limit: q.limit,
q: q,
prevCC: q.start,
}
var req pb.Query
if err := q.toProto(&req, c.FullyQualifiedAppID()); err != nil {
t.err = err
return t
}
if err := c.Call("datastore_v3", "RunQuery", &req, &t.res, nil); err != nil {
t.err = err
return t
}
offset := q.offset - t.res.GetSkippedResults()
for offset > 0 && t.res.GetMoreResults() {
t.prevCC = t.res.CompiledCursor
if err := callNext(t.c, &t.res, offset, t.limit); err != nil {
t.err = err
break
}
skip := t.res.GetSkippedResults()
if skip < 0 {
t.err = errors.New("datastore: internal error: negative number of skipped_results")
break
}
offset -= skip
}
if offset < 0 {
t.err = errors.New("datastore: internal error: query offset was overshot")
}
return t
}
// Iterator is the result of running a query.
type Iterator struct {
c appengine.Context
err error
// res is the result of the most recent RunQuery or Next API call.
res pb.QueryResult
// i is how many elements of res.Result we have iterated over.
i int
// limit is the limit on the number of results this iterator should return.
// A negative value means unlimited.
limit int32
// q is the original query which yielded this iterator.
q *Query
// prevCC is the compiled cursor that marks the end of the previous batch
// of results.
prevCC *pb.CompiledCursor
}
// Done is returned when a query iteration has completed.
var Done = errors.New("datastore: query has no more results")
// Next returns the key of the next result. When there are no more results,
// Done is returned as the error.
//
// If the query is not keys only and dst is non-nil, it also loads the entity
// stored for that key into the struct pointer or PropertyLoadSaver dst, with
// the same semantics and possible errors as for the Get function.
func (t *Iterator) Next(dst interface{}) (*Key, error) {
k, e, err := t.next()
if err != nil {
return nil, err
}
if dst != nil && !t.q.keysOnly {
err = loadEntity(dst, e)
}
return k, err
}
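// exampleIterate is an illustrative sketch added for this edit, not part of
// the vendored file. It shows the usual Run/Next loop terminated by Done, with
// a hypothetical "TodoItem" kind.
func exampleIterate(c appengine.Context) error {
	type todoItem struct {
		Text string
		Done bool
	}
	it := NewQuery("TodoItem").Filter("Done =", false).Run(c)
	for {
		var item todoItem
		key, err := it.Next(&item)
		if err == Done { // Done marks the end of the result set, not a failure
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Printf("%v: %s\n", key, item.Text)
	}
}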
func (t *Iterator) next() (*Key, *pb.EntityProto, error) {
if t.err != nil {
return nil, nil, t.err
}
// Issue datastore_v3/Next RPCs as necessary.
for t.i == len(t.res.Result) {
if !t.res.GetMoreResults() {
t.err = Done
return nil, nil, t.err
}
t.prevCC = t.res.CompiledCursor
if err := callNext(t.c, &t.res, 0, t.limit); err != nil {
t.err = err
return nil, nil, t.err
}
if t.res.GetSkippedResults() != 0 {
t.err = errors.New("datastore: internal error: iterator has skipped results")
return nil, nil, t.err
}
t.i = 0
if t.limit >= 0 {
t.limit -= int32(len(t.res.Result))
if t.limit < 0 {
t.err = errors.New("datastore: internal error: query returned more results than the limit")
return nil, nil, t.err
}
}
}
// Extract the key from the t.i'th element of t.res.Result.
e := t.res.Result[t.i]
t.i++
if e.Key == nil {
return nil, nil, errors.New("datastore: internal error: server did not return a key")
}
k, err := protoToKey(e.Key)
if err != nil || k.Incomplete() {
return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
}
return k, e, nil
}
// Cursor returns a cursor for the iterator's current location.
func (t *Iterator) Cursor() (Cursor, error) {
if t.err != nil && t.err != Done {
return Cursor{}, t.err
}
// If we are at either end of the current batch of results,
// return the compiled cursor at that end.
skipped := t.res.GetSkippedResults()
if t.i == 0 && skipped == 0 {
if t.prevCC == nil {
// A nil pointer (of type *pb.CompiledCursor) means no constraint:
// passing it as the end cursor of a new query means unlimited results
// (glossing over the integer limit parameter for now).
// A non-nil pointer to an empty pb.CompiledCursor means the start:
// passing it as the end cursor of a new query means 0 results.
// If prevCC was nil, then the original query had no start cursor, but
// Iterator.Cursor should return "the start" instead of unlimited.
return Cursor{&zeroCC}, nil
}
return Cursor{t.prevCC}, nil
}
if t.i == len(t.res.Result) {
return Cursor{t.res.CompiledCursor}, nil
}
// Otherwise, re-run the query offset to this iterator's position, starting from
// the most recent compiled cursor. This is done on a best-effort basis, as it
// is racy; if a concurrent process has added or removed entities, then the
// cursor returned may be inconsistent.
q := t.q.clone()
q.start = t.prevCC
q.offset = skipped + int32(t.i)
q.limit = 0
q.keysOnly = len(q.projection) == 0
t1 := q.Run(t.c)
_, _, err := t1.next()
if err != Done {
if err == nil {
err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results")
}
return Cursor{}, err
}
return Cursor{t1.res.CompiledCursor}, nil
}
var zeroCC pb.CompiledCursor
// Cursor is an iterator's position. It can be converted to and from an opaque
// string. A cursor can be used from different HTTP requests, but only with a
// query with the same kind, ancestor, filter and order constraints.
type Cursor struct {
cc *pb.CompiledCursor
}
// String returns a base-64 string representation of a cursor.
func (c Cursor) String() string {
if c.cc == nil {
return ""
}
b, err := proto.Marshal(c.cc)
if err != nil {
// The only way to construct a Cursor with a non-nil cc field is to
// unmarshal from the byte representation. We panic if the unmarshal
// succeeds but the marshaling of the unchanged protobuf value fails.
panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err))
}
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
// DecodeCursor decodes a cursor from its base-64 string representation.
func DecodeCursor(s string) (Cursor, error) {
if s == "" {
return Cursor{&zeroCC}, nil
}
if n := len(s) % 4; n != 0 {
s += strings.Repeat("=", 4-n)
}
b, err := base64.URLEncoding.DecodeString(s)
if err != nil {
return Cursor{}, err
}
cc := &pb.CompiledCursor{}
if err := proto.Unmarshal(b, cc); err != nil {
return Cursor{}, err
}
return Cursor{cc}, nil
}
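// exampleCursorPaging is an illustrative sketch added for this edit, not part
// of the vendored file. It shows the round trip described in the Cursor
// comment above: decode an opaque cursor string from a previous request,
// resume the query there, and hand the next cursor back as a string. The
// "Greeting" kind, its "Date" field and the page size are hypothetical.
func exampleCursorPaging(c appengine.Context, encoded string) (string, error) {
	q := NewQuery("Greeting").Order("-Date")
	if encoded != "" {
		cur, err := DecodeCursor(encoded)
		if err != nil {
			return "", err
		}
		q = q.Start(cur)
	}
	it := q.Limit(10).KeysOnly().Run(c)
	for {
		_, err := it.Next(nil)
		if err == Done {
			break
		}
		if err != nil {
			return "", err
		}
	}
	next, err := it.Cursor()
	if err != nil {
		return "", err
	}
	return next.String(), nil // opaque string, safe to pass to the client
}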

View File

@ -0,0 +1,580 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"errors"
"fmt"
"reflect"
"strings"
"testing"
"github.com/golang/protobuf/proto"
"google.golang.org/appengine/internal/aetesting"
pb "google.golang.org/appengine/internal/datastore"
)
var (
path1 = &pb.Path{
Element: []*pb.Path_Element{
{
Type: proto.String("Gopher"),
Id: proto.Int64(6),
},
},
}
path2 = &pb.Path{
Element: []*pb.Path_Element{
{
Type: proto.String("Gopher"),
Id: proto.Int64(6),
},
{
Type: proto.String("Gopher"),
Id: proto.Int64(8),
},
},
}
)
func fakeRunQuery(in *pb.Query, out *pb.QueryResult) error {
expectedIn := &pb.Query{
App: proto.String("dev~fake-app"),
Kind: proto.String("Gopher"),
Compile: proto.Bool(true),
}
if !proto.Equal(in, expectedIn) {
return fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn)
}
*out = pb.QueryResult{
Result: []*pb.EntityProto{
{
Key: &pb.Reference{
App: proto.String("s~test-app"),
Path: path1,
},
EntityGroup: path1,
Property: []*pb.Property{
{
Meaning: pb.Property_TEXT.Enum(),
Name: proto.String("Name"),
Value: &pb.PropertyValue{
StringValue: proto.String("George"),
},
},
{
Name: proto.String("Height"),
Value: &pb.PropertyValue{
Int64Value: proto.Int64(32),
},
},
},
},
{
Key: &pb.Reference{
App: proto.String("s~test-app"),
Path: path2,
},
EntityGroup: path1, // ancestor is George
Property: []*pb.Property{
{
Meaning: pb.Property_TEXT.Enum(),
Name: proto.String("Name"),
Value: &pb.PropertyValue{
StringValue: proto.String("Rufus"),
},
},
// No height for Rufus.
},
},
},
MoreResults: proto.Bool(false),
}
return nil
}
type StructThatImplementsPLS struct{}
func (StructThatImplementsPLS) Load(p []Property) error { return nil }
func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil }
var _ PropertyLoadSaver = StructThatImplementsPLS{}
type StructPtrThatImplementsPLS struct{}
func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil }
func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil }
var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{}
type PropertyMap map[string]Property
func (m PropertyMap) Load(props []Property) error {
for _, p := range props {
if p.Multiple {
return errors.New("PropertyMap does not support multiple properties")
}
m[p.Name] = p
}
return nil
}
func (m PropertyMap) Save() ([]Property, error) {
props := make([]Property, 0, len(m))
for _, p := range m {
if p.Multiple {
return nil, errors.New("PropertyMap does not support multiple properties")
}
props = append(props, p)
}
return props, nil
}
var _ PropertyLoadSaver = PropertyMap{}
type Gopher struct {
Name string
Height int
}
// typeOfEmptyInterface is the type of interface{}, but we can't use
// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an
// interface{}.
var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem()
func TestCheckMultiArg(t *testing.T) {
testCases := []struct {
v interface{}
mat multiArgType
elemType reflect.Type
}{
// Invalid cases.
{nil, multiArgTypeInvalid, nil},
{Gopher{}, multiArgTypeInvalid, nil},
{&Gopher{}, multiArgTypeInvalid, nil},
{PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case.
{PropertyMap{}, multiArgTypeInvalid, nil},
{[]*PropertyList(nil), multiArgTypeInvalid, nil},
{[]*PropertyMap(nil), multiArgTypeInvalid, nil},
{[]**Gopher(nil), multiArgTypeInvalid, nil},
{[]*interface{}(nil), multiArgTypeInvalid, nil},
// Valid cases.
{
[]PropertyList(nil),
multiArgTypePropertyLoadSaver,
reflect.TypeOf(PropertyList{}),
},
{
[]PropertyMap(nil),
multiArgTypePropertyLoadSaver,
reflect.TypeOf(PropertyMap{}),
},
{
[]StructThatImplementsPLS(nil),
multiArgTypePropertyLoadSaver,
reflect.TypeOf(StructThatImplementsPLS{}),
},
{
[]StructPtrThatImplementsPLS(nil),
multiArgTypePropertyLoadSaver,
reflect.TypeOf(StructPtrThatImplementsPLS{}),
},
{
[]Gopher(nil),
multiArgTypeStruct,
reflect.TypeOf(Gopher{}),
},
{
[]*Gopher(nil),
multiArgTypeStructPtr,
reflect.TypeOf(Gopher{}),
},
{
[]interface{}(nil),
multiArgTypeInterface,
typeOfEmptyInterface,
},
}
for _, tc := range testCases {
mat, elemType := checkMultiArg(reflect.ValueOf(tc.v))
if mat != tc.mat || elemType != tc.elemType {
t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v",
tc.v, mat, elemType, tc.mat, tc.elemType)
}
}
}
func TestSimpleQuery(t *testing.T) {
struct1 := Gopher{Name: "George", Height: 32}
struct2 := Gopher{Name: "Rufus"}
pList1 := PropertyList{
{
Name: "Name",
Value: "George",
},
{
Name: "Height",
Value: int64(32),
},
}
pList2 := PropertyList{
{
Name: "Name",
Value: "Rufus",
},
}
pMap1 := PropertyMap{
"Name": Property{
Name: "Name",
Value: "George",
},
"Height": Property{
Name: "Height",
Value: int64(32),
},
}
pMap2 := PropertyMap{
"Name": Property{
Name: "Name",
Value: "Rufus",
},
}
testCases := []struct {
dst interface{}
want interface{}
}{
// The destination must have type *[]P, *[]S or *[]*S, for some non-interface
// type P such that *P implements PropertyLoadSaver, or for some struct type S.
{new([]Gopher), &[]Gopher{struct1, struct2}},
{new([]*Gopher), &[]*Gopher{&struct1, &struct2}},
{new([]PropertyList), &[]PropertyList{pList1, pList2}},
{new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}},
// Any other destination type is invalid.
{0, nil},
{Gopher{}, nil},
{PropertyList{}, nil},
{PropertyMap{}, nil},
{[]int{}, nil},
{[]Gopher{}, nil},
{[]PropertyList{}, nil},
{new(int), nil},
{new(Gopher), nil},
{new(PropertyList), nil}, // This is a special case.
{new(PropertyMap), nil},
{new([]int), nil},
{new([]map[int]int), nil},
{new([]map[string]Property), nil},
{new([]map[string]interface{}), nil},
{new([]*int), nil},
{new([]*map[int]int), nil},
{new([]*map[string]Property), nil},
{new([]*map[string]interface{}), nil},
{new([]**Gopher), nil},
{new([]*PropertyList), nil},
{new([]*PropertyMap), nil},
}
for _, tc := range testCases {
nCall := 0
c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error {
nCall++
return fakeRunQuery(in, out)
})
var (
expectedErr error
expectedNCall int
)
if tc.want == nil {
expectedErr = ErrInvalidEntityType
} else {
expectedNCall = 1
}
keys, err := NewQuery("Gopher").GetAll(c, tc.dst)
if err != expectedErr {
t.Errorf("dst type %T: got error %v, want %v", tc.dst, err, expectedErr)
continue
}
if nCall != expectedNCall {
t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall)
continue
}
if err != nil {
continue
}
key1 := NewKey(c, "Gopher", "", 6, nil)
expectedKeys := []*Key{
key1,
NewKey(c, "Gopher", "", 8, key1),
}
if l1, l2 := len(keys), len(expectedKeys); l1 != l2 {
t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2)
continue
}
for i, key := range keys {
if key.AppID() != "s~test-app" {
t.Errorf(`dst type %T: Key #%d's AppID = %q, want "s~test-app"`, tc.dst, i, key.AppID())
continue
}
if !keysEqual(key, expectedKeys[i]) {
t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i])
continue
}
}
if !reflect.DeepEqual(tc.dst, tc.want) {
t.Errorf("dst type %T: Entities got %+v, want %+v", tc.dst, tc.dst, tc.want)
continue
}
}
}
// keysEqual is like (*Key).Equal, but ignores the App ID.
func keysEqual(a, b *Key) bool {
for a != nil && b != nil {
if a.Kind() != b.Kind() || a.StringID() != b.StringID() || a.IntID() != b.IntID() {
return false
}
a, b = a.Parent(), b.Parent()
}
return a == b
}
func TestQueriesAreImmutable(t *testing.T) {
// Test that deriving q2 from q1 does not modify q1.
q0 := NewQuery("foo")
q1 := NewQuery("foo")
q2 := q1.Offset(2)
if !reflect.DeepEqual(q0, q1) {
t.Errorf("q0 and q1 were not equal")
}
if reflect.DeepEqual(q1, q2) {
t.Errorf("q1 and q2 were equal")
}
// Test that deriving from q4 twice does not conflict, even though
// q4 has a long list of order clauses. This tests that the arrays
// backed by a query's slice of orders are not shared.
f := func() *Query {
q := NewQuery("bar")
// 47 is an ugly number that is unlikely to be near a re-allocation
// point in repeated append calls. For example, it's not near a power
// of 2 or a multiple of 10.
for i := 0; i < 47; i++ {
q = q.Order(fmt.Sprintf("x%d", i))
}
return q
}
q3 := f().Order("y")
q4 := f()
q5 := q4.Order("y")
q6 := q4.Order("z")
if !reflect.DeepEqual(q3, q5) {
t.Errorf("q3 and q5 were not equal")
}
if reflect.DeepEqual(q5, q6) {
t.Errorf("q5 and q6 were equal")
}
}
func TestFilterParser(t *testing.T) {
testCases := []struct {
filterStr string
wantOK bool
wantFieldName string
wantOp operator
}{
// Supported ops.
{"x<", true, "x", lessThan},
{"x <", true, "x", lessThan},
{"x <", true, "x", lessThan},
{" x < ", true, "x", lessThan},
{"x <=", true, "x", lessEq},
{"x =", true, "x", equal},
{"x >=", true, "x", greaterEq},
{"x >", true, "x", greaterThan},
{"in >", true, "in", greaterThan},
{"in>", true, "in", greaterThan},
// Valid but (currently) unsupported ops.
{"x!=", false, "", 0},
{"x !=", false, "", 0},
{" x != ", false, "", 0},
{"x IN", false, "", 0},
{"x in", false, "", 0},
// Invalid ops.
{"x EQ", false, "", 0},
{"x lt", false, "", 0},
{"x <>", false, "", 0},
{"x >>", false, "", 0},
{"x ==", false, "", 0},
{"x =<", false, "", 0},
{"x =>", false, "", 0},
{"x !", false, "", 0},
{"x ", false, "", 0},
{"x", false, "", 0},
}
for _, tc := range testCases {
q := NewQuery("foo").Filter(tc.filterStr, 42)
if ok := q.err == nil; ok != tc.wantOK {
t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK)
continue
}
if !tc.wantOK {
continue
}
if len(q.filter) != 1 {
t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1)
continue
}
got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42}
if got != want {
t.Errorf("%q: got %v, want %v", tc.filterStr, got, want)
continue
}
}
}
func TestQueryToProto(t *testing.T) {
// The context is required to make Keys for the test cases.
var got *pb.Query
NoErr := errors.New("No error")
c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error {
got = in
return NoErr // return a non-nil error so Run doesn't keep going.
})
testCases := []struct {
desc string
query *Query
want *pb.Query
err string
}{
{
desc: "empty",
query: NewQuery(""),
want: &pb.Query{},
},
{
desc: "standard query",
query: NewQuery("kind").Order("-I").Filter("I >", 17).Filter("U =", "Dave").Limit(7).Offset(42),
want: &pb.Query{
Kind: proto.String("kind"),
Filter: []*pb.Query_Filter{
{
Op: pb.Query_Filter_GREATER_THAN.Enum(),
Property: []*pb.Property{
{
Name: proto.String("I"),
Value: &pb.PropertyValue{Int64Value: proto.Int64(17)},
Multiple: proto.Bool(false),
},
},
},
{
Op: pb.Query_Filter_EQUAL.Enum(),
Property: []*pb.Property{
{
Name: proto.String("U"),
Value: &pb.PropertyValue{StringValue: proto.String("Dave")},
Multiple: proto.Bool(false),
},
},
},
},
Order: []*pb.Query_Order{
{
Property: proto.String("I"),
Direction: pb.Query_Order_DESCENDING.Enum(),
},
},
Limit: proto.Int32(7),
Offset: proto.Int32(42),
},
},
{
desc: "ancestor",
query: NewQuery("").Ancestor(NewKey(c, "kind", "Mummy", 0, nil)),
want: &pb.Query{
Ancestor: &pb.Reference{
App: proto.String("dev~fake-app"),
Path: &pb.Path{
Element: []*pb.Path_Element{{Type: proto.String("kind"), Name: proto.String("Mummy")}},
},
},
},
},
{
desc: "projection",
query: NewQuery("").Project("A", "B"),
want: &pb.Query{
PropertyName: []string{"A", "B"},
},
},
{
desc: "projection with distinct",
query: NewQuery("").Project("A", "B").Distinct(),
want: &pb.Query{
PropertyName: []string{"A", "B"},
GroupByPropertyName: []string{"A", "B"},
},
},
{
desc: "keys only",
query: NewQuery("").KeysOnly(),
want: &pb.Query{
KeysOnly: proto.Bool(true),
RequirePerfectPlan: proto.Bool(true),
},
},
{
desc: "empty filter",
query: NewQuery("kind").Filter("=", 17),
err: "empty query filter field nam",
},
{
desc: "bad filter type",
query: NewQuery("kind").Filter("M =", map[string]bool{}),
err: "bad query filter value type",
},
{
desc: "bad filter operator",
query: NewQuery("kind").Filter("I <<=", 17),
err: `invalid operator "<<=" in filter "I <<="`,
},
{
desc: "empty order",
query: NewQuery("kind").Order(""),
err: "empty order",
},
{
desc: "bad order direction",
query: NewQuery("kind").Order("+I"),
err: `invalid order: "+I`,
},
}
for _, tt := range testCases {
got = nil
if _, err := tt.query.Run(c).Next(nil); err != NoErr {
if tt.err == "" || !strings.Contains(err.Error(), tt.err) {
t.Errorf("%s: error %v, want %q", tt.desc, err, tt.err)
}
continue
}
if tt.err != "" {
t.Errorf("%s: no error, want %q", tt.desc, tt.err)
continue
}
// Fields that are common to all protos.
tt.want.App = proto.String("dev~fake-app")
tt.want.Compile = proto.Bool(true)
if !proto.Equal(got, tt.want) {
t.Errorf("%s:\ngot %v\nwant %v", tt.desc, got, tt.want)
}
}
}

View File

@ -0,0 +1,300 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"errors"
"fmt"
"math"
"reflect"
"time"
"github.com/golang/protobuf/proto"
"google.golang.org/appengine"
pb "google.golang.org/appengine/internal/datastore"
)
func toUnixMicro(t time.Time) int64 {
// We cannot use t.UnixNano() / 1e3 because we want to handle times more than
// 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
// be represented in the numerator of a single int64 divide.
return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
}
func fromUnixMicro(t int64) time.Time {
return time.Unix(t/1e6, (t%1e6)*1e3)
}
var (
minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
)
// valueToProto converts a named value to a newly allocated Property.
// The returned error string is empty on success.
func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) {
var (
pv pb.PropertyValue
unsupported bool
)
switch v.Kind() {
case reflect.Invalid:
// No-op.
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
pv.Int64Value = proto.Int64(v.Int())
case reflect.Bool:
pv.BooleanValue = proto.Bool(v.Bool())
case reflect.String:
pv.StringValue = proto.String(v.String())
case reflect.Float32, reflect.Float64:
pv.DoubleValue = proto.Float64(v.Float())
case reflect.Ptr:
if k, ok := v.Interface().(*Key); ok {
if k != nil {
pv.Referencevalue = keyToReferenceValue(defaultAppID, k)
}
} else {
unsupported = true
}
case reflect.Struct:
switch t := v.Interface().(type) {
case time.Time:
if t.Before(minTime) || t.After(maxTime) {
return nil, "time value out of range"
}
pv.Int64Value = proto.Int64(toUnixMicro(t))
case appengine.GeoPoint:
if !t.Valid() {
return nil, "invalid GeoPoint value"
}
// NOTE: Strangely, latitude maps to X, longitude to Y.
pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng}
default:
unsupported = true
}
case reflect.Slice:
if b, ok := v.Interface().([]byte); ok {
pv.StringValue = proto.String(string(b))
} else {
// nvToProto should already catch slice values.
// If we get here, we have a slice of slice values.
unsupported = true
}
default:
unsupported = true
}
if unsupported {
return nil, "unsupported datastore value type: " + v.Type().String()
}
p = &pb.Property{
Name: proto.String(name),
Value: &pv,
Multiple: proto.Bool(multiple),
}
if v.IsValid() {
switch v.Interface().(type) {
case []byte:
p.Meaning = pb.Property_BLOB.Enum()
case ByteString:
p.Meaning = pb.Property_BYTESTRING.Enum()
case appengine.BlobKey:
p.Meaning = pb.Property_BLOBKEY.Enum()
case time.Time:
p.Meaning = pb.Property_GD_WHEN.Enum()
case appengine.GeoPoint:
p.Meaning = pb.Property_GEORSS_POINT.Enum()
}
}
return p, ""
}
// saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer.
func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) {
var err error
var props []Property
if e, ok := src.(PropertyLoadSaver); ok {
props, err = e.Save()
} else {
props, err = SaveStruct(src)
}
if err != nil {
return nil, err
}
return propertiesToProto(defaultAppID, key, props)
}
func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error {
p := Property{
Name: name,
NoIndex: noIndex,
Multiple: multiple,
}
switch x := v.Interface().(type) {
case *Key:
p.Value = x
case time.Time:
p.Value = x
case appengine.BlobKey:
p.Value = x
case appengine.GeoPoint:
p.Value = x
case ByteString:
p.Value = x
default:
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p.Value = v.Int()
case reflect.Bool:
p.Value = v.Bool()
case reflect.String:
p.Value = v.String()
case reflect.Float32, reflect.Float64:
p.Value = v.Float()
case reflect.Slice:
if v.Type().Elem().Kind() == reflect.Uint8 {
p.NoIndex = true
p.Value = v.Bytes()
}
case reflect.Struct:
if !v.CanAddr() {
return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
}
sub, err := newStructPLS(v.Addr().Interface())
if err != nil {
return fmt.Errorf("datastore: unsupported struct field: %v", err)
}
return sub.(structPLS).save(props, name, noIndex, multiple)
}
}
if p.Value == nil {
return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
}
*props = append(*props, p)
return nil
}
func (s structPLS) Save() ([]Property, error) {
var props []Property
if err := s.save(&props, "", false, false); err != nil {
return nil, err
}
return props, nil
}
func (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error {
for i, t := range s.codec.byIndex {
if t.name == "-" {
continue
}
name := t.name
if prefix != "" {
name = prefix + name
}
v := s.v.Field(i)
if !v.IsValid() || !v.CanSet() {
continue
}
noIndex1 := noIndex || t.noIndex
// For slice fields that aren't []byte, save each element.
if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
for j := 0; j < v.Len(); j++ {
if err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil {
return err
}
}
continue
}
// Otherwise, save the field itself.
if err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil {
return err
}
}
return nil
}
func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) {
e := &pb.EntityProto{
Key: keyToProto(defaultAppID, key),
}
if key.parent == nil {
e.EntityGroup = &pb.Path{}
} else {
e.EntityGroup = keyToProto(defaultAppID, key.root()).Path
}
prevMultiple := make(map[string]bool)
for _, p := range props {
if pm, ok := prevMultiple[p.Name]; ok {
if !pm || !p.Multiple {
return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name)
}
} else {
prevMultiple[p.Name] = p.Multiple
}
x := &pb.Property{
Name: proto.String(p.Name),
Value: new(pb.PropertyValue),
Multiple: proto.Bool(p.Multiple),
}
switch v := p.Value.(type) {
case int64:
x.Value.Int64Value = proto.Int64(v)
case bool:
x.Value.BooleanValue = proto.Bool(v)
case string:
x.Value.StringValue = proto.String(v)
if p.NoIndex {
x.Meaning = pb.Property_TEXT.Enum()
}
case float64:
x.Value.DoubleValue = proto.Float64(v)
case *Key:
if v != nil {
x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v)
}
case time.Time:
if v.Before(minTime) || v.After(maxTime) {
return nil, fmt.Errorf("datastore: time value out of range")
}
x.Value.Int64Value = proto.Int64(toUnixMicro(v))
x.Meaning = pb.Property_GD_WHEN.Enum()
case appengine.BlobKey:
x.Value.StringValue = proto.String(string(v))
x.Meaning = pb.Property_BLOBKEY.Enum()
case appengine.GeoPoint:
if !v.Valid() {
return nil, fmt.Errorf("datastore: invalid GeoPoint value")
}
// NOTE: Strangely, latitude maps to X, longitude to Y.
x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng}
x.Meaning = pb.Property_GEORSS_POINT.Enum()
case []byte:
x.Value.StringValue = proto.String(string(v))
x.Meaning = pb.Property_BLOB.Enum()
if !p.NoIndex {
return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name)
}
case ByteString:
x.Value.StringValue = proto.String(string(v))
x.Meaning = pb.Property_BYTESTRING.Enum()
default:
if p.Value != nil {
return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name)
}
}
if p.NoIndex {
e.RawProperty = append(e.RawProperty, x)
} else {
e.Property = append(e.Property, x)
if len(e.Property) > maxIndexedProperties {
return nil, errors.New("datastore: too many indexed properties")
}
}
}
return e, nil
}

View File

@ -0,0 +1,65 @@
// Copyright 2012 Google Inc. All Rights Reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"testing"
"time"
)
func TestUnixMicro(t *testing.T) {
// Test that all these time.Time values survive a round trip to unix micros.
testCases := []time.Time{
{},
time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC),
time.Unix(-1e6, -1000),
time.Unix(-1e6, 0),
time.Unix(-1e6, +1000),
time.Unix(-60, -1000),
time.Unix(-60, 0),
time.Unix(-60, +1000),
time.Unix(-1, -1000),
time.Unix(-1, 0),
time.Unix(-1, +1000),
time.Unix(0, -3000),
time.Unix(0, -2000),
time.Unix(0, -1000),
time.Unix(0, 0),
time.Unix(0, +1000),
time.Unix(0, +2000),
time.Unix(+60, -1000),
time.Unix(+60, 0),
time.Unix(+60, +1000),
time.Unix(+1e6, -1000),
time.Unix(+1e6, 0),
time.Unix(+1e6, +1000),
time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC),
time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC),
time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC),
time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC),
}
for _, tc := range testCases {
got := fromUnixMicro(toUnixMicro(tc))
if !got.Equal(tc) {
t.Errorf("got %q, want %q", got, tc)
}
}
// Test that a time.Time that isn't an integral number of microseconds
// is not perfectly reconstructed after a round trip.
t0 := time.Unix(0, 123)
t1 := fromUnixMicro(toUnixMicro(t0))
if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 {
t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond())
}
}

View File

@ -0,0 +1,138 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"errors"
"github.com/golang/protobuf/proto"
"google.golang.org/appengine"
"google.golang.org/appengine/internal"
basepb "google.golang.org/appengine/internal/base"
pb "google.golang.org/appengine/internal/datastore"
)
func init() {
internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) {
x.Transaction = t
})
internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) {
x.Transaction = t
})
internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) {
x.Transaction = t
})
internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) {
x.Transaction = t
})
}
// ErrConcurrentTransaction is returned when a transaction is rolled back due
// to a conflict with a concurrent transaction.
var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")
type transaction struct {
appengine.Context
transaction pb.Transaction
finished bool
}
func (t *transaction) Call(service, method string, in, out proto.Message, opts *internal.CallOptions) error {
if t.finished {
return errors.New("datastore: transaction context has expired")
}
internal.ApplyTransaction(in, &t.transaction)
return t.Context.Call(service, method, in, out, opts)
}
func runOnce(c appengine.Context, f func(appengine.Context) error, opts *TransactionOptions) error {
// Begin the transaction.
t := &transaction{Context: c}
req := &pb.BeginTransactionRequest{
App: proto.String(c.FullyQualifiedAppID()),
}
if opts != nil && opts.XG {
req.AllowMultipleEg = proto.Bool(true)
}
if err := t.Context.Call("datastore_v3", "BeginTransaction", req, &t.transaction, nil); err != nil {
return err
}
// Call f, rolling back the transaction if f returns a non-nil error, or panics.
// The panic is not recovered.
defer func() {
if t.finished {
return
}
t.finished = true
// Ignore the error return value, since we are already returning a non-nil
// error (or we're panicking).
c.Call("datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}, nil)
}()
if err := f(t); err != nil {
return err
}
t.finished = true
// Commit the transaction.
res := &pb.CommitResponse{}
err := c.Call("datastore_v3", "Commit", &t.transaction, res, nil)
if ae, ok := err.(*internal.APIError); ok {
if appengine.IsDevAppServer() {
// The Python Dev AppServer raises an ApplicationError with error code 2 (which is
// Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
return ErrConcurrentTransaction
}
}
if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
return ErrConcurrentTransaction
}
}
return err
}
// RunInTransaction runs f in a transaction. It calls f with a transaction
// context tc that f should use for all App Engine operations.
//
// If f returns nil, RunInTransaction attempts to commit the transaction,
// returning nil if it succeeds. If the commit fails due to a conflicting
// transaction, RunInTransaction retries f, each time with a new transaction
// context. It gives up and returns ErrConcurrentTransaction after three
// failed attempts.
//
// If f returns non-nil, then any datastore changes will not be applied and
// RunInTransaction returns that same error. The function f is not retried.
//
// Note that when f returns, the transaction is not yet committed. Calling code
// must be careful not to assume that any of f's changes have been committed
// until RunInTransaction returns nil.
//
// Nested transactions are not supported; c may not be a transaction context.
func RunInTransaction(c appengine.Context, f func(tc appengine.Context) error, opts *TransactionOptions) error {
if _, ok := c.(*transaction); ok {
return errors.New("datastore: nested transactions are not supported")
}
for i := 0; i < 3; i++ {
if err := runOnce(c, f, opts); err != ErrConcurrentTransaction {
return err
}
}
return ErrConcurrentTransaction
}
// TransactionOptions are the options for running a transaction.
type TransactionOptions struct {
// XG is whether the transaction can cross multiple entity groups. In
// comparison, a single group transaction is one where all datastore keys
// used have the same root key. Note that cross group transactions do not
// have the same behavior as single group transactions. In particular, it
// is much more likely to see partially applied transactions in different
// entity groups, in global queries.
// It is valid to set XG to true even if the transaction is within a
// single entity group.
XG bool
}
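// exampleIncrement is an illustrative sketch added for this edit, not part of
// the vendored file. It shows the read-modify-write pattern RunInTransaction
// is intended for, using a hypothetical Counter entity and the package's Get
// and Put helpers (defined elsewhere in this package).
func exampleIncrement(c appengine.Context, key *Key) error {
	type counter struct {
		Count int64
	}
	return RunInTransaction(c, func(tc appengine.Context) error {
		var cnt counter
		if err := Get(tc, key, &cnt); err != nil && err != ErrNoSuchEntity {
			return err
		}
		cnt.Count++
		_, err := Put(tc, key, &cnt)
		return err
	}, nil) // pass &TransactionOptions{XG: true} to span multiple entity groups
}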

View File

@ -0,0 +1,275 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
/*
Package delay provides a way to execute code outside the scope of a
user request by using the taskqueue API.
To declare a function that may be executed later, call Func
in a top-level assignment context, passing it an arbitrary string key
and a function whose first argument is of type appengine.Context.
var laterFunc = delay.Func("key", myFunc)
It is also possible to use a function literal.
var laterFunc = delay.Func("key", func(c appengine.Context, x string) {
// ...
})
To call a function, invoke its Call method.
laterFunc.Call(c, "something")
A function may be called any number of times. If the function has any
return arguments, and the last one is of type error, the function may
return a non-nil error to signal that the function should be retried.
The arguments to functions may be of any type that is encodable by the gob
package. If an argument is of interface type, it is the client's responsibility
to register with the gob package whatever concrete type may be passed for that
argument; see http://golang.org/pkg/gob/#Register for details.
Any errors during initialization or execution of a function will be
logged to the application logs. Error logs that occur during initialization will
be associated with the request that invoked the Call method.
The state of a function invocation that has not yet successfully
executed is preserved by combining the file name in which it is declared
with the string key that was passed to the Func function. Updating an app
with pending function invocations is safe as long as the relevant
functions have the (filename, key) combination preserved.
The delay package uses the Task Queue API to create tasks that call the
reserved application path "/_ah/queue/go/delay".
This path must not be marked as "login: required" in app.yaml;
it must be marked as "login: admin" or have no access restriction.
*/
package delay
import (
"bytes"
"encoding/gob"
"errors"
"fmt"
"net/http"
"reflect"
"runtime"
"google.golang.org/appengine"
"google.golang.org/appengine/taskqueue"
)
// Function represents a function that may have a delayed invocation.
type Function struct {
fv reflect.Value // Kind() == reflect.Func
key string
err error // any error during initialization
}
const (
// The HTTP path for invocations.
path = "/_ah/queue/go/delay"
// Use the default queue.
queue = ""
)
var (
// registry of all delayed functions
funcs = make(map[string]*Function)
// precomputed types
contextType = reflect.TypeOf((*appengine.Context)(nil)).Elem()
errorType = reflect.TypeOf((*error)(nil)).Elem()
// errors
errFirstArg = errors.New("first argument must be appengine.Context")
)
// Func declares a new Function. The second argument must be a function with a
// first argument of type appengine.Context.
// This function must be called at program initialization time. That means it
// must be called in a global variable declaration or from an init function.
// This restriction is necessary because the instance that delays a function
// call may not be the one that executes it. Only the code executed at program
// initialization time is guaranteed to have been run by an instance before it
// receives a request.
func Func(key string, i interface{}) *Function {
f := &Function{fv: reflect.ValueOf(i)}
// Derive unique, somewhat stable key for this func.
_, file, _, _ := runtime.Caller(1)
f.key = file + ":" + key
t := f.fv.Type()
if t.Kind() != reflect.Func {
f.err = errors.New("not a function")
return f
}
if t.NumIn() == 0 || t.In(0) != contextType {
f.err = errFirstArg
return f
}
// Register the function's arguments with the gob package.
// This is required because they are marshaled inside a []interface{}.
// gob.Register only expects to be called during initialization;
// that's fine because this function expects the same.
for i := 0; i < t.NumIn(); i++ {
// Only concrete types may be registered. If the argument has
// interface type, the client is responsible for registering the
// concrete types it will hold.
if t.In(i).Kind() == reflect.Interface {
continue
}
gob.Register(reflect.Zero(t.In(i)).Interface())
}
funcs[f.key] = f
return f
}
type invocation struct {
Key string
Args []interface{}
}
// Call invokes a delayed function.
// f.Call(c, ...)
// is equivalent to
// t, _ := f.Task(...)
// taskqueue.Add(c, t, "")
func (f *Function) Call(c appengine.Context, args ...interface{}) {
t, err := f.Task(args...)
if err != nil {
c.Errorf("%v", err)
return
}
if _, err := taskqueueAdder(c, t, queue); err != nil {
c.Errorf("delay: taskqueue.Add failed: %v", err)
return
}
}
// Task creates a Task that will invoke the function.
// Its parameters may be tweaked before adding it to a queue.
// Users should not modify the Path or Payload fields of the returned Task.
func (f *Function) Task(args ...interface{}) (*taskqueue.Task, error) {
if f.err != nil {
return nil, fmt.Errorf("delay: func is invalid: %v", f.err)
}
nArgs := len(args) + 1 // +1 for the appengine.Context
ft := f.fv.Type()
minArgs := ft.NumIn()
if ft.IsVariadic() {
minArgs--
}
if nArgs < minArgs {
return nil, fmt.Errorf("delay: too few arguments to func: %d < %d", nArgs, minArgs)
}
if !ft.IsVariadic() && nArgs > minArgs {
return nil, fmt.Errorf("delay: too many arguments to func: %d > %d", nArgs, minArgs)
}
// Check arg types.
for i := 1; i < nArgs; i++ {
at := reflect.TypeOf(args[i-1])
var dt reflect.Type
if i < minArgs {
// not a variadic arg
dt = ft.In(i)
} else {
// a variadic arg
dt = ft.In(minArgs).Elem()
}
// nil arguments won't have a type, so they need special handling.
if at == nil {
// nil interface
switch dt.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
continue // may be nil
}
return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not nilable", i, dt)
}
switch at.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
av := reflect.ValueOf(args[i-1])
if av.IsNil() {
// nil value in interface; not supported by gob, so we replace it
// with a nil interface value
args[i-1] = nil
}
}
if !at.AssignableTo(dt) {
return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not assignable to %v", i, at, dt)
}
}
inv := invocation{
Key: f.key,
Args: args,
}
buf := new(bytes.Buffer)
if err := gob.NewEncoder(buf).Encode(inv); err != nil {
return nil, fmt.Errorf("delay: gob encoding failed: %v", err)
}
return &taskqueue.Task{
Path: path,
Payload: buf.Bytes(),
}, nil
}
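// exampleEnqueueOnQueue is an illustrative sketch added for this edit, not
// part of the vendored file. Instead of Call, it builds the Task explicitly so
// it can be added to a specific (hypothetical) queue; f is assumed to take
// (appengine.Context, string, int).
func exampleEnqueueOnQueue(c appengine.Context, f *Function) error {
	t, err := f.Task("hello", 42)
	if err != nil {
		return err
	}
	_, err = taskqueue.Add(c, t, "background")
	return err
}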
var taskqueueAdder = taskqueue.Add // for testing
func init() {
http.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) {
runFunc(appengine.NewContext(req), w, req)
})
}
func runFunc(c appengine.Context, w http.ResponseWriter, req *http.Request) {
defer req.Body.Close()
var inv invocation
if err := gob.NewDecoder(req.Body).Decode(&inv); err != nil {
c.Errorf("delay: failed decoding task payload: %v", err)
c.Warningf("delay: dropping task")
return
}
f := funcs[inv.Key]
if f == nil {
c.Errorf("delay: no func with key %q found", inv.Key)
c.Warningf("delay: dropping task")
return
}
ft := f.fv.Type()
in := []reflect.Value{reflect.ValueOf(c)}
for _, arg := range inv.Args {
var v reflect.Value
if arg != nil {
v = reflect.ValueOf(arg)
} else {
// Task was passed a nil argument, so we must construct
// the zero value for the argument here.
n := len(in) // we're constructing the nth argument
var at reflect.Type
if !ft.IsVariadic() || n < ft.NumIn()-1 {
at = ft.In(n)
} else {
at = ft.In(ft.NumIn() - 1).Elem()
}
v = reflect.Zero(at)
}
in = append(in, v)
}
out := f.fv.Call(in)
if n := ft.NumOut(); n > 0 && ft.Out(n-1) == errorType {
if errv := out[n-1]; !errv.IsNil() {
c.Errorf("delay: func failed (will retry): %v", errv.Interface())
w.WriteHeader(http.StatusInternalServerError)
return
}
}
}

View File

@ -0,0 +1,307 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package delay
import (
"bytes"
"encoding/gob"
"errors"
"fmt"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"google.golang.org/appengine"
"google.golang.org/appengine/taskqueue"
)
type CustomType struct {
N int
}
type CustomInterface interface {
N() int
}
type CustomImpl int
func (c CustomImpl) N() int { return int(c) }
// CustomImpl needs to be registered with gob.
func init() {
gob.Register(CustomImpl(0))
}
var (
invalidFunc = Func("invalid", func() {})
regFuncRuns = 0
regFuncMsg = ""
regFunc = Func("reg", func(c appengine.Context, arg string) {
regFuncRuns++
regFuncMsg = arg
})
custFuncTally = 0
custFunc = Func("cust", func(c appengine.Context, ct *CustomType, ci CustomInterface) {
a, b := 2, 3
if ct != nil {
a = ct.N
}
if ci != nil {
b = ci.N()
}
custFuncTally += a + b
})
anotherCustFunc = Func("cust2", func(c appengine.Context, n int, ct *CustomType, ci CustomInterface) {
})
varFuncMsg = ""
varFunc = Func("variadic", func(c appengine.Context, format string, args ...int) {
// convert []int to []interface{} for fmt.Sprintf.
as := make([]interface{}, len(args))
for i, a := range args {
as[i] = a
}
varFuncMsg = fmt.Sprintf(format, as...)
})
errFuncRuns = 0
errFuncErr = errors.New("error!")
errFunc = Func("err", func(c appengine.Context) error {
errFuncRuns++
if errFuncRuns == 1 {
return nil
}
return errFuncErr
})
)
type fakeContext struct {
appengine.Context
logging [][]interface{}
}
func (f *fakeContext) log(level, format string, args ...interface{}) {
f.logging = append(f.logging, append([]interface{}{level, format}, args...))
}
func (f *fakeContext) Infof(format string, args ...interface{}) { f.log("INFO", format, args...) }
func (f *fakeContext) Errorf(format string, args ...interface{}) { f.log("ERROR", format, args...) }
func TestInvalidFunction(t *testing.T) {
c := &fakeContext{}
invalidFunc.Call(c)
wantLogging := [][]interface{}{
{"ERROR", "%v", fmt.Errorf("delay: func is invalid: %s", errFirstArg)},
}
if !reflect.DeepEqual(c.logging, wantLogging) {
t.Errorf("Incorrect logging: got %+v, want %+v", c.logging, wantLogging)
}
}
func TestVariadicFunctionArguments(t *testing.T) {
// Check the argument type validation for variadic functions.
c := &fakeContext{}
calls := 0
taskqueueAdder = func(c appengine.Context, t *taskqueue.Task, _ string) (*taskqueue.Task, error) {
calls++
return t, nil
}
varFunc.Call(c, "hi")
varFunc.Call(c, "%d", 12)
varFunc.Call(c, "%d %d %d", 3, 1, 4)
if calls != 3 {
t.Errorf("Got %d calls to taskqueueAdder, want 3", calls)
}
varFunc.Call(c, "%d %s", 12, "a string is bad")
wantLogging := [][]interface{}{
{"ERROR", "%v", errors.New("delay: argument 3 has wrong type: string is not assignable to int")},
}
if !reflect.DeepEqual(c.logging, wantLogging) {
t.Errorf("Incorrect logging: got %+v, want %+v", c.logging, wantLogging)
}
}
func TestBadArguments(t *testing.T) {
// Try running regFunc with different sets of inappropriate arguments.
c := &fakeContext{}
regFunc.Call(c)
regFunc.Call(c, "lala", 53)
regFunc.Call(c, 53)
wantLogging := [][]interface{}{
{"ERROR", "%v", errors.New("delay: too few arguments to func: 1 < 2")},
{"ERROR", "%v", errors.New("delay: too many arguments to func: 3 > 2")},
{"ERROR", "%v", errors.New("delay: argument 1 has wrong type: int is not assignable to string")},
}
if !reflect.DeepEqual(c.logging, wantLogging) {
t.Errorf("Incorrect logging: got %+v, want %+v", c.logging, wantLogging)
}
}
func TestRunningFunction(t *testing.T) {
c := &fakeContext{}
// Fake out the adding of a task.
var task *taskqueue.Task
taskqueueAdder = func(_ appengine.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
if queue != "" {
t.Errorf(`Got queue %q, expected ""`, queue)
}
task = tk
return tk, nil
}
regFuncRuns, regFuncMsg = 0, "" // reset state
const msg = "Why, hello!"
regFunc.Call(c, msg)
// Simulate the Task Queue service.
req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
if err != nil {
t.Fatalf("Failed making http.Request: %v", err)
}
rw := httptest.NewRecorder()
runFunc(c, rw, req)
if regFuncRuns != 1 {
t.Errorf("regFuncRuns: got %d, want 1", regFuncRuns)
}
if regFuncMsg != msg {
t.Errorf("regFuncMsg: got %q, want %q", regFuncMsg, msg)
}
}
func TestCustomType(t *testing.T) {
c := &fakeContext{}
// Fake out the adding of a task.
var task *taskqueue.Task
taskqueueAdder = func(_ appengine.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
if queue != "" {
t.Errorf(`Got queue %q, expected ""`, queue)
}
task = tk
return tk, nil
}
custFuncTally = 0 // reset state
custFunc.Call(c, &CustomType{N: 11}, CustomImpl(13))
// Simulate the Task Queue service.
req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
if err != nil {
t.Fatalf("Failed making http.Request: %v", err)
}
rw := httptest.NewRecorder()
runFunc(c, rw, req)
if custFuncTally != 24 {
t.Errorf("custFuncTally = %d, want 24", custFuncTally)
}
// Try the same, but with nil values; one is a nil pointer (and thus a non-nil interface value),
// and the other is a nil interface value.
custFuncTally = 0 // reset state
custFunc.Call(c, (*CustomType)(nil), nil)
// Simulate the Task Queue service.
req, err = http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
if err != nil {
t.Fatalf("Failed making http.Request: %v", err)
}
rw = httptest.NewRecorder()
runFunc(c, rw, req)
if custFuncTally != 5 {
t.Errorf("custFuncTally = %d, want 5", custFuncTally)
}
}
func TestRunningVariadic(t *testing.T) {
c := &fakeContext{}
// Fake out the adding of a task.
var task *taskqueue.Task
taskqueueAdder = func(_ appengine.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
if queue != "" {
t.Errorf(`Got queue %q, expected ""`, queue)
}
task = tk
return tk, nil
}
varFuncMsg = "" // reset state
varFunc.Call(c, "Amiga %d has %d KB RAM", 500, 512)
// Simulate the Task Queue service.
req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
if err != nil {
t.Fatalf("Failed making http.Request: %v", err)
}
rw := httptest.NewRecorder()
runFunc(c, rw, req)
const expected = "Amiga 500 has 512 KB RAM"
if varFuncMsg != expected {
t.Errorf("varFuncMsg = %q, want %q", varFuncMsg, expected)
}
}
func TestErrorFunction(t *testing.T) {
c := &fakeContext{}
// Fake out the adding of a task.
var task *taskqueue.Task
taskqueueAdder = func(_ appengine.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
if queue != "" {
t.Errorf(`Got queue %q, expected ""`, queue)
}
task = tk
return tk, nil
}
errFunc.Call(c)
// Simulate the Task Queue service.
// The first call should succeed; the second call should fail.
{
req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
if err != nil {
t.Fatalf("Failed making http.Request: %v", err)
}
rw := httptest.NewRecorder()
runFunc(c, rw, req)
}
{
req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
if err != nil {
t.Fatalf("Failed making http.Request: %v", err)
}
rw := httptest.NewRecorder()
runFunc(c, rw, req)
if rw.Code != http.StatusInternalServerError {
t.Errorf("Got status code %d, want %d", rw.Code, http.StatusInternalServerError)
}
wantLogging := [][]interface{}{
{"ERROR", "delay: func failed (will retry): %v", errFuncErr},
}
if !reflect.DeepEqual(c.logging, wantLogging) {
t.Errorf("Incorrect logging: got %+v, want %+v", c.logging, wantLogging)
}
}
}
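The tests above exercise the delay package's Func/Call pair through a fake taskqueueAdder. Not part of the diff: a minimal usage sketch, assuming only the API shape shown in these tests (a function registered once at init time under a stable key, then invoked later from a task queue task); the package name, function name, and itemID argument are hypothetical.

package example // hypothetical package, not part of this diff

import (
	"google.golang.org/appengine"
	"google.golang.org/appengine/delay"
)

// expensiveWork is registered once, at init time; the key "expensive" must
// stay stable across app versions so already-queued tasks can still find it.
var expensiveWork = delay.Func("expensive", func(c appengine.Context, itemID int64) error {
	c.Infof("processing item %d from a task queue task", itemID)
	return nil // a non-nil error would make the task retry
})

func enqueue(c appengine.Context, itemID int64) {
	// Call serializes the arguments and adds a task to the default queue,
	// which is what the fake taskqueueAdder intercepts in the tests above.
	expensiveWork.Call(c, itemID)
}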

View File

@ -0,0 +1,19 @@
# Demo application for Managed VMs.
application: vm-guestbook
version: 1
runtime: go
vm: true
api_version: go1
manual_scaling:
instances: 1
handlers:
# Favicon. Without this, the browser hits this once per page view.
- url: /favicon.ico
static_files: favicon.ico
upload: favicon.ico
# Main app. All the real work is here.
- url: /.*
script: _go_app

Binary image file not shown (1.1 KiB).

View File

@ -0,0 +1,102 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package guestbook
import (
"html/template"
"net/http"
"time"
"google.golang.org/appengine"
"google.golang.org/appengine/datastore"
"google.golang.org/appengine/user"
)
var initTime time.Time
type Greeting struct {
Author string
Content string
Date time.Time
}
func init() {
http.HandleFunc("/", handleMainPage)
http.HandleFunc("/sign", handleSign)
}
// guestbookKey returns the key used for all guestbook entries.
func guestbookKey(c appengine.Context) *datastore.Key {
// The string "default_guestbook" here could be varied to have multiple guestbooks.
return datastore.NewKey(c, "Guestbook", "default_guestbook", 0, nil)
}
var tpl = template.Must(template.ParseGlob("templates/*.html"))
func handleMainPage(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" {
http.Error(w, "GET requests only", http.StatusMethodNotAllowed)
return
}
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
c := appengine.NewContext(r)
tic := time.Now()
q := datastore.NewQuery("Greeting").Ancestor(guestbookKey(c)).Order("-Date").Limit(10)
var gg []*Greeting
if _, err := q.GetAll(c, &gg); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
c.Errorf("GetAll: %v", err)
return
}
c.Infof("Datastore lookup took %s", time.Since(tic).String())
c.Infof("Rendering %d greetings", len(gg))
var email, logout, login string
if u := user.Current(c); u != nil {
logout, _ = user.LogoutURL(c, "/")
email = u.Email
} else {
login, _ = user.LoginURL(c, "/")
}
data := struct {
Greetings []*Greeting
Login, Logout, Email string
}{
Greetings: gg,
Login: login,
Logout: logout,
Email: email,
}
w.Header().Set("Content-Type", "text/html; charset=utf-8")
if err := tpl.ExecuteTemplate(w, "guestbook.html", data); err != nil {
c.Errorf("%v", err)
}
}
func handleSign(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
http.Error(w, "POST requests only", http.StatusMethodNotAllowed)
return
}
c := appengine.NewContext(r)
g := &Greeting{
Content: r.FormValue("content"),
Date: time.Now(),
}
if u := user.Current(c); u != nil {
g.Author = u.String()
}
key := datastore.NewIncompleteKey(c, "Greeting", guestbookKey(c))
if _, err := datastore.Put(c, key, g); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Redirect with 303 which causes the subsequent request to use GET.
http.Redirect(w, r, "/", http.StatusSeeOther)
}
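The guestbookKey comment above notes that the "default_guestbook" key name could be varied to get multiple guestbooks. Not part of the diff: a hedged sketch of one such variant, giving each signed-in user their own entity group; the helper name and fallback behaviour are assumptions, not anything this demo ships.

package guestbook // same package as the file above; a sketch, not part of this diff

import (
	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
	"google.golang.org/appengine/user"
)

// guestbookKeyFor is a hypothetical per-user variant of guestbookKey:
// each signed-in user gets their own entity group, while anonymous
// visitors share the default guestbook.
func guestbookKeyFor(c appengine.Context, u *user.User) *datastore.Key {
	name := "default_guestbook"
	if u != nil {
		name = "guestbook_" + u.ID
	}
	return datastore.NewKey(c, "Guestbook", name, 0, nil)
}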

View File

@ -0,0 +1,7 @@
indexes:
- kind: Greeting
ancestor: yes
properties:
- name: Date
direction: desc

View File

@ -0,0 +1,26 @@
<!DOCTYPE html>
<html>
<head>
<title>Guestbook Demo</title>
</head>
<body>
<p>
{{with .Email}}You are currently logged in as {{.}}.{{end}}
{{with .Login}}<a href="{{.}}">Sign in</a>{{end}}
{{with .Logout}}<a href="{{.}}">Sign out</a>{{end}}
</p>
{{range .Greetings }}
<p>
{{with .Author}}<b>{{.}}</b>{{else}}An anonymous person{{end}}
on <em>{{.Date.Format "3:04pm, Mon 2 Jan"}}</em>
wrote <blockquote>{{.Content}}</blockquote>
</p>
{{end}}
<form action="/sign" method="post">
<div><textarea name="content" rows="3" cols="60"></textarea></div>
<div><input type="submit" value="Sign Guestbook"></div>
</form>
</body>
</html>

View File

@ -0,0 +1,15 @@
application: helloworld
version: 1
runtime: go
api_version: go1
vm: true
manual_scaling:
instances: 1
handlers:
- url: /favicon.ico
static_files: favicon.ico
upload: favicon.ico
- url: /.*
script: _go_app

Binary image file not shown (1.1 KiB).

View File

@ -0,0 +1,45 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package helloworld
import (
"html/template"
"net/http"
"time"
"google.golang.org/appengine"
)
var initTime = time.Now()
func init() {
http.HandleFunc("/", handle)
}
func handle(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
c := appengine.NewContext(r)
c.Infof("Serving the front page.")
tmpl.Execute(w, time.Since(initTime))
}
var tmpl = template.Must(template.New("front").Parse(`
<html><body>
<p>
Hello, World! 세상아 안녕!
</p>
<p>
This instance has been running for <em>{{.}}</em>.
</p>
</body></html>
`))

View File

@ -0,0 +1,46 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// This file provides error functions for common API failure modes.
package appengine
import (
"fmt"
"google.golang.org/appengine/internal"
)
// IsOverQuota reports whether err represents an API call failure
// due to insufficient available quota.
func IsOverQuota(err error) bool {
callErr, ok := err.(*internal.CallError)
return ok && callErr.Code == 4
}
// MultiError is returned by batch operations when there are errors with
// particular elements. Errors will be in a one-to-one correspondence with
// the input elements; successful elements will have a nil entry.
type MultiError []error
func (m MultiError) Error() string {
s, n := "", 0
for _, e := range m {
if e != nil {
if n == 0 {
s = e.Error()
}
n++
}
}
switch n {
case 0:
return "(0 errors)"
case 1:
return s
case 2:
return s + " (and 1 other error)"
}
return fmt.Sprintf("%s (and %d other errors)", s, n-1)
}
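MultiError above only declares the type and its Error string; how callers unpack it is left implicit. Not part of the diff: a hedged sketch of caller-side handling, assuming some batch API that returns an appengine.MultiError whose slots line up with the inputs (logBatchErrors is a hypothetical name).

package example // hypothetical package, not part of this diff

import "google.golang.org/appengine"

// logBatchErrors unpacks an error returned by a batch call: if it is a
// MultiError, each non-nil slot corresponds to the input element at the
// same index; otherwise the whole batch failed for a single reason.
func logBatchErrors(c appengine.Context, err error) {
	if err == nil {
		return
	}
	if me, ok := err.(appengine.MultiError); ok {
		for i, e := range me {
			if e != nil {
				c.Errorf("element %d failed: %v", i, e)
			}
		}
		return
	}
	c.Errorf("batch call failed: %v", err)
}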

View File

@ -0,0 +1,26 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
// Package file provides helper functions for using Google Cloud Storage.
package file
import (
"fmt"
"google.golang.org/appengine"
aipb "google.golang.org/appengine/internal/app_identity"
)
// DefaultBucketName returns the name of this application's
// default Google Cloud Storage bucket.
func DefaultBucketName(c appengine.Context) (string, error) {
req := &aipb.GetDefaultGcsBucketNameRequest{}
res := &aipb.GetDefaultGcsBucketNameResponse{}
err := c.Call("app_identity_service", "GetDefaultGcsBucketName", req, res, nil)
if err != nil {
return "", fmt.Errorf("file: no default bucket name returned in RPC response: %v", res)
}
return res.GetDefaultGcsBucketName(), nil
}
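Not part of the diff: a minimal sketch of calling DefaultBucketName from a request handler, assuming only the function shown above; the handler itself and its route are hypothetical.

package example // hypothetical package, not part of this diff

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/file"
)

// bucketHandler reports the application's default Cloud Storage bucket name.
func bucketHandler(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	bucket, err := file.DefaultBucketName(c)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, "default GCS bucket: %s", bucket)
}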

Some files were not shown because too many files have changed in this diff.