Move deps from _workspace/ to vendor/

godep restore
pushd $GOPATH/src/github.com/appc/spec
git co master
popd
go get go4.org/errorutil
rm -rf Godeps
godep save ./...
git add vendor
git add -f $(git ls-files --other vendor/)
git co -- Godeps/LICENSES Godeps/.license_file_state Godeps/OWNERS
This commit is contained in:
Tim Hockin
2016-05-08 20:30:21 -07:00
parent 899f9b4e31
commit 3c0c5ed4e0
4400 changed files with 16739 additions and 376 deletions

View File

@@ -0,0 +1,29 @@
BigQuery Storage Driver
=======
[EXPERIMENTAL] Support for BigQuery backend as cAdvisor storage driver.
The current implementation takes a bunch of BigQuery-specific flags for authentication.
These will be merged into a single backend config.
To run the current version, the following flags need to be specified:
```
# Storage driver to use.
-storage_driver=bigquery
# Information about the server-to-server OAuth token.
# These can be obtained by creating a Service Account client id under `Google Developer API`
# service client id
-bq_id="XYZ.apps.googleusercontent.com"
# service email address
-bq_account="ABC@developer.gserviceaccount.com"
# path to pem key (converted from p12 file)
-bq_credentials_file="/path/to/key.pem"
# project id to use for storing datasets.
-bq_project_id="awesome_project"
```
See [Service account Authentication](https://developers.google.com/accounts/docs/OAuth2) for OAuth-related details.

View File

@@ -0,0 +1,312 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"os"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/storage"
"github.com/google/cadvisor/storage/bigquery/client"
bigquery "google.golang.org/api/bigquery/v2"
)
// init registers this driver with the storage layer under the name "bigquery",
// which is the value users pass via the -storage_driver flag.
func init() {
	storage.RegisterStorageDriver("bigquery", new)
}
// bigqueryStorage writes container stats to a BigQuery table; it implements
// storage.StorageDriver.
type bigqueryStorage struct {
	// client is the authenticated BigQuery connection; nil after Close.
	client *client.Client
	// machineName identifies the host and is recorded in every row.
	machineName string
}
const (
	// Bigquery schema types
	typeTimestamp string = "TIMESTAMP"
	typeString    string = "STRING"
	typeInteger   string = "INTEGER"

	// Column names; one per field of the table schema built in GetSchema.
	colTimestamp          string = "timestamp"
	colMachineName        string = "machine"
	colContainerName      string = "container_name"
	colCpuCumulativeUsage string = "cpu_cumulative_usage"
	// Cumulative Cpu usage in system and user mode
	colCpuCumulativeUsageSystem string = "cpu_cumulative_usage_system"
	colCpuCumulativeUsageUser   string = "cpu_cumulative_usage_user"
	// Memory usage
	colMemoryUsage string = "memory_usage"
	// Working set size
	colMemoryWorkingSet string = "memory_working_set"
	// Container page fault
	colMemoryContainerPgfault string = "memory_container_pgfault"
	// Container major page fault
	colMemoryContainerPgmajfault string = "memory_container_pgmajfault"
	// Hierarchical page fault
	colMemoryHierarchicalPgfault string = "memory_hierarchical_pgfault"
	// Hierarchical major page fault
	colMemoryHierarchicalPgmajfault string = "memory_hierarchical_pgmajfault"
	// Cumulative count of bytes received.
	colRxBytes string = "rx_bytes"
	// Cumulative count of receive errors encountered.
	colRxErrors string = "rx_errors"
	// Cumulative count of bytes transmitted.
	colTxBytes string = "tx_bytes"
	// Cumulative count of transmit errors encountered.
	colTxErrors string = "tx_errors"
	// Filesystem device. Explicitly typed string for consistency with the
	// other column constants (these three were previously untyped).
	colFsDevice string = "fs_device"
	// Filesystem limit.
	colFsLimit string = "fs_limit"
	// Filesystem available space.
	colFsUsage string = "fs_usage"
)
// new builds the driver instance registered in init, using the local hostname
// as the machine name and the shared storage flags for the BigQuery target.
func new() (storage.StorageDriver, error) {
	machineName, err := os.Hostname()
	if err != nil {
		return nil, err
	}
	// NOTE(review): ArgDbTable is passed as newStorage's datasetId and
	// ArgDbName as its tableName — the pairing looks swapped; confirm the
	// intended flag semantics before changing it.
	return newStorage(machineName, *storage.ArgDbTable, *storage.ArgDbName)
}
// GetSchema returns the BigQuery table schema for container stats. The three
// identity columns (timestamp, machine, container name) are REQUIRED; all
// metric columns are nullable.
// TODO(jnagal): Infer schema through reflection. (See bigquery/client/example)
func (self *bigqueryStorage) GetSchema() *bigquery.TableSchema {
	// A slice literal replaces the previous fixed-size array with a manually
	// incremented index: that pattern silently leaves nil entries (or panics)
	// whenever the hard-coded count drifts from the number of columns.
	fields := []*bigquery.TableFieldSchema{
		{Type: typeTimestamp, Name: colTimestamp, Mode: "REQUIRED"},
		{Type: typeString, Name: colMachineName, Mode: "REQUIRED"},
		{Type: typeString, Name: colContainerName, Mode: "REQUIRED"},
		{Type: typeInteger, Name: colCpuCumulativeUsage},
		{Type: typeInteger, Name: colCpuCumulativeUsageSystem},
		{Type: typeInteger, Name: colCpuCumulativeUsageUser},
		{Type: typeInteger, Name: colMemoryUsage},
		{Type: typeInteger, Name: colMemoryWorkingSet},
		{Type: typeInteger, Name: colMemoryContainerPgfault},
		{Type: typeInteger, Name: colMemoryContainerPgmajfault},
		{Type: typeInteger, Name: colMemoryHierarchicalPgfault},
		{Type: typeInteger, Name: colMemoryHierarchicalPgmajfault},
		{Type: typeInteger, Name: colRxBytes},
		{Type: typeInteger, Name: colRxErrors},
		{Type: typeInteger, Name: colTxBytes},
		{Type: typeInteger, Name: colTxErrors},
		{Type: typeString, Name: colFsDevice},
		{Type: typeInteger, Name: colFsLimit},
		{Type: typeInteger, Name: colFsUsage},
	}
	return &bigquery.TableSchema{
		Fields: fields,
	}
}
// containerStatsToRows converts one stats sample into a single BigQuery row
// keyed by the col* column names.
func (s *bigqueryStorage) containerStatsToRows(
	ref info.ContainerReference,
	stats *info.ContainerStats,
) map[string]interface{} {
	// Prefer the first alias as the container name when one exists.
	containerName := ref.Name
	if len(ref.Aliases) > 0 {
		containerName = ref.Aliases[0]
	}
	// TODO(jnagal): Handle per-cpu stats.
	return map[string]interface{}{
		colTimestamp:                    stats.Timestamp,
		colMachineName:                  s.machineName,
		colContainerName:                containerName,
		colCpuCumulativeUsage:           stats.Cpu.Usage.Total,
		colCpuCumulativeUsageSystem:     stats.Cpu.Usage.System,
		colCpuCumulativeUsageUser:       stats.Cpu.Usage.User,
		colMemoryUsage:                  stats.Memory.Usage,
		colMemoryWorkingSet:             stats.Memory.WorkingSet,
		colMemoryContainerPgfault:       stats.Memory.ContainerData.Pgfault,
		colMemoryContainerPgmajfault:    stats.Memory.ContainerData.Pgmajfault,
		colMemoryHierarchicalPgfault:    stats.Memory.HierarchicalData.Pgfault,
		colMemoryHierarchicalPgmajfault: stats.Memory.HierarchicalData.Pgmajfault,
		colRxBytes:                      stats.Network.RxBytes,
		colRxErrors:                     stats.Network.RxErrors,
		colTxBytes:                      stats.Network.TxBytes,
		colTxErrors:                     stats.Network.TxErrors,
	}
}
// containerFilesystemStatsToRows converts the per-filesystem stats of a
// sample into one BigQuery row per filesystem.
func (self *bigqueryStorage) containerFilesystemStatsToRows(
	ref info.ContainerReference,
	stats *info.ContainerStats,
) (rows []map[string]interface{}) {
	for _, fsStat := range stats.Filesystem {
		// Map literal instead of make(map[string]interface{}, 0): the zero
		// size hint was useless and the literal documents the row shape.
		rows = append(rows, map[string]interface{}{
			colFsDevice: fsStat.Device,
			colFsLimit:  fsStat.Limit,
			colFsUsage:  fsStat.Usage,
		})
	}
	return rows
}
// AddStats inserts one row of core container stats plus one row per
// filesystem into the BigQuery table. A nil stats sample is a no-op.
func (s *bigqueryStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {
	if stats == nil {
		return nil
	}
	// Core stats row first, then the per-filesystem rows; rows are inserted
	// in this order, one request each.
	rows := []map[string]interface{}{s.containerStatsToRows(ref, stats)}
	rows = append(rows, s.containerFilesystemStatsToRows(ref, stats)...)
	for _, row := range rows {
		if err := s.client.InsertRow(row); err != nil {
			return err
		}
	}
	return nil
}
// Close shuts down the underlying BigQuery client and drops the reference so
// later use of this driver fails fast instead of writing through a stale
// connection.
func (self *bigqueryStorage) Close() error {
	self.client.Close()
	self.client = nil
	return nil
}
// Create a new bigquery storage driver.
// machineName: A unique identifier to identify the host that current cAdvisor
// instance is running on.
// datasetId: BigQuery dataset to hold the table (created if missing).
// tableName: BigQuery table used for storing stats (created if missing, using
// the schema from GetSchema).
func newStorage(machineName, datasetId, tableName string) (storage.StorageDriver, error) {
	bqClient, err := client.NewClient()
	if err != nil {
		return nil, err
	}
	err = bqClient.CreateDataset(datasetId)
	if err != nil {
		return nil, err
	}
	ret := &bigqueryStorage{
		client:      bqClient,
		machineName: machineName,
	}
	schema := ret.GetSchema()
	err = bqClient.CreateTable(tableName, schema)
	if err != nil {
		return nil, err
	}
	return ret, nil
}

View File

@@ -0,0 +1,233 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
import (
"flag"
"fmt"
"io/ioutil"
"strings"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jwt"
bigquery "google.golang.org/api/bigquery/v2"
)
var (
	// Flags for service-account ("server-to-server") OAuth2; all are consumed
	// by connect(). clientId, serviceAccount, projectId and pemFile are
	// required; clientSecret defaults to the conventional "notasecret".
	// TODO(jnagal): Condense all flags to an identity file and a pem key file.
	clientId       = flag.String("bq_id", "", "Client ID")
	clientSecret   = flag.String("bq_secret", "notasecret", "Client Secret")
	projectId      = flag.String("bq_project_id", "", "Bigquery project ID")
	serviceAccount = flag.String("bq_account", "", "Service account email")
	pemFile        = flag.String("bq_credentials_file", "", "Credential Key file (pem)")
)
const (
	// errAlreadyExists is the substring of the BigQuery HTTP 409 error text
	// used by CreateDataset to treat creating an existing dataset as success.
	errAlreadyExists string = "Error 409: Already Exists"
)
// Client bundles an authenticated BigQuery service connection with the
// dataset and table that rows are written to.
type Client struct {
	// service is the BigQuery API handle; nil after Close.
	service *bigquery.Service
	// token backs service; getService reconnects when it expires.
	token *oauth2.Token
	// datasetId and tableId are set by CreateDataset / CreateTable.
	datasetId string
	tableId   string
}
// connect creates an authenticated BigQuery connection from the bq_* flags.
// It returns the OAuth2 token alongside the service handle so callers can
// detect token expiry (see Client.getService).
func connect() (*oauth2.Token, *bigquery.Service, error) {
	// Validate required flags up front so misconfiguration fails with a
	// specific message rather than an opaque auth error.
	if *clientId == "" {
		return nil, nil, fmt.Errorf("no client id specified")
	}
	if *serviceAccount == "" {
		return nil, nil, fmt.Errorf("no service account specified")
	}
	if *projectId == "" {
		return nil, nil, fmt.Errorf("no project id specified")
	}
	authScope := bigquery.BigqueryScope
	if *pemFile == "" {
		return nil, nil, fmt.Errorf("no credentials specified")
	}
	pemBytes, err := ioutil.ReadFile(*pemFile)
	if err != nil {
		// Fix: dereference the flag — printing pemFile itself formatted the
		// *string pointer address, not the configured path.
		return nil, nil, fmt.Errorf("could not access credential file %v - %v", *pemFile, err)
	}
	// Exchange the service-account key for an access token (two-legged OAuth).
	jwtConfig := &jwt.Config{
		Email:      *serviceAccount,
		Scopes:     []string{authScope},
		PrivateKey: pemBytes,
		TokenURL:   "https://accounts.google.com/o/oauth2/token",
	}
	token, err := jwtConfig.TokenSource(oauth2.NoContext).Token()
	if err != nil {
		return nil, nil, err
	}
	if !token.Valid() {
		return nil, nil, fmt.Errorf("invalid token for BigQuery oauth")
	}
	config := &oauth2.Config{
		ClientID:     *clientId,
		ClientSecret: *clientSecret,
		Scopes:       []string{authScope},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://accounts.google.com/o/oauth2/auth",
			TokenURL: "https://accounts.google.com/o/oauth2/token",
		},
	}
	client := config.Client(oauth2.NoContext, token)
	service, err := bigquery.New(client)
	if err != nil {
		fmt.Printf("Failed to create new service: %v\n", err)
		return nil, nil, err
	}
	return token, service, nil
}
// NewClient returns a Client holding an authenticated BigQuery connection
// built from the bq_* flags.
func NewClient() (*Client, error) {
	token, service, err := connect()
	if err != nil {
		return nil, err
	}
	return &Client{
		token:   token,
		service: service,
	}, nil
}
// Close drops the service connection; subsequent calls through getService
// fail with "service not initialized". The token field is left as-is.
func (c *Client) Close() error {
	c.service = nil
	return nil
}
// getService returns the BigQuery service connection, transparently
// reconnecting when the cached OAuth token has expired.
func (c *Client) getService() (*bigquery.Service, error) {
	if c.token == nil || c.service == nil {
		return nil, fmt.Errorf("service not initialized")
	}
	// Fast path: the cached token is still usable.
	if c.token.Valid() {
		return c.service, nil
	}
	// Token expired — establish a fresh connection and cache it.
	token, service, err := connect()
	if err != nil {
		return nil, err
	}
	c.token, c.service = token, service
	return service, nil
}
// PrintDatasets lists the configured project's datasets on stdout; intended
// for debugging (see the client example).
func (c *Client) PrintDatasets() error {
	datasetList, err := c.service.Datasets.List(*projectId).Do()
	if err != nil {
		fmt.Printf("Failed to get list of datasets\n")
		return err
	}
	// Early return above removes the `else` after return (Go idiom).
	fmt.Printf("Successfully retrieved datasets. Retrieved: %d\n", len(datasetList.Datasets))
	for _, d := range datasetList.Datasets {
		fmt.Printf("%s %s\n", d.Id, d.FriendlyName)
	}
	return nil
}
// CreateDataset creates datasetId in the configured project and remembers it
// for subsequent table operations. An already-existing dataset counts as
// success.
func (c *Client) CreateDataset(datasetId string) error {
	if c.service == nil {
		return fmt.Errorf("no service created")
	}
	dataset := &bigquery.Dataset{
		DatasetReference: &bigquery.DatasetReference{
			DatasetId: datasetId,
			ProjectId: *projectId,
		},
	}
	_, err := c.service.Datasets.Insert(*projectId, dataset).Do()
	// TODO(jnagal): Do a Get() to verify dataset already exists.
	if err != nil && !strings.Contains(err.Error(), errAlreadyExists) {
		return err
	}
	c.datasetId = datasetId
	return nil
}
// CreateTable ensures a table with the given ID exists in the current
// dataset, creating it with the provided schema when missing.
// Schema is currently not updated if the table already exists.
func (c *Client) CreateTable(tableId string, schema *bigquery.TableSchema) error {
	if c.service == nil || c.datasetId == "" {
		return fmt.Errorf("no dataset created")
	}
	if _, err := c.service.Tables.Get(*projectId, c.datasetId, tableId).Do(); err != nil {
		// Lookup failed; assume the table is missing and create it.
		table := &bigquery.Table{
			Schema: schema,
			TableReference: &bigquery.TableReference{
				DatasetId: c.datasetId,
				ProjectId: *projectId,
				TableId:   tableId,
			},
		}
		if _, err := c.service.Tables.Insert(*projectId, c.datasetId, table).Do(); err != nil {
			return err
		}
	}
	// TODO(jnagal): Update schema if it has changed. We can only extend existing schema.
	c.tableId = tableId
	return nil
}
// InsertRow streams one row into the connected table. CreateDataset and
// CreateTable must have succeeded first.
func (c *Client) InsertRow(rowData map[string]interface{}) error {
	// Fix: propagate getService failures (the error was previously discarded
	// with `_`, masking connection/refresh errors as a setup problem).
	service, err := c.getService()
	if err != nil {
		return err
	}
	if c.datasetId == "" || c.tableId == "" {
		return fmt.Errorf("table not setup to add rows")
	}
	jsonRows := make(map[string]bigquery.JsonValue, len(rowData))
	for key, value := range rowData {
		jsonRows[key] = bigquery.JsonValue(value)
	}
	rows := []*bigquery.TableDataInsertAllRequestRows{
		{
			Json: jsonRows,
		},
	}
	// TODO(jnagal): Batch insert requests.
	insertRequest := &bigquery.TableDataInsertAllRequest{Rows: rows}
	result, err := service.Tabledata.InsertAll(*projectId, c.datasetId, c.tableId, insertRequest).Do()
	if err != nil {
		return fmt.Errorf("error inserting row: %v", err)
	}
	if len(result.InsertErrors) > 0 {
		errstr := fmt.Sprintf("Insertion for %d rows failed\n", len(result.InsertErrors))
		for _, errors := range result.InsertErrors {
			for _, errorproto := range errors.Errors {
				errstr += fmt.Sprintf("Error inserting row %d: %+v\n", errors.Index, errorproto)
			}
		}
		// Fix: constant format string — fmt.Errorf(errstr) would misinterpret
		// any '%' in the BigQuery error text (go vet printf check).
		return fmt.Errorf("%s", errstr)
	}
	return nil
}

View File

@@ -0,0 +1,87 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"flag"
"fmt"
"time"
"github.com/SeanDolphin/bqschema"
"github.com/google/cadvisor/storage/bigquery/client"
)
// container is a sample row type used to demonstrate schema generation with
// bqschema and row insertion; field names double as column names.
type container struct {
	Name         string    `json:"name"`
	CpuUsage     uint64    `json:"cpuusage,omitempty"`
	MemoryUsage  uint64    `json:"memoryusage,omitempty"`
	NetworkUsage uint64    `json:"networkusage,omitempty"`
	Timestamp    time.Time `json:"timestamp"`
}
// main exercises the bigquery client end to end: connect, list existing
// datasets, create a sample dataset and table, then stream ten rows.
func main() {
	flag.Parse()
	c, err := client.NewClient()
	if err != nil {
		fmt.Printf("Failed to connect to bigquery\n")
		panic(err)
	}
	// Fix: report a listing failure instead of silently discarding the error.
	// The listing is purely informational, so keep going on failure.
	if err := c.PrintDatasets(); err != nil {
		fmt.Printf("Failed to list datasets: %v\n", err)
	}
	// Create a new dataset.
	err = c.CreateDataset("sampledataset")
	if err != nil {
		fmt.Printf("Failed to create dataset %v\n", err)
		panic(err)
	}
	// Create a new table
	containerData := container{
		Name:         "test_container",
		CpuUsage:     123456,
		MemoryUsage:  1024,
		NetworkUsage: 9046,
		Timestamp:    time.Now(),
	}
	schema, err := bqschema.ToSchema(containerData)
	if err != nil {
		fmt.Printf("Failed to create schema")
		panic(err)
	}
	err = c.CreateTable("sampletable", schema)
	if err != nil {
		fmt.Printf("Failed to create table")
		panic(err)
	}
	// Add ten rows, each one second apart with slightly varied metrics.
	m := make(map[string]interface{})
	t := time.Now()
	for i := 0; i < 10; i++ {
		m["Name"] = containerData.Name
		m["CpuUsage"] = containerData.CpuUsage + uint64(i*100)
		m["MemoryUsage"] = containerData.MemoryUsage - uint64(i*10)
		m["NetworkUsage"] = containerData.NetworkUsage + uint64(i*10)
		m["Timestamp"] = t.Add(time.Duration(i) * time.Second)
		err = c.InsertRow(m)
		if err != nil {
			fmt.Printf("Failed to insert row")
			panic(err)
		}
	}
}