Merge github.com:GoogleCloudPlatform/kubernetes

Avinash Sridharan
2015-07-31 22:09:47 -07:00
374 changed files with 11890 additions and 2620 deletions

Godeps/Godeps.json (generated)

@@ -34,6 +34,10 @@
		"Comment": "release-96",
		"Rev": "98c78185197025f935947caac56a7b6d022f89d2"
	},
+	{
+		"ImportPath": "github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata",
+		"Rev": "e34a32f9b0ecbc0784865fb2d47f3818c09521d4"
+	},
	{
		"ImportPath": "github.com/Sirupsen/logrus",
		"Comment": "v0.6.2-10-g51fe59a",
@@ -246,93 +250,93 @@
	},
	{
		"ImportPath": "github.com/google/cadvisor/api",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/cache/memory",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/collector",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/container",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/events",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/fs",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/healthz",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/http",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/info/v1",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/info/v2",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/manager",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/metrics",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/pages",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/storage",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/summary",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/utils",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/validate",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/cadvisor/version",
-		"Comment": "0.15.1",
-		"Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44"
+		"Comment": "0.16.0-51-g78419de",
+		"Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43"
	},
	{
		"ImportPath": "github.com/google/go-github/github",
@@ -425,10 +429,10 @@
		"ImportPath": "github.com/mitchellh/mapstructure",
		"Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf"
	},
	{
		"ImportPath": "github.com/mxk/go-flowrate/flowrate",
		"Rev": "cca7078d478f8520f85629ad7c68962d31ed7682"
	},
	{
		"ImportPath": "github.com/onsi/ginkgo",
		"Comment": "v1.2.0-6-gd981d36",
@@ -573,6 +577,10 @@
		"ImportPath": "gopkg.in/yaml.v2",
		"Rev": "d466437aa4adc35830964cffc5b5f262c63ddcb4"
	},
+	{
+		"ImportPath": "gopkg.in/natefinch/lumberjack.v2/",
+		"Rev": "20b71e5b60d756d3d2f80def009790325acc2b23"
+	},
	{
		"ImportPath": "speter.net/go/exp/math/dec/inf",
		"Rev": "42ca6cd68aa922bc3f32f1e056e61b65945d9ad7"

Godeps/_workspace/src/github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata/metadata.go

@@ -0,0 +1,279 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"strings"
"sync"
"time"
"google.golang.org/cloud/internal"
)
type cachedValue struct {
k string
trim bool
mu sync.Mutex
v string
}
var (
projID = &cachedValue{k: "project/project-id", trim: true}
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
instID = &cachedValue{k: "instance/id", trim: true}
)
var metaClient = &http.Client{
Transport: &internal.Transport{
Base: &http.Transport{
Dial: (&net.Dialer{
Timeout: 750 * time.Millisecond,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 750 * time.Millisecond,
},
},
}
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string
func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv("GCE_METADATA_HOST")
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = "169.254.169.254"
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
res, err := metaClient.Do(req)
if err != nil {
return "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
return string(all), nil
}
func getTrimmed(suffix string) (s string, err error) {
s, err = Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *cachedValue) get() (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = getTrimmed(c.k)
} else {
v, err = Get(c.k)
}
if err == nil {
c.v = v
}
return
}
var onGCE struct {
sync.Mutex
set bool
v bool
}
// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
defer onGCE.Unlock()
onGCE.Lock()
if onGCE.set {
return onGCE.v
}
onGCE.set = true
// We use the DNS name of the metadata service here instead of the IP address
// because we expect that to fail faster in the not-on-GCE case.
res, err := metaClient.Get("http://metadata.google.internal")
if err != nil {
return false
}
onGCE.v = res.Header.Get("Metadata-Flavor") == "Google"
return onGCE.v
}
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) {
return getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
var s []string
j, err := Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
return instID.get()
}
// InstanceName returns the current VM's instance name string.
func InstanceName() (string, error) {
host, err := Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) {
zone, err := getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
func lines(suffix string) ([]string, error) {
j, err := Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
return Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
return Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
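
The exported accessors above are thin wrappers over Get/getTrimmed. A minimal sketch of how a caller might use the vendored package (the import path matches the new Godeps entry; error handling abbreviated):

package main

import (
	"fmt"
	"log"

	"github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata"
)

func main() {
	// OnGCE probes the metadata server; off-GCE it simply returns false.
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE")
	}
	proj, err := metadata.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("project %s, zone %s\n", proj, zone)
}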

Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go

@@ -39,6 +39,7 @@ const (
	attributesApi = "attributes"
	versionApi    = "version"
	psApi         = "ps"
+	customMetricsApi = "appmetrics"
)

// Interface for a cAdvisor API version
@@ -305,7 +306,7 @@ func (self *version2_0) Version() string {
}

func (self *version2_0) SupportedRequestTypes() []string {
-	return []string{versionApi, attributesApi, eventsApi, machineApi, summaryApi, statsApi, specApi, storageApi, psApi}
+	return []string{versionApi, attributesApi, eventsApi, machineApi, summaryApi, statsApi, specApi, storageApi, psApi, customMetricsApi}
}

func (self *version2_0) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
@@ -364,6 +365,32 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
			contStats[name] = convertStats(cont)
		}
		return writeResult(contStats, w)
+	case customMetricsApi:
+		containerName := getContainerName(request)
+		glog.V(4).Infof("Api - Custom Metrics: Looking for metrics for container %q, options %+v", containerName, opt)
+		conts, err := m.GetRequestedContainersInfo(containerName, opt)
+		if err != nil {
+			return err
+		}
+		specs, err := m.GetContainerSpec(containerName, opt)
+		if err != nil {
+			return err
+		}
+		contMetrics := make(map[string]map[string][]info.MetricVal, 0)
+		for _, cont := range conts {
+			metrics := map[string][]info.MetricVal{}
+			contStats := convertStats(cont)
+			spec := specs[cont.Name]
+			for _, contStat := range contStats {
+				for _, ms := range spec.CustomMetrics {
+					if contStat.HasCustomMetrics && !contStat.CustomMetrics[ms.Name].Timestamp.IsZero() {
+						metrics[ms.Name] = append(metrics[ms.Name], contStat.CustomMetrics[ms.Name])
+					}
+				}
+			}
+			contMetrics[containerName] = metrics
+		}
+		return writeResult(contMetrics, w)
	case specApi:
		containerName := getContainerName(request)
		glog.V(4).Infof("Api - Spec for container %q, options %+v", containerName, opt)
@@ -412,12 +439,13 @@ func convertStats(cont *info.ContainerInfo) []v2.ContainerStats {
	stats := []v2.ContainerStats{}
	for _, val := range cont.Stats {
		stat := v2.ContainerStats{
			Timestamp:        val.Timestamp,
			HasCpu:           cont.Spec.HasCpu,
			HasMemory:        cont.Spec.HasMemory,
			HasNetwork:       cont.Spec.HasNetwork,
			HasFilesystem:    cont.Spec.HasFilesystem,
			HasDiskIo:        cont.Spec.HasDiskIo,
+			HasCustomMetrics: cont.Spec.HasCustomMetrics,
		}
		if stat.HasCpu {
			stat.Cpu = val.Cpu
@@ -434,6 +462,9 @@ func convertStats(cont *info.ContainerInfo) []v2.ContainerStats {
		if stat.HasDiskIo {
			stat.DiskIo = val.DiskIo
		}
+		if stat.HasCustomMetrics {
+			stat.CustomMetrics = val.CustomMetrics
+		}
		// TODO(rjnagal): Handle load stats.
		stats = append(stats, stat)
	}
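
The new appmetrics request type returns, per container, a map from metric name to the samples collected for it. A hedged sketch of reading it over the v2.0 REST API; the port and container path are placeholders, and the URL layout is assumed to follow the requestType/containerName pattern handled above:

package main

import (
	"encoding/json"
	"log"
	"net/http"

	"github.com/google/cadvisor/info/v1"
)

func main() {
	// Placeholder endpoint; assumed layout: /api/v2.0/<requestType>/<container...>.
	resp, err := http.Get("http://localhost:8080/api/v2.0/appmetrics/docker/mycontainer")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Shape mirrors contMetrics above: container name -> metric name -> samples.
	var metrics map[string]map[string][]v1.MetricVal
	if err := json.NewDecoder(resp.Body).Decode(&metrics); err != nil {
		log.Fatal(err)
	}
	log.Printf("%+v", metrics)
}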

Godeps/_workspace/src/github.com/google/cadvisor/collector/collector_manager.go

@@ -19,14 +19,15 @@ import (
	"strings"
	"time"

-	"github.com/google/cadvisor/info/v2"
+	"github.com/google/cadvisor/info/v1"
)

-type collectorManager struct {
-	collectors []*collectorData
-}
-
-var _ CollectorManager = &collectorManager{}
+const metricLabelPrefix = "io.cadvisor.metric."
+
+type GenericCollectorManager struct {
+	Collectors         []*collectorData
+	NextCollectionTime time.Time
+}

type collectorData struct {
	collector Collector
@@ -35,33 +36,54 @@ type collectorData struct {
// Returns a new CollectorManager that is thread-compatible.
func NewCollectorManager() (CollectorManager, error) {
-	return &collectorManager{
-		collectors: []*collectorData{},
+	return &GenericCollectorManager{
+		Collectors:         []*collectorData{},
+		NextCollectionTime: time.Now(),
	}, nil
}

+func GetCollectorConfigs(labels map[string]string) map[string]string {
+	configs := map[string]string{}
+	for k, v := range labels {
+		if strings.HasPrefix(k, metricLabelPrefix) {
+			name := strings.TrimPrefix(k, metricLabelPrefix)
+			configs[name] = v
+		}
+	}
+	return configs
+}
+
-func (cm *collectorManager) RegisterCollector(collector Collector) error {
-	cm.collectors = append(cm.collectors, &collectorData{
+func (cm *GenericCollectorManager) RegisterCollector(collector Collector) error {
+	cm.Collectors = append(cm.Collectors, &collectorData{
		collector:          collector,
		nextCollectionTime: time.Now(),
	})
	return nil
}

+func (cm *GenericCollectorManager) GetSpec() ([]v1.MetricSpec, error) {
+	metricSpec := []v1.MetricSpec{}
+	for _, c := range cm.Collectors {
+		specs := c.collector.GetSpec()
+		metricSpec = append(metricSpec, specs...)
+	}
+	return metricSpec, nil
+}
+
-func (cm *collectorManager) Collect() (time.Time, []v2.Metric, error) {
+func (cm *GenericCollectorManager) Collect() (time.Time, map[string]v1.MetricVal, error) {
	var errors []error

	// Collect from all collectors that are ready.
	var next time.Time
-	var metrics []v2.Metric
-	for _, c := range cm.collectors {
+	metrics := map[string]v1.MetricVal{}
+	for _, c := range cm.Collectors {
		if c.nextCollectionTime.Before(time.Now()) {
-			nextCollection, newMetrics, err := c.collector.Collect()
+			var err error
+			c.nextCollectionTime, metrics, err = c.collector.Collect(metrics)
			if err != nil {
				errors = append(errors, err)
			}
-			metrics = append(metrics, newMetrics...)
-			c.nextCollectionTime = nextCollection
		}

		// Keep track of the next collector that will be ready.
@@ -69,7 +91,7 @@ func (cm *collectorManager) Collect() (time.Time, []v2.Metric, error) {
			next = c.nextCollectionTime
		}
	}
+	cm.NextCollectionTime = next
	return next, metrics, compileErrors(errors)
}
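
GetCollectorConfigs is the discovery half of the feature: any container label whose key carries the io.cadvisor.metric. prefix names a collector config. A small self-contained sketch of the convention; the label value shown is a hypothetical config location, and the helper is re-declared here rather than imported:

package main

import (
	"fmt"
	"strings"
)

const metricLabelPrefix = "io.cadvisor.metric."

// Mirrors GetCollectorConfigs above: keep only labels with the metric prefix.
func getCollectorConfigs(labels map[string]string) map[string]string {
	configs := map[string]string{}
	for k, v := range labels {
		if strings.HasPrefix(k, metricLabelPrefix) {
			configs[strings.TrimPrefix(k, metricLabelPrefix)] = v
		}
	}
	return configs
}

func main() {
	labels := map[string]string{
		"io.cadvisor.metric.nginx": "/etc/collectors/nginx.json", // hypothetical path
		"com.example.unrelated":    "ignored",
	}
	fmt.Println(getCollectorConfigs(labels)) // map[nginx:/etc/collectors/nginx.json]
}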

Godeps/_workspace/src/github.com/google/cadvisor/collector/collector_manager_test.go

@@ -18,7 +18,7 @@ import (
	"testing"
	"time"

-	"github.com/google/cadvisor/info/v2"
+	"github.com/google/cadvisor/info/v1"
	"github.com/stretchr/testify/assert"
)

@@ -28,17 +28,21 @@ type fakeCollector struct {
	collectedFrom int
}

-func (fc *fakeCollector) Collect() (time.Time, []v2.Metric, error) {
+func (fc *fakeCollector) Collect(metric map[string]v1.MetricVal) (time.Time, map[string]v1.MetricVal, error) {
	fc.collectedFrom++
-	return fc.nextCollectionTime, []v2.Metric{}, fc.err
+	return fc.nextCollectionTime, metric, fc.err
}

func (fc *fakeCollector) Name() string {
	return "fake-collector"
}

+func (fc *fakeCollector) GetSpec() []v1.MetricSpec {
+	return []v1.MetricSpec{}
+}
+
func TestCollect(t *testing.T) {
-	cm := &collectorManager{}
+	cm := &GenericCollectorManager{}
	firstTime := time.Now().Add(-time.Hour)
	secondTime := time.Now().Add(time.Hour)

Godeps/_workspace/src/github.com/google/cadvisor/collector/config.go

@@ -0,0 +1,50 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"github.com/google/cadvisor/info/v1"
"time"
)
type Config struct {
//the endpoint to hit to scrape metrics
Endpoint string `json:"endpoint"`
//holds information about different metrics that can be collected
MetricsConfig []MetricConfig `json:"metrics_config"`
}
// MetricConfig holds information extracted from the config file about a metric
type MetricConfig struct {
//the name of the metric
Name string `json:"name"`
//enum type for the metric type
MetricType v1.MetricType `json:"metric_type"`
// metric units to display on UI and in storage (eg: MB, cores)
// this is only used for display.
Units string `json:"units"`
//data type of the metric (eg: int, float)
DataType v1.DataType `json:"data_type"`
//the frequency at which the metric should be collected
PollingFrequency time.Duration `json:"polling_frequency"`
//the regular expression that can be used to extract the metric
Regex string `json:"regex"`
}
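
One subtlety worth noting: PollingFrequency is a time.Duration, and encoding/json decodes a bare number into a Duration as a nanosecond count. So a config value of 10, as in the sample config below, parses as 10ns, which NewCollector then clamps up to its 1s minimum. A trimmed sketch of the decoding behavior (structs reduced to the relevant fields):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Trimmed copies of the structs above, enough to show the decoding.
type metricConfig struct {
	Name             string        `json:"name"`
	PollingFrequency time.Duration `json:"polling_frequency"`
}

type config struct {
	Endpoint      string         `json:"endpoint"`
	MetricsConfig []metricConfig `json:"metrics_config"`
}

func main() {
	raw := `{"endpoint":"http://localhost:8000/nginx_status",
	         "metrics_config":[{"name":"activeConnections","polling_frequency":10}]}`
	var c config
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	// time.Duration is an int64 nanosecond count, so 10 decodes as 10ns,
	// not 10s; NewCollector clamps the collector's minimum below 1s up to 1s.
	fmt.Println(c.MetricsConfig[0].PollingFrequency) // 10ns
}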

Godeps/_workspace/src/github.com/google/cadvisor/collector/config/sample_config.json

@@ -0,0 +1,34 @@
{
"endpoint" : "http://localhost:8000/nginx_status",
"metrics_config" : [
{ "name" : "activeConnections",
"metric_type" : "gauge",
"units" : "number of active connections",
"data_type" : "int",
"polling_frequency" : 10,
"regex" : "Active connections: ([0-9]+)"
},
{ "name" : "reading",
"metric_type" : "gauge",
"units" : "number of reading connections",
"data_type" : "int",
"polling_frequency" : 10,
"regex" : "Reading: ([0-9]+) .*"
},
{ "name" : "writing",
"metric_type" : "gauge",
"data_type" : "int",
"units" : "number of writing connections",
"polling_frequency" : 10,
"regex" : ".*Writing: ([0-9]+).*"
},
{ "name" : "waiting",
"metric_type" : "gauge",
"units" : "number of waiting connections",
"data_type" : "int",
"polling_frequency" : 10,
"regex" : ".*Waiting: ([0-9]+)"
}
]
}
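
Each regex's first capture group is the value that gets parsed into a metric. A quick sketch applying the first sample pattern to output shaped like nginx's stub_status page (the same shape the test file below fakes):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Typical stub_status output.
	page := "Active connections: 3\nserver accepts handled requests\n 5 5 32\nReading: 0 Writing: 1 Waiting: 2"

	// First capture group carries the value, as in the sample config above.
	re := regexp.MustCompile(`Active connections: ([0-9]+)`)
	m := re.FindStringSubmatch(page)
	fmt.Println(m[1]) // "3"
}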

Godeps/_workspace/src/github.com/google/cadvisor/collector/fakes.go

@@ -17,7 +17,7 @@ package collector
import (
	"time"

-	"github.com/google/cadvisor/info/v2"
+	"github.com/google/cadvisor/info/v1"
)

type FakeCollectorManager struct {
@@ -27,7 +27,11 @@ func (fkm *FakeCollectorManager) RegisterCollector(collector Collector) error {
	return nil
}

-func (fkm *FakeCollectorManager) Collect() (time.Time, []v2.Metric, error) {
-	var zero time.Time
-	return zero, []v2.Metric{}, nil
+func (fkm *FakeCollectorManager) GetSpec() ([]v1.MetricSpec, error) {
+	return []v1.MetricSpec{}, nil
+}
+
+func (fkm *FakeCollectorManager) Collect(metric map[string]v1.MetricVal) (time.Time, map[string]v1.MetricVal, error) {
+	var zero time.Time
+	return zero, metric, nil
}

Godeps/_workspace/src/github.com/google/cadvisor/collector/generic_collector.go

@@ -0,0 +1,165 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"github.com/google/cadvisor/info/v1"
)
type GenericCollector struct {
//name of the collector
name string
//holds information extracted from the config file for a collector
configFile Config
//holds information necessary to extract metrics
info *collectorInfo
}
type collectorInfo struct {
//minimum polling frequency among all metrics
minPollingFrequency time.Duration
//regular expressions for all metrics
regexps []*regexp.Regexp
}
//Returns a new collector using the information extracted from the config file
func NewCollector(collectorName string, configFile []byte) (*GenericCollector, error) {
var configInJSON Config
err := json.Unmarshal(configFile, &configInJSON)
if err != nil {
return nil, err
}
//TODO : Add checks for validity of config file (eg : Accurate JSON fields)
if len(configInJSON.MetricsConfig) == 0 {
return nil, fmt.Errorf("No metrics provided in config")
}
minPollFrequency := time.Duration(0)
regexprs := make([]*regexp.Regexp, len(configInJSON.MetricsConfig))
for ind, metricConfig := range configInJSON.MetricsConfig {
// Find the minimum specified polling frequency in metric config.
if metricConfig.PollingFrequency != 0 {
if minPollFrequency == 0 || metricConfig.PollingFrequency < minPollFrequency {
minPollFrequency = metricConfig.PollingFrequency
}
}
regexprs[ind], err = regexp.Compile(metricConfig.Regex)
if err != nil {
return nil, fmt.Errorf("Invalid regexp %v for metric %v", metricConfig.Regex, metricConfig.Name)
}
}
// Minimum supported polling frequency is 1s.
minSupportedFrequency := 1 * time.Second
if minPollFrequency < minSupportedFrequency {
minPollFrequency = minSupportedFrequency
}
return &GenericCollector{
name: collectorName,
configFile: configInJSON,
info: &collectorInfo{
minPollingFrequency: minPollFrequency,
regexps: regexprs},
}, nil
}
//Returns name of the collector
func (collector *GenericCollector) Name() string {
return collector.name
}
func (collector *GenericCollector) configToSpec(config MetricConfig) v1.MetricSpec {
return v1.MetricSpec{
Name: config.Name,
Type: config.MetricType,
Format: config.DataType,
Units: config.Units,
}
}
func (collector *GenericCollector) GetSpec() []v1.MetricSpec {
specs := []v1.MetricSpec{}
for _, metricConfig := range collector.configFile.MetricsConfig {
spec := collector.configToSpec(metricConfig)
specs = append(specs, spec)
}
return specs
}
//Returns collected metrics and the next collection time of the collector
func (collector *GenericCollector) Collect(metrics map[string]v1.MetricVal) (time.Time, map[string]v1.MetricVal, error) {
currentTime := time.Now()
nextCollectionTime := currentTime.Add(time.Duration(collector.info.minPollingFrequency))
uri := collector.configFile.Endpoint
response, err := http.Get(uri)
if err != nil {
return nextCollectionTime, nil, err
}
defer response.Body.Close()
pageContent, err := ioutil.ReadAll(response.Body)
if err != nil {
return nextCollectionTime, nil, err
}
var errorSlice []error
for ind, metricConfig := range collector.configFile.MetricsConfig {
matchString := collector.info.regexps[ind].FindStringSubmatch(string(pageContent))
if matchString != nil {
if metricConfig.DataType == v1.FloatType {
regVal, err := strconv.ParseFloat(strings.TrimSpace(matchString[1]), 64)
if err != nil {
errorSlice = append(errorSlice, err)
}
metrics[metricConfig.Name] = v1.MetricVal{
FloatValue: regVal, Timestamp: currentTime,
}
} else if metricConfig.DataType == v1.IntType {
regVal, err := strconv.ParseInt(strings.TrimSpace(matchString[1]), 10, 64)
if err != nil {
errorSlice = append(errorSlice, err)
}
metrics[metricConfig.Name] = v1.MetricVal{
IntValue: regVal, Timestamp: currentTime,
}
} else {
errorSlice = append(errorSlice, fmt.Errorf("Unexpected value of 'data_type' for metric '%v' in config ", metricConfig.Name))
}
} else {
errorSlice = append(errorSlice, fmt.Errorf("No match found for regexp: %v for metric '%v' in config", metricConfig.Regex, metricConfig.Name))
}
}
return nextCollectionTime, metrics, compileErrors(errorSlice)
}

Godeps/_workspace/src/github.com/google/cadvisor/collector/generic_collector_test.go

@@ -0,0 +1,167 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"testing"
"github.com/google/cadvisor/info/v1"
"github.com/stretchr/testify/assert"
)
func TestEmptyConfig(t *testing.T) {
assert := assert.New(t)
emptyConfig := `
{
"endpoint" : "http://localhost:8000/nginx_status",
"metrics_config" : [
]
}
`
//Create a temporary config file 'temp.json' with an empty metrics_config list
assert.NoError(ioutil.WriteFile("temp.json", []byte(emptyConfig), 0777))
configFile, err := ioutil.ReadFile("temp.json")
assert.NoError(err)
_, err = NewCollector("tempCollector", configFile)
assert.Error(err)
assert.NoError(os.Remove("temp.json"))
}
func TestConfigWithErrors(t *testing.T) {
assert := assert.New(t)
//Syntax error: Missed '"' after activeConnections
invalid := `
{
"endpoint" : "http://localhost:8000/nginx_status",
"metrics_config" : [
{
"name" : "activeConnections,
"metric_type" : "gauge",
"data_type" : "int",
"polling_frequency" : 10,
"regex" : "Active connections: ([0-9]+)"
}
]
}
`
//Create a temporary config file 'temp.json' with invalid json format
assert.NoError(ioutil.WriteFile("temp.json", []byte(invalid), 0777))
configFile, err := ioutil.ReadFile("temp.json")
assert.NoError(err)
_, err = NewCollector("tempCollector", configFile)
assert.Error(err)
assert.NoError(os.Remove("temp.json"))
}
func TestConfigWithRegexErrors(t *testing.T) {
assert := assert.New(t)
//Error: Missed operand for '+' in activeConnections regex
invalid := `
{
"endpoint" : "host:port/nginx_status",
"metrics_config" : [
{
"name" : "activeConnections",
"metric_type" : "gauge",
"data_type" : "int",
"polling_frequency" : 10,
"regex" : "Active connections: (+)"
},
{
"name" : "reading",
"metric_type" : "gauge",
"data_type" : "int",
"polling_frequency" : 10,
"regex" : "Reading: ([0-9]+) .*"
}
]
}
`
//Create a temporary config file 'temp.json'
assert.NoError(ioutil.WriteFile("temp.json", []byte(invalid), 0777))
configFile, err := ioutil.ReadFile("temp.json")
assert.NoError(err)
_, err = NewCollector("tempCollector", configFile)
assert.Error(err)
assert.NoError(os.Remove("temp.json"))
}
func TestConfig(t *testing.T) {
assert := assert.New(t)
//Create an nginx collector using the config file 'sample_config.json'
configFile, err := ioutil.ReadFile("config/sample_config.json")
assert.NoError(err)
collector, err := NewCollector("nginx", configFile)
assert.NoError(err)
assert.Equal(collector.name, "nginx")
assert.Equal(collector.configFile.Endpoint, "http://localhost:8000/nginx_status")
assert.Equal(collector.configFile.MetricsConfig[0].Name, "activeConnections")
}
func TestMetricCollection(t *testing.T) {
assert := assert.New(t)
//Collect nginx metrics from a fake nginx endpoint
configFile, err := ioutil.ReadFile("config/sample_config.json")
assert.NoError(err)
fakeCollector, err := NewCollector("nginx", configFile)
assert.NoError(err)
tempServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Active connections: 3\nserver accepts handled requests")
fmt.Fprintln(w, "5 5 32\nReading: 0 Writing: 1 Waiting: 2")
}))
defer tempServer.Close()
fakeCollector.configFile.Endpoint = tempServer.URL
metrics := map[string]v1.MetricVal{}
_, metrics, errMetric := fakeCollector.Collect(metrics)
assert.NoError(errMetric)
metricNames := []string{"activeConnections", "reading", "writing", "waiting"}
// activeConnections = 3
assert.Equal(metrics[metricNames[0]].IntValue, 3)
assert.Equal(metrics[metricNames[0]].FloatValue, 0)
// reading = 0
assert.Equal(metrics[metricNames[1]].IntValue, 0)
assert.Equal(metrics[metricNames[1]].FloatValue, 0)
// writing = 1
assert.Equal(metrics[metricNames[2]].IntValue, 1)
assert.Equal(metrics[metricNames[2]].FloatValue, 0)
// waiting = 2
assert.Equal(metrics[metricNames[3]].IntValue, 2)
assert.Equal(metrics[metricNames[3]].FloatValue, 0)
}

Godeps/_workspace/src/github.com/google/cadvisor/collector/types.go

@@ -15,7 +15,7 @@
package collector

import (
-	"github.com/google/cadvisor/info/v2"
+	"github.com/google/cadvisor/info/v1"
	"time"
)

@@ -27,7 +27,10 @@ type Collector interface {
	// Returns the next time this collector should be collected from.
	// Next collection time is always returned, even when an error occurs.
	// A collection time of zero means no more collection.
-	Collect() (time.Time, []v2.Metric, error)
+	Collect(map[string]v1.MetricVal) (time.Time, map[string]v1.MetricVal, error)
+
+	// Return spec for all metrics associated with this collector
+	GetSpec() []v1.MetricSpec

	// Name of this collector.
	Name() string

@@ -42,5 +45,8 @@ type CollectorManager interface {
	// at which a collector will be ready to collect from.
	// Next collection time is always returned, even when an error occurs.
	// A collection time of zero means no more collection.
-	Collect() (time.Time, []v2.Metric, error)
+	Collect() (time.Time, map[string]v1.MetricVal, error)
+
+	// Get metric spec from all registered collectors.
+	GetSpec() ([]v1.MetricSpec, error)
}
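
Any type satisfying this interface can be registered. A hedged sketch of a minimal implementer, assuming the MetricGauge constant defined alongside MetricDelta in info/v1; the collector, its metric name, and the 10s interval are invented for illustration:

package main

import (
	"fmt"
	"time"

	"github.com/google/cadvisor/info/v1"
)

// staticCollector is a hypothetical, minimal Collector: one gauge, fixed value.
type staticCollector struct{}

func (s *staticCollector) Name() string { return "static" }

func (s *staticCollector) GetSpec() []v1.MetricSpec {
	return []v1.MetricSpec{{Name: "answer", Type: v1.MetricGauge, Format: v1.IntType, Units: "count"}}
}

func (s *staticCollector) Collect(metrics map[string]v1.MetricVal) (time.Time, map[string]v1.MetricVal, error) {
	// Write into the shared map and report when we next want to be polled.
	metrics["answer"] = v1.MetricVal{IntValue: 42, Timestamp: time.Now()}
	return time.Now().Add(10 * time.Second), metrics, nil
}

func main() {
	c := &staticCollector{}
	next, m, err := c.Collect(map[string]v1.MetricVal{})
	fmt.Println(next, m["answer"].IntValue, err)
}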

Godeps/_workspace/src/github.com/google/cadvisor/container/container.go

@@ -73,6 +73,9 @@ type ContainerHandler interface {
	// Returns absolute cgroup path for the requested resource.
	GetCgroupPath(resource string) (string, error)

+	// Returns container labels, if available.
+	GetContainerLabels() map[string]string
+
	// Returns whether the container still exists.
	Exists() bool
}

Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go

@@ -167,7 +167,7 @@ func libcontainerConfigToContainerSpec(config *libcontainerConfigs.Config, mi *i
	}
	spec.Cpu.Mask = utils.FixCpuMask(config.Cgroups.CpusetCpus, mi.NumCores)

-	spec.HasNetwork = true
+	spec.HasNetwork = len(config.Networks) > 0
	spec.HasDiskIo = true

	return spec
@@ -276,7 +276,7 @@ func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {
}

func convertInterfaceStats(stats *info.InterfaceStats) {
-	net := stats
+	net := *stats

	// Ingress for host veth is from the container.
	// Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
@@ -332,6 +332,10 @@ func (self *dockerContainerHandler) ListThreads(listType container.ListType) ([]
	return nil, nil
}

+func (self *dockerContainerHandler) GetContainerLabels() map[string]string {
+	return self.labels
+}
+
func (self *dockerContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {
	return containerLibcontainer.GetProcesses(self.cgroupManager)
}
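
The net := *stats fix above is a pointer-aliasing repair: the function swaps ingress/egress counters, and with net aliasing stats the second assignment read a field the first had already overwritten. A self-contained sketch of the difference (field names simplified from InterfaceStats):

package main

import "fmt"

type interfaceStats struct {
	RxBytes, TxBytes uint64
}

func main() {
	s := &interfaceStats{RxBytes: 1, TxBytes: 2}

	// Buggy version: net aliases s, so the second assignment reads a
	// value the first assignment already overwrote.
	net := s
	s.RxBytes = net.TxBytes
	s.TxBytes = net.RxBytes
	fmt.Println(*s) // {2 2} -- TxBytes is corrupted

	// Fixed version (net := *stats in the diff): copy first, then swap.
	s = &interfaceStats{RxBytes: 1, TxBytes: 2}
	cp := *s
	s.RxBytes = cp.TxBytes
	s.TxBytes = cp.RxBytes
	fmt.Println(*s) // {2 1}
}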

Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go

@@ -93,7 +93,7 @@ func GetStats(cgroupManager cgroups.Manager, networkInterfaces []string) (*info.
		}
		stats.Network.Interfaces[i] = interfaceStats
	}
-	// For backwards compatability.
+	// For backwards compatibility.
	if len(networkInterfaces) > 0 {
		stats.Network.InterfaceStats = stats.Network.Interfaces[0]
	}
@@ -233,7 +233,7 @@ func toContainerStats3(libcontainerStats *libcontainer.Stats, ret *info.Containe
		}
	}
-	// Add to base struct for backwards compatability.
+	// Add to base struct for backwards compatibility.
	if len(ret.Network.Interfaces) > 0 {
		ret.Network.InterfaceStats = ret.Network.Interfaces[0]
	}

Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go

@@ -95,6 +95,11 @@ func (self *MockContainerHandler) GetCgroupPath(path string) (string, error) {
	return args.Get(0).(string), args.Error(1)
}

+func (self *MockContainerHandler) GetContainerLabels() map[string]string {
+	args := self.Called()
+	return args.Get(0).(map[string]string)
+}
+
type FactoryForMockContainerHandler struct {
	Name                        string
	PrepareContainerHandlerFunc func(name string, handler *MockContainerHandler)

Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go

@@ -33,6 +33,7 @@ import (
	"github.com/google/cadvisor/fs"
	info "github.com/google/cadvisor/info/v1"
	"github.com/google/cadvisor/utils"
+	"github.com/google/cadvisor/utils/machine"
	"golang.org/x/exp/inotify"
)

@@ -210,13 +211,33 @@ func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) {
		}
	}

-	// Memory.
-	memoryRoot, ok := self.cgroupPaths["memory"]
-	if ok {
-		if utils.FileExists(memoryRoot) {
-			spec.HasMemory = true
-			spec.Memory.Limit = readInt64(memoryRoot, "memory.limit_in_bytes")
-			spec.Memory.SwapLimit = readInt64(memoryRoot, "memory.memsw.limit_in_bytes")
+	// Memory
+	if self.name == "/" {
+		// Get memory and swap limits of the running machine
+		memLimit, err := machine.GetMachineMemoryCapacity()
+		if err != nil {
+			glog.Warningf("failed to obtain memory limit for machine container")
+			spec.HasMemory = false
+		} else {
+			spec.Memory.Limit = uint64(memLimit)
+			// Spec is marked to have memory only if the memory limit is set
+			spec.HasMemory = true
+		}
+
+		swapLimit, err := machine.GetMachineSwapCapacity()
+		if err != nil {
+			glog.Warningf("failed to obtain swap limit for machine container")
+		} else {
+			spec.Memory.SwapLimit = uint64(swapLimit)
+		}
+	} else {
+		memoryRoot, ok := self.cgroupPaths["memory"]
+		if ok {
+			if utils.FileExists(memoryRoot) {
+				spec.HasMemory = true
+				spec.Memory.Limit = readInt64(memoryRoot, "memory.limit_in_bytes")
+				spec.Memory.SwapLimit = readInt64(memoryRoot, "memory.memsw.limit_in_bytes")
+			}
		}
	}

@@ -335,6 +356,10 @@ func (self *rawContainerHandler) GetCgroupPath(resource string) (string, error)
	return path, nil
}

+func (self *rawContainerHandler) GetContainerLabels() map[string]string {
+	return map[string]string{}
+}
+
// Lists all directories under "path" and outputs the results as children of "parent".
func listDirectories(dirpath string, parent string, recursive bool, output map[string]struct{}) error {
	// Ignore if this hierarchy does not exist.
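
For the root container ("/"), the cgroup memory limit is a huge sentinel rather than the real capacity, so the spec now comes from machine capacity instead. GetMachineMemoryCapacity is cadvisor's helper and its body is not in this diff; a rough sketch of what such a helper plausibly does, assuming it reads MemTotal from /proc/meminfo:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strconv"
	"strings"
)

// Assumed behavior of machine.GetMachineMemoryCapacity: read MemTotal
// from /proc/meminfo and convert kB to bytes.
func machineMemoryBytes() (uint64, error) {
	f, err := os.Open("/proc/meminfo")
	if err != nil {
		return 0, err
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		fields := strings.Fields(sc.Text()) // e.g. ["MemTotal:", "16337652", "kB"]
		if len(fields) >= 2 && fields[0] == "MemTotal:" {
			kb, err := strconv.ParseUint(fields[1], 10, 64)
			if err != nil {
				return 0, err
			}
			return kb * 1024, nil
		}
	}
	return 0, fmt.Errorf("MemTotal not found")
}

func main() {
	b, err := machineMemoryBytes()
	fmt.Println(b, err)
}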

Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go

@@ -58,6 +58,9 @@ type ContainerSpec struct {
	// HasDiskIo when true, indicates that DiskIo stats will be available.
	HasDiskIo bool `json:"has_diskio"`

+	HasCustomMetrics bool         `json:"has_custom_metrics"`
+	CustomMetrics    []MetricSpec `json:"custom_metrics,omitempty"`
}

// Container reference contains enough information to uniquely identify a container
@@ -190,6 +193,9 @@ func (self *ContainerSpec) Eq(b *ContainerSpec) bool {
	if self.HasDiskIo != b.HasDiskIo {
		return false
	}
+	if self.HasCustomMetrics != b.HasCustomMetrics {
+		return false
+	}
	return true
}

@@ -419,6 +425,9 @@ type ContainerStats struct {
	// Task load stats
	TaskStats LoadStats `json:"task_stats,omitempty"`

+	// Custom metrics from all collectors
+	CustomMetrics map[string]MetricVal `json:"custom_metrics,omitempty"`
}

func timeEq(t1, t2 time.Time, tolerance time.Duration) bool {

Godeps/_workspace/src/github.com/google/cadvisor/info/v1/machine.go

@@ -112,6 +112,22 @@ type NetInfo struct {
	Mtu int64 `json:"mtu"`
}

+type CloudProvider string
+
+const (
+	GCE            CloudProvider = "GCE"
+	AWS                          = "AWS"
+	Baremetal                    = "Baremetal"
+	UnkownProvider               = "Unknown"
+)
+
+type InstanceType string
+
+const (
+	NoInstance      InstanceType = "None"
+	UnknownInstance              = "Unknown"
+)
+
type MachineInfo struct {
	// The number of cores in this machine.
	NumCores int `json:"num_cores"`
@@ -143,6 +159,12 @@ type MachineInfo struct {
	// Machine Topology
	// Describes cpu/memory layout and hierarchy.
	Topology []Node `json:"topology"`

+	// Cloud provider the machine belongs to.
+	CloudProvider CloudProvider `json:"cloud_provider"`
+
+	// Type of cloud instance (e.g. GCE standard) the machine is.
+	InstanceType InstanceType `json:"instance_type"`
}

type VersionInfo struct {
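
These fields dovetail with the newly vendored GCE metadata package: probing the metadata server is the natural way to fill CloudProvider. The detection code itself is not part of this hunk, so the detector below is hypothetical:

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata"
	"github.com/google/cadvisor/info/v1"
)

// Hypothetical detector; the actual cadvisor logic is not shown in this diff.
func detectCloudProvider() v1.CloudProvider {
	if metadata.OnGCE() {
		return v1.GCE
	}
	return v1.UnkownProvider
}

func main() {
	fmt.Println(detectCloudProvider())
}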

Godeps/_workspace/src/github.com/google/cadvisor/info/v1/metric.go (moved from info/v2/metric.go)

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-package v2
+package v1

import (
	"time"

@@ -32,38 +32,35 @@ const (
	MetricDelta = "delta"
)

-// An exported metric.
-type Metric struct {
+// DataType for metric being exported.
+type DataType string
+
+const (
+	IntType   DataType = "int"
+	FloatType          = "float"
+)
+
+// Spec for custom metric.
+type MetricSpec struct {
	// The name of the metric.
	Name string `json:"name"`

	// Type of the metric.
	Type MetricType `json:"type"`

-	// Metadata associated with this metric.
-	Labels map[string]string
+	// Data Type for the stats.
+	Format DataType `json:"format"`

-	// Value of the metric. Only one of these values will be
-	// available according to the output type of the metric.
-	// If no values are available, there are no data points.
-	IntPoints   []IntPoint   `json:"int_points,omitempty"`
-	FloatPoints []FloatPoint `json:"float_points,omitempty"`
+	// Display Units for the stats.
+	Units string `json:"units"`
}

-// An integer metric data point.
-type IntPoint struct {
+// An exported metric.
+type MetricVal struct {
	// Time at which the metric was queried
	Timestamp time.Time `json:"timestamp"`

	// The value of the metric at this point.
-	Value int64 `json:"value"`
-}
-
-// A float metric data point.
-type FloatPoint struct {
-	// Time at which the metric was queried
-	Timestamp time.Time `json:"timestamp"`
-
-	// The value of the metric at this point.
-	Value float64 `json:"value"`
+	IntValue   int64   `json:"int_value,omitempty"`
+	FloatValue float64 `json:"float_value,omitempty"`
}
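
A single MetricVal carries both an int and a float slot; the MetricSpec's Format tells consumers which one is meaningful, and omitempty keeps the unused slot off the wire. A small sketch (MetricGauge is the "gauge" constant assumed to sit alongside MetricDelta above):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/google/cadvisor/info/v1"
)

func main() {
	spec := v1.MetricSpec{Name: "activeConnections", Type: v1.MetricGauge, Format: v1.IntType, Units: "connections"}
	val := v1.MetricVal{IntValue: 3, Timestamp: time.Now()}

	// omitempty drops the unused FloatValue field on the wire.
	b, _ := json.Marshal(val)
	fmt.Println(spec.Format, string(b))
}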

Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go

@@ -73,6 +73,9 @@ type ContainerSpec struct {
	HasMemory bool       `json:"has_memory"`
	Memory    MemorySpec `json:"memory,omitempty"`

+	HasCustomMetrics bool            `json:"has_custom_metrics"`
+	CustomMetrics    []v1.MetricSpec `json:"custom_metrics,omitempty"`
+
	// Following resources have no associated spec, but are being isolated.
	HasNetwork    bool `json:"has_network"`
	HasFilesystem bool `json:"has_filesystem"`
@@ -100,6 +103,9 @@ type ContainerStats struct {
	// Task load statistics
	HasLoad bool         `json:"has_load"`
	Load    v1.LoadStats `json:"load_stats,omitempty"`

+	// Custom Metrics
+	HasCustomMetrics bool                    `json:"has_custom_metrics"`
+	CustomMetrics    map[string]v1.MetricVal `json:"custom_metrics,omitempty"`
}

type Percentiles struct {
@@ -110,8 +116,12 @@ type Percentiles struct {
	Mean uint64 `json:"mean"`

	// Max seen over the collected sample.
	Max uint64 `json:"max"`

+	// 50th percentile over the collected sample.
+	Fifty uint64 `json:"fifty"`
+
	// 90th percentile over the collected sample.
	Ninety uint64 `json:"ninety"`

+	// 95th percentile over the collected sample.
+	NinetyFive uint64 `json:"ninetyfive"`
}

type Usage struct {

Godeps/_workspace/src/github.com/google/cadvisor/info/v2/machine.go

@@ -59,6 +59,12 @@ type Attributes struct {
	// Machine Topology
	// Describes cpu/memory layout and hierarchy.
	Topology []v1.Node `json:"topology"`

+	// Cloud provider the machine belongs to
+	CloudProvider v1.CloudProvider `json:"cloud_provider"`
+
+	// Type of cloud instance (e.g. GCE standard) the machine is.
+	InstanceType v1.InstanceType `json:"instance_type"`
}

func GetAttributes(mi *v1.MachineInfo, vi *v1.VersionInfo) Attributes {
@@ -76,5 +82,7 @@ func GetAttributes(mi *v1.MachineInfo, vi *v1.VersionInfo) Attributes {
		DiskMap:        mi.DiskMap,
		NetworkDevices: mi.NetworkDevices,
		Topology:       mi.Topology,
+		CloudProvider:  mi.CloudProvider,
+		InstanceType:   mi.InstanceType,
	}
}

Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go

@@ -17,8 +17,10 @@ package manager
import ( import (
"flag" "flag"
"fmt" "fmt"
"io/ioutil"
"math" "math"
"os/exec" "os/exec"
"path"
"regexp" "regexp"
"sort" "sort"
"strconv" "strconv"
@@ -39,8 +41,6 @@ import (
// Housekeeping interval. // Housekeeping interval.
var HousekeepingInterval = flag.Duration("housekeeping_interval", 1*time.Second, "Interval between container housekeepings") var HousekeepingInterval = flag.Duration("housekeeping_interval", 1*time.Second, "Interval between container housekeepings")
var maxHousekeepingInterval = flag.Duration("max_housekeeping_interval", 60*time.Second, "Largest interval to allow between container housekeepings")
var allowDynamicHousekeeping = flag.Bool("allow_dynamic_housekeeping", true, "Whether to allow the housekeeping interval to be dynamic")
var cgroupPathRegExp = regexp.MustCompile(".*:devices:(.*?),.*") var cgroupPathRegExp = regexp.MustCompile(".*:devices:(.*?),.*")
@@ -54,16 +54,18 @@ type containerInfo struct {
} }
type containerData struct { type containerData struct {
handler container.ContainerHandler handler container.ContainerHandler
info containerInfo info containerInfo
memoryCache *memory.InMemoryCache memoryCache *memory.InMemoryCache
lock sync.Mutex lock sync.Mutex
loadReader cpuload.CpuLoadReader loadReader cpuload.CpuLoadReader
summaryReader *summary.StatsSummary summaryReader *summary.StatsSummary
loadAvg float64 // smoothed load average seen so far. loadAvg float64 // smoothed load average seen so far.
housekeepingInterval time.Duration housekeepingInterval time.Duration
lastUpdatedTime time.Time maxHousekeepingInterval time.Duration
lastErrorTime time.Time allowDynamicHousekeeping bool
lastUpdatedTime time.Time
lastErrorTime time.Time
// Whether to log the usage of this container when it is updated. // Whether to log the usage of this container when it is updated.
logUsage bool logUsage bool
@@ -136,11 +138,32 @@ func (c *containerData) getCgroupPath(cgroups string) (string, error) {
return string(matches[1]), nil return string(matches[1]), nil
} }
func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace bool) ([]v2.ProcessInfo, error) { // Returns contents of a file inside the container root.
// report all processes for root. // Takes in a path relative to container root.
isRoot := c.info.Name == "/" func (c *containerData) ReadFile(filepath string, inHostNamespace bool) ([]byte, error) {
// TODO(rjnagal): Take format as an option? pids, err := c.getContainerPids(inHostNamespace)
format := "user,pid,ppid,stime,pcpu,pmem,rss,vsz,stat,time,comm,cgroup" if err != nil {
return nil, err
}
// TODO(rjnagal): Optimize by just reading container's cgroup.proc file when in host namespace.
rootfs := "/"
if !inHostNamespace {
rootfs = "/rootfs"
}
for _, pid := range pids {
filePath := path.Join(rootfs, "/proc", pid, "/root", filepath)
glog.V(3).Infof("Trying path %q", filePath)
data, err := ioutil.ReadFile(filePath)
if err == nil {
return data, err
}
}
// No process paths could be found. Declare config non-existent.
return nil, fmt.Errorf("file %q does not exist.", filepath)
}
// Return output for ps command in host /proc with specified format
func (c *containerData) getPsOutput(inHostNamespace bool, format string) ([]byte, error) {
args := []string{} args := []string{}
command := "ps" command := "ps"
if !inHostNamespace { if !inHostNamespace {
@@ -148,11 +171,53 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace
args = append(args, "/rootfs", "ps") args = append(args, "/rootfs", "ps")
} }
args = append(args, "-e", "-o", format) args = append(args, "-e", "-o", format)
expectedFields := 12
out, err := exec.Command(command, args...).Output() out, err := exec.Command(command, args...).Output()
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to execute %q command: %v", command, err) return nil, fmt.Errorf("failed to execute %q command: %v", command, err)
} }
return out, err
}
// Get pids of processes in this container.
// A slightly lighterweight call than GetProcessList if other details are not required.
func (c *containerData) getContainerPids(inHostNamespace bool) ([]string, error) {
format := "pid,cgroup"
out, err := c.getPsOutput(inHostNamespace, format)
if err != nil {
return nil, err
}
expectedFields := 2
lines := strings.Split(string(out), "\n")
pids := []string{}
for _, line := range lines[1:] {
if len(line) == 0 {
continue
}
fields := strings.Fields(line)
if len(fields) < expectedFields {
return nil, fmt.Errorf("expected at least %d fields, found %d: output: %q", expectedFields, len(fields), line)
}
pid := fields[0]
cgroup, err := c.getCgroupPath(fields[1])
if err != nil {
return nil, fmt.Errorf("could not parse cgroup path from %q: %v", fields[1], err)
}
if c.info.Name == cgroup {
pids = append(pids, pid)
}
}
return pids, nil
}
func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace bool) ([]v2.ProcessInfo, error) {
// report all processes for root.
isRoot := c.info.Name == "/"
format := "user,pid,ppid,stime,pcpu,pmem,rss,vsz,stat,time,comm,cgroup"
out, err := c.getPsOutput(inHostNamespace, format)
if err != nil {
return nil, err
}
expectedFields := 12
processes := []v2.ProcessInfo{} processes := []v2.ProcessInfo{}
lines := strings.Split(string(out), "\n") lines := strings.Split(string(out), "\n")
for _, line := range lines[1:] { for _, line := range lines[1:] {
@@ -183,13 +248,17 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid rss %q: %v", fields[6], err) return nil, fmt.Errorf("invalid rss %q: %v", fields[6], err)
} }
// convert to bytes
rss *= 1024
vs, err := strconv.ParseUint(fields[7], 0, 64) vs, err := strconv.ParseUint(fields[7], 0, 64)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid virtual size %q: %v", fields[7], err) return nil, fmt.Errorf("invalid virtual size %q: %v", fields[7], err)
} }
// convert to bytes
vs *= 1024
cgroup, err := c.getCgroupPath(fields[11]) cgroup, err := c.getCgroupPath(fields[11])
if err != nil { if err != nil {
return nil, fmt.Errorf("could not parse cgroup path from %q: %v", fields[10], err) return nil, fmt.Errorf("could not parse cgroup path from %q: %v", fields[11], err)
} }
// Remove the ps command we just ran from cadvisor container. // Remove the ps command we just ran from cadvisor container.
// Not necessary, but makes the cadvisor page look cleaner. // Not necessary, but makes the cadvisor page look cleaner.
@@ -221,7 +290,7 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace
return processes, nil return processes, nil
} }
func newContainerData(containerName string, memoryCache *memory.InMemoryCache, handler container.ContainerHandler, loadReader cpuload.CpuLoadReader, logUsage bool, collectorManager collector.CollectorManager) (*containerData, error) { func newContainerData(containerName string, memoryCache *memory.InMemoryCache, handler container.ContainerHandler, loadReader cpuload.CpuLoadReader, logUsage bool, collectorManager collector.CollectorManager, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool) (*containerData, error) {
if memoryCache == nil { if memoryCache == nil {
return nil, fmt.Errorf("nil memory storage") return nil, fmt.Errorf("nil memory storage")
} }
@@ -234,14 +303,16 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h
} }
cont := &containerData{ cont := &containerData{
handler: handler, handler: handler,
memoryCache: memoryCache, memoryCache: memoryCache,
housekeepingInterval: *HousekeepingInterval, housekeepingInterval: *HousekeepingInterval,
loadReader: loadReader, maxHousekeepingInterval: maxHousekeepingInterval,
logUsage: logUsage, allowDynamicHousekeeping: allowDynamicHousekeeping,
loadAvg: -1.0, // negative value indicates uninitialized. loadReader: loadReader,
stop: make(chan bool, 1), logUsage: logUsage,
collectorManager: collectorManager, loadAvg: -1.0, // negative value indicates uninitialized.
stop: make(chan bool, 1),
collectorManager: collectorManager,
} }
cont.info.ContainerReference = ref cont.info.ContainerReference = ref
@@ -260,7 +331,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h
// Determine when the next housekeeping should occur. // Determine when the next housekeeping should occur.
func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Time { func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Time {
if *allowDynamicHousekeeping { if self.allowDynamicHousekeeping {
var empty time.Time var empty time.Time
stats, err := self.memoryCache.RecentStats(self.info.Name, empty, empty, 2) stats, err := self.memoryCache.RecentStats(self.info.Name, empty, empty, 2)
if err != nil { if err != nil {
@@ -270,10 +341,10 @@ func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Tim
} else if len(stats) == 2 { } else if len(stats) == 2 {
// TODO(vishnuk): Use no processes as a signal. // TODO(vishnuk): Use no processes as a signal.
// Raise the interval if usage hasn't changed in the last housekeeping. // Raise the interval if usage hasn't changed in the last housekeeping.
if stats[0].StatsEq(stats[1]) && (self.housekeepingInterval < *maxHousekeepingInterval) { if stats[0].StatsEq(stats[1]) && (self.housekeepingInterval < self.maxHousekeepingInterval) {
self.housekeepingInterval *= 2 self.housekeepingInterval *= 2
if self.housekeepingInterval > *maxHousekeepingInterval { if self.housekeepingInterval > self.maxHousekeepingInterval {
self.housekeepingInterval = *maxHousekeepingInterval self.housekeepingInterval = self.maxHousekeepingInterval
} }
} else if self.housekeepingInterval != *HousekeepingInterval { } else if self.housekeepingInterval != *HousekeepingInterval {
// Lower interval back to the baseline. // Lower interval back to the baseline.
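A minimal sketch of the dynamic-housekeeping policy this hunk moves off package flags and onto the containerData struct: double the interval while two consecutive stats samples are equal, cap it at the per-manager maximum, and snap back to the baseline once stats change. Names below are illustrative, not cAdvisor's:

```go
package main

import (
	"fmt"
	"time"
)

// nextInterval doubles the polling interval while stats are unchanged,
// capping it at max, and resets to baseline once stats change again.
func nextInterval(current, baseline, max time.Duration, statsUnchanged bool) time.Duration {
	if statsUnchanged {
		if current < max {
			current *= 2
			if current > max {
				current = max
			}
		}
		return current
	}
	return baseline
}

func main() {
	iv := time.Second
	for i := 0; i < 8; i++ {
		iv = nextInterval(iv, time.Second, 60*time.Second, true)
		fmt.Println(iv) // 2s, 4s, 8s, 16s, 32s, 1m0s, 1m0s, ...
	}
}
```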
@@ -340,19 +411,7 @@ func (c *containerData) housekeeping() {
} }
} }
// TODO(vmarmol): Export metrics. next := c.nextHousekeeping(lastHousekeeping)
// Run custom collectors.
nextCollectionTime, _, err := c.collectorManager.Collect()
if err != nil && c.allowErrorLogging() {
glog.Warningf("[%s] Collection failed: %v", c.info.Name, err)
}
// Next housekeeping is the first of the stats or the custom collector's housekeeping.
nextHousekeeping := c.nextHousekeeping(lastHousekeeping)
next := nextHousekeeping
if !nextCollectionTime.IsZero() && nextCollectionTime.Before(nextHousekeeping) {
next = nextCollectionTime
}
// Schedule the next housekeeping. Sleep until that time. // Schedule the next housekeeping. Sleep until that time.
if time.Now().Before(next) { if time.Now().Before(next) {
@@ -380,6 +439,12 @@ func (c *containerData) updateSpec() error {
} }
return err return err
} }
customMetrics, err := c.collectorManager.GetSpec()
if len(customMetrics) > 0 {
spec.HasCustomMetrics = true
spec.CustomMetrics = customMetrics
}
c.lock.Lock() c.lock.Lock()
defer c.lock.Unlock() defer c.lock.Unlock()
c.info.Spec = spec c.info.Spec = spec
@@ -432,6 +497,20 @@ func (c *containerData) updateStats() error {
glog.V(2).Infof("Failed to add summary stats for %q: %v", c.info.Name, err) glog.V(2).Infof("Failed to add summary stats for %q: %v", c.info.Name, err)
} }
} }
var customStatsErr error
cm := c.collectorManager.(*collector.GenericCollectorManager)
if len(cm.Collectors) > 0 {
if cm.NextCollectionTime.Before(time.Now()) {
customStats, err := c.updateCustomStats()
if customStats != nil {
stats.CustomMetrics = customStats
}
if err != nil {
customStatsErr = err
}
}
}
ref, err := c.handler.ContainerReference() ref, err := c.handler.ContainerReference()
if err != nil { if err != nil {
// Ignore errors if the container is dead. // Ignore errors if the container is dead.
@@ -444,7 +523,21 @@ func (c *containerData) updateStats() error {
if err != nil { if err != nil {
return err return err
} }
return statsErr if statsErr != nil {
return statsErr
}
return customStatsErr
}
func (c *containerData) updateCustomStats() (map[string]info.MetricVal, error) {
_, customStats, customStatsErr := c.collectorManager.Collect()
if customStatsErr != nil {
if !c.handler.Exists() {
return customStats, nil
}
customStatsErr = fmt.Errorf("%v, continuing to push custom stats", customStatsErr)
}
return customStats, customStatsErr
} }
func (c *containerData) updateSubcontainers() error { func (c *containerData) updateSubcontainers() error {

View File

@@ -41,7 +41,7 @@ func setupContainerData(t *testing.T, spec info.ContainerSpec) (*containerData,
nil, nil,
) )
memoryCache := memory.New(60, nil) memoryCache := memory.New(60, nil)
ret, err := newContainerData(containerName, memoryCache, mockHandler, nil, false, &collector.FakeCollectorManager{}) ret, err := newContainerData(containerName, memoryCache, mockHandler, nil, false, &collector.GenericCollectorManager{}, 60*time.Second, true)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@@ -17,10 +17,7 @@ package manager
import ( import (
"bytes" "bytes"
"flag" "flag"
"fmt"
"io/ioutil" "io/ioutil"
"regexp"
"strconv"
"strings" "strings"
"syscall" "syscall"
@@ -29,193 +26,16 @@ import (
"github.com/google/cadvisor/container/docker" "github.com/google/cadvisor/container/docker"
"github.com/google/cadvisor/fs" "github.com/google/cadvisor/fs"
info "github.com/google/cadvisor/info/v1" info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/utils" "github.com/google/cadvisor/utils/cloudinfo"
"github.com/google/cadvisor/utils/machine"
"github.com/google/cadvisor/utils/sysfs" "github.com/google/cadvisor/utils/sysfs"
"github.com/google/cadvisor/utils/sysinfo" "github.com/google/cadvisor/utils/sysinfo"
version "github.com/google/cadvisor/version" version "github.com/google/cadvisor/version"
) )
var cpuRegExp = regexp.MustCompile("processor\\t*: +([0-9]+)")
var coreRegExp = regexp.MustCompile("core id\\t*: +([0-9]+)")
var nodeRegExp = regexp.MustCompile("physical id\\t*: +([0-9]+)")
var CpuClockSpeedMHz = regexp.MustCompile("cpu MHz\\t*: +([0-9]+.[0-9]+)")
var memoryCapacityRegexp = regexp.MustCompile("MemTotal: *([0-9]+) kB")
var machineIdFilePath = flag.String("machine_id_file", "/etc/machine-id,/var/lib/dbus/machine-id", "Comma-separated list of files to check for machine-id. Use the first one that exists.") var machineIdFilePath = flag.String("machine_id_file", "/etc/machine-id,/var/lib/dbus/machine-id", "Comma-separated list of files to check for machine-id. Use the first one that exists.")
var bootIdFilePath = flag.String("boot_id_file", "/proc/sys/kernel/random/boot_id", "Comma-separated list of files to check for boot-id. Use the first one that exists.") var bootIdFilePath = flag.String("boot_id_file", "/proc/sys/kernel/random/boot_id", "Comma-separated list of files to check for boot-id. Use the first one that exists.")
func getClockSpeed(procInfo []byte) (uint64, error) {
// First look through sys to find a max supported cpu frequency.
const maxFreqFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq"
if utils.FileExists(maxFreqFile) {
val, err := ioutil.ReadFile(maxFreqFile)
if err != nil {
return 0, err
}
var maxFreq uint64
n, err := fmt.Sscanf(string(val), "%d", &maxFreq)
if err != nil || n != 1 {
return 0, fmt.Errorf("could not parse frequency %q", val)
}
return maxFreq, nil
}
// Fall back to /proc/cpuinfo
matches := CpuClockSpeedMHz.FindSubmatch(procInfo)
if len(matches) != 2 {
//Check if we are running on Power systems which have a different format
CpuClockSpeedMHz, _ = regexp.Compile("clock\\t*: +([0-9]+.[0-9]+)MHz")
matches = CpuClockSpeedMHz.FindSubmatch(procInfo)
if len(matches) != 2 {
return 0, fmt.Errorf("could not detect clock speed from output: %q", string(procInfo))
}
}
speed, err := strconv.ParseFloat(string(matches[1]), 64)
if err != nil {
return 0, err
}
// Convert to kHz
return uint64(speed * 1000), nil
}
func getMemoryCapacity(b []byte) (int64, error) {
matches := memoryCapacityRegexp.FindSubmatch(b)
if len(matches) != 2 {
return -1, fmt.Errorf("failed to find memory capacity in output: %q", string(b))
}
m, err := strconv.ParseInt(string(matches[1]), 10, 64)
if err != nil {
return -1, err
}
// Convert to bytes.
return m * 1024, err
}
func extractValue(s string, r *regexp.Regexp) (bool, int, error) {
matches := r.FindSubmatch([]byte(s))
if len(matches) == 2 {
val, err := strconv.ParseInt(string(matches[1]), 10, 32)
if err != nil {
return true, -1, err
}
return true, int(val), nil
}
return false, -1, nil
}
func findNode(nodes []info.Node, id int) (bool, int) {
for i, n := range nodes {
if n.Id == id {
return true, i
}
}
return false, -1
}
func addNode(nodes *[]info.Node, id int) (int, error) {
var idx int
if id == -1 {
// Some VMs don't fill topology data. Export single package.
id = 0
}
ok, idx := findNode(*nodes, id)
if !ok {
// New node
node := info.Node{Id: id}
// Add per-node memory information.
meminfo := fmt.Sprintf("/sys/devices/system/node/node%d/meminfo", id)
out, err := ioutil.ReadFile(meminfo)
// Ignore if per-node info is not available.
if err == nil {
m, err := getMemoryCapacity(out)
if err != nil {
return -1, err
}
node.Memory = uint64(m)
}
*nodes = append(*nodes, node)
idx = len(*nodes) - 1
}
return idx, nil
}
func getTopology(sysFs sysfs.SysFs, cpuinfo string) ([]info.Node, int, error) {
nodes := []info.Node{}
numCores := 0
lastThread := -1
lastCore := -1
lastNode := -1
for _, line := range strings.Split(cpuinfo, "\n") {
ok, val, err := extractValue(line, cpuRegExp)
if err != nil {
return nil, -1, fmt.Errorf("could not parse cpu info from %q: %v", line, err)
}
if ok {
thread := val
numCores++
if lastThread != -1 {
// New cpu section. Save last one.
nodeIdx, err := addNode(&nodes, lastNode)
if err != nil {
return nil, -1, fmt.Errorf("failed to add node %d: %v", lastNode, err)
}
nodes[nodeIdx].AddThread(lastThread, lastCore)
lastCore = -1
lastNode = -1
}
lastThread = thread
}
ok, val, err = extractValue(line, coreRegExp)
if err != nil {
return nil, -1, fmt.Errorf("could not parse core info from %q: %v", line, err)
}
if ok {
lastCore = val
}
ok, val, err = extractValue(line, nodeRegExp)
if err != nil {
return nil, -1, fmt.Errorf("could not parse node info from %q: %v", line, err)
}
if ok {
lastNode = val
}
}
nodeIdx, err := addNode(&nodes, lastNode)
if err != nil {
return nil, -1, fmt.Errorf("failed to add node %d: %v", lastNode, err)
}
nodes[nodeIdx].AddThread(lastThread, lastCore)
if numCores < 1 {
return nil, numCores, fmt.Errorf("could not detect any cores")
}
for idx, node := range nodes {
caches, err := sysinfo.GetCacheInfo(sysFs, node.Cores[0].Threads[0])
if err != nil {
glog.Errorf("failed to get cache information for node %d: %v", node.Id, err)
continue
}
numThreadsPerCore := len(node.Cores[0].Threads)
numThreadsPerNode := len(node.Cores) * numThreadsPerCore
for _, cache := range caches {
c := info.Cache{
Size: cache.Size,
Level: cache.Level,
Type: cache.Type,
}
if cache.Cpus == numThreadsPerNode && cache.Level > 2 {
// Add a node-level cache.
nodes[idx].AddNodeCache(c)
} else if cache.Cpus == numThreadsPerCore {
// Add to each core.
nodes[idx].AddPerCoreCache(c)
}
// Ignore unknown caches.
}
}
return nodes, numCores, nil
}
func getInfoFromFiles(filePaths string) string { func getInfoFromFiles(filePaths string) string {
if len(filePaths) == 0 { if len(filePaths) == 0 {
return "" return ""
@@ -232,18 +52,12 @@ func getInfoFromFiles(filePaths string) string {
func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo) (*info.MachineInfo, error) { func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo) (*info.MachineInfo, error) {
cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo") cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo")
clockSpeed, err := getClockSpeed(cpuinfo) clockSpeed, err := machine.GetClockSpeed(cpuinfo)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Get the amount of usable memory from /proc/meminfo. memoryCapacity, err := machine.GetMachineMemoryCapacity()
out, err := ioutil.ReadFile("/proc/meminfo")
if err != nil {
return nil, err
}
memoryCapacity, err := getMemoryCapacity(out)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@@ -263,7 +77,7 @@ func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo) (*info.MachineInfo, err
glog.Errorf("Failed to get network devices: %v", err) glog.Errorf("Failed to get network devices: %v", err)
} }
topology, numCores, err := getTopology(sysFs, string(cpuinfo)) topology, numCores, err := machine.GetTopology(sysFs, string(cpuinfo))
if err != nil { if err != nil {
glog.Errorf("Failed to get topology information: %v", err) glog.Errorf("Failed to get topology information: %v", err)
} }
@@ -273,6 +87,10 @@ func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo) (*info.MachineInfo, err
glog.Errorf("Failed to get system UUID: %v", err) glog.Errorf("Failed to get system UUID: %v", err)
} }
realCloudInfo := cloudinfo.NewRealCloudInfo()
cloudProvider := realCloudInfo.GetCloudProvider()
instanceType := realCloudInfo.GetInstanceType()
machineInfo := &info.MachineInfo{ machineInfo := &info.MachineInfo{
NumCores: numCores, NumCores: numCores,
CpuFrequency: clockSpeed, CpuFrequency: clockSpeed,
@@ -283,6 +101,8 @@ func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo) (*info.MachineInfo, err
MachineID: getInfoFromFiles(*machineIdFilePath), MachineID: getInfoFromFiles(*machineIdFilePath),
SystemUUID: systemUUID, SystemUUID: systemUUID,
BootID: getInfoFromFiles(*bootIdFilePath), BootID: getInfoFromFiles(*bootIdFilePath),
CloudProvider: cloudProvider,
InstanceType: instanceType,
} }
for _, fs := range filesystems { for _, fs := range filesystems {

View File

@@ -114,7 +114,7 @@ type Manager interface {
} }
// New takes a memory storage and returns a new manager. // New takes a memory storage and returns a new manager.
func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs) (Manager, error) { func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool) (Manager, error) {
if memoryCache == nil { if memoryCache == nil {
return nil, fmt.Errorf("manager requires memory storage") return nil, fmt.Errorf("manager requires memory storage")
} }
@@ -139,13 +139,15 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs) (Manager, error)
inHostNamespace = true inHostNamespace = true
} }
newManager := &manager{ newManager := &manager{
containers: make(map[namespacedContainerName]*containerData), containers: make(map[namespacedContainerName]*containerData),
quitChannels: make([]chan error, 0, 2), quitChannels: make([]chan error, 0, 2),
memoryCache: memoryCache, memoryCache: memoryCache,
fsInfo: fsInfo, fsInfo: fsInfo,
cadvisorContainer: selfContainer, cadvisorContainer: selfContainer,
inHostNamespace: inHostNamespace, inHostNamespace: inHostNamespace,
startupTime: time.Now(), startupTime: time.Now(),
maxHousekeepingInterval: maxHousekeepingInterval,
allowDynamicHousekeeping: allowDynamicHousekeeping,
} }
machineInfo, err := getMachineInfo(sysfs, fsInfo) machineInfo, err := getMachineInfo(sysfs, fsInfo)
@@ -176,19 +178,21 @@ type namespacedContainerName struct {
} }
type manager struct { type manager struct {
containers map[namespacedContainerName]*containerData containers map[namespacedContainerName]*containerData
containersLock sync.RWMutex containersLock sync.RWMutex
memoryCache *memory.InMemoryCache memoryCache *memory.InMemoryCache
fsInfo fs.FsInfo fsInfo fs.FsInfo
machineInfo info.MachineInfo machineInfo info.MachineInfo
versionInfo info.VersionInfo versionInfo info.VersionInfo
quitChannels []chan error quitChannels []chan error
cadvisorContainer string cadvisorContainer string
inHostNamespace bool inHostNamespace bool
dockerContainersRegexp *regexp.Regexp dockerContainersRegexp *regexp.Regexp
loadReader cpuload.CpuLoadReader loadReader cpuload.CpuLoadReader
eventHandler events.EventManager eventHandler events.EventManager
startupTime time.Time startupTime time.Time
maxHousekeepingInterval time.Duration
allowDynamicHousekeeping bool
} }
// Start the container manager. // Start the container manager.
@@ -371,12 +375,13 @@ func (self *manager) GetContainerSpec(containerName string, options v2.RequestOp
func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec { func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec {
specV1 := self.getAdjustedSpec(cinfo) specV1 := self.getAdjustedSpec(cinfo)
specV2 := v2.ContainerSpec{ specV2 := v2.ContainerSpec{
CreationTime: specV1.CreationTime, CreationTime: specV1.CreationTime,
HasCpu: specV1.HasCpu, HasCpu: specV1.HasCpu,
HasMemory: specV1.HasMemory, HasMemory: specV1.HasMemory,
HasFilesystem: specV1.HasFilesystem, HasFilesystem: specV1.HasFilesystem,
HasNetwork: specV1.HasNetwork, HasNetwork: specV1.HasNetwork,
HasDiskIo: specV1.HasDiskIo, HasDiskIo: specV1.HasDiskIo,
HasCustomMetrics: specV1.HasCustomMetrics,
} }
if specV1.HasCpu { if specV1.HasCpu {
specV2.Cpu.Limit = specV1.Cpu.Limit specV2.Cpu.Limit = specV1.Cpu.Limit
@@ -388,6 +393,9 @@ func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec {
specV2.Memory.Reservation = specV1.Memory.Reservation specV2.Memory.Reservation = specV1.Memory.Reservation
specV2.Memory.SwapLimit = specV1.Memory.SwapLimit specV2.Memory.SwapLimit = specV1.Memory.SwapLimit
} }
if specV1.HasCustomMetrics {
specV2.CustomMetrics = specV1.CustomMetrics
}
specV2.Aliases = cinfo.Aliases specV2.Aliases = cinfo.Aliases
specV2.Namespace = cinfo.Namespace specV2.Namespace = cinfo.Namespace
return specV2 return specV2
@@ -689,6 +697,28 @@ func (m *manager) GetProcessList(containerName string, options v2.RequestOptions
return ps, nil return ps, nil
} }
func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *containerData) error {
for k, v := range collectorConfigs {
configFile, err := cont.ReadFile(v, m.inHostNamespace)
if err != nil {
return fmt.Errorf("failed to read config file %q for config %q, container %q: %v", k, v, cont.info.Name, err)
}
glog.V(3).Infof("Got config from %q: %q", v, configFile)
newCollector, err := collector.NewCollector(k, configFile)
if err != nil {
glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err)
return err
}
err = cont.collectorManager.RegisterCollector(newCollector)
if err != nil {
glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err)
return err
}
}
return nil
}
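The configs fed into this loop come from container labels via collector.GetCollectorConfigs, whose key convention is not part of this diff. A purely hypothetical sketch of that shape — the "io.cadvisor.collector." prefix is an assumption, not the real convention:

```go
package main

import (
	"fmt"
	"strings"
)

// getCollectorConfigs is a hypothetical stand-in for
// collector.GetCollectorConfigs: map collector names to config file paths
// found in container labels. The prefix below is an assumption.
func getCollectorConfigs(labels map[string]string) map[string]string {
	const prefix = "io.cadvisor.collector."
	configs := map[string]string{}
	for k, v := range labels {
		if strings.HasPrefix(k, prefix) {
			// Collector name -> path of its config file inside the container.
			configs[strings.TrimPrefix(k, prefix)] = v
		}
	}
	return configs
}

func main() {
	labels := map[string]string{"io.cadvisor.collector.nginx": "/etc/nginx-collector.json"}
	fmt.Println(getCollectorConfigs(labels)) // map[nginx:/etc/nginx-collector.json]
}
```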
// Create a container. // Create a container.
func (m *manager) createContainer(containerName string) error { func (m *manager) createContainer(containerName string) error {
handler, accept, err := container.NewContainerHandler(containerName) handler, accept, err := container.NewContainerHandler(containerName)
@@ -700,17 +730,26 @@ func (m *manager) createContainer(containerName string) error {
glog.V(4).Infof("ignoring container %q", containerName) glog.V(4).Infof("ignoring container %q", containerName)
return nil return nil
} }
// TODO(vmarmol): Register collectors.
collectorManager, err := collector.NewCollectorManager() collectorManager, err := collector.NewCollectorManager()
if err != nil { if err != nil {
return err return err
} }
logUsage := *logCadvisorUsage && containerName == m.cadvisorContainer logUsage := *logCadvisorUsage && containerName == m.cadvisorContainer
cont, err := newContainerData(containerName, m.memoryCache, handler, m.loadReader, logUsage, collectorManager) cont, err := newContainerData(containerName, m.memoryCache, handler, m.loadReader, logUsage, collectorManager, m.maxHousekeepingInterval, m.allowDynamicHousekeeping)
if err != nil { if err != nil {
return err return err
} }
// Add collectors
labels := handler.GetContainerLabels()
collectorConfigs := collector.GetCollectorConfigs(labels)
err = m.registerCollectors(collectorConfigs, cont)
if err != nil {
glog.Infof("failed to register collectors for %q: %v", containerName, err)
return err
}
// Add to the containers map. // Add to the containers map.
alreadyExists := func() bool { alreadyExists := func() bool {
m.containersLock.Lock() m.containersLock.Lock()

View File

@@ -53,7 +53,7 @@ func createManagerAndAddContainers(
spec, spec,
nil, nil,
).Once() ).Once()
cont, err := newContainerData(name, memoryCache, mockHandler, nil, false, &collector.FakeCollectorManager{}) cont, err := newContainerData(name, memoryCache, mockHandler, nil, false, &collector.GenericCollectorManager{}, 60*time.Second, true)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@@ -205,7 +205,7 @@ func TestDockerContainersInfo(t *testing.T) {
} }
func TestNewNilManager(t *testing.T) { func TestNewNilManager(t *testing.T) {
_, err := New(nil, nil) _, err := New(nil, nil, 60*time.Second, true)
if err == nil { if err == nil {
t.Fatalf("Expected nil manager to return error") t.Fatalf("Expected nil manager to return error")
} }

View File

@@ -18,7 +18,6 @@ package pages
import ( import (
"fmt" "fmt"
"html/template" "html/template"
"math"
"net/http" "net/http"
"net/url" "net/url"
"path" "path"
@@ -149,15 +148,19 @@ func toMegabytes(bytes uint64) float64 {
return float64(bytes) / (1 << 20) return float64(bytes) / (1 << 20)
} }
// Size after which we consider memory to be "unlimited". This is not
// MaxInt64 due to rounding by the kernel.
const maxMemorySize = uint64(1 << 62)
func printSize(bytes uint64) string { func printSize(bytes uint64) string {
if bytes >= math.MaxInt64 { if bytes >= maxMemorySize {
return "unlimited" return "unlimited"
} }
return ByteSize(bytes).Size() return ByteSize(bytes).Size()
} }
func printUnit(bytes uint64) string { func printUnit(bytes uint64) string {
if bytes >= math.MaxInt64 { if bytes >= maxMemorySize {
return "" return ""
} }
return ByteSize(bytes).Unit() return ByteSize(bytes).Unit()
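Why 1 << 62 rather than math.MaxInt64: the kernel page-rounds an "unlimited" memory limit, so the reported value lands just below MaxInt64 and the old comparison never fired. A small sketch of the difference:

```go
package main

import (
	"fmt"
	"math"
)

// The kernel rounds an "unlimited" limit down to a page boundary, so the
// reported value (e.g. 9223372036854771712) is just under math.MaxInt64.
const maxMemorySize = uint64(1) << 62

func main() {
	reported := uint64(math.MaxInt64) &^ 4095 // page-rounded "unlimited"
	fmt.Println(reported >= maxMemorySize)    // true: new check treats it as unlimited
	fmt.Println(reported >= math.MaxInt64)    // false: the old check missed it
}
```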
@@ -229,7 +232,7 @@ func serveContainersPage(m manager.Manager, w http.ResponseWriter, u *url.URL) e
data := &pageData{ data := &pageData{
DisplayName: displayName, DisplayName: displayName,
ContainerName: cont.Name, ContainerName: escapeContainerName(cont.Name),
ParentContainers: parentContainers, ParentContainers: parentContainers,
Subcontainers: subcontainerLinks, Subcontainers: subcontainerLinks,
Spec: cont.Spec, Spec: cont.Spec,

View File

@@ -130,7 +130,7 @@ func serveDockerPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error
} }
data = &pageData{ data = &pageData{
DisplayName: displayName, DisplayName: displayName,
ContainerName: cont.Name, ContainerName: escapeContainerName(cont.Name),
ParentContainers: parentContainers, ParentContainers: parentContainers,
Spec: cont.Spec, Spec: cont.Spec,
Stats: cont.Stats, Stats: cont.Stats,

View File

@@ -18,6 +18,7 @@ import (
"fmt" "fmt"
"html/template" "html/template"
"net/http" "net/http"
"net/url"
"strings" "strings"
auth "github.com/abbot/go-http-auth" auth "github.com/abbot/go-http-auth"
@@ -159,3 +160,12 @@ func getContainerDisplayName(cont info.ContainerReference) string {
return displayName return displayName
} }
// Escape the non-path characters on a container name.
func escapeContainerName(containerName string) string {
parts := strings.Split(containerName, "/")
for i := range parts {
parts[i] = url.QueryEscape(parts[i])
}
return strings.Join(parts, "/")
}
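A quick illustration of the new helper: each path segment is query-escaped on its own, so the slashes that structure a container name survive while unsafe characters do not (the sample name is made up):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// escapeContainerName mirrors the helper above: query-escape each segment,
// keeping the "/" separators intact.
func escapeContainerName(containerName string) string {
	parts := strings.Split(containerName, "/")
	for i := range parts {
		parts[i] = url.QueryEscape(parts[i])
	}
	return strings.Join(parts, "/")
}

func main() {
	// "&", "<", ">" and spaces are escaped; the path structure is preserved.
	fmt.Println(escapeContainerName("/docker/evil name&<tag>"))
	// Output: /docker/evil+name%26%3Ctag%3E
}
```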

View File

@@ -44,8 +44,8 @@ func (self *redisStorage) defaultReadyToFlush() bool {
return time.Since(self.lastWrite) >= self.bufferDuration return time.Since(self.lastWrite) >= self.bufferDuration
} }
//We must add some defaut params (for example: MachineName,ContainerName...)because containerStats do not include them //We must add some default params (for example: MachineName,ContainerName...)because containerStats do not include them
func (self *redisStorage) containerStatsAndDefautValues(ref info.ContainerReference, stats *info.ContainerStats) *detailSpec { func (self *redisStorage) containerStatsAndDefaultValues(ref info.ContainerReference, stats *info.ContainerStats) *detailSpec {
timestamp := stats.Timestamp.UnixNano() / 1E3 timestamp := stats.Timestamp.UnixNano() / 1E3
var containerName string var containerName string
if len(ref.Aliases) > 0 { if len(ref.Aliases) > 0 {
@@ -72,8 +72,8 @@ func (self *redisStorage) AddStats(ref info.ContainerReference, stats *info.Cont
// AddStats will be invoked simultaneously from multiple threads and only one of them will perform a write. // AddStats will be invoked simultaneously from multiple threads and only one of them will perform a write.
self.lock.Lock() self.lock.Lock()
defer self.lock.Unlock() defer self.lock.Unlock()
// Add some defaut params based on containerStats // Add some default params based on containerStats
detail := self.containerStatsAndDefautValues(ref, stats) detail := self.containerStatsAndDefaultValues(ref, stats)
//To json //To json
b, _ := json.Marshal(detail) b, _ := json.Marshal(detail)
if self.readyToFlush() { if self.readyToFlush() {

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package statsd package client
import ( import (
"fmt" "fmt"
@@ -22,8 +22,9 @@ import (
) )
type Client struct { type Client struct {
HostPort string HostPort string
conn net.Conn Namespace string
conn net.Conn
} }
func (self *Client) Open() error { func (self *Client) Open() error {
@@ -36,38 +37,28 @@ func (self *Client) Open() error {
return nil return nil
} }
func (self *Client) Close() { func (self *Client) Close() error {
self.conn.Close() self.conn.Close()
self.conn = nil
return nil
} }
func (self *Client) UpdateGauge(name, value string) error { // Simple send to statsd daemon without sampling.
stats := make(map[string]string) func (self *Client) Send(namespace, containerName, key string, value uint64) error {
val := fmt.Sprintf("%s|g", value) // only send counter value
stats[name] = val formatted := fmt.Sprintf("%s.%s.%s:%d|g", namespace, containerName, key, value)
if err := self.send(stats); err != nil { _, err := fmt.Fprintf(self.conn, formatted)
if err != nil {
glog.V(3).Infof("failed to send data %q: %v", formatted, err)
return err return err
} }
return nil return nil
} }
// Simple send to statsd daemon without sampling.
func (self *Client) send(data map[string]string) error {
for k, v := range data {
formatted := fmt.Sprintf("%s:%s", k, v)
_, err := fmt.Fprintf(self.conn, formatted)
if err != nil {
glog.V(3).Infof("failed to send data %q: %v", formatted, err)
// return on first error.
return err
}
}
return nil
}
func New(hostPort string) (*Client, error) { func New(hostPort string) (*Client, error) {
client := Client{HostPort: hostPort} Client := Client{HostPort: hostPort}
if err := client.Open(); err != nil { if err := Client.Open(); err != nil {
return nil, err return nil, err
} }
return &client, nil return &Client, nil
} }
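The rewritten client emits plain statsd text-protocol gauges of the form `namespace.container.key:value|g`. A hedged sketch of what goes over the wire; UDP on port 8125 is the conventional statsd default, not something this diff specifies:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Mirrors Client.Send's formatting: namespace.container.key:value|g
	formatted := fmt.Sprintf("%s.%s.%s:%d|g", "cadvisor", "web-1", "memory_usage", 104857600)
	fmt.Println(formatted) // cadvisor.web-1.memory_usage:104857600|g

	// Ship it to a statsd daemon, assumed to listen on the usual UDP port.
	conn, err := net.Dial("udp", "127.0.0.1:8125")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Fprint(conn, formatted)
}
```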

View File

@@ -0,0 +1,127 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package statsd
import (
info "github.com/google/cadvisor/info/v1"
client "github.com/google/cadvisor/storage/statsd/client"
)
type statsdStorage struct {
client *client.Client
Namespace string
}
const (
colCpuCumulativeUsage string = "cpu_cumulative_usage"
// Memory Usage
colMemoryUsage string = "memory_usage"
// Working set size
colMemoryWorkingSet string = "memory_working_set"
// Cumulative count of bytes received.
colRxBytes string = "rx_bytes"
// Cumulative count of receive errors encountered.
colRxErrors string = "rx_errors"
// Cumulative count of bytes transmitted.
colTxBytes string = "tx_bytes"
// Cumulative count of transmit errors encountered.
colTxErrors string = "tx_errors"
// Filesystem summary
colFsSummary = "fs_summary"
// Filesystem limit.
colFsLimit = "fs_limit"
// Filesystem usage.
colFsUsage = "fs_usage"
)
func (self *statsdStorage) containerStatsToValues(
stats *info.ContainerStats,
) (series map[string]uint64) {
series = make(map[string]uint64)
// Cumulative Cpu Usage
series[colCpuCumulativeUsage] = stats.Cpu.Usage.Total
// Memory Usage
series[colMemoryUsage] = stats.Memory.Usage
// Working set size
series[colMemoryWorkingSet] = stats.Memory.WorkingSet
// Network stats.
series[colRxBytes] = stats.Network.RxBytes
series[colRxErrors] = stats.Network.RxErrors
series[colTxBytes] = stats.Network.TxBytes
series[colTxErrors] = stats.Network.TxErrors
return series
}
func (self *statsdStorage) containerFsStatsToValues(
series *map[string]uint64,
stats *info.ContainerStats,
) {
for _, fsStat := range stats.Filesystem {
// Summary stats.
(*series)[colFsSummary+"."+colFsLimit] += fsStat.Limit
(*series)[colFsSummary+"."+colFsUsage] += fsStat.Usage
// Per device stats.
(*series)[fsStat.Device+"."+colFsLimit] = fsStat.Limit
(*series)[fsStat.Device+"."+colFsUsage] = fsStat.Usage
}
}
//Push the data into statsd
func (self *statsdStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {
if stats == nil {
return nil
}
var containerName string
if len(ref.Aliases) > 0 {
containerName = ref.Aliases[0]
} else {
containerName = ref.Name
}
series := self.containerStatsToValues(stats)
self.containerFsStatsToValues(&series, stats)
for key, value := range series {
err := self.client.Send(self.Namespace, containerName, key, value)
if err != nil {
return err
}
}
return nil
}
func (self *statsdStorage) Close() error {
self.client.Close()
self.client = nil
return nil
}
func New(namespace, hostPort string) (*statsdStorage, error) {
statsdClient, err := client.New(hostPort)
if err != nil {
return nil, err
}
statsdStorage := &statsdStorage{
client: statsdClient,
Namespace: namespace,
}
return statsdStorage, nil
}
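Putting the new backend together, a hedged usage sketch — the import path is assumed from the file layout, a statsd daemon is assumed to be reachable on the given host:port, and cAdvisor's flag plumbing that selects this backend is not shown here:

```go
package main

import (
	"log"
	"time"

	info "github.com/google/cadvisor/info/v1"
	"github.com/google/cadvisor/storage/statsd"
)

func main() {
	// New(namespace, hostPort) opens the client connection immediately.
	storage, err := statsd.New("cadvisor", "localhost:8125")
	if err != nil {
		log.Fatal(err)
	}
	defer storage.Close()

	ref := info.ContainerReference{Name: "/docker/web-1"}
	stats := &info.ContainerStats{Timestamp: time.Now()}
	stats.Memory.Usage = 100 << 20 // 100 MiB

	// Fans out as one gauge per key, e.g. cadvisor.web-1.memory_usage:104857600|g
	if err := storage.AddStats(ref, stats); err != nil {
		log.Fatal(err)
	}
}
```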

View File

@@ -28,20 +28,23 @@ const secondsToMilliSeconds = 1000
const milliSecondsToNanoSeconds = 1000000 const milliSecondsToNanoSeconds = 1000000
const secondsToNanoSeconds = secondsToMilliSeconds * milliSecondsToNanoSeconds const secondsToNanoSeconds = secondsToMilliSeconds * milliSecondsToNanoSeconds
type uint64Slice []uint64 type Uint64Slice []uint64
func (a uint64Slice) Len() int { return len(a) } func (a Uint64Slice) Len() int { return len(a) }
func (a uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a Uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a uint64Slice) Less(i, j int) bool { return a[i] < a[j] } func (a Uint64Slice) Less(i, j int) bool { return a[i] < a[j] }
// Get 90th percentile of the provided samples. Round to integer. // Get percentile of the provided samples. Round to integer.
func (self uint64Slice) Get90Percentile() uint64 { func (self Uint64Slice) GetPercentile(d float64) uint64 {
if d < 0.0 || d > 1.0 {
return 0
}
count := self.Len() count := self.Len()
if count == 0 { if count == 0 {
return 0 return 0
} }
sort.Sort(self) sort.Sort(self)
n := float64(0.9 * (float64(count) + 1)) n := float64(d * (float64(count) + 1))
idx, frac := math.Modf(n) idx, frac := math.Modf(n)
index := int(idx) index := int(idx)
percentile := float64(self[index-1]) percentile := float64(self[index-1])
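To make the index arithmetic concrete: for sorted samples 1..100 and d = 0.9, n = 0.9 × 101 = 90.9, so index = 90 and the base value is the 90th sample. The function body is truncated in this hunk; the sketch below fills in the interpolate-and-truncate tail as an assumption consistent with the updated tests (90p of 1..100 is 90, of 1..105 is 95):

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// getPercentile reproduces the arithmetic above: n = d*(count+1), split into
// an integer index and a fraction, then interpolate between neighbors. The
// tail (interpolation plus uint64 truncation) is an assumption.
func getPercentile(samples []uint64, d float64) uint64 {
	if d < 0.0 || d > 1.0 || len(samples) == 0 {
		return 0
	}
	sort.Slice(samples, func(i, j int) bool { return samples[i] < samples[j] })
	n := d * (float64(len(samples)) + 1)
	idx, frac := math.Modf(n)
	index := int(idx)
	if index < 1 {
		return samples[0] // bounds guard added for this sketch
	}
	if index >= len(samples) {
		return samples[len(samples)-1]
	}
	percentile := float64(samples[index-1])
	if frac > 0 {
		percentile += frac * float64(samples[index]-samples[index-1])
	}
	return uint64(percentile)
}

func main() {
	s := make([]uint64, 0, 105)
	for i := uint64(1); i <= 100; i++ {
		s = append(s, i)
	}
	fmt.Println(getPercentile(s, 0.9)) // n = 90.9 -> 90
	for i := uint64(101); i <= 105; i++ {
		s = append(s, i)
	}
	fmt.Println(getPercentile(s, 0.9)) // n = 95.4 -> promoted to 95
}
```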
@@ -71,7 +74,7 @@ func (self *mean) Add(value uint64) {
type resource struct { type resource struct {
// list of samples being tracked. // list of samples being tracked.
samples uint64Slice samples Uint64Slice
// average from existing samples. // average from existing samples.
mean mean mean mean
// maximum value seen so far in the added samples. // maximum value seen so far in the added samples.
@@ -94,27 +97,31 @@ func (self *resource) Add(p info.Percentiles) {
// Add a single sample. Internally, we convert it to a fake percentile sample. // Add a single sample. Internally, we convert it to a fake percentile sample.
func (self *resource) AddSample(val uint64) { func (self *resource) AddSample(val uint64) {
sample := info.Percentiles{ sample := info.Percentiles{
Present: true, Present: true,
Mean: val, Mean: val,
Max: val, Max: val,
Ninety: val, Fifty: val,
Ninety: val,
NinetyFive: val,
} }
self.Add(sample) self.Add(sample)
} }
// Get max, average, and 90p from existing samples. // Get max, average, and the 50th, 90th, and 95th percentiles from existing samples.
func (self *resource) GetPercentile() info.Percentiles { func (self *resource) GetAllPercentiles() info.Percentiles {
p := info.Percentiles{} p := info.Percentiles{}
p.Mean = uint64(self.mean.Mean) p.Mean = uint64(self.mean.Mean)
p.Max = self.max p.Max = self.max
p.Ninety = self.samples.Get90Percentile() p.Fifty = self.samples.GetPercentile(0.5)
p.Ninety = self.samples.GetPercentile(0.9)
p.NinetyFive = self.samples.GetPercentile(0.95)
p.Present = true p.Present = true
return p return p
} }
func NewResource(size int) *resource { func NewResource(size int) *resource {
return &resource{ return &resource{
samples: make(uint64Slice, 0, size), samples: make(Uint64Slice, 0, size),
mean: mean{count: 0, Mean: 0}, mean: mean{count: 0, Mean: 0},
} }
} }
@@ -128,8 +135,8 @@ func GetDerivedPercentiles(stats []*info.Usage) info.Usage {
memory.Add(stat.Memory) memory.Add(stat.Memory)
} }
usage := info.Usage{} usage := info.Usage{}
usage.Cpu = cpu.GetPercentile() usage.Cpu = cpu.GetAllPercentiles()
usage.Memory = memory.GetPercentile() usage.Memory = memory.GetAllPercentiles()
return usage return usage
} }
@@ -183,7 +190,7 @@ func GetMinutePercentiles(stats []*secondSample) info.Usage {
percent := getPercentComplete(stats) percent := getPercentComplete(stats)
return info.Usage{ return info.Usage{
PercentComplete: percent, PercentComplete: percent,
Cpu: cpu.GetPercentile(), Cpu: cpu.GetAllPercentiles(),
Memory: memory.GetPercentile(), Memory: memory.GetAllPercentiles(),
} }
} }

View File

@@ -23,25 +23,29 @@ import (
const Nanosecond = 1000000000 const Nanosecond = 1000000000
func Test90Percentile(t *testing.T) { func assertPercentile(t *testing.T, s Uint64Slice, f float64, want uint64) {
if got := s.GetPercentile(f); got != want {
t.Errorf("GetPercentile(%f) is %d, should be %d.", f, got, want)
}
}
func TestPercentile(t *testing.T) {
N := 100 N := 100
stats := make(uint64Slice, 0, N) s := make(Uint64Slice, 0, N)
for i := N; i > 0; i-- { for i := N; i > 0; i-- {
stats = append(stats, uint64(i)) s = append(s, uint64(i))
} }
p := stats.Get90Percentile() assertPercentile(t, s, 0.2, 20)
if p != 90 { assertPercentile(t, s, 0.7, 70)
t.Errorf("90th percentile is %d, should be 90.", p) assertPercentile(t, s, 0.9, 90)
}
// 90p should be between 94 and 95. Promoted to 95.
N = 105 N = 105
for i := 101; i <= N; i++ { for i := 101; i <= N; i++ {
stats = append(stats, uint64(i)) s = append(s, uint64(i))
}
p = stats.Get90Percentile()
if p != 95 {
t.Errorf("90th percentile is %d, should be 95.", p)
} }
// 90p should be between 94 and 95. Promoted to 95.
assertPercentile(t, s, 0.2, 21)
assertPercentile(t, s, 0.7, 74)
assertPercentile(t, s, 0.9, 95)
} }
func TestMean(t *testing.T) { func TestMean(t *testing.T) {
@@ -74,19 +78,23 @@ func TestAggregates(t *testing.T) {
usage := GetMinutePercentiles(stats) usage := GetMinutePercentiles(stats)
// Cpu mean, max, and 90p should all be 1000 ms/s. // Cpu mean, max, and 90p should all be 1000 ms/s.
cpuExpected := info.Percentiles{ cpuExpected := info.Percentiles{
Present: true, Present: true,
Mean: 1000, Mean: 1000,
Max: 1000, Max: 1000,
Ninety: 1000, Fifty: 1000,
Ninety: 1000,
NinetyFive: 1000,
} }
if usage.Cpu != cpuExpected { if usage.Cpu != cpuExpected {
t.Errorf("cpu stats are %+v. Expected %+v", usage.Cpu, cpuExpected) t.Errorf("cpu stats are %+v. Expected %+v", usage.Cpu, cpuExpected)
} }
memExpected := info.Percentiles{ memExpected := info.Percentiles{
Present: true, Present: true,
Mean: 50 * 1024, Mean: 50 * 1024,
Max: 99 * 1024, Max: 99 * 1024,
Ninety: 90 * 1024, Fifty: 50 * 1024,
Ninety: 90 * 1024,
NinetyFive: 95 * 1024,
} }
if usage.Memory != memExpected { if usage.Memory != memExpected {
t.Errorf("memory stats are mean %+v. Expected %+v", usage.Memory, memExpected) t.Errorf("memory stats are mean %+v. Expected %+v", usage.Memory, memExpected)
@@ -119,19 +127,23 @@ func TestSamplesCloseInTimeIgnored(t *testing.T) {
usage := GetMinutePercentiles(stats) usage := GetMinutePercentiles(stats)
// Cpu mean, max, and 90p should all be 1000 ms/s. All high-value samples are discarded. // Cpu mean, max, and 90p should all be 1000 ms/s. All high-value samples are discarded.
cpuExpected := info.Percentiles{ cpuExpected := info.Percentiles{
Present: true, Present: true,
Mean: 1000, Mean: 1000,
Max: 1000, Max: 1000,
Ninety: 1000, Fifty: 1000,
Ninety: 1000,
NinetyFive: 1000,
} }
if usage.Cpu != cpuExpected { if usage.Cpu != cpuExpected {
t.Errorf("cpu stats are %+v. Expected %+v", usage.Cpu, cpuExpected) t.Errorf("cpu stats are %+v. Expected %+v", usage.Cpu, cpuExpected)
} }
memExpected := info.Percentiles{ memExpected := info.Percentiles{
Present: true, Present: true,
Mean: 50 * 1024, Mean: 50 * 1024,
Max: 99 * 1024, Max: 99 * 1024,
Ninety: 90 * 1024, Fifty: 50 * 1024,
Ninety: 90 * 1024,
NinetyFive: 95 * 1024,
} }
if usage.Memory != memExpected { if usage.Memory != memExpected {
t.Errorf("memory stats are mean %+v. Expected %+v", usage.Memory, memExpected) t.Errorf("memory stats are mean %+v. Expected %+v", usage.Memory, memExpected)
@@ -146,35 +158,43 @@ func TestDerivedStats(t *testing.T) {
s := &info.Usage{ s := &info.Usage{
PercentComplete: 100, PercentComplete: 100,
Cpu: info.Percentiles{ Cpu: info.Percentiles{
Present: true, Present: true,
Mean: i * Nanosecond, Mean: i * Nanosecond,
Max: i * Nanosecond, Max: i * Nanosecond,
Ninety: i * Nanosecond, Fifty: i * Nanosecond,
Ninety: i * Nanosecond,
NinetyFive: i * Nanosecond,
}, },
Memory: info.Percentiles{ Memory: info.Percentiles{
Present: true, Present: true,
Mean: i * 1024, Mean: i * 1024,
Max: i * 1024, Max: i * 1024,
Ninety: i * 1024, Fifty: i * 1024,
Ninety: i * 1024,
NinetyFive: i * 1024,
}, },
} }
stats = append(stats, s) stats = append(stats, s)
} }
usage := GetDerivedPercentiles(stats) usage := GetDerivedPercentiles(stats)
cpuExpected := info.Percentiles{ cpuExpected := info.Percentiles{
Present: true, Present: true,
Mean: 50 * Nanosecond, Mean: 50 * Nanosecond,
Max: 99 * Nanosecond, Max: 99 * Nanosecond,
Ninety: 90 * Nanosecond, Fifty: 50 * Nanosecond,
Ninety: 90 * Nanosecond,
NinetyFive: 95 * Nanosecond,
} }
if usage.Cpu != cpuExpected { if usage.Cpu != cpuExpected {
t.Errorf("cpu stats are %+v. Expected %+v", usage.Cpu, cpuExpected) t.Errorf("cpu stats are %+v. Expected %+v", usage.Cpu, cpuExpected)
} }
memExpected := info.Percentiles{ memExpected := info.Percentiles{
Present: true, Present: true,
Mean: 50 * 1024, Mean: 50 * 1024,
Max: 99 * 1024, Max: 99 * 1024,
Ninety: 90 * 1024, Fifty: 50 * 1024,
Ninety: 90 * 1024,
NinetyFive: 95 * 1024,
} }
if usage.Memory != memExpected { if usage.Memory != memExpected {
t.Errorf("memory stats are mean %+v. Expected %+v", usage.Memory, memExpected) t.Errorf("memory stats are mean %+v. Expected %+v", usage.Memory, memExpected)

View File

@@ -0,0 +1,87 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Get information about the cloud provider (if any) cAdvisor is running on.
package cloudinfo
import (
info "github.com/google/cadvisor/info/v1"
)
type CloudInfo interface {
GetCloudProvider() info.CloudProvider
GetInstanceType() info.InstanceType
}
type realCloudInfo struct {
cloudProvider info.CloudProvider
instanceType info.InstanceType
}
func NewRealCloudInfo() CloudInfo {
cloudProvider := detectCloudProvider()
instanceType := detectInstanceType(cloudProvider)
return &realCloudInfo{
cloudProvider: cloudProvider,
instanceType: instanceType,
}
}
func (self *realCloudInfo) GetCloudProvider() info.CloudProvider {
return self.cloudProvider
}
func (self *realCloudInfo) GetInstanceType() info.InstanceType {
return self.instanceType
}
func detectCloudProvider() info.CloudProvider {
switch {
case onGCE():
return info.GCE
case onAWS():
return info.AWS
case onBaremetal():
return info.Baremetal
}
return info.UnkownProvider
}
func detectInstanceType(cloudProvider info.CloudProvider) info.InstanceType {
switch cloudProvider {
case info.GCE:
return getGceInstanceType()
case info.AWS:
return getAwsInstanceType()
case info.Baremetal:
return info.NoInstance
}
return info.UnknownInstance
}
//TODO: Implement method.
func onAWS() bool {
return false
}
//TODO: Implement method.
func getAwsInstanceType() info.InstanceType {
return info.UnknownInstance
}
//TODO: Implement method.
func onBaremetal() bool {
return false
}
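A short usage sketch of the new package as getMachineInfo consumes it; only GCE detection is implemented in this revision, while AWS and bare-metal remain TODO stubs:

```go
package main

import (
	"fmt"

	"github.com/google/cadvisor/utils/cloudinfo"
)

func main() {
	ci := cloudinfo.NewRealCloudInfo()
	// On GCE this prints e.g. "GCE n1-standard-4"; elsewhere the provider
	// and instance type fall through to the Unknown values.
	fmt.Println(ci.GetCloudProvider(), ci.GetInstanceType())
}
```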

View File

@@ -0,0 +1,36 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cloudinfo
import (
"strings"
"github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata"
info "github.com/google/cadvisor/info/v1"
)
func onGCE() bool {
return metadata.OnGCE()
}
func getGceInstanceType() info.InstanceType {
machineType, err := metadata.Get("instance/machine-type")
if err != nil {
return info.UnknownInstance
}
responseParts := strings.Split(machineType, "/") // Extract the instance name from the machine type.
return info.InstanceType(responseParts[len(responseParts)-1])
}
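The metadata server returns the machine type as a full resource path, so only the final path segment is kept. For example (the sample value is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Illustrative metadata response for "instance/machine-type".
	machineType := "projects/123456789/machineTypes/n1-standard-4"
	parts := strings.Split(machineType, "/")
	fmt.Println(parts[len(parts)-1]) // n1-standard-4
}
```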

View File

@@ -18,7 +18,7 @@
package mockfs package mockfs
import ( import (
gomock "code.google.com/p/gomock/gomock" gomock "github.com/golang/mock/gomock"
fs "github.com/google/cadvisor/utils/fs" fs "github.com/google/cadvisor/utils/fs"
) )

View File

@@ -0,0 +1,243 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package machine
import (
"fmt"
"io/ioutil"
"regexp"
"strconv"
"strings"
"github.com/golang/glog"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/utils"
"github.com/google/cadvisor/utils/sysfs"
"github.com/google/cadvisor/utils/sysinfo"
)
// The utils/machine package contains functions that extract machine-level specs.
var cpuRegExp = regexp.MustCompile("processor\\t*: +([0-9]+)")
var coreRegExp = regexp.MustCompile("core id\\t*: +([0-9]+)")
var nodeRegExp = regexp.MustCompile("physical id\\t*: +([0-9]+)")
var CpuClockSpeedMHz = regexp.MustCompile("cpu MHz\\t*: +([0-9]+.[0-9]+)")
var memoryCapacityRegexp = regexp.MustCompile("MemTotal: *([0-9]+) kB")
var swapCapacityRegexp = regexp.MustCompile("SwapTotal: *([0-9]+) kB")
// GetClockSpeed returns the CPU clock speed, given a []byte formatted as the /proc/cpuinfo file.
func GetClockSpeed(procInfo []byte) (uint64, error) {
// First look through sys to find a max supported cpu frequency.
const maxFreqFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq"
if utils.FileExists(maxFreqFile) {
val, err := ioutil.ReadFile(maxFreqFile)
if err != nil {
return 0, err
}
var maxFreq uint64
n, err := fmt.Sscanf(string(val), "%d", &maxFreq)
if err != nil || n != 1 {
return 0, fmt.Errorf("could not parse frequency %q", val)
}
return maxFreq, nil
}
// Fall back to /proc/cpuinfo
matches := CpuClockSpeedMHz.FindSubmatch(procInfo)
if len(matches) != 2 {
//Check if we are running on Power systems which have a different format
CpuClockSpeedMHz, _ = regexp.Compile("clock\\t*: +([0-9]+.[0-9]+)MHz")
matches = CpuClockSpeedMHz.FindSubmatch(procInfo)
if len(matches) != 2 {
return 0, fmt.Errorf("could not detect clock speed from output: %q", string(procInfo))
}
}
speed, err := strconv.ParseFloat(string(matches[1]), 64)
if err != nil {
return 0, err
}
// Convert to kHz
return uint64(speed * 1000), nil
}
// GetMachineMemoryCapacity returns the machine's total memory from /proc/meminfo.
// Returns the total memory capacity as an int64 (number of bytes).
func GetMachineMemoryCapacity() (int64, error) {
out, err := ioutil.ReadFile("/proc/meminfo")
if err != nil {
return 0, err
}
memoryCapacity, err := parseCapacity(out, memoryCapacityRegexp)
if err != nil {
return 0, err
}
return memoryCapacity, err
}
// GetMachineSwapCapacity returns the machine's total swap from /proc/meminfo.
// Returns the total swap capacity as an int64 (number of bytes).
func GetMachineSwapCapacity() (int64, error) {
out, err := ioutil.ReadFile("/proc/meminfo")
if err != nil {
return 0, err
}
swapCapacity, err := parseCapacity(out, swapCapacityRegexp)
if err != nil {
return 0, err
}
return swapCapacity, err
}
// parseCapacity matches a Regexp in a []byte, returning the resulting value in bytes.
// Assumes that the value matched by the Regexp is in KB.
func parseCapacity(b []byte, r *regexp.Regexp) (int64, error) {
matches := r.FindSubmatch(b)
if len(matches) != 2 {
return -1, fmt.Errorf("failed to match regexp in output: %q", string(b))
}
m, err := strconv.ParseInt(string(matches[1]), 10, 64)
if err != nil {
return -1, err
}
// Convert to bytes.
return m * 1024, err
}
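Concretely, a /proc/meminfo line such as `MemTotal: 16342348 kB` yields 16342348 × 1024 bytes. A standalone sketch of the same match-and-convert step:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var memoryCapacityRegexp = regexp.MustCompile("MemTotal: *([0-9]+) kB")

// parseCapacity mirrors the helper above: match the kB value, convert to bytes.
func parseCapacity(b []byte, r *regexp.Regexp) (int64, error) {
	matches := r.FindSubmatch(b)
	if len(matches) != 2 {
		return -1, fmt.Errorf("failed to match regexp in output: %q", string(b))
	}
	m, err := strconv.ParseInt(string(matches[1]), 10, 64)
	if err != nil {
		return -1, err
	}
	return m * 1024, nil
}

func main() {
	meminfo := []byte("MemTotal:       16342348 kB\nMemFree:         1141788 kB\n")
	capacity, err := parseCapacity(meminfo, memoryCapacityRegexp)
	if err != nil {
		panic(err)
	}
	fmt.Println(capacity) // 16734564352
}
```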
func GetTopology(sysFs sysfs.SysFs, cpuinfo string) ([]info.Node, int, error) {
nodes := []info.Node{}
numCores := 0
lastThread := -1
lastCore := -1
lastNode := -1
for _, line := range strings.Split(cpuinfo, "\n") {
ok, val, err := extractValue(line, cpuRegExp)
if err != nil {
return nil, -1, fmt.Errorf("could not parse cpu info from %q: %v", line, err)
}
if ok {
thread := val
numCores++
if lastThread != -1 {
// New cpu section. Save last one.
nodeIdx, err := addNode(&nodes, lastNode)
if err != nil {
return nil, -1, fmt.Errorf("failed to add node %d: %v", lastNode, err)
}
nodes[nodeIdx].AddThread(lastThread, lastCore)
lastCore = -1
lastNode = -1
}
lastThread = thread
}
ok, val, err = extractValue(line, coreRegExp)
if err != nil {
return nil, -1, fmt.Errorf("could not parse core info from %q: %v", line, err)
}
if ok {
lastCore = val
}
ok, val, err = extractValue(line, nodeRegExp)
if err != nil {
return nil, -1, fmt.Errorf("could not parse node info from %q: %v", line, err)
}
if ok {
lastNode = val
}
}
nodeIdx, err := addNode(&nodes, lastNode)
if err != nil {
return nil, -1, fmt.Errorf("failed to add node %d: %v", lastNode, err)
}
nodes[nodeIdx].AddThread(lastThread, lastCore)
if numCores < 1 {
return nil, numCores, fmt.Errorf("could not detect any cores")
}
for idx, node := range nodes {
caches, err := sysinfo.GetCacheInfo(sysFs, node.Cores[0].Threads[0])
if err != nil {
glog.Errorf("failed to get cache information for node %d: %v", node.Id, err)
continue
}
numThreadsPerCore := len(node.Cores[0].Threads)
numThreadsPerNode := len(node.Cores) * numThreadsPerCore
for _, cache := range caches {
c := info.Cache{
Size: cache.Size,
Level: cache.Level,
Type: cache.Type,
}
if cache.Cpus == numThreadsPerNode && cache.Level > 2 {
// Add a node-level cache.
nodes[idx].AddNodeCache(c)
} else if cache.Cpus == numThreadsPerCore {
// Add to each core.
nodes[idx].AddPerCoreCache(c)
}
// Ignore unknown caches.
}
}
return nodes, numCores, nil
}
func extractValue(s string, r *regexp.Regexp) (bool, int, error) {
matches := r.FindSubmatch([]byte(s))
if len(matches) == 2 {
val, err := strconv.ParseInt(string(matches[1]), 10, 32)
if err != nil {
return true, -1, err
}
return true, int(val), nil
}
return false, -1, nil
}
func findNode(nodes []info.Node, id int) (bool, int) {
for i, n := range nodes {
if n.Id == id {
return true, i
}
}
return false, -1
}
func addNode(nodes *[]info.Node, id int) (int, error) {
var idx int
if id == -1 {
// Some VMs don't fill topology data. Export single package.
id = 0
}
ok, idx := findNode(*nodes, id)
if !ok {
// New node
node := info.Node{Id: id}
// Add per-node memory information.
meminfo := fmt.Sprintf("/sys/devices/system/node/node%d/meminfo", id)
out, err := ioutil.ReadFile(meminfo)
// Ignore if per-node info is not available.
if err == nil {
m, err := parseCapacity(out, memoryCapacityRegexp)
if err != nil {
return -1, err
}
node.Memory = uint64(m)
}
*nodes = append(*nodes, node)
idx = len(*nodes) - 1
}
return idx, nil
}

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
package manager package machine
import ( import (
"io/ioutil" "io/ioutil"
@@ -38,7 +38,7 @@ func TestTopology(t *testing.T) {
Cpus: 2, Cpus: 2,
} }
sysFs.SetCacheInfo(c) sysFs.SetCacheInfo(c)
topology, numCores, err := getTopology(sysFs, string(testcpuinfo)) topology, numCores, err := GetTopology(sysFs, string(testcpuinfo))
if err != nil { if err != nil {
t.Errorf("failed to get topology for sample cpuinfo %s", string(testcpuinfo)) t.Errorf("failed to get topology for sample cpuinfo %s", string(testcpuinfo))
} }
@@ -84,7 +84,7 @@ func TestTopologyWithSimpleCpuinfo(t *testing.T) {
Cpus: 1, Cpus: 1,
} }
sysFs.SetCacheInfo(c) sysFs.SetCacheInfo(c)
topology, numCores, err := getTopology(sysFs, "processor\t: 0\n") topology, numCores, err := GetTopology(sysFs, "processor\t: 0\n")
if err != nil { if err != nil {
t.Errorf("Expected cpuinfo with no topology data to succeed.") t.Errorf("Expected cpuinfo with no topology data to succeed.")
} }
@@ -110,7 +110,7 @@ func TestTopologyWithSimpleCpuinfo(t *testing.T) {
} }
func TestTopologyEmptyCpuinfo(t *testing.T) { func TestTopologyEmptyCpuinfo(t *testing.T) {
_, _, err := getTopology(&fakesysfs.FakeSysFs{}, "") _, _, err := GetTopology(&fakesysfs.FakeSysFs{}, "")
if err == nil { if err == nil {
t.Errorf("Expected empty cpuinfo to fail.") t.Errorf("Expected empty cpuinfo to fail.")
} }

View File

@@ -15,4 +15,4 @@
package version package version
// Version of cAdvisor. // Version of cAdvisor.
const VERSION = "0.15.1" const VERSION = "0.16.0"

View File

@@ -0,0 +1,23 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test

View File

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Nate Finch
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,166 @@
# lumberjack [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://drone.io/github.com/natefinch/lumberjack/status.png)](https://drone.io/github.com/natefinch/lumberjack/latest) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0)
### Lumberjack is a Go package for writing logs to rolling files.
Package lumberjack provides a rolling logger.
Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
thusly:
import "gopkg.in/natefinch/lumberjack.v2"
The package name remains simply lumberjack, and the code resides at
https://github.com/natefinch/lumberjack under the v2.0 branch.
Lumberjack is intended to be one part of a logging infrastructure.
It is not an all-in-one solution, but instead is a pluggable
component at the bottom of the logging stack that simply controls the files
to which logs are written.
Lumberjack plays well with any logging package that can write to an
io.Writer, including the standard library's log package.
Lumberjack assumes that only one process is writing to the output files.
Using the same lumberjack configuration from multiple processes on the same
machine will result in improper behavior.
**Example**
To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts.
Code:
```go
log.SetOutput(&lumberjack.Logger{
Filename: "/var/log/myapp/foo.log",
MaxSize: 500, // megabytes
MaxBackups: 3,
MaxAge: 28, //days
})
```
## type Logger
``` go
type Logger struct {
// Filename is the file to write logs to. Backup log files will be retained
// in the same directory. It uses <processname>-lumberjack.log in
// os.TempDir() if empty.
Filename string `json:"filename" yaml:"filename"`
// MaxSize is the maximum size in megabytes of the log file before it gets
// rotated. It defaults to 100 megabytes.
MaxSize int `json:"maxsize" yaml:"maxsize"`
// MaxAge is the maximum number of days to retain old log files based on the
// timestamp encoded in their filename. Note that a day is defined as 24
// hours and may not exactly correspond to calendar days due to daylight
// savings, leap seconds, etc. The default is not to remove old log files
// based on age.
MaxAge int `json:"maxage" yaml:"maxage"`
// MaxBackups is the maximum number of old log files to retain. The default
// is to retain all old log files (though MaxAge may still cause them to get
// deleted.)
MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
// LocalTime determines if the time used for formatting the timestamps in
// backup files is the computer's local time. The default is to use UTC
// time.
LocalTime bool `json:"localtime" yaml:"localtime"`
// contains filtered or unexported fields
}
```
Logger is an io.WriteCloser that writes to the specified filename.
Logger opens or creates the logfile on first Write. If the file exists and
is less than MaxSize megabytes, lumberjack will open and append to that file.
If the file exists and its size is >= MaxSize megabytes, the file is renamed
by putting the current time in a timestamp in the name immediately before the
file's extension (or the end of the filename if there's no extension). A new
log file is then created using the original filename.
Whenever a write would cause the current log file to exceed MaxSize megabytes,
the current file is closed, renamed, and a new log file created with the
original name. Thus, the filename you give Logger is always the "current" log
file.
### Cleaning Up Old Log Files
Whenever a new logfile gets created, old log files may be deleted. The most
recent files according to the encoded timestamp will be retained, up to a
number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
with an encoded timestamp older than MaxAge days are deleted, regardless of
MaxBackups. Note that the time encoded in the timestamp is the rotation
time, which may differ from the last time that file was written to.
If MaxBackups and MaxAge are both 0, no old log files will be deleted.
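For instance, to keep at most three rotated files and nothing rotated more than a week ago, a sketch (values illustrative):
```go
log.SetOutput(&lumberjack.Logger{
	Filename:   "/var/log/myapp/foo.log",
	MaxSize:    100, // megabytes per file
	MaxBackups: 3,   // keep the three newest backups...
	MaxAge:     7,   // ...and none rotated more than seven days ago
})
```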
### func (\*Logger) Close
``` go
func (l *Logger) Close() error
```
Close implements io.Closer, and closes the current logfile.
### func (\*Logger) Rotate
``` go
func (l *Logger) Rotate() error
```
Rotate causes Logger to close the existing log file and immediately create a
new one. This is a helper function for applications that want to initiate
rotations outside of the normal rotation rules, such as in response to
SIGHUP. After rotating, this initiates a cleanup of old log files according
to the normal rules.
**Example**
Example of how to rotate in response to SIGHUP.
Code:
```go
l := &lumberjack.Logger{}
log.SetOutput(l)
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP)
go func() {
for {
<-c
l.Rotate()
}
}()
```
### func (\*Logger) Write
``` go
func (l *Logger) Write(p []byte) (n int, err error)
```
Write implements io.Writer. If a write would cause the log file to be larger
than MaxSize, the file is closed, renamed to include a timestamp of the
current time, and a new log file is created using the original log file name.
If the length of the write is greater than MaxSize, an error is returned.
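Note that the cap applies per call: a single write larger than MaxSize fails outright rather than rotating. A sketch of that failure mode (hypothetical filename):
```go
l := &lumberjack.Logger{Filename: "/tmp/cap.log", MaxSize: 1} // 1 megabyte cap
_, err := l.Write(make([]byte, 2*1024*1024))                  // a 2 MB write
// err is non-nil: "write length 2097152 exceeds maximum file size 1048576"
```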
- - -
Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)

View File

@@ -0,0 +1,11 @@
// +build !linux

package lumberjack
import (
"os"
)
func chown(_ string, _ os.FileInfo) error {
return nil
}

View File

@@ -0,0 +1,19 @@
package lumberjack
import (
"os"
"syscall"
)
// os_Chown is a var so we can mock it out during tests.
var os_Chown = os.Chown
func chown(name string, info os.FileInfo) error {
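// The original file has just been renamed away by openNew, so recreate it
// empty with the old mode before chowning; the caller reopens it with O_TRUNC.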
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
if err != nil {
return err
}
f.Close()
stat := info.Sys().(*syscall.Stat_t)
return os_Chown(name, int(stat.Uid), int(stat.Gid))
}

View File

@@ -0,0 +1,18 @@
package lumberjack_test
import (
"log"
"gopkg.in/natefinch/lumberjack.v2"
)
// To use lumberjack with the standard library's log package, just pass it into
// the SetOutput function when your application starts.
func Example() {
log.SetOutput(&lumberjack.Logger{
Filename: "/var/log/myapp/foo.log",
MaxSize: 500, // megabytes
MaxBackups: 3,
MaxAge: 28, // days
})
}

View File

@@ -0,0 +1,104 @@
// +build linux

package lumberjack
import (
"os"
"syscall"
"testing"
)
func TestMaintainMode(t *testing.T) {
currentTime = fakeTime
dir := makeTempDir("TestMaintainMode", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
mode := os.FileMode(0770)
f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, mode)
isNil(err, t)
f.Close()
l := &Logger{
Filename: filename,
MaxBackups: 1,
MaxSize: 100, // megabytes
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
filename2 := backupFile(dir)
info, err := os.Stat(filename)
isNil(err, t)
info2, err := os.Stat(filename2)
isNil(err, t)
equals(mode, info.Mode(), t)
equals(mode, info2.Mode(), t)
}
func TestMaintainOwner(t *testing.T) {
fakeC := fakeChown{}
os_Chown = fakeC.Set
os_Stat = fakeStat
defer func() {
os_Chown = os.Chown
os_Stat = os.Stat
}()
currentTime = fakeTime
dir := makeTempDir("TestMaintainOwner", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxBackups: 1,
MaxSize: 100, // megabytes
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
equals(555, fakeC.uid, t)
equals(666, fakeC.gid, t)
}
type fakeChown struct {
name string
uid int
gid int
}
func (f *fakeChown) Set(name string, uid, gid int) error {
f.name = name
f.uid = uid
f.gid = gid
return nil
}
func fakeStat(name string) (os.FileInfo, error) {
info, err := os.Stat(name)
if err != nil {
return info, err
}
stat := info.Sys().(*syscall.Stat_t)
stat.Uid = 555
stat.Gid = 666
return info, nil
}

View File

@@ -0,0 +1,417 @@
// Package lumberjack provides a rolling logger.
//
// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
// thusly:
//
// import "gopkg.in/natefinch/lumberjack.v2"
//
// The package name remains simply lumberjack, and the code resides at
// https://github.com/natefinch/lumberjack under the v2.0 branch.
//
// Lumberjack is intended to be one part of a logging infrastructure.
// It is not an all-in-one solution, but instead is a pluggable
// component at the bottom of the logging stack that simply controls the files
// to which logs are written.
//
// Lumberjack plays well with any logging package that can write to an
// io.Writer, including the standard library's log package.
//
// Lumberjack assumes that only one process is writing to the output files.
// Using the same lumberjack configuration from multiple processes on the same
// machine will result in improper behavior.
package lumberjack
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
)
const (
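// backupTimeFormat is Go's reference time (Mon Jan 2 15:04:05 MST 2006) with
// colons replaced by dashes so the result is filename-safe: a file "foo.log"
// rotated at 2015-07-31T22:09:47.123Z becomes "foo-2015-07-31T22-09-47.123.log".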
backupTimeFormat = "2006-01-02T15-04-05.000"
defaultMaxSize = 100
)
// ensure we always implement io.WriteCloser
var _ io.WriteCloser = (*Logger)(nil)
// Logger is an io.WriteCloser that writes to the specified filename.
//
// Logger opens or creates the logfile on first Write. If the file exists and
// is less than MaxSize megabytes, lumberjack will open and append to that file.
// If the file exists and its size is >= MaxSize megabytes, the file is renamed
// by putting the current time in a timestamp in the name immediately before the
// file's extension (or the end of the filename if there's no extension). A new
// log file is then created using the original filename.
//
// Whenever a write would cause the current log file to exceed MaxSize megabytes,
// the current file is closed, renamed, and a new log file created with the
// original name. Thus, the filename you give Logger is always the "current" log
// file.
//
// Cleaning Up Old Log Files
//
// Whenever a new logfile gets created, old log files may be deleted. The most
// recent files according to the encoded timestamp will be retained, up to a
// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
// with an encoded timestamp older than MaxAge days are deleted, regardless of
// MaxBackups. Note that the time encoded in the timestamp is the rotation
// time, which may differ from the last time that file was written to.
//
// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
type Logger struct {
// Filename is the file to write logs to. Backup log files will be retained
// in the same directory. It uses <processname>-lumberjack.log in
// os.TempDir() if empty.
Filename string `json:"filename" yaml:"filename"`
// MaxSize is the maximum size in megabytes of the log file before it gets
// rotated. It defaults to 100 megabytes.
MaxSize int `json:"maxsize" yaml:"maxsize"`
// MaxAge is the maximum number of days to retain old log files based on the
// timestamp encoded in their filename. Note that a day is defined as 24
// hours and may not exactly correspond to calendar days due to daylight
// savings, leap seconds, etc. The default is not to remove old log files
// based on age.
MaxAge int `json:"maxage" yaml:"maxage"`
// MaxBackups is the maximum number of old log files to retain. The default
// is to retain all old log files (though MaxAge may still cause them to get
// deleted.)
MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
// LocalTime determines if the time used for formatting the timestamps in
// backup files is the computer's local time. The default is to use UTC
// time.
LocalTime bool `json:"localtime" yaml:"localtime"`
size int64
file *os.File
mu sync.Mutex
}
var (
// currentTime exists so it can be mocked out by tests.
currentTime = time.Now
// os_Stat exists so it can be mocked out by tests.
os_Stat = os.Stat
// megabyte is the conversion factor between MaxSize and bytes. It is a
// variable so tests can mock it out and not need to write megabytes of data
// to disk.
megabyte = 1024 * 1024
)
// Write implements io.Writer. If a write would cause the log file to be larger
// than MaxSize, the file is closed, renamed to include a timestamp of the
// current time, and a new log file is created using the original log file name.
// If the length of the write is greater than MaxSize, an error is returned.
func (l *Logger) Write(p []byte) (n int, err error) {
l.mu.Lock()
defer l.mu.Unlock()
writeLen := int64(len(p))
if writeLen > l.max() {
return 0, fmt.Errorf(
"write length %d exceeds maximum file size %d", writeLen, l.max(),
)
}
if l.file == nil {
if err = l.openExistingOrNew(len(p)); err != nil {
return 0, err
}
}
if l.size+writeLen > l.max() {
if err := l.rotate(); err != nil {
return 0, err
}
}
n, err = l.file.Write(p)
l.size += int64(n)
return n, err
}
// Close implements io.Closer, and closes the current logfile.
func (l *Logger) Close() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.close()
}
// close closes the file if it is open.
func (l *Logger) close() error {
if l.file == nil {
return nil
}
err := l.file.Close()
l.file = nil
return err
}
// Rotate causes Logger to close the existing log file and immediately create a
// new one. This is a helper function for applications that want to initiate
// rotations outside of the normal rotation rules, such as in response to
// SIGHUP. After rotating, this initiates a cleanup of old log files according
// to the normal rules.
func (l *Logger) Rotate() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.rotate()
}
// rotate closes the current file, moves it aside with a timestamp in the name,
// (if it exists), opens a new file with the original filename, and then runs
// cleanup.
func (l *Logger) rotate() error {
if err := l.close(); err != nil {
return err
}
if err := l.openNew(); err != nil {
return err
}
return l.cleanup()
}
// openNew opens a new log file for writing, moving any old log file out of the
// way. This method assumes the file has already been closed.
func (l *Logger) openNew() error {
err := os.MkdirAll(l.dir(), 0744)
if err != nil {
return fmt.Errorf("can't make directories for new logfile: %s", err)
}
name := l.filename()
mode := os.FileMode(0644)
info, err := os_Stat(name)
if err == nil {
// Copy the mode off the old logfile.
mode = info.Mode()
// move the existing file
newname := backupName(name, l.LocalTime)
if err := os.Rename(name, newname); err != nil {
return fmt.Errorf("can't rename log file: %s", err)
}
// this is a no-op anywhere but linux
if err := chown(name, info); err != nil {
return err
}
}
// we use truncate here because this should only get called when we've moved
// the file ourselves. if someone else creates the file in the meantime,
// just wipe out the contents.
f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
if err != nil {
return fmt.Errorf("can't open new logfile: %s", err)
}
l.file = f
l.size = 0
return nil
}
// backupName creates a new filename from the given name, inserting a timestamp
// between the filename and the extension, using the local time if requested
// (otherwise UTC).
func backupName(name string, local bool) string {
dir := filepath.Dir(name)
filename := filepath.Base(name)
ext := filepath.Ext(filename)
prefix := filename[:len(filename)-len(ext)]
t := currentTime()
if !local {
t = t.UTC()
}
timestamp := t.Format(backupTimeFormat)
return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
}
// openExistingOrNew opens the logfile if it exists and if the current write
// would not put it over MaxSize. If there is no such file or the write would
// put it over the MaxSize, a new file is created.
func (l *Logger) openExistingOrNew(writeLen int) error {
filename := l.filename()
info, err := os_Stat(filename)
if os.IsNotExist(err) {
return l.openNew()
}
if err != nil {
return fmt.Errorf("error getting log file info: %s", err)
}
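// If appending this write would reach the cap, rotate first so the write
// lands in a fresh file.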
if info.Size()+int64(writeLen) >= l.max() {
return l.rotate()
}
file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
if err != nil {
// if we fail to open the old log file for some reason, just ignore
// it and open a new log file.
return l.openNew()
}
l.file = file
l.size = info.Size()
return nil
}
// genFilename generates the name of the logfile from the current time.
func (l *Logger) filename() string {
if l.Filename != "" {
return l.Filename
}
name := filepath.Base(os.Args[0]) + "-lumberjack.log"
return filepath.Join(os.TempDir(), name)
}
// cleanup deletes old log files, keeping at most l.MaxBackups files, as long as
// none of them are older than MaxAge.
func (l *Logger) cleanup() error {
if l.MaxBackups == 0 && l.MaxAge == 0 {
return nil
}
files, err := l.oldLogFiles()
if err != nil {
return err
}
var deletes []logInfo
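// oldLogFiles returns backups sorted newest-first (byFormatTime), so
// everything past the first MaxBackups entries is surplus.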
if l.MaxBackups > 0 && l.MaxBackups < len(files) {
deletes = files[l.MaxBackups:]
files = files[:l.MaxBackups]
}
if l.MaxAge > 0 {
diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
cutoff := currentTime().Add(-1 * diff)
for _, f := range files {
if f.timestamp.Before(cutoff) {
deletes = append(deletes, f)
}
}
}
if len(deletes) == 0 {
return nil
}
go deleteAll(l.dir(), deletes)
return nil
}
func deleteAll(dir string, files []logInfo) {
// remove files on a separate goroutine
for _, f := range files {
// what am I going to do, log this?
_ = os.Remove(filepath.Join(dir, f.Name()))
}
}
// oldLogFiles returns the list of backup log files stored in the same
// directory as the current log file, sorted by ModTime
func (l *Logger) oldLogFiles() ([]logInfo, error) {
files, err := ioutil.ReadDir(l.dir())
if err != nil {
return nil, fmt.Errorf("can't read log file directory: %s", err)
}
logFiles := []logInfo{}
prefix, ext := l.prefixAndExt()
for _, f := range files {
if f.IsDir() {
continue
}
name := l.timeFromName(f.Name(), prefix, ext)
if name == "" {
continue
}
t, err := time.Parse(backupTimeFormat, name)
if err == nil {
logFiles = append(logFiles, logInfo{t, f})
}
// error parsing means that the suffix at the end was not generated
// by lumberjack, and therefore it's not a backup file.
}
sort.Sort(byFormatTime(logFiles))
return logFiles, nil
}
// timeFromName extracts the formatted time from the filename by stripping off
// the filename's prefix and extension. This prevents someone's filename from
// confusing time.Parse.
func (l *Logger) timeFromName(filename, prefix, ext string) string {
if !strings.HasPrefix(filename, prefix) {
return ""
}
filename = filename[len(prefix):]
if !strings.HasSuffix(filename, ext) {
return ""
}
filename = filename[:len(filename)-len(ext)]
return filename
}
// max returns the maximum size in bytes of log files before rolling.
func (l *Logger) max() int64 {
if l.MaxSize == 0 {
return int64(defaultMaxSize * megabyte)
}
return int64(l.MaxSize) * int64(megabyte)
}
// dir returns the directory for the current filename.
func (l *Logger) dir() string {
return filepath.Dir(l.filename())
}
// prefixAndExt returns the filename part and extension part from the Logger's
// filename.
func (l *Logger) prefixAndExt() (prefix, ext string) {
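// For "foo.log" this yields prefix "foo-" and ext ".log", mirroring the
// names produced by backupName.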
filename := filepath.Base(l.filename())
ext = filepath.Ext(filename)
prefix = filename[:len(filename)-len(ext)] + "-"
return prefix, ext
}
// logInfo is a convenience struct to return the filename and its embedded
// timestamp.
type logInfo struct {
timestamp time.Time
os.FileInfo
}
// byFormatTime sorts by newest time formatted in the name.
type byFormatTime []logInfo
func (b byFormatTime) Less(i, j int) bool {
return b[i].timestamp.After(b[j].timestamp)
}
func (b byFormatTime) Swap(i, j int) {
b[i], b[j] = b[j], b[i]
}
func (b byFormatTime) Len() int {
return len(b)
}

View File

@@ -0,0 +1,690 @@
package lumberjack
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/BurntSushi/toml"
"gopkg.in/yaml.v1"
)
// !!!NOTE!!!
//
// Running these tests in parallel will almost certainly cause sporadic (or even
// regular) failures, because they're all messing with the same global variable
// that controls the logic's mocked time.Now. So... don't do that.
// Since all the tests use the time to determine filenames etc., we need to
// control the wall clock as much as possible, which means having a wall clock
// that doesn't change unless we want it to.
var fakeCurrentTime = time.Now()
func fakeTime() time.Time {
return fakeCurrentTime
}
func TestNewFile(t *testing.T) {
currentTime = fakeTime
dir := makeTempDir("TestNewFile", t)
defer os.RemoveAll(dir)
l := &Logger{
Filename: logFile(dir),
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(logFile(dir), n, t)
fileCount(dir, 1, t)
}
func TestOpenExisting(t *testing.T) {
currentTime = fakeTime
dir := makeTempDir("TestOpenExisting", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
data := []byte("foo!")
err := ioutil.WriteFile(filename, data, 0644)
isNil(err, t)
existsWithLen(filename, len(data), t)
l := &Logger{
Filename: filename,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
// make sure the file got appended
existsWithLen(filename, len(data)+n, t)
// make sure no other files were created
fileCount(dir, 1, t)
}
func TestWriteTooLong(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestWriteTooLong", t)
defer os.RemoveAll(dir)
l := &Logger{
Filename: logFile(dir),
MaxSize: 5,
}
defer l.Close()
b := []byte("booooooooooooooo!")
n, err := l.Write(b)
notNil(err, t)
equals(0, n, t)
equals(err.Error(),
fmt.Sprintf("write length %d exceeds maximum file size %d", len(b), l.MaxSize), t)
_, err = os.Stat(logFile(dir))
assert(os.IsNotExist(err), t, "File exists, but should not have been created")
}
func TestMakeLogDir(t *testing.T) {
currentTime = fakeTime
dir := time.Now().Format("TestMakeLogDir" + backupTimeFormat)
dir = filepath.Join(os.TempDir(), dir)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(logFile(dir), n, t)
fileCount(dir, 1, t)
}
func TestDefaultFilename(t *testing.T) {
currentTime = fakeTime
dir := os.TempDir()
filename := filepath.Join(dir, filepath.Base(os.Args[0])+"-lumberjack.log")
defer os.Remove(filename)
l := &Logger{}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(filename, n, t)
}
func TestAutoRotate(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestAutoRotate", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxSize: 10,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(filename, n, t)
fileCount(dir, 1, t)
newFakeTime()
b2 := []byte("foooooo!")
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// the old logfile should be moved aside and the main logfile should have
// only the last write in it.
existsWithLen(filename, n, t)
// the backup file will use the current fake time and have the old contents.
existsWithLen(backupFile(dir), len(b), t)
fileCount(dir, 2, t)
}
func TestFirstWriteRotate(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestFirstWriteRotate", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxSize: 10,
}
defer l.Close()
start := []byte("boooooo!")
err := ioutil.WriteFile(filename, start, 0600)
isNil(err, t)
newFakeTime()
// this would make us rotate
b := []byte("fooo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(filename, n, t)
existsWithLen(backupFile(dir), len(start), t)
fileCount(dir, 2, t)
}
func TestMaxBackups(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestMaxBackups", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxSize: 10,
MaxBackups: 1,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(filename, n, t)
fileCount(dir, 1, t)
newFakeTime()
// this will put us over the max
b2 := []byte("foooooo!")
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// this will use the new fake time
secondFilename := backupFile(dir)
existsWithLen(secondFilename, len(b), t)
// make sure the old file still exists with the same size.
existsWithLen(filename, n, t)
fileCount(dir, 2, t)
newFakeTime()
// this will make us rotate again
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// this will use the new fake time
thirdFilename := backupFile(dir)
existsWithLen(thirdFilename, len(b2), t)
existsWithLen(filename, n, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(time.Millisecond * 10)
// should only have two files in the dir still
fileCount(dir, 2, t)
// second file name should still exist
existsWithLen(thirdFilename, len(b2), t)
// should have deleted the first backup
notExist(secondFilename, t)
// now test that we don't delete directories or non-logfile files
newFakeTime()
// create a file that is close to but different from the logfile name.
// It shouldn't get caught by our deletion filters.
notlogfile := logFile(dir) + ".foo"
err = ioutil.WriteFile(notlogfile, []byte("data"), 0644)
isNil(err, t)
// Make a directory that exactly matches our log file filters... it still
// shouldn't get caught by the deletion filter since it's a directory.
notlogfiledir := backupFile(dir)
err = os.Mkdir(notlogfiledir, 0700)
isNil(err, t)
newFakeTime()
// this will make us rotate again
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// this will use the new fake time
fourthFilename := backupFile(dir)
existsWithLen(fourthFilename, len(b2), t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(time.Millisecond * 10)
// We should have four things in the directory now - the 2 log files, the
// not log file, and the directory
fileCount(dir, 4, t)
// third file name should still exist
existsWithLen(filename, n, t)
existsWithLen(fourthFilename, len(b2), t)
// should have deleted the first filename
notExist(thirdFilename, t)
// the not-a-logfile should still exist
exists(notlogfile, t)
// the directory
exists(notlogfiledir, t)
}
func TestCleanupExistingBackups(t *testing.T) {
// test that if we start with more backup files than we're supposed to have
// in total, that extra ones get cleaned up when we rotate.
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestCleanupExistingBackups", t)
defer os.RemoveAll(dir)
// make 3 backup files
data := []byte("data")
backup := backupFile(dir)
err := ioutil.WriteFile(backup, data, 0644)
isNil(err, t)
newFakeTime()
backup = backupFile(dir)
err = ioutil.WriteFile(backup, data, 0644)
isNil(err, t)
newFakeTime()
backup = backupFile(dir)
err = ioutil.WriteFile(backup, data, 0644)
isNil(err, t)
// now create a primary log file with some data
filename := logFile(dir)
err = ioutil.WriteFile(filename, data, 0644)
isNil(err, t)
l := &Logger{
Filename: filename,
MaxSize: 10,
MaxBackups: 1,
}
defer l.Close()
newFakeTime()
b2 := []byte("foooooo!")
n, err := l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(time.Millisecond * 10)
// now we should only have 2 files left - the primary and one backup
fileCount(dir, 2, t)
}
func TestMaxAge(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestMaxAge", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxSize: 10,
MaxAge: 1,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(filename, n, t)
fileCount(dir, 1, t)
// two days later
newFakeTime()
b2 := []byte("foooooo!")
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
existsWithLen(backupFile(dir), len(b), t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(10 * time.Millisecond)
// We should still have 2 log files, since the most recent backup was just
// created.
fileCount(dir, 2, t)
existsWithLen(filename, len(b2), t)
// we should have deleted the old file due to being too old
existsWithLen(backupFile(dir), len(b), t)
// two days later
newFakeTime()
b3 := []byte("foooooo!")
n, err = l.Write(b3)
isNil(err, t)
equals(len(b3), n, t)
existsWithLen(backupFile(dir), len(b2), t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(10 * time.Millisecond)
// We should have 2 log files - the main log file, and the most recent
// backup. The earlier backup is past the cutoff and should be gone.
fileCount(dir, 2, t)
existsWithLen(filename, len(b3), t)
// we should have deleted the old file due to being too old
existsWithLen(backupFile(dir), len(b2), t)
}
func TestOldLogFiles(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestOldLogFiles", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
data := []byte("data")
err := ioutil.WriteFile(filename, data, 07)
isNil(err, t)
// This gives us a time with the same precision as the time we get from the
// timestamp in the name.
t1, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat))
isNil(err, t)
backup := backupFile(dir)
err = ioutil.WriteFile(backup, data, 07)
isNil(err, t)
newFakeTime()
t2, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat))
isNil(err, t)
backup2 := backupFile(dir)
err = ioutil.WriteFile(backup2, data, 07)
isNil(err, t)
l := &Logger{Filename: filename}
files, err := l.oldLogFiles()
isNil(err, t)
equals(2, len(files), t)
// should be sorted by newest file first, which would be t2
equals(t2, files[0].timestamp, t)
equals(t1, files[1].timestamp, t)
}
func TestTimeFromName(t *testing.T) {
l := &Logger{Filename: "/var/log/myfoo/foo.log"}
prefix, ext := l.prefixAndExt()
val := l.timeFromName("foo-2014-05-04T14-44-33.555.log", prefix, ext)
equals("2014-05-04T14-44-33.555", val, t)
val = l.timeFromName("foo-2014-05-04T14-44-33.555", prefix, ext)
equals("", val, t)
val = l.timeFromName("2014-05-04T14-44-33.555.log", prefix, ext)
equals("", val, t)
val = l.timeFromName("foo.log", prefix, ext)
equals("", val, t)
}
func TestLocalTime(t *testing.T) {
currentTime = fakeTime
megabyte = 1
dir := makeTempDir("TestLocalTime", t)
defer os.RemoveAll(dir)
l := &Logger{
Filename: logFile(dir),
MaxSize: 10,
LocalTime: true,
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
b2 := []byte("fooooooo!")
n2, err := l.Write(b2)
isNil(err, t)
equals(len(b2), n2, t)
existsWithLen(logFile(dir), n2, t)
existsWithLen(backupFileLocal(dir), n, t)
}
func TestRotate(t *testing.T) {
currentTime = fakeTime
dir := makeTempDir("TestRotate", t)
defer os.RemoveAll(dir)
filename := logFile(dir)
l := &Logger{
Filename: filename,
MaxBackups: 1,
MaxSize: 100, // megabytes
}
defer l.Close()
b := []byte("boo!")
n, err := l.Write(b)
isNil(err, t)
equals(len(b), n, t)
existsWithLen(filename, n, t)
fileCount(dir, 1, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(10 * time.Millisecond)
filename2 := backupFile(dir)
existsWithLen(filename2, n, t)
existsWithLen(filename, 0, t)
fileCount(dir, 2, t)
newFakeTime()
err = l.Rotate()
isNil(err, t)
// we need to wait a little bit since the files get deleted on a different
// goroutine.
<-time.After(10 * time.Millisecond)
filename3 := backupFile(dir)
existsWithLen(filename3, 0, t)
existsWithLen(filename, 0, t)
fileCount(dir, 2, t)
b2 := []byte("foooooo!")
n, err = l.Write(b2)
isNil(err, t)
equals(len(b2), n, t)
// this will use the new fake time
existsWithLen(filename, n, t)
}
func TestJson(t *testing.T) {
data := []byte(`
{
"filename": "foo",
"maxsize": 5,
"maxage": 10,
"maxbackups": 3,
"localtime": true
}`[1:])
l := Logger{}
err := json.Unmarshal(data, &l)
isNil(err, t)
equals("foo", l.Filename, t)
equals(5, l.MaxSize, t)
equals(10, l.MaxAge, t)
equals(3, l.MaxBackups, t)
equals(true, l.LocalTime, t)
}
func TestYaml(t *testing.T) {
data := []byte(`
filename: foo
maxsize: 5
maxage: 10
maxbackups: 3
localtime: true`[1:])
l := Logger{}
err := yaml.Unmarshal(data, &l)
isNil(err, t)
equals("foo", l.Filename, t)
equals(5, l.MaxSize, t)
equals(10, l.MaxAge, t)
equals(3, l.MaxBackups, t)
equals(true, l.LocalTime, t)
}
func TestToml(t *testing.T) {
data := `
filename = "foo"
maxsize = 5
maxage = 10
maxbackups = 3
localtime = true`[1:]
l := Logger{}
md, err := toml.Decode(data, &l)
isNil(err, t)
equals("foo", l.Filename, t)
equals(5, l.MaxSize, t)
equals(10, l.MaxAge, t)
equals(3, l.MaxBackups, t)
equals(true, l.LocalTime, t)
equals(0, len(md.Undecoded()), t)
}
// makeTempDir creates a directory with a semi-unique name in the OS temp directory.
// It should be based on the name of the test, to keep parallel tests from
// colliding, and must be cleaned up after the test is finished.
func makeTempDir(name string, t testing.TB) string {
dir := time.Now().Format(name + backupTimeFormat)
dir = filepath.Join(os.TempDir(), dir)
isNilUp(os.Mkdir(dir, 0777), t, 1)
return dir
}
// existsWithLen checks that the given file exists and has the correct length.
func existsWithLen(path string, length int, t testing.TB) {
info, err := os.Stat(path)
isNilUp(err, t, 1)
equalsUp(int64(length), info.Size(), t, 1)
}
// logFile returns the name of the main log file in the given directory.
func logFile(dir string) string {
return filepath.Join(dir, "foobar.log")
}
func backupFile(dir string) string {
return filepath.Join(dir, "foobar-"+fakeTime().UTC().Format(backupTimeFormat)+".log")
}
func backupFileLocal(dir string) string {
return filepath.Join(dir, "foobar-"+fakeTime().Format(backupTimeFormat)+".log")
}
// logFileLocal returns the log file name in the given directory for the current
// fake time using the local timezone.
func logFileLocal(dir string) string {
return filepath.Join(dir, fakeTime().Format(backupTimeFormat))
}
// fileCount checks that the number of files in the directory is exp.
func fileCount(dir string, exp int, t testing.TB) {
files, err := ioutil.ReadDir(dir)
isNilUp(err, t, 1)
// Make sure no other files were created.
equalsUp(exp, len(files), t, 1)
}
// newFakeTime sets the fake "current time" to two days later.
func newFakeTime() {
fakeCurrentTime = fakeCurrentTime.Add(time.Hour * 24 * 2)
}
func notExist(path string, t testing.TB) {
_, err := os.Stat(path)
assertUp(os.IsNotExist(err), t, 1, "expected to get os.IsNotExist, but instead got %v", err)
}
func exists(path string, t testing.TB) {
_, err := os.Stat(path)
assertUp(err == nil, t, 1, "expected file to exist, but got error from os.Stat: %v", err)
}

View File

@@ -0,0 +1,27 @@
// +build linux

package lumberjack_test
import (
"log"
"os"
"os/signal"
"syscall"
"github.com/natefinch/lumberjack"
)
// Example of how to rotate in response to SIGHUP.
func ExampleLogger_Rotate() {
l := &lumberjack.Logger{}
log.SetOutput(l)
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGHUP)
go func() {
for {
<-c
l.Rotate()
}
}()
}

View File

@@ -0,0 +1,91 @@
package lumberjack
import (
"fmt"
"path/filepath"
"reflect"
"runtime"
"testing"
)
// assert will log the given message if condition is false.
func assert(condition bool, t testing.TB, msg string, v ...interface{}) {
assertUp(condition, t, 1, msg, v...)
}
// assertUp is like assert, but used inside helper functions, to ensure that
// the file and line number reported by failures corresponds to one or more
// levels up the stack.
func assertUp(condition bool, t testing.TB, caller int, msg string, v ...interface{}) {
if !condition {
_, file, line, _ := runtime.Caller(caller + 1)
v = append([]interface{}{filepath.Base(file), line}, v...)
fmt.Printf("%s:%d: "+msg+"\n", v...)
t.FailNow()
}
}
// equals tests that the two values are equal according to reflect.DeepEqual.
func equals(exp, act interface{}, t testing.TB) {
equalsUp(exp, act, t, 1)
}
// equalsUp is like equals, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func equalsUp(exp, act interface{}, t testing.TB, caller int) {
if !reflect.DeepEqual(exp, act) {
_, file, line, _ := runtime.Caller(caller + 1)
fmt.Printf("%s:%d: exp: %v (%T), got: %v (%T)\n",
filepath.Base(file), line, exp, exp, act, act)
t.FailNow()
}
}
// isNil reports a failure if the given value is not nil. Note that values
// which cannot be nil will always fail this check.
func isNil(obtained interface{}, t testing.TB) {
isNilUp(obtained, t, 1)
}
// isNilUp is like isNil, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func isNilUp(obtained interface{}, t testing.TB, caller int) {
if !_isNil(obtained) {
_, file, line, _ := runtime.Caller(caller + 1)
fmt.Printf("%s:%d: expected nil, got: %v\n", filepath.Base(file), line, obtained)
t.FailNow()
}
}
// notNil reports a failure if the given value is nil.
func notNil(obtained interface{}, t testing.TB) {
notNilUp(obtained, t, 1)
}
// notNilUp is like notNil, but used inside helper functions, to ensure that the
// file and line number reported by failures corresponds to one or more levels
// up the stack.
func notNilUp(obtained interface{}, t testing.TB, caller int) {
if _isNil(obtained) {
_, file, line, _ := runtime.Caller(caller + 1)
fmt.Printf("%s:%d: expected non-nil, got: %v\n", filepath.Base(file), line, obtained)
t.FailNow()
}
}
// _isNil is a helper function for isNil and notNil, and should not be used
// directly.
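// It also treats a typed nil inside an interface (e.g. (*os.File)(nil)) as
// nil, which a plain == nil comparison would not.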
func _isNil(obtained interface{}) bool {
if obtained == nil {
return true
}
switch v := reflect.ValueOf(obtained); v.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return v.IsNil()
}
return false
}

View File

@@ -5637,6 +5637,152 @@
} }
] ]
}, },
{
"path": "/api/v1/namespaces/{namespace}/pods/{name}/attach",
"description": "API at /api/v1 version v1",
"operations": [
{
"type": "string",
"method": "GET",
"summary": "connect GET requests to attach of Pod",
"nickname": "connectGetNamespacedPodAttach",
"parameters": [
{
"type": "boolean",
"paramType": "query",
"name": "stdin",
"description": "redirect the standard input stream of the pod for this call; defaults to false",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "stdout",
"description": "redirect the standard output stream of the pod for this call; defaults to true",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "stderr",
"description": "redirect the standard error stream of the pod for this call; defaults to true",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "tty",
"description": "allocate a terminal for this attach call; defaults to false",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "query",
"name": "container",
"description": "the container in which to execute the command. Defaults to only container if there is only one container in the pod.",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Pod",
"required": true,
"allowMultiple": false
}
],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
},
{
"type": "string",
"method": "POST",
"summary": "connect POST requests to attach of Pod",
"nickname": "connectPostNamespacedPodAttach",
"parameters": [
{
"type": "boolean",
"paramType": "query",
"name": "stdin",
"description": "redirect the standard input stream of the pod for this call; defaults to false",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "stdout",
"description": "redirect the standard output stream of the pod for this call; defaults to true",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "stderr",
"description": "redirect the standard error stream of the pod for this call; defaults to true",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "tty",
"description": "allocate a terminal for this attach call; defaults to false",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "query",
"name": "container",
"description": "the container in which to execute the command. Defaults to only container if there is only one container in the pod.",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "namespace",
"description": "object name and auth scope, such as for teams and projects",
"required": true,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
"name": "name",
"description": "name of the Pod",
"required": true,
"allowMultiple": false
}
],
"produces": [
"*/*"
],
"consumes": [
"*/*"
]
}
]
},
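The two attach operations above share the same query parameters; the request shape is roughly this (hypothetical pod and container names — the server upgrades the connection for streaming, so a plain HTTP client only gets as far as the handshake):
```
POST /api/v1/namespaces/default/pods/mypod/attach?stdin=true&stdout=true&stderr=true&tty=true&container=shell
```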
{ {
"path": "/api/v1/namespaces/{namespace}/pods/{name}/binding", "path": "/api/v1/namespaces/{namespace}/pods/{name}/binding",
"description": "API at /api/v1 version v1", "description": "API at /api/v1 version v1",

View File

@@ -5,7 +5,8 @@ To build Kubernetes you need to have access to a Docker installation through eit
## Requirements ## Requirements
1. Be running Docker. 2 options supported/tested: 1. Be running Docker. 2 options supported/tested:
1. **Mac OS X** The best way to go is to use `boot2docker`. See instructions [here](https://docs.docker.com/installation/mac/). 1. **Mac OS X** The best way to go is to use `boot2docker`. See instructions [here](https://docs.docker.com/installation/mac/).
**Note**: You will want to set the boot2docker VM to have at least 3GB of initial memory or building will likely fail; see the sketch after this list. (See: [#11852](https://github.com/GoogleCloudPlatform/kubernetes/issues/11852))
2. **Linux with local Docker** Install Docker according to the [instructions](https://docs.docker.com/installation/#installation) for your OS. The scripts here assume that they are using a local Docker server and that they can "reach around" docker and grab results directly from the file system. 2. **Linux with local Docker** Install Docker according to the [instructions](https://docs.docker.com/installation/#installation) for your OS. The scripts here assume that they are using a local Docker server and that they can "reach around" docker and grab results directly from the file system.
2. Have python installed. Pretty much it is installed everywhere at this point so you can probably ignore this. 2. Have python installed. Pretty much it is installed everywhere at this point so you can probably ignore this.
3. *Optional* For uploading your release to Google Cloud Storage, have the [Google Cloud SDK](https://developers.google.com/cloud/sdk/) installed and configured. 3. *Optional* For uploading your release to Google Cloud Storage, have the [Google Cloud SDK](https://developers.google.com/cloud/sdk/) installed and configured.
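A sketch of the memory bump mentioned above, assuming the `--memory` flag of the boot2docker CLI of that era (re-initializing destroys the existing VM, so treat this as illustrative only):
```
boot2docker delete
boot2docker init --memory=4096
boot2docker up
```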

View File

@@ -52,6 +52,7 @@ fi
release_branch="release-${VERSION_MAJOR}.${VERSION_MINOR}" release_branch="release-${VERSION_MAJOR}.${VERSION_MINOR}"
current_branch=$(git rev-parse --abbrev-ref HEAD) current_branch=$(git rev-parse --abbrev-ref HEAD)
head_commit=$(git rev-parse --short HEAD)
if [[ "${VERSION_PATCH}" != "0" ]]; then if [[ "${VERSION_PATCH}" != "0" ]]; then
# sorry, no going back in time, pull latest from upstream # sorry, no going back in time, pull latest from upstream
@@ -93,15 +94,13 @@ echo "+++ Running ./versionize-docs"
${KUBE_ROOT}/build/versionize-docs.sh ${NEW_VERSION} ${KUBE_ROOT}/build/versionize-docs.sh ${NEW_VERSION}
git commit -am "Versioning docs and examples for ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}" git commit -am "Versioning docs and examples for ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}"
dochash=$(git log -n1 --format=%H)
VERSION_FILE="${KUBE_ROOT}/pkg/version/base.go" VERSION_FILE="${KUBE_ROOT}/pkg/version/base.go"
GIT_MINOR="${VERSION_MINOR}.${VERSION_PATCH}" GIT_MINOR="${VERSION_MINOR}.${VERSION_PATCH}"
echo "+++ Updating to ${NEW_VERSION}" echo "+++ Updating to ${NEW_VERSION}"
$SED -ri -e "s/gitMajor\s+string = \"[^\"]*\"/gitMajor string = \"${VERSION_MAJOR}\"/" "${VERSION_FILE}" $SED -ri -e "s/gitMajor\s+string = \"[^\"]*\"/gitMajor string = \"${VERSION_MAJOR}\"/" "${VERSION_FILE}"
$SED -ri -e "s/gitMinor\s+string = \"[^\"]*\"/gitMinor string = \"${GIT_MINOR}\"/" "${VERSION_FILE}" $SED -ri -e "s/gitMinor\s+string = \"[^\"]*\"/gitMinor string = \"${GIT_MINOR}\"/" "${VERSION_FILE}"
$SED -ri -e "s/gitVersion\s+string = \"[^\"]*\"/gitVersion string = \"$NEW_VERSION\"/" "${VERSION_FILE}" $SED -ri -e "s/gitVersion\s+string = \"[^\"]*\"/gitVersion string = \"$NEW_VERSION-${release_branch}+\$Format:%h\$\"/" "${VERSION_FILE}"
gofmt -s -w "${VERSION_FILE}" gofmt -s -w "${VERSION_FILE}"
echo "+++ Committing version change" echo "+++ Committing version change"
@@ -110,35 +109,30 @@ git commit -m "Kubernetes version $NEW_VERSION"
echo "+++ Tagging version" echo "+++ Tagging version"
git tag -a -m "Kubernetes version $NEW_VERSION" "${NEW_VERSION}" git tag -a -m "Kubernetes version $NEW_VERSION" "${NEW_VERSION}"
newtag=$(git rev-parse --short HEAD)
echo "+++ Updating to ${NEW_VERSION}-dev" if [[ "${VERSION_PATCH}" == "0" ]]; then
$SED -ri -e "s/gitMajor\s+string = \"[^\"]*\"/gitMajor string = \"${VERSION_MAJOR}\"/" "${VERSION_FILE}" declare -r alpha_ver="v${VERSION_MAJOR}.$((${VERSION_MINOR}+1)).0-alpha.0"
$SED -ri -e "s/gitMinor\s+string = \"[^\"]*\"/gitMinor string = \"${GIT_MINOR}\+\"/" "${VERSION_FILE}" git tag -a -m "Kubernetes pre-release branch ${alpha-ver}" "${alpha_ver}" "${head_commit}"
$SED -ri -e "s/gitVersion\s+string = \"[^\"]*\"/gitVersion string = \"$NEW_VERSION-dev\"/" "${VERSION_FILE}" fi
gofmt -s -w "${VERSION_FILE}"
echo "+++ Committing version change"
git add "${VERSION_FILE}"
git commit -m "Kubernetes version ${NEW_VERSION}-dev"
echo "" echo ""
echo "Success you must now:" echo "Success you must now:"
echo "" echo ""
echo "- Push the tag:" echo "- Push the tag:"
echo " git push ${push_url} v${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}" echo " git push ${push_url} v${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}"
echo " - Please note you are pushing the tag live BEFORE your PRs."
echo " You need this so the builds pick up the right tag info (and so your reviewers can see it)."
echo " If something goes wrong further down please fix the tag!"
echo " Either delete this tag and give up, fix the tag before your next PR,"
echo " or find someone who can help solve the tag problem!"
echo ""
if [[ "${VERSION_PATCH}" == "0" ]]; then if [[ "${VERSION_PATCH}" == "0" ]]; then
echo "- Send branch: ${current_branch} as a PR to ${push_url}/master" echo "- Push the alpha tag:"
echo " For major/minor releases, this gets the branch tag merged and changes the version numbers." echo " git push ${push_url} ${alpha_ver}"
echo "- Push the new release branch:" echo "- Push the new release branch:"
echo " git push ${push_url} ${current_branch}:${release_branch}" echo " git push ${push_url} ${current_branch}:${release_branch}"
echo "- DO NOTHING TO MASTER. You were done with master when you pushed the alpha tag."
else else
echo "- Send branch: ${current_branch} as a PR to ${release_branch} <-- NOTE THIS" echo "- Send branch: ${current_branch} as a PR to ${release_branch} <-- NOTE THIS"
echo " Get someone to review and merge that PR" echo "- In the contents of the PR, include the PRs in the release:"
echo " hack/cherry_pick_list.sh ${current_branch}^1"
echo " This helps cross-link PRs to patch releases they're part of in GitHub."
echo "- Have someone review the PR. This is a mechanical review to ensure it contains"
echo " the ${NEW_VERSION} commit, which was tagged at ${newtag}."
fi fi

View File

@@ -33,3 +33,11 @@ spec:
- --sink=gcl - --sink=gcl
- --poll_duration=2m - --poll_duration=2m
- --stats_resolution=1m - --stats_resolution=1m
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
readOnly: true
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs"

View File

@@ -33,3 +33,11 @@ spec:
- --sink=influxdb:http://monitoring-influxdb:8086 - --sink=influxdb:http://monitoring-influxdb:8086
- --poll_duration=2m - --poll_duration=2m
- --stats_resolution=1m - --stats_resolution=1m
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
readOnly: true
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs"

View File

@@ -37,7 +37,7 @@ import (
kclientcmd "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd" kclientcmd "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
kframework "github.com/GoogleCloudPlatform/kubernetes/pkg/controller/framework" kframework "github.com/GoogleCloudPlatform/kubernetes/pkg/controller/framework"
kSelector "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" kSelector "github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
tools "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/wait" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/wait"
etcd "github.com/coreos/go-etcd/etcd" etcd "github.com/coreos/go-etcd/etcd"
@@ -354,7 +354,7 @@ func newEtcdClient(etcdServer string) (*etcd.Client, error) {
err error err error
) )
for attempt := 1; attempt <= maxConnectAttempts; attempt++ { for attempt := 1; attempt <= maxConnectAttempts; attempt++ {
if _, err = tools.GetEtcdVersion(etcdServer); err == nil { if _, err = etcdstorage.GetEtcdVersion(etcdServer); err == nil {
break break
} }
if attempt == maxConnectAttempts { if attempt == maxConnectAttempts {

View File

@@ -1,7 +1,7 @@
.PHONY: build push .PHONY: build push
IMAGE = fluentd-elasticsearch IMAGE = fluentd-elasticsearch
TAG = 1.6 TAG = 1.7
build: build:
docker build -t gcr.io/google_containers/$(IMAGE):$(TAG) . docker build -t gcr.io/google_containers/$(IMAGE):$(TAG) .

View File

@@ -94,20 +94,13 @@
tag docker tag docker
</source> </source>
<match kubernetes.**> <source>
type elasticsearch type tail
log_level info format none
include_tag_key true path /varlog/etcd.log
host elasticsearch-logging pos_file /varlog/es-etcd.log.pos
port 9200 tag etcd
logstash_format true </source>
flush_interval 5s
# Never wait longer than 5 minutes between retries.
max_retry_wait 300
# Disable the limit on the number of retries (retry forever).
disable_retry_limit
</match>
>>>>>>> Move things into a 'kube-system' namespace.
<source> <source>
type tail type tail

View File

@@ -15,7 +15,7 @@
.PHONY: kbuild kpush .PHONY: kbuild kpush
TAG = 1.9 TAG = 1.10
# Rules for building the test image for deployment to Dockerhub with user kubernetes. # Rules for building the test image for deployment to Dockerhub with user kubernetes.

View File

@@ -79,6 +79,14 @@
tag docker tag docker
</source> </source>
<source>
type tail
format none
path /varlog/etcd.log
pos_file /varlog/gcp-etcd.log.pos
tag etcd
</source>
<source> <source>
type tail type tail
format none format none

View File

@@ -1,7 +1,7 @@
# SaltStack configuration # SaltStack configuration
This is the root of the SaltStack configuration for Kubernetes. A high This is the root of the SaltStack configuration for Kubernetes. A high
level overview for the Kubernetes SaltStack configuration can be found [in the docs tree.](../../docs/salt.md) level overview for the Kubernetes SaltStack configuration can be found [in the docs tree.](../../docs/admin/salt.md)
This SaltStack configuration currently applies to default This SaltStack configuration currently applies to default
configurations for Debian-on-GCE, Fedora-on-Vagrant, Ubuntu-on-AWS and configurations for Debian-on-GCE, Fedora-on-Vagrant, Ubuntu-on-AWS and

View File

@@ -6,7 +6,7 @@ metadata:
spec: spec:
containers: containers:
- name: fluentd-elasticsearch - name: fluentd-elasticsearch
image: gcr.io/google_containers/fluentd-elasticsearch:1.6 image: gcr.io/google_containers/fluentd-elasticsearch:1.7
resources: resources:
limits: limits:
cpu: 100m cpu: 100m

View File

@@ -6,7 +6,7 @@ metadata:
spec: spec:
containers: containers:
- name: fluentd-cloud-logging - name: fluentd-cloud-logging
image: gcr.io/google_containers/fluentd-gcp:1.9 image: gcr.io/google_containers/fluentd-gcp:1.10
resources: resources:
limits: limits:
cpu: 100m cpu: 100m

View File

@@ -41,6 +41,12 @@
{% endif -%} {% endif -%}
{% set config = "--config=/etc/kubernetes/manifests" -%} {% set config = "--config=/etc/kubernetes/manifests" -%}
{% set manifest_url = "" -%}
{% if grains['roles'][0] == 'kubernetes-master' and grains.cloud in ['gce'] -%}
{% set manifest_url = "--manifest-url=http://metadata.google.internal/computeMetadata/v1/instance/attributes/google-container-manifest --manifest-url-header=Metadata-Flavor:Google" -%}
{% endif -%}
{% set hostname_override = "" -%} {% set hostname_override = "" -%}
{% if grains.hostname_override is defined -%} {% if grains.hostname_override is defined -%}
{% set hostname_override = " --hostname_override=" + grains.hostname_override -%} {% set hostname_override = " --hostname_override=" + grains.hostname_override -%}
@@ -84,4 +90,4 @@
{% set pod_cidr = "--pod-cidr=" + grains['cbr-cidr'] %} {% set pod_cidr = "--pod-cidr=" + grains['cbr-cidr'] %}
{% endif %} {% endif %}
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}}" DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}}"

View File

@@ -42,6 +42,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/nodecontroller" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/nodecontroller"
replicationControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/controller/replication" replicationControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/controller/replication"
explatest "github.com/GoogleCloudPlatform/kubernetes/pkg/expapi/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields" "github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor"
@@ -132,10 +133,14 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: apiVersion}) cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: apiVersion})
etcdStorage, err := master.NewEtcdStorage(etcdClient, "", etcdtest.PathPrefix()) etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.InterfacesFor, latest.Version, etcdtest.PathPrefix())
if err != nil { if err != nil {
glog.Fatalf("Unable to get etcd storage: %v", err) glog.Fatalf("Unable to get etcd storage: %v", err)
} }
expEtcdStorage, err := master.NewEtcdStorage(etcdClient, explatest.InterfacesFor, explatest.Version, etcdtest.PathPrefix())
if err != nil {
glog.Fatalf("Unable to get etcd storage for experimental: %v", err)
}
// Master // Master
host, port, err := net.SplitHostPort(strings.TrimLeft(apiServer.URL, "http://")) host, port, err := net.SplitHostPort(strings.TrimLeft(apiServer.URL, "http://"))
@@ -155,11 +160,13 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
// Create a master and install handlers into mux. // Create a master and install handlers into mux.
m := master.New(&master.Config{ m := master.New(&master.Config{
DatabaseStorage: etcdStorage, DatabaseStorage: etcdStorage,
ExpDatabaseStorage: expEtcdStorage,
KubeletClient: fakeKubeletClient{}, KubeletClient: fakeKubeletClient{},
EnableCoreControllers: true, EnableCoreControllers: true,
EnableLogsSupport: false, EnableLogsSupport: false,
EnableProfiling: true, EnableProfiling: true,
APIPrefix: "/api", APIPrefix: "/api",
ExpAPIPrefix: "/experimental",
Authorizer: apiserver.NewAlwaysAllowAuthorizer(), Authorizer: apiserver.NewAlwaysAllowAuthorizer(),
AdmissionControl: admit.NewAlwaysAdmit(), AdmissionControl: admit.NewAlwaysAdmit(),
ReadWritePort: portNumber, ReadWritePort: portNumber,

View File

@@ -32,12 +32,16 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/admission" "github.com/GoogleCloudPlatform/kubernetes/pkg/admission"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta"
"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver" "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
"github.com/GoogleCloudPlatform/kubernetes/pkg/capabilities" "github.com/GoogleCloudPlatform/kubernetes/pkg/capabilities"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
explatest "github.com/GoogleCloudPlatform/kubernetes/pkg/expapi/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/master" "github.com/GoogleCloudPlatform/kubernetes/pkg/master"
"github.com/GoogleCloudPlatform/kubernetes/pkg/master/ports" "github.com/GoogleCloudPlatform/kubernetes/pkg/master/ports"
"github.com/GoogleCloudPlatform/kubernetes/pkg/storage"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
forked "github.com/GoogleCloudPlatform/kubernetes/third_party/forked/coreos/go-etcd/etcd" forked "github.com/GoogleCloudPlatform/kubernetes/third_party/forked/coreos/go-etcd/etcd"
@@ -70,7 +74,9 @@ type APIServer struct {
TLSPrivateKeyFile string TLSPrivateKeyFile string
CertDirectory string CertDirectory string
APIPrefix string APIPrefix string
ExpAPIPrefix string
StorageVersion string StorageVersion string
ExpStorageVersion string
CloudProvider string CloudProvider string
CloudConfigFile string CloudConfigFile string
EventTTL time.Duration EventTTL time.Duration
@@ -114,6 +120,7 @@ func NewAPIServer() *APIServer {
APIRate: 10.0, APIRate: 10.0,
APIBurst: 200, APIBurst: 200,
APIPrefix: "/api", APIPrefix: "/api",
ExpAPIPrefix: "/experimental",
EventTTL: 1 * time.Hour, EventTTL: 1 * time.Hour,
AuthorizationMode: "AlwaysAllow", AuthorizationMode: "AlwaysAllow",
AdmissionControl: "AlwaysAdmit", AdmissionControl: "AlwaysAdmit",
@@ -171,6 +178,7 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.CertDirectory, "cert-dir", s.CertDirectory, "The directory where the TLS certs are located (by default /var/run/kubernetes). "+ fs.StringVar(&s.CertDirectory, "cert-dir", s.CertDirectory, "The directory where the TLS certs are located (by default /var/run/kubernetes). "+
"If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.") "If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.")
fs.StringVar(&s.APIPrefix, "api-prefix", s.APIPrefix, "The prefix for API requests on the server. Default '/api'.") fs.StringVar(&s.APIPrefix, "api-prefix", s.APIPrefix, "The prefix for API requests on the server. Default '/api'.")
fs.StringVar(&s.ExpAPIPrefix, "experimental-prefix", s.ExpAPIPrefix, "The prefix for experimental API requests on the server. Default '/experimental'.")
fs.StringVar(&s.StorageVersion, "storage-version", s.StorageVersion, "The version to store resources with. Defaults to server preferred") fs.StringVar(&s.StorageVersion, "storage-version", s.StorageVersion, "The version to store resources with. Defaults to server preferred")
fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.") fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.")
fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.") fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
@@ -216,7 +224,7 @@ func (s *APIServer) verifyClusterIPFlags() {
} }
} }
func newEtcd(etcdConfigFile string, etcdServerList util.StringList, storageVersion string, pathPrefix string) (etcdStorage tools.StorageInterface, err error) { func newEtcd(etcdConfigFile string, etcdServerList util.StringList, interfacesFunc meta.VersionInterfacesFunc, defaultVersion, storageVersion, pathPrefix string) (etcdStorage storage.Interface, err error) {
var client tools.EtcdClient var client tools.EtcdClient
if etcdConfigFile != "" { if etcdConfigFile != "" {
client, err = etcd.NewClientFromFile(etcdConfigFile) client, err = etcd.NewClientFromFile(etcdConfigFile)
@@ -236,7 +244,10 @@ func newEtcd(etcdConfigFile string, etcdServerList util.StringList, storageVersi
client = etcdClient client = etcdClient
} }
return master.NewEtcdStorage(client, storageVersion, pathPrefix) if storageVersion == "" {
storageVersion = defaultVersion
}
return master.NewEtcdStorage(client, interfacesFunc, storageVersion, pathPrefix)
} }
// Run runs the specified APIServer. This should never exit. // Run runs the specified APIServer. This should never exit.
@@ -291,6 +302,10 @@ func (s *APIServer) Run(_ []string) error {
disableV1 := disableAllAPIs disableV1 := disableAllAPIs
disableV1 = !s.getRuntimeConfigValue("api/v1", !disableV1) disableV1 = !s.getRuntimeConfigValue("api/v1", !disableV1)
// "experimental/v1={true|false} allows users to enable/disable the experimental API.
// This takes preference over api/all, if specified.
enableExp := s.getRuntimeConfigValue("experimental/v1", false)
// TODO: expose same flags as client.BindClientConfigFlags but for a server // TODO: expose same flags as client.BindClientConfigFlags but for a server
clientConfig := &client.Config{ clientConfig := &client.Config{
Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)), Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)),
@@ -301,10 +316,14 @@ func (s *APIServer) Run(_ []string) error {
glog.Fatalf("Invalid server address: %v", err) glog.Fatalf("Invalid server address: %v", err)
} }
etcdStorage, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, s.StorageVersion, s.EtcdPathPrefix) etcdStorage, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, latest.InterfacesFor, latest.Version, s.StorageVersion, s.EtcdPathPrefix)
if err != nil { if err != nil {
glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err) glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
} }
expEtcdStorage, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, explatest.InterfacesFor, explatest.Version, s.ExpStorageVersion, s.EtcdPathPrefix)
if err != nil {
glog.Fatalf("Invalid experimental storage version or misconfigured etcd: %v", err)
}
n := net.IPNet(s.ServiceClusterIPRange) n := net.IPNet(s.ServiceClusterIPRange)
@@ -359,7 +378,9 @@ func (s *APIServer) Run(_ []string) error {
} }
} }
config := &master.Config{ config := &master.Config{
DatabaseStorage: etcdStorage, DatabaseStorage: etcdStorage,
ExpDatabaseStorage: expEtcdStorage,
EventTTL: s.EventTTL, EventTTL: s.EventTTL,
KubeletClient: kubeletClient, KubeletClient: kubeletClient,
ServiceClusterIPRange: &n, ServiceClusterIPRange: &n,
@@ -370,6 +391,7 @@ func (s *APIServer) Run(_ []string) error {
EnableProfiling: s.EnableProfiling, EnableProfiling: s.EnableProfiling,
EnableIndex: true, EnableIndex: true,
APIPrefix: s.APIPrefix, APIPrefix: s.APIPrefix,
ExpAPIPrefix: s.ExpAPIPrefix,
CorsAllowedOriginList: s.CorsAllowedOriginList, CorsAllowedOriginList: s.CorsAllowedOriginList,
ReadWritePort: s.SecurePort, ReadWritePort: s.SecurePort,
PublicAddress: net.IP(s.AdvertiseAddress), PublicAddress: net.IP(s.AdvertiseAddress),
@@ -378,6 +400,7 @@ func (s *APIServer) Run(_ []string) error {
Authorizer: authorizer, Authorizer: authorizer,
AdmissionControl: admissionController, AdmissionControl: admissionController,
DisableV1: disableV1, DisableV1: disableV1,
EnableExp: enableExp,
MasterServiceNamespace: s.MasterServiceNamespace, MasterServiceNamespace: s.MasterServiceNamespace,
ClusterName: s.ClusterName, ClusterName: s.ClusterName,
ExternalHost: s.ExternalHost, ExternalHost: s.ExternalHost,
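Taken together, these apiserver changes gate the new group behind a runtime switch: the experimental resources get their own etcd storage, their own prefix, and are served only when the operator opts in. Assuming the 1.0-era `--runtime-config` flag that `getRuntimeConfigValue` reads, that opt-in would look like `--runtime-config=experimental/v1=true`, with `--experimental-prefix` controlling where the group is mounted (default `/experimental`).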

View File

@@ -319,7 +319,7 @@ func (s *KubeletServer) Run(_ []string) error {
mounter := mount.New() mounter := mount.New()
if s.Containerized { if s.Containerized {
glog.V(2).Info("Running kubelet in containerized mode (experimental)") glog.V(2).Info("Running kubelet in containerized mode (experimental)")
mounter = &mount.NsenterMounter{} mounter = mount.NewNsenterMounter()
} }
var dockerExecHandler dockertools.ExecHandler var dockerExecHandler dockertools.ExecHandler

View File

@@ -30,17 +30,20 @@ import (
kubeletapp "github.com/GoogleCloudPlatform/kubernetes/cmd/kubelet/app" kubeletapp "github.com/GoogleCloudPlatform/kubernetes/cmd/kubelet/app"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver" "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/nodecontroller" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/nodecontroller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/servicecontroller" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/servicecontroller"
"github.com/GoogleCloudPlatform/kubernetes/pkg/controller/replication" "github.com/GoogleCloudPlatform/kubernetes/pkg/controller/replication"
explatest "github.com/GoogleCloudPlatform/kubernetes/pkg/expapi/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor"
kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container" kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/master" "github.com/GoogleCloudPlatform/kubernetes/pkg/master"
"github.com/GoogleCloudPlatform/kubernetes/pkg/service" "github.com/GoogleCloudPlatform/kubernetes/pkg/service"
etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler" "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
@@ -78,14 +81,19 @@ func (h *delegateHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
func runApiServer(etcdClient tools.EtcdClient, addr net.IP, port int, masterServiceNamespace string) { func runApiServer(etcdClient tools.EtcdClient, addr net.IP, port int, masterServiceNamespace string) {
handler := delegateHandler{} handler := delegateHandler{}
etcdStorage, err := master.NewEtcdStorage(etcdClient, "", master.DefaultEtcdPathPrefix) etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.InterfacesFor, latest.Version, master.DefaultEtcdPathPrefix)
if err != nil { if err != nil {
glog.Fatalf("Unable to get etcd storage: %v", err) glog.Fatalf("Unable to get etcd storage: %v", err)
} }
expEtcdStorage, err := master.NewEtcdStorage(etcdClient, explatest.InterfacesFor, explatest.Version, master.DefaultEtcdPathPrefix)
if err != nil {
glog.Fatalf("Unable to get etcd storage for experimental: %v", err)
}
// Create a master and install handlers into mux. // Create a master and install handlers into mux.
m := master.New(&master.Config{ m := master.New(&master.Config{
DatabaseStorage: etcdStorage, DatabaseStorage: etcdStorage,
ExpDatabaseStorage: expEtcdStorage,
KubeletClient: &client.HTTPKubeletClient{ KubeletClient: &client.HTTPKubeletClient{
Client: http.DefaultClient, Client: http.DefaultClient,
Config: &client.KubeletConfig{Port: 10250}, Config: &client.KubeletConfig{Port: 10250},
@@ -95,6 +103,7 @@ func runApiServer(etcdClient tools.EtcdClient, addr net.IP, port int, masterServ
EnableSwaggerSupport: true, EnableSwaggerSupport: true,
EnableProfiling: *enableProfiling, EnableProfiling: *enableProfiling,
APIPrefix: "/api", APIPrefix: "/api",
ExpAPIPrefix: "/experimental",
Authorizer: apiserver.NewAlwaysAllowAuthorizer(), Authorizer: apiserver.NewAlwaysAllowAuthorizer(),
ReadWritePort: port, ReadWritePort: port,
@@ -167,7 +176,7 @@ func main() {
defer util.FlushLogs() defer util.FlushLogs()
glog.Infof("Creating etcd client pointing to %v", *etcdServer) glog.Infof("Creating etcd client pointing to %v", *etcdServer)
etcdClient, err := tools.NewEtcdClientStartServerIfNecessary(*etcdServer) etcdClient, err := etcdstorage.NewEtcdClientStartServerIfNecessary(*etcdServer)
if err != nil { if err != nil {
glog.Fatalf("Failed to connect to etcd: %v", err) glog.Fatalf("Failed to connect to etcd: %v", err)
} }

22
cmd/mungedocs/README.md Normal file
View File

@@ -0,0 +1,22 @@
# Documentation Mungers
Basically this is like lint/gofmt for md docs.
It does the following (a minimal sketch of the pipeline follows below):
- iterate over all files in the given doc root.
- for each file, split it into a slice (mungeLines) of lines (mungeLine).
  - a mungeLine has metadata about each line, typically determined by a 'fast' regex.
    - metadata contains things like 'is inside a preformatted block'
    - contains a markdown header
    - has a link to another file
    - etc.
  - if you have a really slow regex with a lot of backtracking, you might want to write a fast one to limit how often you run the slow one.
- each munger is then called in turn:
  - it is given the mungeLines
  - it creates an entirely new set of mungeLines with its modifications
  - the new set is returned
- the new set is then fed into the next munger.
- in the end we might commit the final mungeLines to the file or not (--verify).
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cmd/mungedocs/README.md?pixel)]()
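To make that pipeline concrete, here is a minimal, self-contained Go sketch of the flow the README describes. The real `mungeLine`/`mungeLines` types in `cmd/mungedocs` carry more metadata (header, link, begin/end tag flags, and so on); everything below is a simplified illustration, not the committed code.

```go
package main

import (
	"fmt"
	"strings"
)

// mungeLine pairs a line of text with cheap, precomputed metadata.
type mungeLine struct {
	data         string
	preformatted bool // set up front by a fast check/regex
}

type mungeLines []mungeLine

// A munger takes the file path and all lines, and returns a brand-new slice.
type mungeFunc func(filePath string, in mungeLines) (mungeLines, error)

// trimTrailing is a toy munger: strip trailing blanks outside code blocks.
func trimTrailing(filePath string, in mungeLines) (mungeLines, error) {
	out := make(mungeLines, 0, len(in))
	for _, ml := range in {
		if !ml.preformatted {
			ml.data = strings.TrimRight(ml.data, " \t")
		}
		out = append(out, ml)
	}
	return out, nil
}

func main() {
	lines := mungeLines{
		{data: "# Title   "},
		{data: "    indented code   ", preformatted: true},
	}
	// Mungers are chained: each one's output feeds the next.
	for _, m := range []mungeFunc{trimTrailing} {
		var err error
		if lines, err = m("README.md", lines); err != nil {
			panic(err)
		}
	}
	for _, ml := range lines {
		fmt.Printf("%q\n", ml.data)
	}
}
```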

View File

@@ -17,43 +17,42 @@ limitations under the License.
package main package main
import ( import (
"bytes"
"fmt" "fmt"
"os" "strings"
"regexp"
) )
var ( const analyticsMungeTag = "GENERATED_ANALYTICS"
beginMungeExp = regexp.QuoteMeta(beginMungeTag("GENERATED_ANALYTICS")) const analyticsLinePrefix = "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/"
endMungeExp = regexp.QuoteMeta(endMungeTag("GENERATED_ANALYTICS"))
analyticsExp = regexp.QuoteMeta("[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/") +
"[^?]*" +
regexp.QuoteMeta("?pixel)]()")
// Matches the analytics blurb, with or without the munge headers. func updateAnalytics(fileName string, mlines mungeLines) (mungeLines, error) {
analyticsRE = regexp.MustCompile(`[\n]*` + analyticsExp + `[\n]?` + var out mungeLines
`|` + `[\n]*` + beginMungeExp + `[^<]*` + endMungeExp) fileName, err := makeRepoRelative(fileName, fileName)
) if err != nil {
return mlines, err
// This adds the analytics link to every .md file.
func checkAnalytics(fileName string, fileBytes []byte) (output []byte, err error) {
fileName = makeRepoRelative(fileName)
desired := fmt.Sprintf(`
`+beginMungeTag("GENERATED_ANALYTICS")+`
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/%s?pixel)]()
`+endMungeTag("GENERATED_ANALYTICS")+`
`, fileName)
if !analyticsRE.MatchString(desired) {
fmt.Printf("%q does not match %q", analyticsRE.String(), desired)
os.Exit(1)
} }
//output = replaceNonPreformattedRegexp(fileBytes, analyticsRE, func(in []byte) []byte {
output = analyticsRE.ReplaceAllFunc(fileBytes, func(in []byte) []byte { link := fmt.Sprintf(analyticsLinePrefix+"%s?pixel)]()", fileName)
return []byte{} insertLines := getMungeLines(link)
}) mlines, err = removeMacroBlock(analyticsMungeTag, mlines)
output = bytes.TrimRight(output, "\n") if err != nil {
output = append(output, []byte(desired)...) return mlines, err
return output, nil }
// Remove floating analytics links not surrounded by the munge tags.
for _, mline := range mlines {
if mline.preformatted || mline.header || mline.beginTag || mline.endTag {
out = append(out, mline)
continue
}
if strings.HasPrefix(mline.data, analyticsLinePrefix) {
continue
}
out = append(out, mline)
}
out = appendMacroBlock(out, analyticsMungeTag)
out, err = updateMacroBlock(out, analyticsMungeTag, insertLines)
if err != nil {
return mlines, err
}
return out, nil
} }

View File

@@ -23,67 +23,71 @@ import (
) )
func TestAnalytics(t *testing.T) { func TestAnalytics(t *testing.T) {
b := beginMungeTag("GENERATED_ANALYTICS")
e := endMungeTag("GENERATED_ANALYTICS")
var cases = []struct { var cases = []struct {
in string in string
out string expected string
}{ }{
{ {
"aoeu", "aoeu",
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n"}, e + "\n"},
{ {
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()", "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()",
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n"}, e + "\n"},
{ {
"aoeu" + "\n" + "aoeu" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n", e + "\n",
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n"}, e + "\n"},
{ {
"aoeu" + "\n" + "\n" + "aoeu" + "\n" + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "\n" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n", e + "\n",
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n"}, e + "\n"},
{ {
"prefix" + "\n" + "prefix" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + e +
"\n" + "suffix", "\n" + "suffix",
"prefix" + "\n" + "suffix" + "\n" + "\n" + "\n" + "prefix" + "\n" + "suffix" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n"}, e + "\n"},
{ {
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n", e + "\n",
"aoeu" + "\n" + "\n" + "\n" + "aoeu" + "\n" + "\n" + "\n" +
beginMungeTag("GENERATED_ANALYTICS") + "\n" + b + "\n" +
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
endMungeTag("GENERATED_ANALYTICS") + "\n"}, e + "\n"},
} }
for _, c := range cases { for i, c := range cases {
out, err := checkAnalytics("path/to/file-name.md", []byte(c.in)) in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
out, err := updateAnalytics("path/to/file-name.md", in)
assert.NoError(t, err) assert.NoError(t, err)
if string(out) != c.out { if !expected.Equal(out) {
t.Errorf("Expected \n\n%v\n\n but got \n\n%v\n\n", c.out, string(out)) t.Errorf("Case %d Expected \n\n%v\n\n but got \n\n%v\n\n", i, expected.String(), out.String())
} }
} }
} }

View File

@@ -17,15 +17,17 @@ limitations under the License.
package main package main
import ( import (
"bytes"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"path"
"regexp" "regexp"
"strings" "strings"
) )
const exampleMungeTag = "EXAMPLE" const exampleToken = "EXAMPLE"
const exampleLineStart = "<!-- BEGIN MUNGE: EXAMPLE"
var exampleMungeTagRE = regexp.MustCompile(beginMungeTag(fmt.Sprintf("%s %s", exampleToken, `(([^ ])*.(yaml|json))`)))
// syncExamples updates all examples in markdown file. // syncExamples updates all examples in markdown file.
// //
@@ -43,75 +45,70 @@ const exampleMungeTag = "EXAMPLE"
// //
// [Download example](../../examples/guestbook/frontend-controller.yaml) // [Download example](../../examples/guestbook/frontend-controller.yaml)
// <!-- END MUNGE: EXAMPLE --> // <!-- END MUNGE: EXAMPLE -->
func syncExamples(filePath string, markdown []byte) ([]byte, error) { func syncExamples(filePath string, mlines mungeLines) (mungeLines, error) {
// find the example syncer begin tag var err error
header := beginMungeTag(fmt.Sprintf("%s %s", exampleMungeTag, `(([^ ])*.(yaml|json))`)) type exampleTag struct {
exampleLinkRE := regexp.MustCompile(header) token string
lines := splitLines(markdown) linkText string
updatedMarkdown, err := updateExampleMacroBlock(filePath, lines, exampleLinkRE, endMungeTag(exampleMungeTag)) fileType string
if err != nil {
return updatedMarkdown, err
} }
return updatedMarkdown, nil exampleTags := []exampleTag{}
// collect all example Tags
for _, mline := range mlines {
if mline.preformatted || !mline.beginTag {
continue
}
line := mline.data
if !strings.HasPrefix(line, exampleLineStart) {
continue
}
match := exampleMungeTagRE.FindStringSubmatch(line)
if len(match) < 4 {
err = fmt.Errorf("Found unparsable EXAMPLE munge line %v", line)
return mlines, err
}
tag := exampleTag{
token: exampleToken + " " + match[1],
linkText: match[1],
fileType: match[3],
}
exampleTags = append(exampleTags, tag)
}
// update all example Tags
for _, tag := range exampleTags {
example, err := exampleContent(filePath, tag.linkText, tag.fileType)
if err != nil {
return mlines, err
}
mlines, err = updateMacroBlock(mlines, tag.token, example)
if err != nil {
return mlines, err
}
}
return mlines, nil
} }
// exampleContent retrieves the content of the file at linkPath // exampleContent retrieves the content of the file at linkPath
func exampleContent(filePath, linkPath, fileType string) (content string, err error) { func exampleContent(filePath, linkPath, fileType string) (mungeLines, error) {
realRoot := path.Join(*rootDir, *repoRoot) + "/" repoRel, err := makeRepoRelative(linkPath, filePath)
path := path.Join(realRoot, path.Dir(filePath), linkPath)
dat, err := ioutil.ReadFile(path)
if err != nil { if err != nil {
return content, err return nil, err
} }
fileRel, err := makeFileRelative(linkPath, filePath)
if err != nil {
return nil, err
}
dat, err := ioutil.ReadFile(repoRel)
if err != nil {
return nil, err
}
// remove leading and trailing spaces and newlines // remove leading and trailing spaces and newlines
trimmedFileContent := strings.TrimSpace(string(dat)) trimmedFileContent := strings.TrimSpace(string(dat))
content = fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, linkPath) content := fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, fileRel)
return out := getMungeLines(content)
} return out, nil
// updateExampleMacroBlock sync the yaml/json example between begin tag and end tag
func updateExampleMacroBlock(filePath string, lines []string, beginMarkExp *regexp.Regexp, endMark string) ([]byte, error) {
var buffer bytes.Buffer
betweenBeginAndEnd := false
for _, line := range lines {
trimmedLine := strings.Trim(line, " \n")
if beginMarkExp.Match([]byte(trimmedLine)) {
if betweenBeginAndEnd {
return nil, fmt.Errorf("found second begin mark while updating macro blocks")
}
betweenBeginAndEnd = true
buffer.WriteString(line)
buffer.WriteString("\n")
match := beginMarkExp.FindStringSubmatch(line)
if len(match) < 4 {
return nil, fmt.Errorf("failed to parse the link in example header")
}
// match[0] is the entire expression; [1] is the link text and [3] is the file type (yaml or json).
linkText := match[1]
fileType := match[3]
example, err := exampleContent(filePath, linkText, fileType)
if err != nil {
return nil, err
}
buffer.WriteString(example)
} else if trimmedLine == endMark {
if !betweenBeginAndEnd {
return nil, fmt.Errorf("found end mark without being mark while updating macro blocks")
}
// Extra newline avoids github markdown bug where comment ends up on same line as last bullet.
buffer.WriteString("\n")
buffer.WriteString(line)
buffer.WriteString("\n")
betweenBeginAndEnd = false
} else {
if !betweenBeginAndEnd {
buffer.WriteString(line)
buffer.WriteString("\n")
}
}
}
if betweenBeginAndEnd {
return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
}
return buffer.Bytes(), nil
} }
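As a concrete illustration of the tag parsing above: `exampleMungeTagRE` is built from `beginMungeTag`, which (judging from the tests) renders a tag as `<!-- BEGIN MUNGE: ... -->`. This standalone snippet writes the pattern out by hand and pulls out the same two pieces `syncExamples` stores in its `exampleTag` struct; it is a sketch under that assumption, not the munger itself.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hand-expanded form of exampleMungeTagRE, assuming beginMungeTag(x)
	// renders as "<!-- BEGIN MUNGE: x -->".
	re := regexp.MustCompile(`<!-- BEGIN MUNGE: EXAMPLE (([^ ])*.(yaml|json)) -->`)
	line := `<!-- BEGIN MUNGE: EXAMPLE ../examples/guestbook/frontend-controller.yaml -->`
	match := re.FindStringSubmatch(line)
	// match[1] is the linked file and match[3] its type (yaml or json),
	// exactly the linkText and fileType fields collected above.
	fmt.Println(match[1]) // ../examples/guestbook/frontend-controller.yaml
	fmt.Println(match[3]) // yaml
}
```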

View File

@@ -35,24 +35,27 @@ spec:
- containerPort: 80 - containerPort: 80
` `
var cases = []struct { var cases = []struct {
in string in string
out string expected string
}{ }{
{"", ""}, {"", ""},
{ {
"<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE -->\n", "<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE testdata/pod.yaml -->\n",
"<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE -->\n", "<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE testdata/pod.yaml -->\n",
}, },
{ {
"<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE -->\n", "<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n",
"<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE -->\n", "<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n",
}, },
} }
repoRoot = ""
for _, c := range cases { for _, c := range cases {
actual, err := syncExamples("mungedocs/filename.md", []byte(c.in)) in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := syncExamples("filename.md", in)
assert.NoError(t, err) assert.NoError(t, err)
if c.out != string(actual) { if !expected.Equal(actual) {
t.Errorf("Expected example \n'%v' but got \n'%v'", c.out, string(actual)) t.Errorf("Expected example \n'%q' but got \n'%q'", expected.String(), actual.String())
} }
} }
} }

View File

@@ -19,53 +19,56 @@ package main
import ( import (
"fmt" "fmt"
"regexp" "regexp"
"strings"
) )
var headerRegex = regexp.MustCompile(`^(#+)\s*(.*)$`) var headerRegex = regexp.MustCompile(`^(#+)\s*(.*)$`)
var whitespaceRegex = regexp.MustCompile(`^\s*$`)
func fixHeaderLines(fileBytes []byte) []byte { func fixHeaderLine(mlines mungeLines, newlines mungeLines, linenum int) mungeLines {
lines := splitLines(fileBytes) var out mungeLines
out := []string{}
for i := range lines { mline := mlines[linenum]
matches := headerRegex.FindStringSubmatch(lines[i]) line := mlines[linenum].data
if matches == nil {
out = append(out, lines[i]) matches := headerRegex.FindStringSubmatch(line)
continue if matches == nil {
} out = append(out, mline)
if i > 0 && !whitespaceRegex.Match([]byte(out[len(out)-1])) { return out
out = append(out, "") }
}
out = append(out, fmt.Sprintf("%s %s", matches[1], matches[2])) // There must be a blank line before the # (unless first line in file)
if i+1 < len(lines) && !whitespaceRegex.Match([]byte(lines[i+1])) { if linenum != 0 {
out = append(out, "") newlen := len(newlines)
if newlines[newlen-1].data != "" {
out = append(out, blankMungeLine)
} }
} }
final := strings.Join(out, "\n")
// Preserve the end of the file. // There must be a space AFTER the ##'s
if len(fileBytes) > 0 && fileBytes[len(fileBytes)-1] == '\n' { newline := fmt.Sprintf("%s %s", matches[1], matches[2])
final += "\n" newmline := newMungeLine(newline)
out = append(out, newmline)
// The next line needs to be a blank line (unless last line in file)
if len(mlines) > linenum+1 && mlines[linenum+1].data != "" {
out = append(out, blankMungeLine)
} }
return []byte(final) return out
} }
// Header lines need whitespace around them and after the #s. // Header lines need whitespace around them and after the #s.
func checkHeaderLines(filePath string, fileBytes []byte) ([]byte, error) { func updateHeaderLines(filePath string, mlines mungeLines) (mungeLines, error) {
fbs := splitByPreformatted(fileBytes) var out mungeLines
fbs = append([]fileBlock{{false, []byte{}}}, fbs...) for i, mline := range mlines {
fbs = append(fbs, fileBlock{false, []byte{}}) if mline.preformatted {
out = append(out, mline)
for i := range fbs {
block := &fbs[i]
if block.preformatted {
continue continue
} }
block.data = fixHeaderLines(block.data) if !mline.header {
out = append(out, mline)
continue
}
newLines := fixHeaderLine(mlines, out, i)
out = append(out, newLines...)
} }
output := []byte{} return out, nil
for _, block := range fbs {
output = append(output, block.data...)
}
return output, nil
} }

View File

@@ -24,8 +24,8 @@ import (
func TestHeaderLines(t *testing.T) { func TestHeaderLines(t *testing.T) {
var cases = []struct { var cases = []struct {
in string in string
out string expected string
}{ }{
{"", ""}, {"", ""},
{ {
@@ -62,10 +62,12 @@ func TestHeaderLines(t *testing.T) {
}, },
} }
for i, c := range cases { for i, c := range cases {
actual, err := checkHeaderLines("filename.md", []byte(c.in)) in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := updateHeaderLines("filename.md", in)
assert.NoError(t, err) assert.NoError(t, err)
if string(actual) != c.out { if !actual.Equal(expected) {
t.Errorf("case[%d]: expected %q got %q", i, c.out, string(actual)) t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String())
} }
} }
} }

View File

@@ -25,29 +25,25 @@ import (
// Looks for lines that have kubectl commands with -f flags and files that // Looks for lines that have kubectl commands with -f flags and files that
// don't exist. // don't exist.
func checkKubectlFileTargets(file string, markdown []byte) ([]byte, error) { func updateKubectlFileTargets(file string, mlines mungeLines) (mungeLines, error) {
inside := false var errors []string
lines := splitLines(markdown) for i, mline := range mlines {
errors := []string{} if !mline.preformatted {
for i := range lines { continue
if strings.HasPrefix(lines[i], "```") {
inside = !inside
} }
if inside { if err := lookForKubectl(mline.data, i); err != nil {
if err := lookForKubectl(lines, i); err != nil { errors = append(errors, err.Error())
errors = append(errors, err.Error())
}
} }
} }
err := error(nil) err := error(nil)
if len(errors) != 0 { if len(errors) != 0 {
err = fmt.Errorf("%s", strings.Join(errors, "\n")) err = fmt.Errorf("%s", strings.Join(errors, "\n"))
} }
return markdown, err return mlines, err
} }
func lookForKubectl(lines []string, lineNum int) error { func lookForKubectl(line string, lineNum int) error {
fields := strings.Fields(lines[lineNum]) fields := strings.Fields(line)
for i := range fields { for i := range fields {
if fields[i] == "kubectl" { if fields[i] == "kubectl" {
return gotKubectl(lineNum, fields, i) return gotKubectl(lineNum, fields, i)
@@ -56,26 +52,26 @@ func lookForKubectl(lines []string, lineNum int) error {
return nil return nil
} }
func gotKubectl(line int, fields []string, fieldNum int) error { func gotKubectl(lineNum int, fields []string, fieldNum int) error {
for i := fieldNum + 1; i < len(fields); i++ { for i := fieldNum + 1; i < len(fields); i++ {
switch fields[i] { switch fields[i] {
case "create", "update", "replace", "delete": case "create", "update", "replace", "delete":
return gotCommand(line, fields, i) return gotCommand(lineNum, fields, i)
} }
} }
return nil return nil
} }
func gotCommand(line int, fields []string, fieldNum int) error { func gotCommand(lineNum int, fields []string, fieldNum int) error {
for i := fieldNum + 1; i < len(fields); i++ { for i := fieldNum + 1; i < len(fields); i++ {
if strings.HasPrefix(fields[i], "-f") { if strings.HasPrefix(fields[i], "-f") {
return gotDashF(line, fields, i) return gotDashF(lineNum, fields, i)
} }
} }
return nil return nil
} }
func gotDashF(line int, fields []string, fieldNum int) error { func gotDashF(lineNum int, fields []string, fieldNum int) error {
target := "" target := ""
if fields[fieldNum] == "-f" { if fields[fieldNum] == "-f" {
if fieldNum+1 == len(fields) { if fieldNum+1 == len(fields) {
@@ -112,9 +108,9 @@ func gotDashF(line int, fields []string, fieldNum int) error {
} }
// If we got here we expect the file to exist. // If we got here we expect the file to exist.
_, err := os.Stat(path.Join(*rootDir, *repoRoot, target)) _, err := os.Stat(path.Join(repoRoot, target))
if os.IsNotExist(err) { if os.IsNotExist(err) {
return fmt.Errorf("%d: target file %q does not exist", line, target) return fmt.Errorf("%d: target file %q does not exist", lineNum, target)
} }
return err return err
} }

View File

@@ -130,9 +130,9 @@ func TestKubectlDashF(t *testing.T) {
}, },
} }
for i, c := range cases { for i, c := range cases {
*rootDir = "" repoRoot = ""
*repoRoot = "" in := getMungeLines(c.in)
_, err := checkKubectlFileTargets("filename.md", []byte(c.in)) _, err := updateKubectlFileTargets("filename.md", in)
if err != nil && c.ok { if err != nil && c.ok {
t.Errorf("case[%d]: expected success, got %v", i, err) t.Errorf("case[%d]: expected success, got %v", i, err)
} }

View File

@@ -29,20 +29,20 @@ var (
// Finds markdown links of the form [foo](bar "alt-text"). // Finds markdown links of the form [foo](bar "alt-text").
linkRE = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`) linkRE = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`)
// Splits the link target into link target and alt-text. // Splits the link target into link target and alt-text.
altTextRE = regexp.MustCompile(`(.*)( ".*")`) altTextRE = regexp.MustCompile(`([^)]*)( ".*")`)
) )
// checkLinks assumes fileBytes has links in markdown syntax, and verifies that func processLink(in string, filePath string) (string, error) {
// any relative links actually point to files that exist. var err error
func checkLinks(filePath string, fileBytes []byte) ([]byte, error) { out := linkRE.ReplaceAllStringFunc(in, func(in string) string {
dir := path.Dir(filePath) match := linkRE.FindStringSubmatch(in)
errors := []string{} if match == nil {
err = fmt.Errorf("Detected this line had a link, but unable to parse, %v", in)
output := replaceNonPreformattedRegexp(fileBytes, linkRE, func(in []byte) (out []byte) { return ""
match := linkRE.FindSubmatch(in) }
// match[0] is the entire expression; [1] is the visible text and [2] is the link text. // match[0] is the entire expression;
visibleText := string(match[1]) visibleText := match[1]
linkText := string(match[2]) linkText := match[2]
altText := "" altText := ""
if parts := altTextRE.FindStringSubmatch(linkText); parts != nil { if parts := altTextRE.FindStringSubmatch(linkText); parts != nil {
linkText = parts[1] linkText = parts[1]
@@ -54,13 +54,10 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
linkText = strings.Trim(linkText, "\n") linkText = strings.Trim(linkText, "\n")
linkText = strings.Trim(linkText, " ") linkText = strings.Trim(linkText, " ")
u, err := url.Parse(linkText) u, terr := url.Parse(linkText)
if err != nil { if terr != nil {
errors = append( err = fmt.Errorf("link %q is unparsable: %v", linkText, terr)
errors, return ""
fmt.Sprintf("link %q is unparsable: %v", linkText, err),
)
return in
} }
if u.Host != "" && u.Host != "github.com" { if u.Host != "" && u.Host != "github.com" {
@@ -72,10 +69,8 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
if u.Path != "" && !strings.HasPrefix(linkText, "TODO:") { if u.Path != "" && !strings.HasPrefix(linkText, "TODO:") {
newPath, targetExists := checkPath(filePath, path.Clean(u.Path)) newPath, targetExists := checkPath(filePath, path.Clean(u.Path))
if !targetExists { if !targetExists {
errors = append( err = fmt.Errorf("%q: target not found", linkText)
errors, return ""
fmt.Sprintf("%q: target not found", linkText),
)
} }
u.Path = newPath u.Path = newPath
if strings.HasPrefix(u.Path, "/") { if strings.HasPrefix(u.Path, "/") {
@@ -89,11 +84,16 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
// Make the visible text show the absolute path if it's // Make the visible text show the absolute path if it's
// not nested in or beneath the current directory. // not nested in or beneath the current directory.
if strings.HasPrefix(u.Path, "..") { if strings.HasPrefix(u.Path, "..") {
suggestedVisibleText = makeRepoRelative(path.Join(dir, u.Path)) dir := path.Dir(filePath)
suggestedVisibleText, err = makeRepoRelative(path.Join(dir, u.Path), filePath)
if err != nil {
return ""
}
} else { } else {
suggestedVisibleText = u.Path suggestedVisibleText = u.Path
} }
if unescaped, err := url.QueryUnescape(u.String()); err != nil { var unescaped string
if unescaped, err = url.QueryUnescape(u.String()); err != nil {
// Remove %28 type stuff, be nice to humans. // Remove %28 type stuff, be nice to humans.
// And don't fight with the toc generator. // And don't fight with the toc generator.
linkText = unescaped linkText = unescaped
@@ -107,18 +107,37 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
visibleText = suggestedVisibleText visibleText = suggestedVisibleText
} }
return []byte(fmt.Sprintf("[%s](%s)", visibleText, linkText+altText)) return fmt.Sprintf("[%s](%s)", visibleText, linkText+altText)
}) })
if out == "" {
return in, err
}
return out, nil
}
// updateLinks assumes lines has links in markdown syntax, and verifies that
// any relative links actually point to files that exist.
func updateLinks(filePath string, mlines mungeLines) (mungeLines, error) {
var out mungeLines
errors := []string{}
for _, mline := range mlines {
if mline.preformatted || !mline.link {
out = append(out, mline)
continue
}
line, err := processLink(mline.data, filePath)
if err != nil {
errors = append(errors, err.Error())
}
ml := newMungeLine(line)
out = append(out, ml)
}
err := error(nil) err := error(nil)
if len(errors) != 0 { if len(errors) != 0 {
err = fmt.Errorf("%s", strings.Join(errors, "\n")) err = fmt.Errorf("%s", strings.Join(errors, "\n"))
} }
return output, err return out, err
}
func makeRepoRelative(filePath string) string {
realRoot := path.Join(*rootDir, *repoRoot) + "/"
return strings.TrimPrefix(filePath, realRoot)
} }
// We have to append together before path.Clean will be able to tell that stuff // We have to append together before path.Clean will be able to tell that stuff

View File

@@ -0,0 +1,76 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
var _ = fmt.Printf
func TestBadLinks(t *testing.T) {
var cases = []struct {
in string
}{
{"[NOTREADME](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/NOTREADME.md)"},
{"[NOTREADME](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/NOTREADME.md)"},
{"[NOTREADME](../NOTREADME.md)"},
}
for _, c := range cases {
in := getMungeLines(c.in)
_, err := updateLinks("filename.md", in)
assert.Error(t, err)
}
}
func TestGoodLinks(t *testing.T) {
var cases = []struct {
in string
expected string
}{
{"", ""},
{"[README](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/README.md)",
"[README](README.md)"},
{"[README](../README.md)",
"[README](README.md)"},
{"[README](https://lwn.net)",
"[README](https://lwn.net)"},
// _ to -
{"[README](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/devel/cli_roadmap.md)",
"[README](../../docs/devel/cli-roadmap.md)"},
// - to _
{"[README](../../docs/devel/api-changes.md)",
"[README](../../docs/devel/api_changes.md)"},
// Does this even make sense? I dunno.
{"[README](/docs/README.md)",
"[README](https://github.com/docs/README.md)"},
{"[README](/GoogleCloudPlatform/kubernetes/tree/master/docs/README.md)",
"[README](../../docs/README.md)"},
}
for i, c := range cases {
in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := updateLinks("filename.md", in)
assert.NoError(t, err)
if !actual.Equal(expected) {
t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String())
}
}
}
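The tests above drive `updateLinks` end to end; the core of `processLink`, though, is just the two regexes from the munger. A minimal sketch of that parsing step in isolation, using the same patterns with simplified handling:

```go
package main

import (
	"fmt"
	"regexp"
)

var (
	// Finds markdown links of the form [foo](bar "alt-text").
	linkRE = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`)
	// Splits the link target into link target and alt-text.
	altTextRE = regexp.MustCompile(`([^)]*)( ".*")`)
)

func main() {
	in := `see [the docs](../docs/README.md "alt text") for details`
	match := linkRE.FindStringSubmatch(in)
	visibleText, linkText := match[1], match[2]
	altText := ""
	if parts := altTextRE.FindStringSubmatch(linkText); parts != nil {
		linkText, altText = parts[1], parts[2]
	}
	fmt.Printf("visible=%q target=%q alt=%q\n", visibleText, linkText, altText)
	// visible="the docs" target="../docs/README.md" alt=" \"alt text\""
}
```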

View File

@@ -17,7 +17,6 @@ limitations under the License.
package main package main
import ( import (
"bytes"
"errors" "errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
@@ -30,28 +29,31 @@ import (
) )
var ( var (
verify = flag.Bool("verify", false, "Exit with status 1 if files would have needed changes but do not change.") verify = flag.Bool("verify", false, "Exit with status 1 if files would have needed changes but do not change.")
rootDir = flag.String("root-dir", "", "Root directory containing documents to be processed.") rootDir = flag.String("root-dir", "", "Root directory containing documents to be processed.")
	repoRoot = flag.String("repo-root", "..", `Appended to --root-dir to get the repository root.	// "repo-root" seems like a dumb name; this is the relative path (from rootDir) to get to the repoRoot
relRoot = flag.String("repo-root", "..", `Appended to --root-dir to get the repository root.
It's done this way so that generally you just have to set --root-dir. It's done this way so that generally you just have to set --root-dir.
Examples: Examples:
* --root-dir=docs/ --repo-root=.. means the repository root is ./ * --root-dir=docs/ --repo-root=.. means the repository root is ./
* --root-dir=/usr/local/long/path/repo/docs/ --repo-root=.. means the repository root is /usr/local/long/path/repo/ * --root-dir=/usr/local/long/path/repo/docs/ --repo-root=.. means the repository root is /usr/local/long/path/repo/
* --root-dir=/usr/local/long/path/repo/docs/admin --repo-root=../.. means the repository root is /usr/local/long/path/repo/`) * --root-dir=/usr/local/long/path/repo/docs/admin --repo-root=../.. means the repository root is /usr/local/long/path/repo/`)
skipMunges = flag.String("skip-munges", "", "Comma-separated list of munges to *not* run. Available munges are: "+availableMungeList) skipMunges = flag.String("skip-munges", "", "Comma-separated list of munges to *not* run. Available munges are: "+availableMungeList)
repoRoot string
ErrChangesNeeded = errors.New("mungedocs: changes required") ErrChangesNeeded = errors.New("mungedocs: changes required")
// All of the munge operations to perform. // All of the munge operations to perform.
// TODO: allow selection from command line. (e.g., just check links in the examples directory.) // TODO: allow selection from command line. (e.g., just check links in the examples directory.)
allMunges = []munge{ allMunges = []munge{
{"remove-whitespace", updateWhitespace},
{"table-of-contents", updateTOC}, {"table-of-contents", updateTOC},
{"unversioned-warning", updateUnversionedWarning}, {"unversioned-warning", updateUnversionedWarning},
{"check-links", checkLinks}, {"md-links", updateLinks},
{"blank-lines-surround-preformatted", checkPreformatted}, {"blank-lines-surround-preformatted", updatePreformatted},
{"header-lines", checkHeaderLines}, {"header-lines", updateHeaderLines},
{"analytics", checkAnalytics}, {"analytics", updateAnalytics},
{"kubectl-dash-f", checkKubectlFileTargets}, {"kubectl-dash-f", updateKubectlFileTargets},
{"sync-examples", syncExamples}, {"sync-examples", syncExamples},
} }
availableMungeList = func() string { availableMungeList = func() string {
@@ -68,7 +70,7 @@ Examples:
// data into a new byte array and return that. // data into a new byte array and return that.
type munge struct { type munge struct {
name string name string
fn func(filePath string, before []byte) (after []byte, err error) fn func(filePath string, mlines mungeLines) (after mungeLines, err error)
} }
type fileProcessor struct { type fileProcessor struct {
@@ -90,12 +92,14 @@ func (f fileProcessor) visit(path string) error {
return err return err
} }
mungeLines := getMungeLines(string(fileBytes))
modificationsMade := false modificationsMade := false
errFound := false errFound := false
filePrinted := false filePrinted := false
for _, munge := range f.munges { for _, munge := range f.munges {
after, err := munge.fn(path, fileBytes) after, err := munge.fn(path, mungeLines)
if err != nil || !bytes.Equal(after, fileBytes) { if err != nil || !after.Equal(mungeLines) {
if !filePrinted { if !filePrinted {
fmt.Printf("%s\n----\n", path) fmt.Printf("%s\n----\n", path)
filePrinted = true filePrinted = true
@@ -110,7 +114,7 @@ func (f fileProcessor) visit(path string) error {
} }
fmt.Println("") fmt.Println("")
} }
fileBytes = after mungeLines = after
} }
// Write out new file with any changes. // Write out new file with any changes.
@@ -119,7 +123,7 @@ func (f fileProcessor) visit(path string) error {
// We're not allowed to make changes. // We're not allowed to make changes.
return ErrChangesNeeded return ErrChangesNeeded
} }
ioutil.WriteFile(path, fileBytes, 0644) ioutil.WriteFile(path, mungeLines.Bytes(), 0644)
} }
if errFound { if errFound {
return ErrChangesNeeded return ErrChangesNeeded
@@ -165,6 +169,7 @@ func wantedMunges() (filtered []munge) {
} }
func main() { func main() {
var err error
flag.Parse() flag.Parse()
if *rootDir == "" { if *rootDir == "" {
@@ -172,11 +177,9 @@ func main() {
os.Exit(1) os.Exit(1)
} }
// Split the root dir of "foo/docs" into "foo" and "docs". We repoRoot = path.Join(*rootDir, *relRoot)
// chdir into "foo" and walk "docs" so the walk is always at a repoRoot, err = filepath.Abs(repoRoot)
// relative path. if err != nil {
stem, leaf := path.Split(strings.TrimRight(*rootDir, "/"))
if err := os.Chdir(stem); err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
os.Exit(2) os.Exit(2)
} }
@@ -194,7 +197,7 @@ func main() {
// changes needed, exit 1 if manual changes are needed. // changes needed, exit 1 if manual changes are needed.
var changesNeeded bool var changesNeeded bool
err := filepath.Walk(leaf, newWalkFunc(&fp, &changesNeeded)) err = filepath.Walk(*rootDir, newWalkFunc(&fp, &changesNeeded))
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
os.Exit(2) os.Exit(2)

View File

@@ -16,40 +16,26 @@ limitations under the License.
package main package main
import "bytes"
// Blocks of ``` need to have blank lines on both sides or they don't look // Blocks of ``` need to have blank lines on both sides or they don't look
// right in HTML. // right in HTML.
func checkPreformatted(filePath string, fileBytes []byte) ([]byte, error) { func updatePreformatted(filePath string, mlines mungeLines) (mungeLines, error) {
f := splitByPreformatted(fileBytes) var out mungeLines
f = append(fileBlocks{{false, []byte{}}}, f...) inpreformat := false
f = append(f, fileBlock{false, []byte{}}) for i, mline := range mlines {
if !inpreformat && mline.preformatted {
output := []byte(nil) if i == 0 || out[len(out)-1].data != "" {
for i := 1; i < len(f)-1; i++ { out = append(out, blankMungeLine)
prev := &f[i-1]
block := &f[i]
next := &f[i+1]
if !block.preformatted {
continue
}
neededSuffix := []byte("\n\n")
for !bytes.HasSuffix(prev.data, neededSuffix) {
prev.data = append(prev.data, '\n')
}
for !bytes.HasSuffix(block.data, neededSuffix) {
block.data = append(block.data, '\n')
if bytes.HasPrefix(next.data, []byte("\n")) {
// don't change the number of newlines unless needed.
next.data = next.data[1:]
if len(next.data) == 0 {
f = append(f[:i+1], f[i+2:]...)
}
} }
// start of a preformat block
inpreformat = true
}
out = append(out, mline)
if inpreformat && !mline.preformatted {
if i >= len(mlines)-2 || mlines[i+1].data != "" {
out = append(out, blankMungeLine)
}
inpreformat = false
} }
} }
for _, block := range f { return out, nil
output = append(output, block.data...)
}
return output, nil
} }

View File

@@ -0,0 +1,57 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestPreformatted(t *testing.T) {
var cases = []struct {
in string
expected string
}{
{"", ""},
{
"```\nbob\n```",
"\n```\nbob\n```\n\n",
},
{
"```\nbob\n```\n```\nnotbob\n```\n",
"\n```\nbob\n```\n\n```\nnotbob\n```\n\n",
},
{
"```bob```\n",
"```bob```\n",
},
{
" ```\n bob\n ```",
"\n ```\n bob\n ```\n\n",
},
}
for i, c := range cases {
in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := updatePreformatted("filename.md", in)
assert.NoError(t, err)
if !actual.Equal(expected) {
t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String())
}
}
}

View File

@@ -17,8 +17,6 @@ limitations under the License.
package main package main
import ( import (
"bufio"
"bytes"
"fmt" "fmt"
"regexp" "regexp"
"strings" "strings"
@@ -26,6 +24,8 @@ import (
const tocMungeTag = "GENERATED_TOC" const tocMungeTag = "GENERATED_TOC"
var r = regexp.MustCompile("[^A-Za-z0-9-]")
// inserts/updates a table of contents in markdown file. // inserts/updates a table of contents in markdown file.
// //
// First, builds a ToC. // First, builds a ToC.
@@ -33,15 +33,11 @@ const tocMungeTag = "GENERATED_TOC"
// the ToC, thereby updating any previously inserted ToC. // the ToC, thereby updating any previously inserted ToC.
// //
// TODO(erictune): put this in own package with tests // TODO(erictune): put this in own package with tests
func updateTOC(filePath string, markdown []byte) ([]byte, error) { func updateTOC(filePath string, mlines mungeLines) (mungeLines, error) {
toc, err := buildTOC(markdown) toc := buildTOC(mlines)
updatedMarkdown, err := updateMacroBlock(mlines, tocMungeTag, toc)
if err != nil { if err != nil {
return nil, err return mlines, err
}
lines := splitLines(markdown)
updatedMarkdown, err := updateMacroBlock(lines, beginMungeTag(tocMungeTag), endMungeTag(tocMungeTag), string(toc))
if err != nil {
return nil, err
} }
return updatedMarkdown, nil return updatedMarkdown, nil
} }
@@ -52,24 +48,19 @@ func updateTOC(filePath string, markdown []byte) ([]byte, error) {
// and builds a table of contents from those. Assumes bookmarks for those will be // and builds a table of contents from those. Assumes bookmarks for those will be
// like #each-word-in-heading-in-lowercases-with-dashes-instead-of-spaces. // like #each-word-in-heading-in-lowercases-with-dashes-instead-of-spaces.
// builds the ToC. // builds the ToC.
func buildTOC(markdown []byte) ([]byte, error) {
var buffer bytes.Buffer func buildTOC(mlines mungeLines) mungeLines {
buffer.WriteString("\n") var out mungeLines
scanner := bufio.NewScanner(bytes.NewReader(markdown))
inBlockQuotes := false for _, mline := range mlines {
for scanner.Scan() { if mline.preformatted || !mline.header {
line := scanner.Text()
match, err := regexp.Match("^```", []byte(line))
if err != nil {
return nil, err
}
if match {
inBlockQuotes = !inBlockQuotes
continue continue
} }
if inBlockQuotes { // Add a blank line after the munge start tag
continue if len(out) == 0 {
out = append(out, blankMungeLine)
} }
line := mline.data
noSharps := strings.TrimLeft(line, "#") noSharps := strings.TrimLeft(line, "#")
numSharps := len(line) - len(noSharps) numSharps := len(line) - len(noSharps)
heading := strings.Trim(noSharps, " \n") heading := strings.Trim(noSharps, " \n")
@@ -77,16 +68,15 @@ func buildTOC(markdown []byte) ([]byte, error) {
indent := strings.Repeat(" ", numSharps-1) indent := strings.Repeat(" ", numSharps-1)
bookmark := strings.Replace(strings.ToLower(heading), " ", "-", -1) bookmark := strings.Replace(strings.ToLower(heading), " ", "-", -1)
// remove symbols (except for -) in bookmarks // remove symbols (except for -) in bookmarks
r := regexp.MustCompile("[^A-Za-z0-9-]")
bookmark = r.ReplaceAllString(bookmark, "") bookmark = r.ReplaceAllString(bookmark, "")
tocLine := fmt.Sprintf("%s- [%s](#%s)\n", indent, heading, bookmark) tocLine := fmt.Sprintf("%s- [%s](#%s)", indent, heading, bookmark)
buffer.WriteString(tocLine) out = append(out, newMungeLine(tocLine))
} }
} }
if err := scanner.Err(); err != nil { // Add a blank line before the munge end tag
return []byte{}, err if len(out) != 0 {
out = append(out, blankMungeLine)
} }
return out
return buffer.Bytes(), nil
} }
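The bookmark transformation is the easiest part of `buildTOC` to get wrong, so here it is reproduced standalone: lowercase the heading, turn spaces into dashes, then strip everything outside `[A-Za-z0-9-]`. The expected output matches the awkward-heading case in the test file below.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Same character class buildTOC uses to scrub bookmarks.
var r = regexp.MustCompile("[^A-Za-z0-9-]")

func main() {
	heading := "Ok, why doesn't this work? ...add 4 *more* `symbols`!"
	bookmark := strings.Replace(strings.ToLower(heading), " ", "-", -1)
	bookmark = r.ReplaceAllString(bookmark, "")
	fmt.Println(bookmark) // ok-why-doesnt-this-work-add-4-more-symbols
}
```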

View File

@@ -24,37 +24,38 @@ import (
func Test_buildTOC(t *testing.T) { func Test_buildTOC(t *testing.T) {
var cases = []struct { var cases = []struct {
in string in string
out string expected string
}{ }{
{"", "\n"}, {"", ""},
{"Lorem ipsum\ndolor sit amet\n", "\n"}, {"Lorem ipsum\ndolor sit amet\n", ""},
{ {
"# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n", "# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n",
"\n- [Title](#title)\n - [Section Heading](#section-heading)\n", "\n- [Title](#title)\n - [Section Heading](#section-heading)\n\n",
}, },
{ {
"# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n```bash\n#!/bin/sh\n```", "# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n```bash\n#!/bin/sh\n```",
"\n- [Title](#title)\n - [Section Heading](#section-heading)\n", "\n- [Title](#title)\n - [Section Heading](#section-heading)\n\n",
}, },
{ {
"# Title\nLorem ipsum \n## Section Heading\n### Ok, why doesn't this work? ...add 4 *more* `symbols`!\ndolor sit amet\n", "# Title\nLorem ipsum \n## Section Heading\n### Ok, why doesn't this work? ...add 4 *more* `symbols`!\ndolor sit amet\n",
"\n- [Title](#title)\n - [Section Heading](#section-heading)\n - [Ok, why doesn't this work? ...add 4 *more* `symbols`!](#ok-why-doesnt-this-work-add-4-more-symbols)\n", "\n- [Title](#title)\n - [Section Heading](#section-heading)\n - [Ok, why doesn't this work? ...add 4 *more* `symbols`!](#ok-why-doesnt-this-work-add-4-more-symbols)\n\n",
}, },
} }
for _, c := range cases { for i, c := range cases {
actual, err := buildTOC([]byte(c.in)) in := getMungeLines(c.in)
assert.NoError(t, err) expected := getMungeLines(c.expected)
if c.out != string(actual) { actual := buildTOC(in)
t.Errorf("Expected TOC '%v' but got '%v'", c.out, string(actual)) if !expected.Equal(actual) {
t.Errorf("Case[%d] Expected TOC '%v' but got '%v'", i, expected.String(), actual.String())
} }
} }
} }
func Test_updateTOC(t *testing.T) { func Test_updateTOC(t *testing.T) {
var cases = []struct { var cases = []struct {
in string in string
out string expected string
}{ }{
{"", ""}, {"", ""},
{ {
@@ -67,10 +68,12 @@ func Test_updateTOC(t *testing.T) {
}, },
} }
for _, c := range cases { for _, c := range cases {
actual, err := updateTOC("filename.md", []byte(c.in)) in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := updateTOC("filename.md", in)
assert.NoError(t, err) assert.NoError(t, err)
if c.out != string(actual) { if !expected.Equal(actual) {
t.Errorf("Expected TOC '%v' but got '%v'", c.out, string(actual)) t.Errorf("Expected TOC '%v' but got '%v'", expected.String(), actual.String())
} }
} }
} }

View File

@@ -20,10 +20,7 @@ import "fmt"
const unversionedWarningTag = "UNVERSIONED_WARNING" const unversionedWarningTag = "UNVERSIONED_WARNING"
var beginUnversionedWarning = beginMungeTag(unversionedWarningTag) const unversionedWarningPre = `
var endUnversionedWarning = endMungeTag(unversionedWarningTag)
const unversionedWarningFmt = `
<!-- BEGIN STRIP_FOR_RELEASE --> <!-- BEGIN STRIP_FOR_RELEASE -->
<img src="http://kubernetes.io/img/warning.png" alt="WARNING" <img src="http://kubernetes.io/img/warning.png" alt="WARNING"
@@ -44,7 +41,11 @@ refer to the docs that go with that version.
<strong> <strong>
The latest 1.0.x release of this document can be found The latest 1.0.x release of this document can be found
[here](http://releases.k8s.io/release-1.0/%s). `
const unversionedWarningFmt = `[here](http://releases.k8s.io/release-1.0/%s).`
const unversionedWarningPost = `
Documentation for other releases can be found at Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io). [releases.k8s.io](http://releases.k8s.io).
@@ -52,21 +53,31 @@ Documentation for other releases can be found at
-- --
<!-- END STRIP_FOR_RELEASE --> <!-- END STRIP_FOR_RELEASE -->
` `
func makeUnversionedWarning(fileName string) string { func makeUnversionedWarning(fileName string) mungeLines {
return fmt.Sprintf(unversionedWarningFmt, fileName) insert := unversionedWarningPre + fmt.Sprintf(unversionedWarningFmt, fileName) + unversionedWarningPost
return getMungeLines(insert)
} }
// inserts/updates a warning for unversioned docs // inserts/updates a warning for unversioned docs
func updateUnversionedWarning(file string, markdown []byte) ([]byte, error) { func updateUnversionedWarning(file string, mlines mungeLines) (mungeLines, error) {
lines := splitLines(markdown) file, err := makeRepoRelative(file, file)
if hasLine(lines, "<!-- TAG IS_VERSIONED -->") { if err != nil {
return mlines, err
}
if hasLine(mlines, "<!-- TAG IS_VERSIONED -->") {
// No warnings on release branches // No warnings on release branches
return markdown, nil return mlines, nil
} }
if !hasMacroBlock(lines, beginUnversionedWarning, endUnversionedWarning) { if !hasMacroBlock(mlines, unversionedWarningTag) {
lines = append([]string{beginUnversionedWarning, endUnversionedWarning}, lines...) mlines = prependMacroBlock(unversionedWarningTag, mlines)
} }
return updateMacroBlock(lines, beginUnversionedWarning, endUnversionedWarning, makeUnversionedWarning(file))
mlines, err = updateMacroBlock(mlines, unversionedWarningTag, makeUnversionedWarning(file))
if err != nil {
return mlines, err
}
return mlines, nil
} }
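A minimal sketch of the composition makeUnversionedWarning now performs, with the long constant bodies abbreviated (stand-in names; the real constants are unversionedWarningPre/Fmt/Post above):

```go
package main

import "fmt"

// Abbreviated stand-ins for the three constants in the diff above.
const pre = "<!-- BEGIN STRIP_FOR_RELEASE -->\n...The latest 1.0.x release of this document can be found\n"
const linkFmt = "[here](http://releases.k8s.io/release-1.0/%s)."
const post = "\nDocumentation for other releases can be found at\n[releases.k8s.io](http://releases.k8s.io).\n<!-- END STRIP_FOR_RELEASE -->\n"

func main() {
	// Mirrors: unversionedWarningPre + fmt.Sprintf(unversionedWarningFmt, fileName) + unversionedWarningPost
	fmt.Print(pre + fmt.Sprintf(linkFmt, "docs/admin/README.md") + post)
}
```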


@@ -23,30 +23,34 @@ import (
) )
func TestUnversionedWarning(t *testing.T) { func TestUnversionedWarning(t *testing.T) {
warningBlock := beginUnversionedWarning + "\n" + makeUnversionedWarning("filename.md") + "\n" + endUnversionedWarning + "\n" beginMark := beginMungeTag(unversionedWarningTag)
endMark := endMungeTag(unversionedWarningTag)
warningString := makeUnversionedWarning("filename.md").String()
warningBlock := beginMark + "\n" + warningString + endMark + "\n"
var cases = []struct { var cases = []struct {
in string in string
out string expected string
}{ }{
{"", warningBlock}, {"", warningBlock},
{ {
"Foo\nBar\n", "Foo\nBar\n",
warningBlock + "Foo\nBar\n", warningBlock + "\nFoo\nBar\n",
}, },
{ {
"Foo\n<!-- TAG IS_VERSIONED -->\nBar", "Foo\n<!-- TAG IS_VERSIONED -->\nBar",
"Foo\n<!-- TAG IS_VERSIONED -->\nBar", "Foo\n<!-- TAG IS_VERSIONED -->\nBar",
}, },
{ {
beginUnversionedWarning + "\n" + endUnversionedWarning + "\n", beginMark + "\n" + endMark + "\n",
warningBlock, warningBlock,
}, },
{ {
beginUnversionedWarning + "\n" + "something\n" + endUnversionedWarning + "\n", beginMark + "\n" + "something\n" + endMark + "\n",
warningBlock, warningBlock,
}, },
{ {
"Foo\n" + beginUnversionedWarning + "\n" + endUnversionedWarning + "\nBar\n", "Foo\n" + beginMark + "\n" + endMark + "\nBar\n",
"Foo\n" + warningBlock + "Bar\n", "Foo\n" + warningBlock + "Bar\n",
}, },
{ {
@@ -55,10 +59,12 @@ func TestUnversionedWarning(t *testing.T) {
}, },
} }
for i, c := range cases { for i, c := range cases {
actual, err := updateUnversionedWarning("filename.md", []byte(c.in)) in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := updateUnversionedWarning("filename.md", in)
assert.NoError(t, err) assert.NoError(t, err)
if string(actual) != c.out { if !expected.Equal(actual) {
t.Errorf("case[%d]: expected %q got %q", i, c.out, string(actual)) t.Errorf("case[%d]: expected %v got %v", i, expected.String(), actual.String())
} }
} }
} }


@@ -17,83 +17,140 @@ limitations under the License.
package main package main
import ( import (
"bytes"
"fmt" "fmt"
"path"
"path/filepath"
"regexp" "regexp"
"strings" "strings"
"unicode"
) )
// Splits a document up into a slice of lines.
func splitLines(document []byte) []string {
lines := strings.Split(string(document), "\n")
// Skip trailing empty string from Split-ing
if len(lines) > 0 && lines[len(lines)-1] == "" {
lines = lines[:len(lines)-1]
}
return lines
}
// Replaces the text between matching "beginMark" and "endMark" within the // Replaces the text between matching "beginMark" and "endMark" within the
// document represented by "lines" with "insertThis". // document represented by "lines" with "insertThis".
// //
// Delimiters should occupy own line. // Delimiters should occupy own line.
// Returns copy of document with modifications. // Returns copy of document with modifications.
func updateMacroBlock(lines []string, beginMark, endMark, insertThis string) ([]byte, error) { func updateMacroBlock(mlines mungeLines, token string, insertThis mungeLines) (mungeLines, error) {
var buffer bytes.Buffer beginMark := beginMungeTag(token)
endMark := endMungeTag(token)
var out mungeLines
betweenBeginAndEnd := false betweenBeginAndEnd := false
for _, line := range lines { for _, mline := range mlines {
trimmedLine := strings.Trim(line, " \n") if mline.preformatted && !betweenBeginAndEnd {
if trimmedLine == beginMark { out = append(out, mline)
continue
}
line := mline.data
if mline.beginTag && line == beginMark {
if betweenBeginAndEnd { if betweenBeginAndEnd {
return nil, fmt.Errorf("found second begin mark while updating macro blocks") return nil, fmt.Errorf("found second begin mark while updating macro blocks")
} }
betweenBeginAndEnd = true betweenBeginAndEnd = true
buffer.WriteString(line) out = append(out, mline)
buffer.WriteString("\n") } else if mline.endTag && line == endMark {
} else if trimmedLine == endMark {
if !betweenBeginAndEnd { if !betweenBeginAndEnd {
return nil, fmt.Errorf("found end mark without being mark while updating macro blocks") return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks")
} }
buffer.WriteString(insertThis)
// Extra newline avoids github markdown bug where comment ends up on same line as last bullet.
buffer.WriteString("\n")
buffer.WriteString(line)
buffer.WriteString("\n")
betweenBeginAndEnd = false betweenBeginAndEnd = false
out = append(out, insertThis...)
out = append(out, mline)
} else { } else {
if !betweenBeginAndEnd { if !betweenBeginAndEnd {
buffer.WriteString(line) out = append(out, mline)
buffer.WriteString("\n")
} }
} }
} }
if betweenBeginAndEnd { if betweenBeginAndEnd {
return nil, fmt.Errorf("never found closing end mark while updating macro blocks") return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
} }
return buffer.Bytes(), nil return out, nil
} }
// Tests that a document, represented as a slice of lines, has a line. Ignores // Tests that a document, represented as a slice of lines, has a line. Ignores
// leading and trailing space. // leading and trailing space.
func hasLine(lines []string, needle string) bool { func hasLine(lines mungeLines, needle string) bool {
for _, line := range lines { for _, mline := range lines {
trimmedLine := strings.Trim(line, " \n") haystack := strings.TrimSpace(mline.data)
if trimmedLine == needle { if haystack == needle {
return true return true
} }
} }
return false return false
} }
func removeMacroBlock(token string, mlines mungeLines) (mungeLines, error) {
beginMark := beginMungeTag(token)
endMark := endMungeTag(token)
var out mungeLines
betweenBeginAndEnd := false
for _, mline := range mlines {
if mline.preformatted {
out = append(out, mline)
continue
}
line := mline.data
if mline.beginTag && line == beginMark {
if betweenBeginAndEnd {
return nil, fmt.Errorf("found second begin mark while updating macro blocks")
}
betweenBeginAndEnd = true
} else if mline.endTag && line == endMark {
if !betweenBeginAndEnd {
return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks")
}
betweenBeginAndEnd = false
} else {
if !betweenBeginAndEnd {
out = append(out, mline)
}
}
}
if betweenBeginAndEnd {
return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
}
return out, nil
}
// Add a macro block to the beginning of a set of lines
func prependMacroBlock(token string, mlines mungeLines) mungeLines {
beginLine := newMungeLine(beginMungeTag(token))
endLine := newMungeLine(endMungeTag(token))
out := mungeLines{beginLine, endLine}
if len(mlines) > 0 && mlines[0].data != "" {
out = append(out, blankMungeLine)
}
return append(out, mlines...)
}
// Add a macro block to the end of a set of lines
func appendMacroBlock(mlines mungeLines, token string) mungeLines {
beginLine := newMungeLine(beginMungeTag(token))
endLine := newMungeLine(endMungeTag(token))
out := mlines
if len(mlines) > 0 && mlines[len(mlines)-1].data != "" {
out = append(out, blankMungeLine)
}
return append(out, beginLine, endLine)
}
// Tests that a document, represented as a slice of lines, has a macro block. // Tests that a document, represented as a slice of lines, has a macro block.
func hasMacroBlock(lines []string, begin string, end string) bool { func hasMacroBlock(lines mungeLines, token string) bool {
beginMark := beginMungeTag(token)
endMark := endMungeTag(token)
foundBegin := false foundBegin := false
for _, line := range lines { for _, mline := range lines {
trimmedLine := strings.Trim(line, " \n") if mline.preformatted {
continue
}
if !mline.beginTag && !mline.endTag {
continue
}
line := mline.data
switch { switch {
case !foundBegin && trimmedLine == begin: case !foundBegin && line == beginMark:
foundBegin = true foundBegin = true
case foundBegin && trimmedLine == end: case foundBegin && line == endMark:
return true return true
} }
} }
@@ -112,72 +169,123 @@ func endMungeTag(desc string) string {
return fmt.Sprintf("<!-- END MUNGE: %s -->", desc) return fmt.Sprintf("<!-- END MUNGE: %s -->", desc)
} }
// Calls 'replace' for all sections of the document not in ``` / ``` blocks. So type mungeLine struct {
// that you don't have false positives inside those blocks. data string
func replaceNonPreformatted(input []byte, replace func([]byte) []byte) []byte { preformatted bool
f := splitByPreformatted(input) header bool
output := []byte(nil) link bool
for _, block := range f { beginTag bool
if block.preformatted { endTag bool
output = append(output, block.data...) }
} else {
output = append(output, replace(block.data)...) type mungeLines []mungeLine
func (m1 mungeLines) Equal(m2 mungeLines) bool {
if len(m1) != len(m2) {
return false
}
for i := range m1 {
if m1[i].data != m2[i].data {
return false
} }
} }
return output return true
} }
type fileBlock struct { func (mlines mungeLines) String() string {
preformatted bool slice := []string{}
data []byte for _, mline := range mlines {
slice = append(slice, mline.data)
}
s := strings.Join(slice, "\n")
// We need to tack on an extra newline at the end of the file
return s + "\n"
} }
type fileBlocks []fileBlock func (mlines mungeLines) Bytes() []byte {
return []byte(mlines.String())
}
var ( var (
// Finds all preformatted block start/stops. // Finds all preformatted block start/stops.
preformatRE = regexp.MustCompile("^\\s*```") preformatRE = regexp.MustCompile("^\\s*```")
notPreformatRE = regexp.MustCompile("^\\s*```.*```") notPreformatRE = regexp.MustCompile("^\\s*```.*```")
// Is this line a header?
mlHeaderRE = regexp.MustCompile(`^#`)
// Is there a link on this line?
mlLinkRE = regexp.MustCompile(`\[[^]]*\]\([^)]*\)`)
beginTagRE = regexp.MustCompile(`<!-- BEGIN MUNGE:`)
endTagRE = regexp.MustCompile(`<!-- END MUNGE:`)
blankMungeLine = newMungeLine("")
) )
func splitByPreformatted(input []byte) fileBlocks { // Does not set 'preformatted'
f := fileBlocks{} func newMungeLine(line string) mungeLine {
return mungeLine{
data: line,
header: mlHeaderRE.MatchString(line),
link: mlLinkRE.MatchString(line),
beginTag: beginTagRE.MatchString(line),
endTag: endTagRE.MatchString(line),
}
}
cur := []byte(nil) func trimRightSpace(in string) string {
return strings.TrimRightFunc(in, unicode.IsSpace)
}
// Splits a document up into a slice of lines.
func splitLines(document string) []string {
lines := strings.Split(document, "\n")
// Skip trailing empty string from Split-ing
if len(lines) > 0 && lines[len(lines)-1] == "" {
lines = lines[:len(lines)-1]
}
return lines
}
func getMungeLines(in string) mungeLines {
var out mungeLines
preformatted := false preformatted := false
// SplitAfter keeps the newline, so you don't have to worry about
// omitting it on the last line or anything. Also, the documentation lines := splitLines(in)
// claims it's unicode safe. // We indicate if any given line is inside a preformatted block or
for _, line := range bytes.SplitAfter(input, []byte("\n")) { // outside a preformatted block
for _, line := range lines {
if !preformatted { if !preformatted {
if preformatRE.Match(line) && !notPreformatRE.Match(line) { if preformatRE.MatchString(line) && !notPreformatRE.MatchString(line) {
if len(cur) > 0 {
f = append(f, fileBlock{false, cur})
}
cur = []byte{}
preformatted = true preformatted = true
} }
cur = append(cur, line...)
} else { } else {
cur = append(cur, line...) if preformatRE.MatchString(line) {
if preformatRE.Match(line) {
if len(cur) > 0 {
f = append(f, fileBlock{true, cur})
}
cur = []byte{}
preformatted = false preformatted = false
} }
} }
ml := newMungeLine(line)
ml.preformatted = preformatted
out = append(out, ml)
} }
if len(cur) > 0 { return out
f = append(f, fileBlock{preformatted, cur})
}
return f
} }
// As above, but further uses exp to parse the non-preformatted sections. // filePath is the file we are looking for
func replaceNonPreformattedRegexp(input []byte, exp *regexp.Regexp, replace func([]byte) []byte) []byte { // inFile is the file where we found the link. So if we are processing
return replaceNonPreformatted(input, func(in []byte) []byte { // /path/to/repoRoot/docs/admin/README.md and are looking for
return exp.ReplaceAllFunc(in, replace) // ../../file.json we can find that location.
}) // In many cases filePath and processingFile may be the same
func makeRepoRelative(filePath string, processingFile string) (string, error) {
if filePath, err := filepath.Rel(repoRoot, filePath); err == nil {
return filePath, nil
}
cwd := path.Dir(processingFile)
return filepath.Rel(repoRoot, path.Join(cwd, filePath))
}
func makeFileRelative(filePath string, processingFile string) (string, error) {
cwd := path.Dir(processingFile)
if filePath, err := filepath.Rel(cwd, filePath); err == nil {
return filePath, nil
}
return filepath.Rel(cwd, path.Join(cwd, filePath))
} }
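Taken together, the new mungeLines helpers implement a simple contract: text between `<!-- BEGIN MUNGE: X -->` and `<!-- END MUNGE: X -->` is owned by the munger and can be regenerated in place. A self-contained toy version of that replace step (string-based; the real updateMacroBlock works line-by-line and skips preformatted regions):

```go
package main

import (
	"fmt"
	"strings"
)

func beginTag(desc string) string { return fmt.Sprintf("<!-- BEGIN MUNGE: %s -->", desc) }
func endTag(desc string) string   { return fmt.Sprintf("<!-- END MUNGE: %s -->", desc) }

// replaceBlock regenerates the text between the begin/end tags,
// leaving the tags themselves in place. Toy stand-in only: no error
// handling and no awareness of preformatted fenced regions.
func replaceBlock(doc, token, insert string) string {
	b, e := beginTag(token), endTag(token)
	start := strings.Index(doc, b)
	end := strings.Index(doc, e)
	if start < 0 || end < 0 || end < start {
		return doc
	}
	return doc[:start+len(b)] + "\n" + insert + "\n" + doc[end:]
}

func main() {
	doc := "intro\n" + beginTag("TOC") + "\nstale\n" + endTag("TOC") + "\noutro\n"
	fmt.Print(replaceBlock(doc, "TOC", "- [Title](#title)"))
}
```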


@@ -17,13 +17,17 @@ limitations under the License.
package main package main
import ( import (
"reflect" "strings"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func Test_updateMacroBlock(t *testing.T) { func Test_updateMacroBlock(t *testing.T) {
token := "TOKEN"
BEGIN := beginMungeTag(token)
END := endMungeTag(token)
var cases = []struct { var cases = []struct {
in string in string
out string out string
@@ -31,149 +35,135 @@ func Test_updateMacroBlock(t *testing.T) {
{"", ""}, {"", ""},
{"Lorem ipsum\ndolor sit amet\n", {"Lorem ipsum\ndolor sit amet\n",
"Lorem ipsum\ndolor sit amet\n"}, "Lorem ipsum\ndolor sit amet\n"},
{"Lorem ipsum \n BEGIN\ndolor\nEND\nsit amet\n", {"Lorem ipsum \n" + BEGIN + "\ndolor\n" + END + "\nsit amet\n",
"Lorem ipsum \n BEGIN\nfoo\n\nEND\nsit amet\n"}, "Lorem ipsum \n" + BEGIN + "\nfoo\n" + END + "\nsit amet\n"},
} }
for _, c := range cases { for _, c := range cases {
actual, err := updateMacroBlock(splitLines([]byte(c.in)), "BEGIN", "END", "foo\n") in := getMungeLines(c.in)
expected := getMungeLines(c.out)
actual, err := updateMacroBlock(in, token, getMungeLines("foo"))
assert.NoError(t, err) assert.NoError(t, err)
if c.out != string(actual) { if !expected.Equal(actual) {
t.Errorf("Expected '%v' but got '%v'", c.out, string(actual)) t.Errorf("Expected '%v' but got '%v'", expected.String(), expected.String())
} }
} }
} }
func Test_updateMacroBlock_errors(t *testing.T) { func Test_updateMacroBlock_errors(t *testing.T) {
token := "TOKEN"
b := beginMungeTag(token)
e := endMungeTag(token)
var cases = []struct { var cases = []struct {
in string in string
}{ }{
{"BEGIN\n"}, {b + "\n"},
{"blah\nBEGIN\nblah"}, {"blah\n" + b + "\nblah"},
{"END\n"}, {e + "\n"},
{"blah\nEND\nblah\n"}, {"blah\n" + e + "\nblah\n"},
{"END\nBEGIN"}, {e + "\n" + b},
{"BEGIN\nEND\nEND"}, {b + "\n" + e + "\n" + e},
{"BEGIN\nBEGIN\nEND"}, {b + "\n" + b + "\n" + e},
{"BEGIN\nBEGIN\nEND\nEND"}, {b + "\n" + b + "\n" + e + "\n" + e},
} }
for _, c := range cases { for _, c := range cases {
_, err := updateMacroBlock(splitLines([]byte(c.in)), "BEGIN", "END", "foo") in := getMungeLines(c.in)
_, err := updateMacroBlock(in, token, getMungeLines("foo"))
assert.Error(t, err) assert.Error(t, err)
} }
} }
func TestHasLine(t *testing.T) { func TestHasLine(t *testing.T) {
cases := []struct { cases := []struct {
lines []string haystack string
needle string needle string
expected bool expected bool
}{ }{
{[]string{"abc", "def", "ghi"}, "abc", true}, {"abc\ndef\nghi", "abc", true},
{[]string{" abc", "def", "ghi"}, "abc", true}, {" abc\ndef\nghi", "abc", true},
{[]string{"abc ", "def", "ghi"}, "abc", true}, {"abc \ndef\nghi", "abc", true},
{[]string{"\n abc", "def", "ghi"}, "abc", true}, {"\n abc\ndef\nghi", "abc", true},
{[]string{"abc \n", "def", "ghi"}, "abc", true}, {"abc \n\ndef\nghi", "abc", true},
{[]string{"abc", "def", "ghi"}, "def", true}, {"abc\ndef\nghi", "def", true},
{[]string{"abc", "def", "ghi"}, "ghi", true}, {"abc\ndef\nghi", "ghi", true},
{[]string{"abc", "def", "ghi"}, "xyz", false}, {"abc\ndef\nghi", "xyz", false},
} }
for i, c := range cases { for i, c := range cases {
if hasLine(c.lines, c.needle) != c.expected { in := getMungeLines(c.haystack)
if hasLine(in, c.needle) != c.expected {
t.Errorf("case[%d]: %q, expected %t, got %t", i, c.needle, c.expected, !c.expected) t.Errorf("case[%d]: %q, expected %t, got %t", i, c.needle, c.expected, !c.expected)
} }
} }
} }
func TestHasMacroBlock(t *testing.T) { func TestHasMacroBlock(t *testing.T) {
token := "<<<"
b := beginMungeTag(token)
e := endMungeTag(token)
cases := []struct { cases := []struct {
lines []string lines []string
begin string
end string
expected bool expected bool
}{ }{
{[]string{"<<<", ">>>"}, "<<<", ">>>", true}, {[]string{b, e}, true},
{[]string{"<<<", "abc", ">>>"}, "<<<", ">>>", true}, {[]string{b, "abc", e}, true},
{[]string{"<<<", "<<<", "abc", ">>>"}, "<<<", ">>>", true}, {[]string{b, b, "abc", e}, true},
{[]string{"<<<", "abc", ">>>", ">>>"}, "<<<", ">>>", true}, {[]string{b, "abc", e, e}, true},
{[]string{"<<<", ">>>", "<<<", ">>>"}, "<<<", ">>>", true}, {[]string{b, e, b, e}, true},
{[]string{"<<<"}, "<<<", ">>>", false}, {[]string{b}, false},
{[]string{">>>"}, "<<<", ">>>", false}, {[]string{e}, false},
{[]string{"<<<", "abc"}, "<<<", ">>>", false}, {[]string{b, "abc"}, false},
{[]string{"abc", ">>>"}, "<<<", ">>>", false}, {[]string{"abc", e}, false},
} }
for i, c := range cases { for i, c := range cases {
if hasMacroBlock(c.lines, c.begin, c.end) != c.expected { in := getMungeLines(strings.Join(c.lines, "\n"))
t.Errorf("case[%d]: %q,%q, expected %t, got %t", i, c.begin, c.end, c.expected, !c.expected) if hasMacroBlock(in, token) != c.expected {
t.Errorf("case[%d]: expected %t, got %t", i, c.expected, !c.expected)
} }
} }
} }
func TestReplaceNonPreformatted(t *testing.T) { func TestAppendMacroBlock(t *testing.T) {
token := "<<<"
b := beginMungeTag(token)
e := endMungeTag(token)
cases := []struct { cases := []struct {
in string in []string
out string expected []string
}{ }{
{"aoeu", ""}, {[]string{}, []string{b, e}},
{"aoeu\n```\naoeu\n```\naoeu", "```\naoeu\n```\n"}, {[]string{"bob"}, []string{"bob", "", b, e}},
{"ao\neu\n```\naoeu\n\n\n", "```\naoeu\n\n\n"}, {[]string{b, e}, []string{b, e, "", b, e}},
{"aoeu ```aoeu``` aoeu", ""},
} }
for i, c := range cases { for i, c := range cases {
out := string(replaceNonPreformatted([]byte(c.in), func([]byte) []byte { return nil })) in := getMungeLines(strings.Join(c.in, "\n"))
if out != c.out { expected := getMungeLines(strings.Join(c.expected, "\n"))
t.Errorf("%v: got %q, wanted %q", i, out, c.out) out := appendMacroBlock(in, token)
if !out.Equal(expected) {
t.Errorf("Case[%d]: expected '%q' but got '%q'", i, expected.String(), out.String())
} }
} }
} }
func TestReplaceNonPreformattedNoChange(t *testing.T) { func TestPrependMacroBlock(t *testing.T) {
token := "<<<"
b := beginMungeTag(token)
e := endMungeTag(token)
cases := []struct { cases := []struct {
in string in []string
expected []string
}{ }{
{"aoeu"}, {[]string{}, []string{b, e}},
{"aoeu\n```\naoeu\n```\naoeu"}, {[]string{"bob"}, []string{b, e, "", "bob"}},
{"aoeu\n\n```\n\naoeu\n\n```\n\naoeu"}, {[]string{b, e}, []string{b, e, "", b, e}},
{"ao\neu\n```\naoeu\n\n\n"},
{"aoeu ```aoeu``` aoeu"},
{"aoeu\n```\naoeu\n```"},
{"aoeu\n```\naoeu\n```\n"},
{"aoeu\n```\naoeu\n```\n\n"},
} }
for i, c := range cases { for i, c := range cases {
out := string(replaceNonPreformatted([]byte(c.in), func(in []byte) []byte { return in })) in := getMungeLines(strings.Join(c.in, "\n"))
if out != c.in { expected := getMungeLines(strings.Join(c.expected, "\n"))
t.Errorf("%v: got %q, wanted %q", i, out, c.in) out := prependMacroBlock(token, in)
} if !out.Equal(expected) {
} t.Errorf("Case[%d]: expected '%q' but got '%q'", i, expected.String(), out.String())
}
func TestReplaceNonPreformattedCallOrder(t *testing.T) {
cases := []struct {
in string
expect []string
}{
{"aoeu", []string{"aoeu"}},
{"aoeu\n```\naoeu\n```\naoeu", []string{"aoeu\n", "aoeu"}},
{"aoeu\n\n```\n\naoeu\n\n```\n\naoeu", []string{"aoeu\n\n", "\naoeu"}},
{"ao\neu\n```\naoeu\n\n\n", []string{"ao\neu\n"}},
{"aoeu ```aoeu``` aoeu", []string{"aoeu ```aoeu``` aoeu"}},
{"aoeu\n```\naoeu\n```", []string{"aoeu\n"}},
{"aoeu\n```\naoeu\n```\n", []string{"aoeu\n"}},
{"aoeu\n```\naoeu\n```\n\n", []string{"aoeu\n", "\n"}},
}
for i, c := range cases {
got := []string{}
replaceNonPreformatted([]byte(c.in), func(in []byte) []byte {
got = append(got, string(in))
return in
})
if e, a := c.expect, got; !reflect.DeepEqual(e, a) {
t.Errorf("%v: got %q, wanted %q", i, a, e)
} }
} }
} }


@@ -0,0 +1,31 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// Remove all trailing whitespace
func updateWhitespace(file string, mlines mungeLines) (mungeLines, error) {
var out mungeLines
for _, mline := range mlines {
if mline.preformatted {
out = append(out, mline)
continue
}
newline := trimRightSpace(mline.data)
out = append(out, newMungeLine(newline))
}
return out, nil
}
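A minimal sketch of the rule this new file encodes — trim trailing whitespace on normal lines, leave preformatted lines untouched — assuming simplified fence tracking (the real code relies on the preformatted flag set by getMungeLines):

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

func main() {
	lines := []string{"bob \t", "```", "  keep  ", "```", "end  "}
	pre := false
	for _, l := range lines {
		// Toggle on fence lines and print them as-is.
		if strings.HasPrefix(strings.TrimSpace(l), "```") {
			pre = !pre
			fmt.Printf("%q\n", l)
			continue
		}
		if pre {
			fmt.Printf("%q\n", l) // inside a fence: untouched
		} else {
			fmt.Printf("%q\n", strings.TrimRightFunc(l, unicode.IsSpace))
		}
	}
}
```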


@@ -0,0 +1,45 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"testing"
"github.com/stretchr/testify/assert"
)
func Test_updateWhiteSpace(t *testing.T) {
var cases = []struct {
in string
expected string
}{
{"", ""},
{"\n", "\n"},
{" \t \t \n", "\n"},
{"bob \t", "bob"},
{"```\n \n```\n", "```\n \n```\n"},
}
for i, c := range cases {
in := getMungeLines(c.in)
expected := getMungeLines(c.expected)
actual, err := updateWhitespace("filename.md", in)
assert.NoError(t, err)
if !expected.Equal(actual) {
t.Errorf("Case[%d] Expected Whitespace '%v' but got '%v'", i, string(expected.Bytes()), string(actual.Bytes()))
}
}
}


@@ -499,12 +499,36 @@ _kubectl_scale()
two_word_flags+=("-o") two_word_flags+=("-o")
flags+=("--replicas=") flags+=("--replicas=")
flags+=("--resource-version=") flags+=("--resource-version=")
flags+=("--timeout=")
must_have_one_flag=() must_have_one_flag=()
must_have_one_flag+=("--replicas=") must_have_one_flag+=("--replicas=")
must_have_one_noun=() must_have_one_noun=()
} }
_kubectl_attach()
{
last_command="kubectl_attach"
commands=()
flags=()
two_word_flags=()
flags_with_completion=()
flags_completion=()
flags+=("--container=")
two_word_flags+=("-c")
flags+=("--help")
flags+=("-h")
flags+=("--stdin")
flags+=("-i")
flags+=("--tty")
flags+=("-t")
must_have_one_flag=()
must_have_one_noun=()
}
_kubectl_exec() _kubectl_exec()
{ {
last_command="kubectl_exec" last_command="kubectl_exec"
@@ -946,6 +970,7 @@ _kubectl()
commands+=("logs") commands+=("logs")
commands+=("rolling-update") commands+=("rolling-update")
commands+=("scale") commands+=("scale")
commands+=("attach")
commands+=("exec") commands+=("exec")
commands+=("port-forward") commands+=("port-forward")
commands+=("proxy") commands+=("proxy")


@@ -0,0 +1,8 @@
FROM busybox
MAINTAINER Muhammed Uluyol "uluyol@google.com"
ADD dc /diurnal
RUN chown root:users /diurnal && chmod 755 /diurnal
ENTRYPOINT ["/diurnal"]

24
contrib/diurnal/Makefile Normal file

@@ -0,0 +1,24 @@
.PHONY: build push vet test clean
TAG = 0.5
REPO = uluyol/kube-diurnal
BIN = dc
dc: dc.go time.go
CGO_ENABLED=0 godep go build -a -installsuffix cgo -o dc dc.go time.go
vet:
godep go vet .
test:
godep go test .
build: $(BIN)
docker build -t $(REPO):$(TAG) .
push:
docker push $(REPO):$(TAG)
clean:
rm -f $(BIN)

44
contrib/diurnal/README.md Normal file

@@ -0,0 +1,44 @@
## Diurnal Controller
This controller adjusts the number of replicas maintained by a replication controller over the course of the day, based on a provided list of times of day (ISO 8601) and corresponding replica counts. It should itself be run under a replication controller in the same namespace as the replication controllers it manipulates.
For example, to set the replica counts of the pods with the labels "tier=backend,track=canary" to 10 at noon UTC and 6 at midnight UTC, we can use `-labels tier=backend,track=canary -times 00:00Z,12:00Z -counts 6,10`. An example replication controller config can be found [here](example-diurnal-controller.yaml).
Instead of providing replica counts and times of day directly, you may use a script like the one below to generate them using mathematical functions.
```python
from math import *
import os
import sys
def _day_to_2pi(t):
return float(t) * 2 * pi / (24*3600)
def main(args):
if len(args) < 3:
print "Usage: %s sample_interval func" % (args[0],)
print "func should be a function of the variable t, where t will range from 0"
print "to 2pi over the course of the day"
sys.exit(1)
sampling_interval = int(args[1])
exec "def f(t): return " + args[2]
i = 0
times = []
counts = []
while i < 24*60*60:
hours = i / 3600
left = i - hours*3600
min = left / 60
sec = left - min*60
times.append("%dh%dm%ds" % (hours, min, sec))
count = int(round(f(_day_to_2pi(i))))
counts.append(str(count))
i += sampling_interval
print "-times %s -counts %s" % (",".join(times), ",".join(counts))
if __name__ == "__main__":
main(sys.argv)
```
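The same sampling idea can be sketched in Go for readers who prefer it (a hypothetical generator, not part of this commit; a 6-hour interval and f(t) = 10 + 5*sin(t) are assumed, and the output format mirrors the script above):

```go
package main

import (
	"fmt"
	"math"
	"strings"
)

func main() {
	const interval = 6 * 3600 // sample every 6 hours
	var times, counts []string
	for sec := 0; sec < 24*3600; sec += interval {
		// Map seconds-into-the-day onto [0, 2*pi), as the script does.
		t := float64(sec) * 2 * math.Pi / (24 * 3600)
		c := int(math.Round(10 + 5*math.Sin(t)))
		times = append(times, fmt.Sprintf("%dh%dm%ds", sec/3600, (sec%3600)/60, sec%60))
		counts = append(counts, fmt.Sprint(c))
	}
	fmt.Printf("-times %s -counts %s\n", strings.Join(times, ","), strings.Join(counts, ","))
}
```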
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/contrib/diurnal/README.md?pixel)]()

283
contrib/diurnal/dc.go Normal file

@@ -0,0 +1,283 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// An external diurnal controller for kubernetes. With this, it's possible to manage
// replica counts that are known to vary throughout the day.
package main
import (
"errors"
"flag"
"fmt"
"os"
"os/signal"
"sort"
"strconv"
"strings"
"syscall"
"time"
kclient "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/golang/glog"
)
const dayPeriod = 24 * time.Hour
type timeCount struct {
time time.Duration
count int
}
func (tc timeCount) String() string {
h := tc.time / time.Hour
m := (tc.time % time.Hour) / time.Minute
s := (tc.time % time.Minute) / time.Second
if m == 0 && s == 0 {
return fmt.Sprintf("(%02dZ, %d)", h, tc.count)
} else if s == 0 {
return fmt.Sprintf("(%02d:%02dZ, %d)", h, m, tc.count)
}
return fmt.Sprintf("(%02d:%02d:%02dZ, %d)", h, m, s, tc.count)
}
type byTime []timeCount
func (tc byTime) Len() int { return len(tc) }
func (tc byTime) Swap(i, j int) { tc[i], tc[j] = tc[j], tc[i] }
func (tc byTime) Less(i, j int) bool { return tc[i].time < tc[j].time }
func timeMustParse(layout, s string) time.Time {
t, err := time.Parse(layout, s)
if err != nil {
panic(err)
}
return t
}
// first argument is a format string equivalent to HHMMSS. See time.Parse for details.
var epoch = timeMustParse("150405", "000000")
func parseTimeRelative(s string) (time.Duration, error) {
t, err := parseTimeISO8601(s)
if err != nil {
return 0, fmt.Errorf("unable to parse %s: %v", s, err)
}
return (t.Sub(epoch) + dayPeriod) % dayPeriod, nil
}
func parseTimeCounts(times string, counts string) ([]timeCount, error) {
ts := strings.Split(times, ",")
cs := strings.Split(counts, ",")
if len(ts) != len(cs) {
return nil, fmt.Errorf("provided %d times but %d replica counts", len(ts), len(cs))
}
var tc []timeCount
for i := range ts {
t, err := parseTimeRelative(ts[i])
if err != nil {
return nil, err
}
c, err := strconv.ParseInt(cs[i], 10, 64)
if err != nil {
return nil, err
}
if c < 0 {
return nil, errors.New("counts must be non-negative")
}
tc = append(tc, timeCount{t, int(c)})
}
sort.Sort(byTime(tc))
return tc, nil
}
type Scaler struct {
timeCounts []timeCount
selector labels.Selector
start time.Time
pos int
done chan struct{}
}
var posError = errors.New("could not find position")
func findPos(tc []timeCount, cur int, offset time.Duration) int {
first := true
for i := cur; i != cur || first; i = (i + 1) % len(tc) {
if tc[i].time > offset {
return i
}
first = false
}
return 0
}
func (s *Scaler) setCount(c int) {
glog.Infof("scaling to %d replicas", c)
rcList, err := client.ReplicationControllers(namespace).List(s.selector)
if err != nil {
glog.Errorf("could not get replication controllers: %v", err)
return
}
for _, rc := range rcList.Items {
rc.Spec.Replicas = c
if _, err = client.ReplicationControllers(namespace).Update(&rc); err != nil {
glog.Errorf("unable to scale replication controller: %v", err)
}
}
}
func (s *Scaler) timeOffset() time.Duration {
return time.Since(s.start) % dayPeriod
}
func (s *Scaler) curpos(offset time.Duration) int {
return findPos(s.timeCounts, s.pos, offset)
}
func (s *Scaler) scale() {
for {
select {
case <-s.done:
return
default:
offset := s.timeOffset()
s.pos = s.curpos(offset)
if s.timeCounts[s.pos].time < offset {
time.Sleep(dayPeriod - offset)
continue
}
time.Sleep(s.timeCounts[s.pos].time - offset)
s.setCount(s.timeCounts[s.pos].count)
}
}
}
func (s *Scaler) Start() error {
now := time.Now().UTC()
s.start = time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location())
if *startNow {
s.start = now
}
// set initial count
pos := s.curpos(s.timeOffset())
// add the len to avoid getting a negative index
pos = (pos - 1 + len(s.timeCounts)) % len(s.timeCounts)
s.setCount(s.timeCounts[pos].count)
s.done = make(chan struct{})
go s.scale()
return nil
}
func safeclose(c chan<- struct{}) (err error) {
defer func() {
if e := recover(); e != nil {
err = e.(error)
}
}()
close(c)
return nil
}
func (s *Scaler) Stop() error {
if err := safeclose(s.done); err != nil {
return errors.New("already stopped scaling")
}
return nil
}
var (
counts = flag.String("counts", "", "replica counts, must have at least one (csv)")
times = flag.String("times", "", "times to set replica counts relative to UTC following ISO 8601 (csv)")
userLabels = flag.String("labels", "", "replication controller labels, syntax should follow https://godoc.org/github.com/GoogleCloudPlatform/kubernetes/pkg/labels#Parse")
startNow = flag.Bool("now", false, "times are relative to now not 0:00 UTC (for demos)")
local = flag.Bool("local", false, "set to true if running on local machine not within cluster")
localPort = flag.Int("localport", 8001, "port that kubectl proxy is running on (local must be true)")
namespace string = os.Getenv("POD_NAMESPACE")
client *kclient.Client
)
const usageNotes = `
counts and times must both be set and be of equal length. Example usage:
diurnal -labels name=redis-slave -times 00:00:00Z,06:00:00Z -counts 3,9
diurnal -labels name=redis-slave -times 0600-0500,0900-0500,1700-0500,2200-0500 -counts 15,20,13,6
`
func usage() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
flag.PrintDefaults()
fmt.Fprint(os.Stderr, usageNotes)
}
func main() {
flag.Usage = usage
flag.Parse()
var (
cfg *kclient.Config
err error
)
if *local {
cfg = &kclient.Config{Host: fmt.Sprintf("http://localhost:%d", *localPort)}
} else {
cfg, err = kclient.InClusterConfig()
if err != nil {
glog.Errorf("failed to load config: %v", err)
flag.Usage()
os.Exit(1)
}
}
client, err = kclient.New(cfg)
if err != nil {
glog.Fatalf("failed to create client: %v", err)
}
selector, err := labels.Parse(*userLabels)
if err != nil {
glog.Fatal(err)
}
tc, err := parseTimeCounts(*times, *counts)
if err != nil {
glog.Fatal(err)
}
if namespace == "" {
glog.Fatal("POD_NAMESPACE is not set. Set to the namespace of the replication controller if running locally.")
}
scaler := Scaler{timeCounts: tc, selector: selector}
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGQUIT,
syscall.SIGTERM)
glog.Info("starting scaling")
if err := scaler.Start(); err != nil {
glog.Fatal(err)
}
<-sigChan
glog.Info("stopping scaling")
if err := scaler.Stop(); err != nil {
glog.Fatal(err)
}
}
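For readers tracing the time handling: parseTimeRelative's wrap-around arithmetic can be reproduced in isolation. A minimal sketch (the ISO 8601 layout below is an assumption for illustration; parseTimeISO8601 itself is not shown in this diff):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const day = 24 * time.Hour
	// Mirrors parseTimeRelative: parse against a fixed epoch, then
	// reduce modulo 24h so every time becomes an offset into the day.
	epoch, _ := time.Parse("150405", "000000")
	t, _ := time.Parse("15:04Z07:00", "00:02Z")
	fmt.Println((t.Sub(epoch) + day) % day) // 2m0s
}
```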

100
contrib/diurnal/dc_test.go Normal file

@@ -0,0 +1,100 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"testing"
"time"
)
func equalsTimeCounts(a, b []timeCount) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if a[i].time != b[i].time || a[i].count != b[i].count {
return false
}
}
return true
}
func TestParseTimeCounts(t *testing.T) {
cases := []struct {
times string
counts string
out []timeCount
err bool
}{
{
"00:00:01Z,00:02Z,03:00Z,04:00Z", "1,4,1,8", []timeCount{
{time.Second, 1},
{2 * time.Minute, 4},
{3 * time.Hour, 1},
{4 * time.Hour, 8},
}, false,
},
{
"00:01Z,00:02Z,00:05Z,00:03Z", "1,2,3,4", []timeCount{
{1 * time.Minute, 1},
{2 * time.Minute, 2},
{3 * time.Minute, 4},
{5 * time.Minute, 3},
}, false,
},
{"00:00Z,00:01Z", "1,0", []timeCount{{0, 1}, {1 * time.Minute, 0}}, false},
{"00:00+00,00:01+00:00,01:00Z", "0,-1,0", nil, true},
{"-00:01Z,01:00Z", "0,1", nil, true},
{"00:00Z", "1,2,3", nil, true},
}
for i, test := range cases {
out, err := parseTimeCounts(test.times, test.counts)
if test.err && err == nil {
t.Errorf("case %d: expected error", i)
} else if !test.err && err != nil {
t.Errorf("case %d: unexpected error: %v", i, err)
}
if !test.err {
if !equalsTimeCounts(test.out, out) {
t.Errorf("case %d: expected timeCounts: %v got %v", i, test.out, out)
}
}
}
}
func TestFindPos(t *testing.T) {
cases := []struct {
tc []timeCount
cur int
offset time.Duration
expected int
}{
{[]timeCount{{0, 1}, {4, 0}}, 1, 1, 1},
{[]timeCount{{0, 1}, {4, 0}}, 0, 1, 1},
{[]timeCount{{0, 1}, {4, 0}}, 1, 70, 0},
{[]timeCount{{5, 1}, {100, 9000}, {4000, 2}, {10000, 4}}, 0, 0, 0},
{[]timeCount{{5, 1}, {100, 9000}, {4000, 2}, {10000, 4}}, 1, 5000, 3},
{[]timeCount{{5, 1}, {100, 9000}, {4000, 2}, {10000, 4}}, 2, 10000000, 0},
{[]timeCount{{5, 1}, {100, 9000}, {4000, 2}, {10000, 4}}, 0, 50, 1},
}
for i, test := range cases {
pos := findPos(test.tc, test.cur, test.offset)
if pos != test.expected {
t.Errorf("case %d: expected %d got %d", i, test.expected, pos)
}
}
}


@@ -0,0 +1,27 @@
apiVersion: v1
kind: ReplicationController
metadata:
labels:
name: diurnal-controller
name: diurnal-controller
spec:
replicas: 1
selector:
name: diurnal-controller
template:
metadata:
labels:
name: diurnal-controller
spec:
containers:
- args: ["-labels", "name=redis-slave", "-times", "00:00Z,00:02Z,01:00Z,02:30Z", "-counts", "3,7,6,9"]
resources:
limits:
cpu: 0.1
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: uluyol/kube-diurnal:0.5
name: diurnal-controller

Some files were not shown because too many files have changed in this diff.