diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index a68293e5fed..170d184aab2 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -34,6 +34,10 @@ "Comment": "release-96", "Rev": "98c78185197025f935947caac56a7b6d022f89d2" }, + { + "ImportPath": "github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata", + "Rev": "e34a32f9b0ecbc0784865fb2d47f3818c09521d4" + }, { "ImportPath": "github.com/Sirupsen/logrus", "Comment": "v0.6.2-10-g51fe59a", @@ -246,93 +250,93 @@ }, { "ImportPath": "github.com/google/cadvisor/api", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/cache/memory", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/collector", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/container", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/events", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/fs", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/healthz", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": 
"78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/http", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/info/v2", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/manager", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/metrics", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/pages", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/storage", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/summary", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/utils", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": 
"78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/validate", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/cadvisor/version", - "Comment": "0.15.1", - "Rev": "ec588def40e1bb59f28f5a293b279f6762d13d44" + "Comment": "0.16.0-51-g78419de", + "Rev": "78419de3ea9c2d23cb04ec9d63f8899de34ebd43" }, { "ImportPath": "github.com/google/go-github/github", @@ -425,10 +429,10 @@ "ImportPath": "github.com/mitchellh/mapstructure", "Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf" }, - { - "ImportPath": "github.com/mxk/go-flowrate/flowrate", - "Rev": "cca7078d478f8520f85629ad7c68962d31ed7682" - }, + { + "ImportPath": "github.com/mxk/go-flowrate/flowrate", + "Rev": "cca7078d478f8520f85629ad7c68962d31ed7682" + }, { "ImportPath": "github.com/onsi/ginkgo", "Comment": "v1.2.0-6-gd981d36", @@ -573,6 +577,10 @@ "ImportPath": "gopkg.in/yaml.v2", "Rev": "d466437aa4adc35830964cffc5b5f262c63ddcb4" }, + { + "ImportPath": "gopkg.in/natefinch/lumberjack.v2/", + "Rev": "20b71e5b60d756d3d2f80def009790325acc2b23" + }, { "ImportPath": "speter.net/go/exp/math/dec/inf", "Rev": "42ca6cd68aa922bc3f32f1e056e61b65945d9ad7" diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata/metadata.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata/metadata.go new file mode 100644 index 00000000000..b007cde6366 --- /dev/null +++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata/metadata.go @@ -0,0 +1,279 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "strings" + "sync" + "time" + + "google.golang.org/cloud/internal" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var metaClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 750 * time.Millisecond, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 750 * time.Millisecond, + }, + }, +} + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". 
+// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func Get(suffix string) (string, error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv("GCE_METADATA_HOST") + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = "169.254.169.254" + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + res, err := metaClient.Do(req) + if err != nil { + return "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", err + } + return string(all), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var onGCE struct { + sync.Mutex + set bool + v bool +} + +// OnGCE reports whether this process is running on 
Google Compute Engine. +func OnGCE() bool { + defer onGCE.Unlock() + onGCE.Lock() + if onGCE.set { + return onGCE.v + } + onGCE.set = true + + // We use the DNS name of the metadata service here instead of the IP address + // because we expect that to fail faster in the not-on-GCE case. + res, err := metaClient.Get("http://metadata.google.internal") + if err != nil { + return false + } + onGCE.v = res.Header.Get("Metadata-Flavor") == "Google" + return onGCE.v +} + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return projID.get() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return projNum.get() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { + return getTrimmed("instance/network-interfaces/0/ip") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { + return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func Hostname() (string, error) { + return getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { + var s []string + j, err := Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { + return instID.get() +} + +// InstanceName returns the current VM's instance ID string. 
+func InstanceName() (string, error) { + host, err := Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { + zone, err := getTrimmed("instance/zone") + // zone is of the form "projects//zones/". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } + +func lines(suffix string) ([]string, error) { + j, err := Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func InstanceAttributeValue(attr string) (string, error) { + return Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. 
+func ProjectAttributeValue(attr string) (string, error) { + return Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go b/Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go index bac878e4fd5..8546e358de4 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go @@ -39,6 +39,7 @@ const ( attributesApi = "attributes" versionApi = "version" psApi = "ps" + customMetricsApi = "appmetrics" ) // Interface for a cAdvisor API version @@ -305,7 +306,7 @@ func (self *version2_0) Version() string { } func (self *version2_0) SupportedRequestTypes() []string { - return []string{versionApi, attributesApi, eventsApi, machineApi, summaryApi, statsApi, specApi, storageApi, psApi} + return []string{versionApi, attributesApi, eventsApi, machineApi, summaryApi, statsApi, specApi, storageApi, psApi, customMetricsApi} } func (self *version2_0) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error { @@ -364,6 +365,32 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma contStats[name] = convertStats(cont) } return writeResult(contStats, w) + case customMetricsApi: + containerName := getContainerName(request) + glog.V(4).Infof("Api - Custom Metrics: Looking for metrics for container %q, options %+v", containerName, opt) + conts, err := m.GetRequestedContainersInfo(containerName, opt) + if err != nil { + return err + } + specs, err := m.GetContainerSpec(containerName, opt) + if err 
!= nil { + return err + } + contMetrics := make(map[string]map[string][]info.MetricVal, 0) + for _, cont := range conts { + metrics := map[string][]info.MetricVal{} + contStats := convertStats(cont) + spec := specs[cont.Name] + for _, contStat := range contStats { + for _, ms := range spec.CustomMetrics { + if contStat.HasCustomMetrics && !contStat.CustomMetrics[ms.Name].Timestamp.IsZero() { + metrics[ms.Name] = append(metrics[ms.Name], contStat.CustomMetrics[ms.Name]) + } + } + } + contMetrics[containerName] = metrics + } + return writeResult(contMetrics, w) case specApi: containerName := getContainerName(request) glog.V(4).Infof("Api - Spec for container %q, options %+v", containerName, opt) @@ -412,12 +439,13 @@ func convertStats(cont *info.ContainerInfo) []v2.ContainerStats { stats := []v2.ContainerStats{} for _, val := range cont.Stats { stat := v2.ContainerStats{ - Timestamp: val.Timestamp, - HasCpu: cont.Spec.HasCpu, - HasMemory: cont.Spec.HasMemory, - HasNetwork: cont.Spec.HasNetwork, - HasFilesystem: cont.Spec.HasFilesystem, - HasDiskIo: cont.Spec.HasDiskIo, + Timestamp: val.Timestamp, + HasCpu: cont.Spec.HasCpu, + HasMemory: cont.Spec.HasMemory, + HasNetwork: cont.Spec.HasNetwork, + HasFilesystem: cont.Spec.HasFilesystem, + HasDiskIo: cont.Spec.HasDiskIo, + HasCustomMetrics: cont.Spec.HasCustomMetrics, } if stat.HasCpu { stat.Cpu = val.Cpu @@ -434,6 +462,9 @@ func convertStats(cont *info.ContainerInfo) []v2.ContainerStats { if stat.HasDiskIo { stat.DiskIo = val.DiskIo } + if stat.HasCustomMetrics { + stat.CustomMetrics = val.CustomMetrics + } // TODO(rjnagal): Handle load stats. 
stats = append(stats, stat) } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/collector/collector_manager.go b/Godeps/_workspace/src/github.com/google/cadvisor/collector/collector_manager.go index a3df12ccbd9..bac016b33b2 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/collector/collector_manager.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/collector/collector_manager.go @@ -19,14 +19,15 @@ import ( "strings" "time" - "github.com/google/cadvisor/info/v2" + "github.com/google/cadvisor/info/v1" ) -type collectorManager struct { - collectors []*collectorData -} +const metricLabelPrefix = "io.cadvisor.metric." -var _ CollectorManager = &collectorManager{} +type GenericCollectorManager struct { + Collectors []*collectorData + NextCollectionTime time.Time +} type collectorData struct { collector Collector @@ -35,33 +36,54 @@ type collectorData struct { // Returns a new CollectorManager that is thread-compatible. func NewCollectorManager() (CollectorManager, error) { - return &collectorManager{ - collectors: []*collectorData{}, + return &GenericCollectorManager{ + Collectors: []*collectorData{}, + NextCollectionTime: time.Now(), }, nil } -func (cm *collectorManager) RegisterCollector(collector Collector) error { - cm.collectors = append(cm.collectors, &collectorData{ +func GetCollectorConfigs(labels map[string]string) map[string]string { + configs := map[string]string{} + for k, v := range labels { + if strings.HasPrefix(k, metricLabelPrefix) { + name := strings.TrimPrefix(k, metricLabelPrefix) + configs[name] = v + } + } + return configs +} + +func (cm *GenericCollectorManager) RegisterCollector(collector Collector) error { + cm.Collectors = append(cm.Collectors, &collectorData{ collector: collector, nextCollectionTime: time.Now(), }) return nil } -func (cm *collectorManager) Collect() (time.Time, []v2.Metric, error) { +func (cm *GenericCollectorManager) GetSpec() ([]v1.MetricSpec, error) { + metricSpec := []v1.MetricSpec{} + for 
_, c := range cm.Collectors { + specs := c.collector.GetSpec() + metricSpec = append(metricSpec, specs...) + } + + return metricSpec, nil +} + +func (cm *GenericCollectorManager) Collect() (time.Time, map[string]v1.MetricVal, error) { var errors []error // Collect from all collectors that are ready. var next time.Time - var metrics []v2.Metric - for _, c := range cm.collectors { + metrics := map[string]v1.MetricVal{} + for _, c := range cm.Collectors { if c.nextCollectionTime.Before(time.Now()) { - nextCollection, newMetrics, err := c.collector.Collect() + var err error + c.nextCollectionTime, metrics, err = c.collector.Collect(metrics) if err != nil { errors = append(errors, err) } - metrics = append(metrics, newMetrics...) - c.nextCollectionTime = nextCollection } // Keep track of the next collector that will be ready. @@ -69,7 +91,7 @@ func (cm *collectorManager) Collect() (time.Time, []v2.Metric, error) { next = c.nextCollectionTime } } - + cm.NextCollectionTime = next return next, metrics, compileErrors(errors) } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/collector/collector_manager_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/collector/collector_manager_test.go index 49877032e4f..2ffb0087e47 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/collector/collector_manager_test.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/collector/collector_manager_test.go @@ -18,7 +18,7 @@ import ( "testing" "time" - "github.com/google/cadvisor/info/v2" + "github.com/google/cadvisor/info/v1" "github.com/stretchr/testify/assert" ) @@ -28,17 +28,21 @@ type fakeCollector struct { collectedFrom int } -func (fc *fakeCollector) Collect() (time.Time, []v2.Metric, error) { +func (fc *fakeCollector) Collect(metric map[string]v1.MetricVal) (time.Time, map[string]v1.MetricVal, error) { fc.collectedFrom++ - return fc.nextCollectionTime, []v2.Metric{}, fc.err + return fc.nextCollectionTime, metric, fc.err } func (fc *fakeCollector) 
Name() string { return "fake-collector" } +func (fc *fakeCollector) GetSpec() []v1.MetricSpec { + return []v1.MetricSpec{} +} + func TestCollect(t *testing.T) { - cm := &collectorManager{} + cm := &GenericCollectorManager{} firstTime := time.Now().Add(-time.Hour) secondTime := time.Now().Add(time.Hour) diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/collector/config.go b/Godeps/_workspace/src/github.com/google/cadvisor/collector/config.go new file mode 100644 index 00000000000..fc1207031ef --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/collector/config.go @@ -0,0 +1,50 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package collector + +import ( + "github.com/google/cadvisor/info/v1" + "time" +) + +type Config struct { + //the endpoint to hit to scrape metrics + Endpoint string `json:"endpoint"` + + //holds information about different metrics that can be collected + MetricsConfig []MetricConfig `json:"metrics_config"` +} + +// metricConfig holds information extracted from the config file about a metric +type MetricConfig struct { + //the name of the metric + Name string `json:"name"` + + //enum type for the metric type + MetricType v1.MetricType `json:"metric_type"` + + // metric units to display on UI and in storage (eg: MB, cores) + // this is only used for display. 
+ Units string `json:"units"` + + //data type of the metric (eg: int, float) + DataType v1.DataType `json:"data_type"` + + //the frequency at which the metric should be collected + PollingFrequency time.Duration `json:"polling_frequency"` + + //the regular expression that can be used to extract the metric + Regex string `json:"regex"` +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/collector/config/sample_config.json b/Godeps/_workspace/src/github.com/google/cadvisor/collector/config/sample_config.json new file mode 100644 index 00000000000..d1f9000cab4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/collector/config/sample_config.json @@ -0,0 +1,34 @@ +{ + "endpoint" : "http://localhost:8000/nginx_status", + "metrics_config" : [ + { "name" : "activeConnections", + "metric_type" : "gauge", + "units" : "number of active connections", + "data_type" : "int", + "polling_frequency" : 10, + "regex" : "Active connections: ([0-9]+)" + }, + { "name" : "reading", + "metric_type" : "gauge", + "units" : "number of reading connections", + "data_type" : "int", + "polling_frequency" : 10, + "regex" : "Reading: ([0-9]+) .*" + }, + { "name" : "writing", + "metric_type" : "gauge", + "data_type" : "int", + "units" : "number of writing connections", + "polling_frequency" : 10, + "regex" : ".*Writing: ([0-9]+).*" + }, + { "name" : "waiting", + "metric_type" : "gauge", + "units" : "number of waiting connections", + "data_type" : "int", + "polling_frequency" : 10, + "regex" : ".*Waiting: ([0-9]+)" + } + ] + +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/collector/fakes.go b/Godeps/_workspace/src/github.com/google/cadvisor/collector/fakes.go index 388f3fc0bfe..6b11acbb3ea 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/collector/fakes.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/collector/fakes.go @@ -17,7 +17,7 @@ package collector import ( "time" - "github.com/google/cadvisor/info/v2" + 
"github.com/google/cadvisor/info/v1" ) type FakeCollectorManager struct { @@ -27,7 +27,11 @@ func (fkm *FakeCollectorManager) RegisterCollector(collector Collector) error { return nil } -func (fkm *FakeCollectorManager) Collect() (time.Time, []v2.Metric, error) { - var zero time.Time - return zero, []v2.Metric{}, nil +func (fkm *FakeCollectorManager) GetSpec() ([]v1.MetricSpec, error) { + return []v1.MetricSpec{}, nil +} + +func (fkm *FakeCollectorManager) Collect(metric map[string]v1.MetricVal) (time.Time, map[string]v1.MetricVal, error) { + var zero time.Time + return zero, metric, nil } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/collector/generic_collector.go b/Godeps/_workspace/src/github.com/google/cadvisor/collector/generic_collector.go new file mode 100644 index 00000000000..6f724cc0230 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/collector/generic_collector.go @@ -0,0 +1,165 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "github.com/google/cadvisor/info/v1" +) + +type GenericCollector struct { + //name of the collector + name string + + //holds information extracted from the config file for a collector + configFile Config + + //holds information necessary to extract metrics + info *collectorInfo +} + +type collectorInfo struct { + //minimum polling frequency among all metrics + minPollingFrequency time.Duration + + //regular expresssions for all metrics + regexps []*regexp.Regexp +} + +//Returns a new collector using the information extracted from the configfile +func NewCollector(collectorName string, configFile []byte) (*GenericCollector, error) { + var configInJSON Config + err := json.Unmarshal(configFile, &configInJSON) + if err != nil { + return nil, err + } + + //TODO : Add checks for validity of config file (eg : Accurate JSON fields) + + if len(configInJSON.MetricsConfig) == 0 { + return nil, fmt.Errorf("No metrics provided in config") + } + + minPollFrequency := time.Duration(0) + regexprs := make([]*regexp.Regexp, len(configInJSON.MetricsConfig)) + + for ind, metricConfig := range configInJSON.MetricsConfig { + // Find the minimum specified polling frequency in metric config. + if metricConfig.PollingFrequency != 0 { + if minPollFrequency == 0 || metricConfig.PollingFrequency < minPollFrequency { + minPollFrequency = metricConfig.PollingFrequency + } + } + + regexprs[ind], err = regexp.Compile(metricConfig.Regex) + if err != nil { + return nil, fmt.Errorf("Invalid regexp %v for metric %v", metricConfig.Regex, metricConfig.Name) + } + } + + // Minimum supported polling frequency is 1s. 
+ minSupportedFrequency := 1 * time.Second + if minPollFrequency < minSupportedFrequency { + minPollFrequency = minSupportedFrequency + } + + return &GenericCollector{ + name: collectorName, + configFile: configInJSON, + info: &collectorInfo{ + minPollingFrequency: minPollFrequency, + regexps: regexprs}, + }, nil +} + +//Returns name of the collector +func (collector *GenericCollector) Name() string { + return collector.name +} + +func (collector *GenericCollector) configToSpec(config MetricConfig) v1.MetricSpec { + return v1.MetricSpec{ + Name: config.Name, + Type: config.MetricType, + Format: config.DataType, + Units: config.Units, + } +} + +func (collector *GenericCollector) GetSpec() []v1.MetricSpec { + specs := []v1.MetricSpec{} + for _, metricConfig := range collector.configFile.MetricsConfig { + spec := collector.configToSpec(metricConfig) + specs = append(specs, spec) + } + return specs +} + +//Returns collected metrics and the next collection time of the collector +func (collector *GenericCollector) Collect(metrics map[string]v1.MetricVal) (time.Time, map[string]v1.MetricVal, error) { + currentTime := time.Now() + nextCollectionTime := currentTime.Add(time.Duration(collector.info.minPollingFrequency)) + + uri := collector.configFile.Endpoint + response, err := http.Get(uri) + if err != nil { + return nextCollectionTime, nil, err + } + + defer response.Body.Close() + + pageContent, err := ioutil.ReadAll(response.Body) + if err != nil { + return nextCollectionTime, nil, err + } + + var errorSlice []error + for ind, metricConfig := range collector.configFile.MetricsConfig { + matchString := collector.info.regexps[ind].FindStringSubmatch(string(pageContent)) + if matchString != nil { + if metricConfig.DataType == v1.FloatType { + regVal, err := strconv.ParseFloat(strings.TrimSpace(matchString[1]), 64) + if err != nil { + errorSlice = append(errorSlice, err) + } + metrics[metricConfig.Name] = v1.MetricVal{ + FloatValue: regVal, Timestamp: currentTime, + } + } 
else if metricConfig.DataType == v1.IntType { + regVal, err := strconv.ParseInt(strings.TrimSpace(matchString[1]), 10, 64) + if err != nil { + errorSlice = append(errorSlice, err) + } + metrics[metricConfig.Name] = v1.MetricVal{ + IntValue: regVal, Timestamp: currentTime, + } + + } else { + errorSlice = append(errorSlice, fmt.Errorf("Unexpected value of 'data_type' for metric '%v' in config ", metricConfig.Name)) + } + } else { + errorSlice = append(errorSlice, fmt.Errorf("No match found for regexp: %v for metric '%v' in config", metricConfig.Regex, metricConfig.Name)) + } + } + return nextCollectionTime, metrics, compileErrors(errorSlice) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/collector/generic_collector_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/collector/generic_collector_test.go new file mode 100644 index 00000000000..a1f850de229 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/collector/generic_collector_test.go @@ -0,0 +1,167 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package collector + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/google/cadvisor/info/v1" + "github.com/stretchr/testify/assert" +) + +func TestEmptyConfig(t *testing.T) { + assert := assert.New(t) + + emptyConfig := ` + { + "endpoint" : "http://localhost:8000/nginx_status", + "metrics_config" : [ + ] + } + ` + + //Create a temporary config file 'temp.json' with invalid json format + assert.NoError(ioutil.WriteFile("temp.json", []byte(emptyConfig), 0777)) + + configFile, err := ioutil.ReadFile("temp.json") + assert.NoError(err) + + _, err = NewCollector("tempCollector", configFile) + assert.Error(err) + + assert.NoError(os.Remove("temp.json")) +} + +func TestConfigWithErrors(t *testing.T) { + assert := assert.New(t) + + //Syntax error: Missed '"' after activeConnections + invalid := ` + { + "endpoint" : "http://localhost:8000/nginx_status", + "metrics_config" : [ + { + "name" : "activeConnections, + "metric_type" : "gauge", + "data_type" : "int", + "polling_frequency" : 10, + "regex" : "Active connections: ([0-9]+)" + } + ] + } + ` + + //Create a temporary config file 'temp.json' with invalid json format + assert.NoError(ioutil.WriteFile("temp.json", []byte(invalid), 0777)) + configFile, err := ioutil.ReadFile("temp.json") + assert.NoError(err) + + _, err = NewCollector("tempCollector", configFile) + assert.Error(err) + + assert.NoError(os.Remove("temp.json")) +} + +func TestConfigWithRegexErrors(t *testing.T) { + assert := assert.New(t) + + //Error: Missed operand for '+' in activeConnections regex + invalid := ` + { + "endpoint" : "host:port/nginx_status", + "metrics_config" : [ + { + "name" : "activeConnections", + "metric_type" : "gauge", + "data_type" : "int", + "polling_frequency" : 10, + "regex" : "Active connections: (+)" + }, + { + "name" : "reading", + "metric_type" : "gauge", + "data_type" : "int", + "polling_frequency" : 10, + "regex" : "Reading: ([0-9]+) .*" + } + ] + } + ` + + //Create a 
temporary config file 'temp.json' + assert.NoError(ioutil.WriteFile("temp.json", []byte(invalid), 0777)) + + configFile, err := ioutil.ReadFile("temp.json") + assert.NoError(err) + + _, err = NewCollector("tempCollector", configFile) + assert.Error(err) + + assert.NoError(os.Remove("temp.json")) +} + +func TestConfig(t *testing.T) { + assert := assert.New(t) + + //Create an nginx collector using the config file 'sample_config.json' + configFile, err := ioutil.ReadFile("config/sample_config.json") + assert.NoError(err) + + collector, err := NewCollector("nginx", configFile) + assert.NoError(err) + assert.Equal(collector.name, "nginx") + assert.Equal(collector.configFile.Endpoint, "http://localhost:8000/nginx_status") + assert.Equal(collector.configFile.MetricsConfig[0].Name, "activeConnections") +} + +func TestMetricCollection(t *testing.T) { + assert := assert.New(t) + + //Collect nginx metrics from a fake nginx endpoint + configFile, err := ioutil.ReadFile("config/sample_config.json") + assert.NoError(err) + + fakeCollector, err := NewCollector("nginx", configFile) + assert.NoError(err) + + tempServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintln(w, "Active connections: 3\nserver accepts handled requests") + fmt.Fprintln(w, "5 5 32\nReading: 0 Writing: 1 Waiting: 2") + })) + defer tempServer.Close() + fakeCollector.configFile.Endpoint = tempServer.URL + + metrics := map[string]v1.MetricVal{} + _, metrics, errMetric := fakeCollector.Collect(metrics) + assert.NoError(errMetric) + metricNames := []string{"activeConnections", "reading", "writing", "waiting"} + // activeConnections = 3 + assert.Equal(metrics[metricNames[0]].IntValue, 3) + assert.Equal(metrics[metricNames[0]].FloatValue, 0) + // reading = 0 + assert.Equal(metrics[metricNames[1]].IntValue, 0) + assert.Equal(metrics[metricNames[1]].FloatValue, 0) + // writing = 1 + assert.Equal(metrics[metricNames[2]].IntValue, 1) + 
assert.Equal(metrics[metricNames[2]].FloatValue, 0) + // waiting = 2 + assert.Equal(metrics[metricNames[3]].IntValue, 2) + assert.Equal(metrics[metricNames[3]].FloatValue, 0) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/collector/types.go b/Godeps/_workspace/src/github.com/google/cadvisor/collector/types.go index 8bd29d0604a..60851bee2b0 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/collector/types.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/collector/types.go @@ -15,7 +15,7 @@ package collector import ( - "github.com/google/cadvisor/info/v2" + "github.com/google/cadvisor/info/v1" "time" ) @@ -27,7 +27,10 @@ type Collector interface { // Returns the next time this collector should be collected from. // Next collection time is always returned, even when an error occurs. // A collection time of zero means no more collection. - Collect() (time.Time, []v2.Metric, error) + Collect(map[string]v1.MetricVal) (time.Time, map[string]v1.MetricVal, error) + + // Return spec for all metrics associated with this collector + GetSpec() []v1.MetricSpec // Name of this collector. Name() string @@ -42,5 +45,8 @@ type CollectorManager interface { // at which a collector will be ready to collect from. // Next collection time is always returned, even when an error occurs. // A collection time of zero means no more collection. - Collect() (time.Time, []v2.Metric, error) + Collect() (time.Time, map[string]v1.MetricVal, error) + + // Get metric spec from all registered collectors. 
+ GetSpec() ([]v1.MetricSpec, error) } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/container.go index b62e3e9661c..2ab5dfdfb39 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/container.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/container.go @@ -73,6 +73,9 @@ type ContainerHandler interface { // Returns absolute cgroup path for the requested resource. GetCgroupPath(resource string) (string, error) + // Returns container labels, if available. + GetContainerLabels() map[string]string + // Returns whether the container still exists. Exists() bool } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go index 6d8ca8c7f09..2344fc05b53 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go @@ -167,7 +167,7 @@ func libcontainerConfigToContainerSpec(config *libcontainerConfigs.Config, mi *i } spec.Cpu.Mask = utils.FixCpuMask(config.Cgroups.CpusetCpus, mi.NumCores) - spec.HasNetwork = true + spec.HasNetwork = len(config.Networks) > 0 spec.HasDiskIo = true return spec @@ -276,7 +276,7 @@ func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) { } func convertInterfaceStats(stats *info.InterfaceStats) { - net := stats + net := *stats // Ingress for host veth is from the container. // Hence tx_bytes stat on the host veth is actually number of bytes received by the container. 
@@ -332,6 +332,10 @@ func (self *dockerContainerHandler) ListThreads(listType container.ListType) ([] return nil, nil } +func (self *dockerContainerHandler) GetContainerLabels() map[string]string { + return self.labels +} + func (self *dockerContainerHandler) ListProcesses(listType container.ListType) ([]int, error) { return containerLibcontainer.GetProcesses(self.cgroupManager) } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/compatability.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/compatibility.go similarity index 100% rename from Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/compatability.go rename to Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/compatibility.go diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go index bb6e9ee3091..0cd4c119b9d 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go @@ -93,7 +93,7 @@ func GetStats(cgroupManager cgroups.Manager, networkInterfaces []string) (*info. } stats.Network.Interfaces[i] = interfaceStats } - // For backwards compatability. + // For backwards compatibility. if len(networkInterfaces) > 0 { stats.Network.InterfaceStats = stats.Network.Interfaces[0] } @@ -233,7 +233,7 @@ func toContainerStats3(libcontainerStats *libcontainer.Stats, ret *info.Containe } } - // Add to base struct for backwards compatability. + // Add to base struct for backwards compatibility. 
if len(ret.Network.Interfaces) > 0 { ret.Network.InterfaceStats = ret.Network.Interfaces[0] } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go index 1d5498854f2..7422b3ddbc1 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go @@ -95,6 +95,11 @@ func (self *MockContainerHandler) GetCgroupPath(path string) (string, error) { return args.Get(0).(string), args.Error(1) } +func (self *MockContainerHandler) GetContainerLabels() map[string]string { + args := self.Called() + return args.Get(0).(map[string]string) +} + type FactoryForMockContainerHandler struct { Name string PrepareContainerHandlerFunc func(name string, handler *MockContainerHandler) diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go index 33f1de677e0..51405db74f7 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go @@ -33,6 +33,7 @@ import ( "github.com/google/cadvisor/fs" info "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/utils" + "github.com/google/cadvisor/utils/machine" "golang.org/x/exp/inotify" ) @@ -210,13 +211,33 @@ func (self *rawContainerHandler) GetSpec() (info.ContainerSpec, error) { } } - // Memory. 
- memoryRoot, ok := self.cgroupPaths["memory"] - if ok { - if utils.FileExists(memoryRoot) { + // Memory + if self.name == "/" { + // Get memory and swap limits of the running machine + memLimit, err := machine.GetMachineMemoryCapacity() + if err != nil { + glog.Warningf("failed to obtain memory limit for machine container") + spec.HasMemory = false + } else { + spec.Memory.Limit = uint64(memLimit) + // Spec is marked to have memory only if the memory limit is set spec.HasMemory = true - spec.Memory.Limit = readInt64(memoryRoot, "memory.limit_in_bytes") - spec.Memory.SwapLimit = readInt64(memoryRoot, "memory.memsw.limit_in_bytes") + } + + swapLimit, err := machine.GetMachineSwapCapacity() + if err != nil { + glog.Warningf("failed to obtain swap limit for machine container") + } else { + spec.Memory.SwapLimit = uint64(swapLimit) + } + } else { + memoryRoot, ok := self.cgroupPaths["memory"] + if ok { + if utils.FileExists(memoryRoot) { + spec.HasMemory = true + spec.Memory.Limit = readInt64(memoryRoot, "memory.limit_in_bytes") + spec.Memory.SwapLimit = readInt64(memoryRoot, "memory.memsw.limit_in_bytes") + } } } @@ -335,6 +356,10 @@ func (self *rawContainerHandler) GetCgroupPath(resource string) (string, error) return path, nil } +func (self *rawContainerHandler) GetContainerLabels() map[string]string { + return map[string]string{} +} + // Lists all directories under "path" and outputs the results as children of "parent". func listDirectories(dirpath string, parent string, recursive bool, output map[string]struct{}) error { // Ignore if this hierarchy does not exist. 
diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go index e9afe26a713..1788d5818ff 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go @@ -58,6 +58,9 @@ type ContainerSpec struct { // HasDiskIo when true, indicates that DiskIo stats will be available. HasDiskIo bool `json:"has_diskio"` + + HasCustomMetrics bool `json:"has_custom_metrics"` + CustomMetrics []MetricSpec `json:"custom_metrics,omitempty"` } // Container reference contains enough information to uniquely identify a container @@ -190,6 +193,9 @@ func (self *ContainerSpec) Eq(b *ContainerSpec) bool { if self.HasDiskIo != b.HasDiskIo { return false } + if self.HasCustomMetrics != b.HasCustomMetrics { + return false + } return true } @@ -419,6 +425,9 @@ type ContainerStats struct { // Task load stats TaskStats LoadStats `json:"task_stats,omitempty"` + + //Custom metrics from all collectors + CustomMetrics map[string]MetricVal `json:"custom_metrics,omitempty"` } func timeEq(t1, t2 time.Time, tolerance time.Duration) bool { diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/machine.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/machine.go index ddc7452bfd8..dc07ffa67a2 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/machine.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/machine.go @@ -112,6 +112,22 @@ type NetInfo struct { Mtu int64 `json:"mtu"` } +type CloudProvider string + +const ( + GCE CloudProvider = "GCE" + AWS = "AWS" + Baremetal = "Baremetal" + UnkownProvider = "Unknown" +) + +type InstanceType string + +const ( + NoInstance InstanceType = "None" + UnknownInstance = "Unknown" +) + type MachineInfo struct { // The number of cores in this machine. 
NumCores int `json:"num_cores"` @@ -143,6 +159,12 @@ type MachineInfo struct { // Machine Topology // Describes cpu/memory layout and hierarchy. Topology []Node `json:"topology"` + + // Cloud provider the machine belongs to. + CloudProvider CloudProvider `json:"cloud_provider"` + + // Type of cloud instance (e.g. GCE standard) the machine is. + InstanceType InstanceType `json:"instance_type"` } type VersionInfo struct { diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/metric.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/metric.go similarity index 63% rename from Godeps/_workspace/src/github.com/google/cadvisor/info/v2/metric.go rename to Godeps/_workspace/src/github.com/google/cadvisor/info/v1/metric.go index 1057980ce37..f1ffd7be472 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/metric.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/metric.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package v2 +package v1 import ( "time" @@ -32,38 +32,35 @@ const ( MetricDelta = "delta" ) -// An exported metric. -type Metric struct { +// DataType for metric being exported. +type DataType string + +const ( + IntType DataType = "int" + FloatType = "float" +) + +// Spec for custom metric. +type MetricSpec struct { // The name of the metric. Name string `json:"name"` // Type of the metric. Type MetricType `json:"type"` - // Metadata associated with this metric. - Labels map[string]string + // Data Type for the stats. + Format DataType `json:"format"` - // Value of the metric. Only one of these values will be - // available according to the output type of the metric. - // If no values are available, there are no data points. - IntPoints []IntPoint `json:"int_points,omitempty"` - FloatPoints []FloatPoint `json:"float_points,omitempty"` + // Display Units for the stats. 
+ Units string `json:"units"` } -// An integer metric data point. -type IntPoint struct { +// An exported metric. +type MetricVal struct { // Time at which the metric was queried Timestamp time.Time `json:"timestamp"` // The value of the metric at this point. - Value int64 `json:"value"` -} - -// A float metric data point. -type FloatPoint struct { - // Time at which the metric was queried - Timestamp time.Time `json:"timestamp"` - - // The value of the metric at this point. - Value float64 `json:"value"` + IntValue int64 `json:"int_value,omitempty"` + FloatValue float64 `json:"float_value,omitempty"` } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go index 7d9c388ce11..e4bda16e13a 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go @@ -73,6 +73,9 @@ type ContainerSpec struct { HasMemory bool `json:"has_memory"` Memory MemorySpec `json:"memory,omitempty"` + HasCustomMetrics bool `json:"has_custom_metrics"` + CustomMetrics []v1.MetricSpec `json:"custom_metrics,omitempty"` + // Following resources have no associated spec, but are being isolated. HasNetwork bool `json:"has_network"` HasFilesystem bool `json:"has_filesystem"` @@ -100,6 +103,9 @@ type ContainerStats struct { // Task load statistics HasLoad bool `json:"has_load"` Load v1.LoadStats `json:"load_stats,omitempty"` + // Custom Metrics + HasCustomMetrics bool `json:"has_custom_metrics"` + CustomMetrics map[string]v1.MetricVal `json:"custom_metrics,omitempty"` } type Percentiles struct { @@ -110,8 +116,12 @@ type Percentiles struct { Mean uint64 `json:"mean"` // Max seen over the collected sample. Max uint64 `json:"max"` + // 50th percentile over the collected sample. + Fifty uint64 `json:"fifty"` // 90th percentile over the collected sample. 
Ninety uint64 `json:"ninety"` + // 95th percentile over the collected sample. + NinetyFive uint64 `json:"ninetyfive"` } type Usage struct { diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/machine.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/machine.go index 219297933c7..4aef3d835fb 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/machine.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/machine.go @@ -59,6 +59,12 @@ type Attributes struct { // Machine Topology // Describes cpu/memory layout and hierarchy. Topology []v1.Node `json:"topology"` + + // Cloud provider the machine belongs to + CloudProvider v1.CloudProvider `json:"cloud_provider"` + + // Type of cloud instance (e.g. GCE standard) the machine is. + InstanceType v1.InstanceType `json:"instance_type"` } func GetAttributes(mi *v1.MachineInfo, vi *v1.VersionInfo) Attributes { @@ -76,5 +82,7 @@ func GetAttributes(mi *v1.MachineInfo, vi *v1.VersionInfo) Attributes { DiskMap: mi.DiskMap, NetworkDevices: mi.NetworkDevices, Topology: mi.Topology, + CloudProvider: mi.CloudProvider, + InstanceType: mi.InstanceType, } } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go index 5498c674915..e6084801058 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go @@ -17,8 +17,10 @@ package manager import ( "flag" "fmt" + "io/ioutil" "math" "os/exec" + "path" "regexp" "sort" "strconv" @@ -39,8 +41,6 @@ import ( // Housekeeping interval. 
var HousekeepingInterval = flag.Duration("housekeeping_interval", 1*time.Second, "Interval between container housekeepings") -var maxHousekeepingInterval = flag.Duration("max_housekeeping_interval", 60*time.Second, "Largest interval to allow between container housekeepings") -var allowDynamicHousekeeping = flag.Bool("allow_dynamic_housekeeping", true, "Whether to allow the housekeeping interval to be dynamic") var cgroupPathRegExp = regexp.MustCompile(".*:devices:(.*?),.*") @@ -54,16 +54,18 @@ type containerInfo struct { } type containerData struct { - handler container.ContainerHandler - info containerInfo - memoryCache *memory.InMemoryCache - lock sync.Mutex - loadReader cpuload.CpuLoadReader - summaryReader *summary.StatsSummary - loadAvg float64 // smoothed load average seen so far. - housekeepingInterval time.Duration - lastUpdatedTime time.Time - lastErrorTime time.Time + handler container.ContainerHandler + info containerInfo + memoryCache *memory.InMemoryCache + lock sync.Mutex + loadReader cpuload.CpuLoadReader + summaryReader *summary.StatsSummary + loadAvg float64 // smoothed load average seen so far. + housekeepingInterval time.Duration + maxHousekeepingInterval time.Duration + allowDynamicHousekeeping bool + lastUpdatedTime time.Time + lastErrorTime time.Time // Whether to log the usage of this container when it is updated. logUsage bool @@ -136,11 +138,32 @@ func (c *containerData) getCgroupPath(cgroups string) (string, error) { return string(matches[1]), nil } -func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace bool) ([]v2.ProcessInfo, error) { - // report all processes for root. - isRoot := c.info.Name == "/" - // TODO(rjnagal): Take format as an option? - format := "user,pid,ppid,stime,pcpu,pmem,rss,vsz,stat,time,comm,cgroup" +// Returns contents of a file inside the container root. +// Takes in a path relative to container root. 
+func (c *containerData) ReadFile(filepath string, inHostNamespace bool) ([]byte, error) { + pids, err := c.getContainerPids(inHostNamespace) + if err != nil { + return nil, err + } + // TODO(rjnagal): Optimize by just reading container's cgroup.proc file when in host namespace. + rootfs := "/" + if !inHostNamespace { + rootfs = "/rootfs" + } + for _, pid := range pids { + filePath := path.Join(rootfs, "/proc", pid, "/root", filepath) + glog.V(3).Infof("Trying path %q", filePath) + data, err := ioutil.ReadFile(filePath) + if err == nil { + return data, err + } + } + // No process paths could be found. Declare config non-existent. + return nil, fmt.Errorf("file %q does not exist.", filepath) +} + +// Return output for ps command in host /proc with specified format +func (c *containerData) getPsOutput(inHostNamespace bool, format string) ([]byte, error) { args := []string{} command := "ps" if !inHostNamespace { @@ -148,11 +171,53 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace args = append(args, "/rootfs", "ps") } args = append(args, "-e", "-o", format) - expectedFields := 12 out, err := exec.Command(command, args...).Output() if err != nil { return nil, fmt.Errorf("failed to execute %q command: %v", command, err) } + return out, err +} + +// Get pids of processes in this container. +// A slightly lighterweight call than GetProcessList if other details are not required. 
+func (c *containerData) getContainerPids(inHostNamespace bool) ([]string, error) { + format := "pid,cgroup" + out, err := c.getPsOutput(inHostNamespace, format) + if err != nil { + return nil, err + } + expectedFields := 2 + lines := strings.Split(string(out), "\n") + pids := []string{} + for _, line := range lines[1:] { + if len(line) == 0 { + continue + } + fields := strings.Fields(line) + if len(fields) < expectedFields { + return nil, fmt.Errorf("expected at least %d fields, found %d: output: %q", expectedFields, len(fields), line) + } + pid := fields[0] + cgroup, err := c.getCgroupPath(fields[1]) + if err != nil { + return nil, fmt.Errorf("could not parse cgroup path from %q: %v", fields[1], err) + } + if c.info.Name == cgroup { + pids = append(pids, pid) + } + } + return pids, nil +} + +func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace bool) ([]v2.ProcessInfo, error) { + // report all processes for root. + isRoot := c.info.Name == "/" + format := "user,pid,ppid,stime,pcpu,pmem,rss,vsz,stat,time,comm,cgroup" + out, err := c.getPsOutput(inHostNamespace, format) + if err != nil { + return nil, err + } + expectedFields := 12 processes := []v2.ProcessInfo{} lines := strings.Split(string(out), "\n") for _, line := range lines[1:] { @@ -183,13 +248,17 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace if err != nil { return nil, fmt.Errorf("invalid rss %q: %v", fields[6], err) } + // convert to bytes + rss *= 1024 vs, err := strconv.ParseUint(fields[7], 0, 64) if err != nil { return nil, fmt.Errorf("invalid virtual size %q: %v", fields[7], err) } + // convert to bytes + vs *= 1024 cgroup, err := c.getCgroupPath(fields[11]) if err != nil { - return nil, fmt.Errorf("could not parse cgroup path from %q: %v", fields[10], err) + return nil, fmt.Errorf("could not parse cgroup path from %q: %v", fields[11], err) } // Remove the ps command we just ran from cadvisor container. 
// Not necessary, but makes the cadvisor page look cleaner. @@ -221,7 +290,7 @@ func (c *containerData) GetProcessList(cadvisorContainer string, inHostNamespace return processes, nil } -func newContainerData(containerName string, memoryCache *memory.InMemoryCache, handler container.ContainerHandler, loadReader cpuload.CpuLoadReader, logUsage bool, collectorManager collector.CollectorManager) (*containerData, error) { +func newContainerData(containerName string, memoryCache *memory.InMemoryCache, handler container.ContainerHandler, loadReader cpuload.CpuLoadReader, logUsage bool, collectorManager collector.CollectorManager, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool) (*containerData, error) { if memoryCache == nil { return nil, fmt.Errorf("nil memory storage") } @@ -234,14 +303,16 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h } cont := &containerData{ - handler: handler, - memoryCache: memoryCache, - housekeepingInterval: *HousekeepingInterval, - loadReader: loadReader, - logUsage: logUsage, - loadAvg: -1.0, // negative value indicates uninitialized. - stop: make(chan bool, 1), - collectorManager: collectorManager, + handler: handler, + memoryCache: memoryCache, + housekeepingInterval: *HousekeepingInterval, + maxHousekeepingInterval: maxHousekeepingInterval, + allowDynamicHousekeeping: allowDynamicHousekeeping, + loadReader: loadReader, + logUsage: logUsage, + loadAvg: -1.0, // negative value indicates uninitialized. + stop: make(chan bool, 1), + collectorManager: collectorManager, } cont.info.ContainerReference = ref @@ -260,7 +331,7 @@ func newContainerData(containerName string, memoryCache *memory.InMemoryCache, h // Determine when the next housekeeping should occur. 
func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Time { - if *allowDynamicHousekeeping { + if self.allowDynamicHousekeeping { var empty time.Time stats, err := self.memoryCache.RecentStats(self.info.Name, empty, empty, 2) if err != nil { @@ -270,10 +341,10 @@ func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Tim } else if len(stats) == 2 { // TODO(vishnuk): Use no processes as a signal. // Raise the interval if usage hasn't changed in the last housekeeping. - if stats[0].StatsEq(stats[1]) && (self.housekeepingInterval < *maxHousekeepingInterval) { + if stats[0].StatsEq(stats[1]) && (self.housekeepingInterval < self.maxHousekeepingInterval) { self.housekeepingInterval *= 2 - if self.housekeepingInterval > *maxHousekeepingInterval { - self.housekeepingInterval = *maxHousekeepingInterval + if self.housekeepingInterval > self.maxHousekeepingInterval { + self.housekeepingInterval = self.maxHousekeepingInterval } } else if self.housekeepingInterval != *HousekeepingInterval { // Lower interval back to the baseline. @@ -340,19 +411,7 @@ func (c *containerData) housekeeping() { } } - // TODO(vmarmol): Export metrics. - // Run custom collectors. - nextCollectionTime, _, err := c.collectorManager.Collect() - if err != nil && c.allowErrorLogging() { - glog.Warningf("[%s] Collection failed: %v", c.info.Name, err) - } - - // Next housekeeping is the first of the stats or the custom collector's housekeeping. - nextHousekeeping := c.nextHousekeeping(lastHousekeeping) - next := nextHousekeeping - if !nextCollectionTime.IsZero() && nextCollectionTime.Before(nextHousekeeping) { - next = nextCollectionTime - } + next := c.nextHousekeeping(lastHousekeeping) // Schedule the next housekeeping. Sleep until that time. 
if time.Now().Before(next) { @@ -380,6 +439,12 @@ func (c *containerData) updateSpec() error { } return err } + + customMetrics, err := c.collectorManager.GetSpec() + if len(customMetrics) > 0 { + spec.HasCustomMetrics = true + spec.CustomMetrics = customMetrics + } c.lock.Lock() defer c.lock.Unlock() c.info.Spec = spec @@ -432,6 +497,20 @@ func (c *containerData) updateStats() error { glog.V(2).Infof("Failed to add summary stats for %q: %v", c.info.Name, err) } } + var customStatsErr error + cm := c.collectorManager.(*collector.GenericCollectorManager) + if len(cm.Collectors) > 0 { + if cm.NextCollectionTime.Before(time.Now()) { + customStats, err := c.updateCustomStats() + if customStats != nil { + stats.CustomMetrics = customStats + } + if err != nil { + customStatsErr = err + } + } + } + ref, err := c.handler.ContainerReference() if err != nil { // Ignore errors if the container is dead. @@ -444,7 +523,21 @@ func (c *containerData) updateStats() error { if err != nil { return err } - return statsErr + if statsErr != nil { + return statsErr + } + return customStatsErr +} + +func (c *containerData) updateCustomStats() (map[string]info.MetricVal, error) { + _, customStats, customStatsErr := c.collectorManager.Collect() + if customStatsErr != nil { + if !c.handler.Exists() { + return customStats, nil + } + customStatsErr = fmt.Errorf("%v, continuing to push custom stats", customStatsErr) + } + return customStats, customStatsErr } func (c *containerData) updateSubcontainers() error { diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/container_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/container_test.go index f332dc51674..c5bcabc6b74 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/manager/container_test.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/container_test.go @@ -41,7 +41,7 @@ func setupContainerData(t *testing.T, spec info.ContainerSpec) (*containerData, nil, ) memoryCache := 
memory.New(60, nil) - ret, err := newContainerData(containerName, memoryCache, mockHandler, nil, false, &collector.FakeCollectorManager{}) + ret, err := newContainerData(containerName, memoryCache, mockHandler, nil, false, &collector.GenericCollectorManager{}, 60*time.Second, true) if err != nil { t.Fatal(err) } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/machine.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/machine.go index cbc2f186d6a..5ff60b65ba1 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/manager/machine.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/machine.go @@ -17,10 +17,7 @@ package manager import ( "bytes" "flag" - "fmt" "io/ioutil" - "regexp" - "strconv" "strings" "syscall" @@ -29,193 +26,16 @@ import ( "github.com/google/cadvisor/container/docker" "github.com/google/cadvisor/fs" info "github.com/google/cadvisor/info/v1" - "github.com/google/cadvisor/utils" + "github.com/google/cadvisor/utils/cloudinfo" + "github.com/google/cadvisor/utils/machine" "github.com/google/cadvisor/utils/sysfs" "github.com/google/cadvisor/utils/sysinfo" version "github.com/google/cadvisor/version" ) -var cpuRegExp = regexp.MustCompile("processor\\t*: +([0-9]+)") -var coreRegExp = regexp.MustCompile("core id\\t*: +([0-9]+)") -var nodeRegExp = regexp.MustCompile("physical id\\t*: +([0-9]+)") -var CpuClockSpeedMHz = regexp.MustCompile("cpu MHz\\t*: +([0-9]+.[0-9]+)") -var memoryCapacityRegexp = regexp.MustCompile("MemTotal: *([0-9]+) kB") - var machineIdFilePath = flag.String("machine_id_file", "/etc/machine-id,/var/lib/dbus/machine-id", "Comma-separated list of files to check for machine-id. Use the first one that exists.") var bootIdFilePath = flag.String("boot_id_file", "/proc/sys/kernel/random/boot_id", "Comma-separated list of files to check for boot-id. 
Use the first one that exists.") -func getClockSpeed(procInfo []byte) (uint64, error) { - // First look through sys to find a max supported cpu frequency. - const maxFreqFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq" - if utils.FileExists(maxFreqFile) { - val, err := ioutil.ReadFile(maxFreqFile) - if err != nil { - return 0, err - } - var maxFreq uint64 - n, err := fmt.Sscanf(string(val), "%d", &maxFreq) - if err != nil || n != 1 { - return 0, fmt.Errorf("could not parse frequency %q", val) - } - return maxFreq, nil - } - // Fall back to /proc/cpuinfo - matches := CpuClockSpeedMHz.FindSubmatch(procInfo) - if len(matches) != 2 { - //Check if we are running on Power systems which have a different format - CpuClockSpeedMHz, _ = regexp.Compile("clock\\t*: +([0-9]+.[0-9]+)MHz") - matches = CpuClockSpeedMHz.FindSubmatch(procInfo) - if len(matches) != 2 { - return 0, fmt.Errorf("could not detect clock speed from output: %q", string(procInfo)) - } - } - speed, err := strconv.ParseFloat(string(matches[1]), 64) - if err != nil { - return 0, err - } - // Convert to kHz - return uint64(speed * 1000), nil -} - -func getMemoryCapacity(b []byte) (int64, error) { - matches := memoryCapacityRegexp.FindSubmatch(b) - if len(matches) != 2 { - return -1, fmt.Errorf("failed to find memory capacity in output: %q", string(b)) - } - m, err := strconv.ParseInt(string(matches[1]), 10, 64) - if err != nil { - return -1, err - } - - // Convert to bytes. 
- return m * 1024, err -} - -func extractValue(s string, r *regexp.Regexp) (bool, int, error) { - matches := r.FindSubmatch([]byte(s)) - if len(matches) == 2 { - val, err := strconv.ParseInt(string(matches[1]), 10, 32) - if err != nil { - return true, -1, err - } - return true, int(val), nil - } - return false, -1, nil -} - -func findNode(nodes []info.Node, id int) (bool, int) { - for i, n := range nodes { - if n.Id == id { - return true, i - } - } - return false, -1 -} - -func addNode(nodes *[]info.Node, id int) (int, error) { - var idx int - if id == -1 { - // Some VMs don't fill topology data. Export single package. - id = 0 - } - - ok, idx := findNode(*nodes, id) - if !ok { - // New node - node := info.Node{Id: id} - // Add per-node memory information. - meminfo := fmt.Sprintf("/sys/devices/system/node/node%d/meminfo", id) - out, err := ioutil.ReadFile(meminfo) - // Ignore if per-node info is not available. - if err == nil { - m, err := getMemoryCapacity(out) - if err != nil { - return -1, err - } - node.Memory = uint64(m) - } - *nodes = append(*nodes, node) - idx = len(*nodes) - 1 - } - return idx, nil -} - -func getTopology(sysFs sysfs.SysFs, cpuinfo string) ([]info.Node, int, error) { - nodes := []info.Node{} - numCores := 0 - lastThread := -1 - lastCore := -1 - lastNode := -1 - for _, line := range strings.Split(cpuinfo, "\n") { - ok, val, err := extractValue(line, cpuRegExp) - if err != nil { - return nil, -1, fmt.Errorf("could not parse cpu info from %q: %v", line, err) - } - if ok { - thread := val - numCores++ - if lastThread != -1 { - // New cpu section. Save last one. 
- nodeIdx, err := addNode(&nodes, lastNode) - if err != nil { - return nil, -1, fmt.Errorf("failed to add node %d: %v", lastNode, err) - } - nodes[nodeIdx].AddThread(lastThread, lastCore) - lastCore = -1 - lastNode = -1 - } - lastThread = thread - } - ok, val, err = extractValue(line, coreRegExp) - if err != nil { - return nil, -1, fmt.Errorf("could not parse core info from %q: %v", line, err) - } - if ok { - lastCore = val - } - ok, val, err = extractValue(line, nodeRegExp) - if err != nil { - return nil, -1, fmt.Errorf("could not parse node info from %q: %v", line, err) - } - if ok { - lastNode = val - } - } - nodeIdx, err := addNode(&nodes, lastNode) - if err != nil { - return nil, -1, fmt.Errorf("failed to add node %d: %v", lastNode, err) - } - nodes[nodeIdx].AddThread(lastThread, lastCore) - if numCores < 1 { - return nil, numCores, fmt.Errorf("could not detect any cores") - } - for idx, node := range nodes { - caches, err := sysinfo.GetCacheInfo(sysFs, node.Cores[0].Threads[0]) - if err != nil { - glog.Errorf("failed to get cache information for node %d: %v", node.Id, err) - continue - } - numThreadsPerCore := len(node.Cores[0].Threads) - numThreadsPerNode := len(node.Cores) * numThreadsPerCore - for _, cache := range caches { - c := info.Cache{ - Size: cache.Size, - Level: cache.Level, - Type: cache.Type, - } - if cache.Cpus == numThreadsPerNode && cache.Level > 2 { - // Add a node-level cache. - nodes[idx].AddNodeCache(c) - } else if cache.Cpus == numThreadsPerCore { - // Add to each core. - nodes[idx].AddPerCoreCache(c) - } - // Ignore unknown caches. 
- } - } - return nodes, numCores, nil -} - func getInfoFromFiles(filePaths string) string { if len(filePaths) == 0 { return "" @@ -232,18 +52,12 @@ func getInfoFromFiles(filePaths string) string { func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo) (*info.MachineInfo, error) { cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo") - clockSpeed, err := getClockSpeed(cpuinfo) + clockSpeed, err := machine.GetClockSpeed(cpuinfo) if err != nil { return nil, err } - // Get the amount of usable memory from /proc/meminfo. - out, err := ioutil.ReadFile("/proc/meminfo") - if err != nil { - return nil, err - } - - memoryCapacity, err := getMemoryCapacity(out) + memoryCapacity, err := machine.GetMachineMemoryCapacity() if err != nil { return nil, err } @@ -263,7 +77,7 @@ func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo) (*info.MachineInfo, err glog.Errorf("Failed to get network devices: %v", err) } - topology, numCores, err := getTopology(sysFs, string(cpuinfo)) + topology, numCores, err := machine.GetTopology(sysFs, string(cpuinfo)) if err != nil { glog.Errorf("Failed to get topology information: %v", err) } @@ -273,6 +87,10 @@ func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo) (*info.MachineInfo, err glog.Errorf("Failed to get system UUID: %v", err) } + realCloudInfo := cloudinfo.NewRealCloudInfo() + cloudProvider := realCloudInfo.GetCloudProvider() + instanceType := realCloudInfo.GetInstanceType() + machineInfo := &info.MachineInfo{ NumCores: numCores, CpuFrequency: clockSpeed, @@ -283,6 +101,8 @@ func getMachineInfo(sysFs sysfs.SysFs, fsInfo fs.FsInfo) (*info.MachineInfo, err MachineID: getInfoFromFiles(*machineIdFilePath), SystemUUID: systemUUID, BootID: getInfoFromFiles(*bootIdFilePath), + CloudProvider: cloudProvider, + InstanceType: instanceType, } for _, fs := range filesystems { diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go index 
62255147f22..751acb97071 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go @@ -114,7 +114,7 @@ type Manager interface { } // New takes a memory storage and returns a new manager. -func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs) (Manager, error) { +func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs, maxHousekeepingInterval time.Duration, allowDynamicHousekeeping bool) (Manager, error) { if memoryCache == nil { return nil, fmt.Errorf("manager requires memory storage") } @@ -139,13 +139,15 @@ func New(memoryCache *memory.InMemoryCache, sysfs sysfs.SysFs) (Manager, error) inHostNamespace = true } newManager := &manager{ - containers: make(map[namespacedContainerName]*containerData), - quitChannels: make([]chan error, 0, 2), - memoryCache: memoryCache, - fsInfo: fsInfo, - cadvisorContainer: selfContainer, - inHostNamespace: inHostNamespace, - startupTime: time.Now(), + containers: make(map[namespacedContainerName]*containerData), + quitChannels: make([]chan error, 0, 2), + memoryCache: memoryCache, + fsInfo: fsInfo, + cadvisorContainer: selfContainer, + inHostNamespace: inHostNamespace, + startupTime: time.Now(), + maxHousekeepingInterval: maxHousekeepingInterval, + allowDynamicHousekeeping: allowDynamicHousekeeping, } machineInfo, err := getMachineInfo(sysfs, fsInfo) @@ -176,19 +178,21 @@ type namespacedContainerName struct { } type manager struct { - containers map[namespacedContainerName]*containerData - containersLock sync.RWMutex - memoryCache *memory.InMemoryCache - fsInfo fs.FsInfo - machineInfo info.MachineInfo - versionInfo info.VersionInfo - quitChannels []chan error - cadvisorContainer string - inHostNamespace bool - dockerContainersRegexp *regexp.Regexp - loadReader cpuload.CpuLoadReader - eventHandler events.EventManager - startupTime time.Time + containers map[namespacedContainerName]*containerData + containersLock sync.RWMutex 
+ memoryCache *memory.InMemoryCache + fsInfo fs.FsInfo + machineInfo info.MachineInfo + versionInfo info.VersionInfo + quitChannels []chan error + cadvisorContainer string + inHostNamespace bool + dockerContainersRegexp *regexp.Regexp + loadReader cpuload.CpuLoadReader + eventHandler events.EventManager + startupTime time.Time + maxHousekeepingInterval time.Duration + allowDynamicHousekeeping bool } // Start the container manager. @@ -371,12 +375,13 @@ func (self *manager) GetContainerSpec(containerName string, options v2.RequestOp func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec { specV1 := self.getAdjustedSpec(cinfo) specV2 := v2.ContainerSpec{ - CreationTime: specV1.CreationTime, - HasCpu: specV1.HasCpu, - HasMemory: specV1.HasMemory, - HasFilesystem: specV1.HasFilesystem, - HasNetwork: specV1.HasNetwork, - HasDiskIo: specV1.HasDiskIo, + CreationTime: specV1.CreationTime, + HasCpu: specV1.HasCpu, + HasMemory: specV1.HasMemory, + HasFilesystem: specV1.HasFilesystem, + HasNetwork: specV1.HasNetwork, + HasDiskIo: specV1.HasDiskIo, + HasCustomMetrics: specV1.HasCustomMetrics, } if specV1.HasCpu { specV2.Cpu.Limit = specV1.Cpu.Limit @@ -388,6 +393,9 @@ func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec { specV2.Memory.Reservation = specV1.Memory.Reservation specV2.Memory.SwapLimit = specV1.Memory.SwapLimit } + if specV1.HasCustomMetrics { + specV2.CustomMetrics = specV1.CustomMetrics + } specV2.Aliases = cinfo.Aliases specV2.Namespace = cinfo.Namespace return specV2 @@ -689,6 +697,28 @@ func (m *manager) GetProcessList(containerName string, options v2.RequestOptions return ps, nil } +func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *containerData) error { + for k, v := range collectorConfigs { + configFile, err := cont.ReadFile(v, m.inHostNamespace) + if err != nil { + return fmt.Errorf("failed to read config file %q for config %q, container %q: %v", k, v, cont.info.Name, err) + } + 
glog.V(3).Infof("Got config from %q: %q", v, configFile) + + newCollector, err := collector.NewCollector(k, configFile) + if err != nil { + glog.Infof("failed to create collector for container %q, config %q: %v", cont.info.Name, k, err) + return err + } + err = cont.collectorManager.RegisterCollector(newCollector) + if err != nil { + glog.Infof("failed to register collector for container %q, config %q: %v", cont.info.Name, k, err) + return err + } + } + return nil +} + // Create a container. func (m *manager) createContainer(containerName string) error { handler, accept, err := container.NewContainerHandler(containerName) @@ -700,17 +730,26 @@ func (m *manager) createContainer(containerName string) error { glog.V(4).Infof("ignoring container %q", containerName) return nil } - // TODO(vmarmol): Register collectors. collectorManager, err := collector.NewCollectorManager() if err != nil { return err } + logUsage := *logCadvisorUsage && containerName == m.cadvisorContainer - cont, err := newContainerData(containerName, m.memoryCache, handler, m.loadReader, logUsage, collectorManager) + cont, err := newContainerData(containerName, m.memoryCache, handler, m.loadReader, logUsage, collectorManager, m.maxHousekeepingInterval, m.allowDynamicHousekeeping) if err != nil { return err } + // Add collectors + labels := handler.GetContainerLabels() + collectorConfigs := collector.GetCollectorConfigs(labels) + err = m.registerCollectors(collectorConfigs, cont) + if err != nil { + glog.Infof("failed to register collectors for %q: %v", containerName, err) + return err + } + // Add to the containers map. 
alreadyExists := func() bool { m.containersLock.Lock() diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager_test.go index 6cfdc6373f3..b1600b88aa1 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager_test.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager_test.go @@ -53,7 +53,7 @@ func createManagerAndAddContainers( spec, nil, ).Once() - cont, err := newContainerData(name, memoryCache, mockHandler, nil, false, &collector.FakeCollectorManager{}) + cont, err := newContainerData(name, memoryCache, mockHandler, nil, false, &collector.GenericCollectorManager{}, 60*time.Second, true) if err != nil { t.Fatal(err) } @@ -205,7 +205,7 @@ func TestDockerContainersInfo(t *testing.T) { } func TestNewNilManager(t *testing.T) { - _, err := New(nil, nil) + _, err := New(nil, nil, 60*time.Second, true) if err == nil { t.Fatalf("Expected nil manager to return error") } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/pages/containers.go b/Godeps/_workspace/src/github.com/google/cadvisor/pages/containers.go index 4a83c4835e9..394c5d1628c 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/pages/containers.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/pages/containers.go @@ -18,7 +18,6 @@ package pages import ( "fmt" "html/template" - "math" "net/http" "net/url" "path" @@ -149,15 +148,19 @@ func toMegabytes(bytes uint64) float64 { return float64(bytes) / (1 << 20) } +// Size after which we consider memory to be "unlimited". This is not +// MaxInt64 due to rounding by the kernel. 
+const maxMemorySize = uint64(1 << 62) + func printSize(bytes uint64) string { - if bytes >= math.MaxInt64 { + if bytes >= maxMemorySize { return "unlimited" } return ByteSize(bytes).Size() } func printUnit(bytes uint64) string { - if bytes >= math.MaxInt64 { + if bytes >= maxMemorySize { return "" } return ByteSize(bytes).Unit() @@ -229,7 +232,7 @@ func serveContainersPage(m manager.Manager, w http.ResponseWriter, u *url.URL) e data := &pageData{ DisplayName: displayName, - ContainerName: cont.Name, + ContainerName: escapeContainerName(cont.Name), ParentContainers: parentContainers, Subcontainers: subcontainerLinks, Spec: cont.Spec, diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/pages/docker.go b/Godeps/_workspace/src/github.com/google/cadvisor/pages/docker.go index d02f1ceebdb..599cb0ac81b 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/pages/docker.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/pages/docker.go @@ -130,7 +130,7 @@ func serveDockerPage(m manager.Manager, w http.ResponseWriter, u *url.URL) error } data = &pageData{ DisplayName: displayName, - ContainerName: cont.Name, + ContainerName: escapeContainerName(cont.Name), ParentContainers: parentContainers, Spec: cont.Spec, Stats: cont.Stats, diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/pages/pages.go b/Godeps/_workspace/src/github.com/google/cadvisor/pages/pages.go index fed0401f27f..662be0a304f 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/pages/pages.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/pages/pages.go @@ -18,6 +18,7 @@ import ( "fmt" "html/template" "net/http" + "net/url" "strings" auth "github.com/abbot/go-http-auth" @@ -159,3 +160,12 @@ func getContainerDisplayName(cont info.ContainerReference) string { return displayName } + +// Escape the non-path characters on a container name. 
+func escapeContainerName(containerName string) string { + parts := strings.Split(containerName, "/") + for i := range parts { + parts[i] = url.QueryEscape(parts[i]) + } + return strings.Join(parts, "/") +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/redis/redis.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/redis/redis.go index 5d1e4ce480f..d66190c1a10 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/storage/redis/redis.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/redis/redis.go @@ -44,8 +44,8 @@ func (self *redisStorage) defaultReadyToFlush() bool { return time.Since(self.lastWrite) >= self.bufferDuration } -//We must add some defaut params (for example: MachineName,ContainerName...)because containerStats do not include them -func (self *redisStorage) containerStatsAndDefautValues(ref info.ContainerReference, stats *info.ContainerStats) *detailSpec { +//We must add some default params (for example: MachineName,ContainerName...)because containerStats do not include them +func (self *redisStorage) containerStatsAndDefaultValues(ref info.ContainerReference, stats *info.ContainerStats) *detailSpec { timestamp := stats.Timestamp.UnixNano() / 1E3 var containerName string if len(ref.Aliases) > 0 { @@ -72,8 +72,8 @@ func (self *redisStorage) AddStats(ref info.ContainerReference, stats *info.Cont // AddStats will be invoked simultaneously from multiple threads and only one of them will perform a write. 
self.lock.Lock() defer self.lock.Unlock() - // Add some defaut params based on containerStats - detail := self.containerStatsAndDefautValues(ref, stats) + // Add some default params based on containerStats + detail := self.containerStatsAndDefaultValues(ref, stats) //To json b, _ := json.Marshal(detail) if self.readyToFlush() { diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/statsd/client.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/statsd/client/client.go similarity index 61% rename from Godeps/_workspace/src/github.com/google/cadvisor/storage/statsd/client.go rename to Godeps/_workspace/src/github.com/google/cadvisor/storage/statsd/client/client.go index ffef57ff469..958468ad554 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/storage/statsd/client.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/statsd/client/client.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package statsd +package client import ( "fmt" @@ -22,8 +22,9 @@ import ( ) type Client struct { - HostPort string - conn net.Conn + HostPort string + Namespace string + conn net.Conn } func (self *Client) Open() error { @@ -36,38 +37,28 @@ func (self *Client) Open() error { return nil } -func (self *Client) Close() { +func (self *Client) Close() error { self.conn.Close() + self.conn = nil + return nil } -func (self *Client) UpdateGauge(name, value string) error { - stats := make(map[string]string) - val := fmt.Sprintf("%s|g", value) - stats[name] = val - if err := self.send(stats); err != nil { +// Simple send to statsd daemon without sampling. 
+func (self *Client) Send(namespace, containerName, key string, value uint64) error { + // only send counter value + formatted := fmt.Sprintf("%s.%s.%s:%d|g", namespace, containerName, key, value) + _, err := fmt.Fprintf(self.conn, formatted) + if err != nil { + glog.V(3).Infof("failed to send data %q: %v", formatted, err) return err } return nil } -// Simple send to statsd daemon without sampling. -func (self *Client) send(data map[string]string) error { - for k, v := range data { - formatted := fmt.Sprintf("%s:%s", k, v) - _, err := fmt.Fprintf(self.conn, formatted) - if err != nil { - glog.V(3).Infof("failed to send data %q: %v", formatted, err) - // return on first error. - return err - } - } - return nil -} - func New(hostPort string) (*Client, error) { - client := Client{HostPort: hostPort} - if err := client.Open(); err != nil { + Client := Client{HostPort: hostPort} + if err := Client.Open(); err != nil { return nil, err } - return &client, nil + return &Client, nil } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/storage/statsd/statsd.go b/Godeps/_workspace/src/github.com/google/cadvisor/storage/statsd/statsd.go new file mode 100644 index 00000000000..0b4ce9f4db8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/storage/statsd/statsd.go @@ -0,0 +1,127 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package statsd + +import ( + info "github.com/google/cadvisor/info/v1" + client "github.com/google/cadvisor/storage/statsd/client" +) + +type statsdStorage struct { + client *client.Client + Namespace string +} + +const ( + colCpuCumulativeUsage string = "cpu_cumulative_usage" + // Memory Usage + colMemoryUsage string = "memory_usage" + // Working set size + colMemoryWorkingSet string = "memory_working_set" + // Cumulative count of bytes received. + colRxBytes string = "rx_bytes" + // Cumulative count of receive errors encountered. + colRxErrors string = "rx_errors" + // Cumulative count of bytes transmitted. + colTxBytes string = "tx_bytes" + // Cumulative count of transmit errors encountered. + colTxErrors string = "tx_errors" + // Filesystem summary + colFsSummary = "fs_summary" + // Filesystem limit. + colFsLimit = "fs_limit" + // Filesystem usage. + colFsUsage = "fs_usage" +) + +func (self *statsdStorage) containerStatsToValues( + stats *info.ContainerStats, +) (series map[string]uint64) { + series = make(map[string]uint64) + + // Cumulative Cpu Usage + series[colCpuCumulativeUsage] = stats.Cpu.Usage.Total + + // Memory Usage + series[colMemoryUsage] = stats.Memory.Usage + + // Working set size + series[colMemoryWorkingSet] = stats.Memory.WorkingSet + + // Network stats. + series[colRxBytes] = stats.Network.RxBytes + series[colRxErrors] = stats.Network.RxErrors + series[colTxBytes] = stats.Network.TxBytes + series[colTxErrors] = stats.Network.TxErrors + + return series +} + +func (self *statsdStorage) containerFsStatsToValues( + series *map[string]uint64, + stats *info.ContainerStats, +) { + for _, fsStat := range stats.Filesystem { + // Summary stats. + (*series)[colFsSummary+"."+colFsLimit] += fsStat.Limit + (*series)[colFsSummary+"."+colFsUsage] += fsStat.Usage + + // Per device stats. 
+ (*series)[fsStat.Device+"."+colFsLimit] = fsStat.Limit + (*series)[fsStat.Device+"."+colFsUsage] = fsStat.Usage + } +} + +//Push the data into redis +func (self *statsdStorage) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error { + if stats == nil { + return nil + } + + var containerName string + if len(ref.Aliases) > 0 { + containerName = ref.Aliases[0] + } else { + containerName = ref.Name + } + + series := self.containerStatsToValues(stats) + self.containerFsStatsToValues(&series, stats) + for key, value := range series { + err := self.client.Send(self.Namespace, containerName, key, value) + if err != nil { + return err + } + } + return nil +} + +func (self *statsdStorage) Close() error { + self.client.Close() + self.client = nil + return nil +} + +func New(namespace, hostPort string) (*statsdStorage, error) { + statsdClient, err := client.New(hostPort) + if err != nil { + return nil, err + } + statsdStorage := &statsdStorage{ + client: statsdClient, + Namespace: namespace, + } + return statsdStorage, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles.go b/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles.go index 1893a65c0c0..de92bc3a251 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles.go @@ -28,20 +28,23 @@ const secondsToMilliSeconds = 1000 const milliSecondsToNanoSeconds = 1000000 const secondsToNanoSeconds = secondsToMilliSeconds * milliSecondsToNanoSeconds -type uint64Slice []uint64 +type Uint64Slice []uint64 -func (a uint64Slice) Len() int { return len(a) } -func (a uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a uint64Slice) Less(i, j int) bool { return a[i] < a[j] } +func (a Uint64Slice) Len() int { return len(a) } +func (a Uint64Slice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a Uint64Slice) Less(i, j int) bool { return a[i] < a[j] } -// 
Get 90th percentile of the provided samples. Round to integer. -func (self uint64Slice) Get90Percentile() uint64 { +// Get percentile of the provided samples. Round to integer. +func (self Uint64Slice) GetPercentile(d float64) uint64 { + if d < 0.0 || d > 1.0 { + return 0 + } count := self.Len() if count == 0 { return 0 } sort.Sort(self) - n := float64(0.9 * (float64(count) + 1)) + n := float64(d * (float64(count) + 1)) idx, frac := math.Modf(n) index := int(idx) percentile := float64(self[index-1]) @@ -71,7 +74,7 @@ func (self *mean) Add(value uint64) { type resource struct { // list of samples being tracked. - samples uint64Slice + samples Uint64Slice // average from existing samples. mean mean // maximum value seen so far in the added samples. @@ -94,27 +97,31 @@ func (self *resource) Add(p info.Percentiles) { // Add a single sample. Internally, we convert it to a fake percentile sample. func (self *resource) AddSample(val uint64) { sample := info.Percentiles{ - Present: true, - Mean: val, - Max: val, - Ninety: val, + Present: true, + Mean: val, + Max: val, + Fifty: val, + Ninety: val, + NinetyFive: val, } self.Add(sample) } // Get max, average, and 90p from existing samples. 
-func (self *resource) GetPercentile() info.Percentiles { +func (self *resource) GetAllPercentiles() info.Percentiles { p := info.Percentiles{} p.Mean = uint64(self.mean.Mean) p.Max = self.max - p.Ninety = self.samples.Get90Percentile() + p.Fifty = self.samples.GetPercentile(0.5) + p.Ninety = self.samples.GetPercentile(0.9) + p.NinetyFive = self.samples.GetPercentile(0.95) p.Present = true return p } func NewResource(size int) *resource { return &resource{ - samples: make(uint64Slice, 0, size), + samples: make(Uint64Slice, 0, size), mean: mean{count: 0, Mean: 0}, } } @@ -128,8 +135,8 @@ func GetDerivedPercentiles(stats []*info.Usage) info.Usage { memory.Add(stat.Memory) } usage := info.Usage{} - usage.Cpu = cpu.GetPercentile() - usage.Memory = memory.GetPercentile() + usage.Cpu = cpu.GetAllPercentiles() + usage.Memory = memory.GetAllPercentiles() return usage } @@ -183,7 +190,7 @@ func GetMinutePercentiles(stats []*secondSample) info.Usage { percent := getPercentComplete(stats) return info.Usage{ PercentComplete: percent, - Cpu: cpu.GetPercentile(), - Memory: memory.GetPercentile(), + Cpu: cpu.GetAllPercentiles(), + Memory: memory.GetAllPercentiles(), } } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles_test.go index 53b6a29f2c2..4dbe3665d35 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles_test.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/summary/percentiles_test.go @@ -23,25 +23,29 @@ import ( const Nanosecond = 1000000000 -func Test90Percentile(t *testing.T) { +func assertPercentile(t *testing.T, s Uint64Slice, f float64, want uint64) { + if got := s.GetPercentile(f); got != want { + t.Errorf("GetPercentile(%f) is %d, should be %d.", f, got, want) + } +} + +func TestPercentile(t *testing.T) { N := 100 - stats := make(uint64Slice, 0, N) + s := make(Uint64Slice, 0, N) for i := N; i > 0; i-- { - stats = 
append(stats, uint64(i)) + s = append(s, uint64(i)) } - p := stats.Get90Percentile() - if p != 90 { - t.Errorf("90th percentile is %d, should be 90.", p) - } - // 90p should be between 94 and 95. Promoted to 95. + assertPercentile(t, s, 0.2, 20) + assertPercentile(t, s, 0.7, 70) + assertPercentile(t, s, 0.9, 90) N = 105 for i := 101; i <= N; i++ { - stats = append(stats, uint64(i)) - } - p = stats.Get90Percentile() - if p != 95 { - t.Errorf("90th percentile is %d, should be 95.", p) + s = append(s, uint64(i)) } + // 90p should be between 94 and 95. Promoted to 95. + assertPercentile(t, s, 0.2, 21) + assertPercentile(t, s, 0.7, 74) + assertPercentile(t, s, 0.9, 95) } func TestMean(t *testing.T) { @@ -74,19 +78,23 @@ func TestAggregates(t *testing.T) { usage := GetMinutePercentiles(stats) // Cpu mean, max, and 90p should all be 1000 ms/s. cpuExpected := info.Percentiles{ - Present: true, - Mean: 1000, - Max: 1000, - Ninety: 1000, + Present: true, + Mean: 1000, + Max: 1000, + Fifty: 1000, + Ninety: 1000, + NinetyFive: 1000, } if usage.Cpu != cpuExpected { t.Errorf("cpu stats are %+v. Expected %+v", usage.Cpu, cpuExpected) } memExpected := info.Percentiles{ - Present: true, - Mean: 50 * 1024, - Max: 99 * 1024, - Ninety: 90 * 1024, + Present: true, + Mean: 50 * 1024, + Max: 99 * 1024, + Fifty: 50 * 1024, + Ninety: 90 * 1024, + NinetyFive: 95 * 1024, } if usage.Memory != memExpected { t.Errorf("memory stats are mean %+v. Expected %+v", usage.Memory, memExpected) @@ -119,19 +127,23 @@ func TestSamplesCloseInTimeIgnored(t *testing.T) { usage := GetMinutePercentiles(stats) // Cpu mean, max, and 90p should all be 1000 ms/s. All high-value samples are discarded. cpuExpected := info.Percentiles{ - Present: true, - Mean: 1000, - Max: 1000, - Ninety: 1000, + Present: true, + Mean: 1000, + Max: 1000, + Fifty: 1000, + Ninety: 1000, + NinetyFive: 1000, } if usage.Cpu != cpuExpected { t.Errorf("cpu stats are %+v. 
Expected %+v", usage.Cpu, cpuExpected) } memExpected := info.Percentiles{ - Present: true, - Mean: 50 * 1024, - Max: 99 * 1024, - Ninety: 90 * 1024, + Present: true, + Mean: 50 * 1024, + Max: 99 * 1024, + Fifty: 50 * 1024, + Ninety: 90 * 1024, + NinetyFive: 95 * 1024, } if usage.Memory != memExpected { t.Errorf("memory stats are mean %+v. Expected %+v", usage.Memory, memExpected) @@ -146,35 +158,43 @@ func TestDerivedStats(t *testing.T) { s := &info.Usage{ PercentComplete: 100, Cpu: info.Percentiles{ - Present: true, - Mean: i * Nanosecond, - Max: i * Nanosecond, - Ninety: i * Nanosecond, + Present: true, + Mean: i * Nanosecond, + Max: i * Nanosecond, + Fifty: i * Nanosecond, + Ninety: i * Nanosecond, + NinetyFive: i * Nanosecond, }, Memory: info.Percentiles{ - Present: true, - Mean: i * 1024, - Max: i * 1024, - Ninety: i * 1024, + Present: true, + Mean: i * 1024, + Max: i * 1024, + Fifty: i * 1024, + Ninety: i * 1024, + NinetyFive: i * 1024, }, } stats = append(stats, s) } usage := GetDerivedPercentiles(stats) cpuExpected := info.Percentiles{ - Present: true, - Mean: 50 * Nanosecond, - Max: 99 * Nanosecond, - Ninety: 90 * Nanosecond, + Present: true, + Mean: 50 * Nanosecond, + Max: 99 * Nanosecond, + Fifty: 50 * Nanosecond, + Ninety: 90 * Nanosecond, + NinetyFive: 95 * Nanosecond, } if usage.Cpu != cpuExpected { t.Errorf("cpu stats are %+v. Expected %+v", usage.Cpu, cpuExpected) } memExpected := info.Percentiles{ - Present: true, - Mean: 50 * 1024, - Max: 99 * 1024, - Ninety: 90 * 1024, + Present: true, + Mean: 50 * 1024, + Max: 99 * 1024, + Fifty: 50 * 1024, + Ninety: 90 * 1024, + NinetyFive: 95 * 1024, } if usage.Memory != memExpected { t.Errorf("memory stats are mean %+v. 
Expected %+v", usage.Memory, memExpected) diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/cloudinfo/cloudinfo.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cloudinfo/cloudinfo.go new file mode 100644 index 00000000000..e073fae622b --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cloudinfo/cloudinfo.go @@ -0,0 +1,87 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Get information about the cloud provider (if any) cAdvisor is running on. 
+ +package cloudinfo + +import ( + info "github.com/google/cadvisor/info/v1" +) + +type CloudInfo interface { + GetCloudProvider() info.CloudProvider + GetInstanceType() info.InstanceType +} + +type realCloudInfo struct { + cloudProvider info.CloudProvider + instanceType info.InstanceType +} + +func NewRealCloudInfo() CloudInfo { + cloudProvider := detectCloudProvider() + instanceType := detectInstanceType(cloudProvider) + return &realCloudInfo{ + cloudProvider: cloudProvider, + instanceType: instanceType, + } +} + +func (self *realCloudInfo) GetCloudProvider() info.CloudProvider { + return self.cloudProvider +} + +func (self *realCloudInfo) GetInstanceType() info.InstanceType { + return self.instanceType +} + +func detectCloudProvider() info.CloudProvider { + switch { + case onGCE(): + return info.GCE + case onAWS(): + return info.AWS + case onBaremetal(): + return info.Baremetal + } + return info.UnkownProvider +} + +func detectInstanceType(cloudProvider info.CloudProvider) info.InstanceType { + switch cloudProvider { + case info.GCE: + return getGceInstanceType() + case info.AWS: + return getAwsInstanceType() + case info.Baremetal: + return info.NoInstance + } + return info.UnknownInstance +} + +//TODO: Implement method. +func onAWS() bool { + return false +} + +//TODO: Implement method. +func getAwsInstanceType() info.InstanceType { + return info.UnknownInstance +} + +//TODO: Implement method. +func onBaremetal() bool { + return false +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/cloudinfo/gce.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cloudinfo/gce.go new file mode 100644 index 00000000000..2ea27da2f0b --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/cloudinfo/gce.go @@ -0,0 +1,36 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloudinfo + +import ( + "strings" + + "github.com/GoogleCloudPlatform/gcloud-golang/compute/metadata" + info "github.com/google/cadvisor/info/v1" +) + +func onGCE() bool { + return metadata.OnGCE() +} + +func getGceInstanceType() info.InstanceType { + machineType, err := metadata.Get("instance/machine-type") + if err != nil { + return info.UnknownInstance + } + + responseParts := strings.Split(machineType, "/") // Extract the instance name from the machine type. + return info.InstanceType(responseParts[len(responseParts)-1]) +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/fs/mockfs/mockfs.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/fs/mockfs/mockfs.go index 93f08a686e4..cc3d615ee44 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/utils/fs/mockfs/mockfs.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/fs/mockfs/mockfs.go @@ -18,7 +18,7 @@ package mockfs import ( - gomock "code.google.com/p/gomock/gomock" + gomock "github.com/golang/mock/gomock" fs "github.com/google/cadvisor/utils/fs" ) diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/machine.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/machine.go new file mode 100644 index 00000000000..4d4ce23e345 --- /dev/null +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/machine.go @@ -0,0 +1,243 @@ +// Copyright 2015 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package machine + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" + + "github.com/golang/glog" + info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/utils" + "github.com/google/cadvisor/utils/sysfs" + "github.com/google/cadvisor/utils/sysinfo" +) + +// The utils/machine package contains functions that extract machine-level specs. + +var cpuRegExp = regexp.MustCompile("processor\\t*: +([0-9]+)") +var coreRegExp = regexp.MustCompile("core id\\t*: +([0-9]+)") +var nodeRegExp = regexp.MustCompile("physical id\\t*: +([0-9]+)") +var CpuClockSpeedMHz = regexp.MustCompile("cpu MHz\\t*: +([0-9]+.[0-9]+)") +var memoryCapacityRegexp = regexp.MustCompile("MemTotal: *([0-9]+) kB") +var swapCapacityRegexp = regexp.MustCompile("SwapTotal: *([0-9]+) kB") + +// GetClockSpeed returns the CPU clock speed, given a []byte formatted as the /proc/cpuinfo file. +func GetClockSpeed(procInfo []byte) (uint64, error) { + // First look through sys to find a max supported cpu frequency. 
+ const maxFreqFile = "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq" + if utils.FileExists(maxFreqFile) { + val, err := ioutil.ReadFile(maxFreqFile) + if err != nil { + return 0, err + } + var maxFreq uint64 + n, err := fmt.Sscanf(string(val), "%d", &maxFreq) + if err != nil || n != 1 { + return 0, fmt.Errorf("could not parse frequency %q", val) + } + return maxFreq, nil + } + // Fall back to /proc/cpuinfo + matches := CpuClockSpeedMHz.FindSubmatch(procInfo) + if len(matches) != 2 { + //Check if we are running on Power systems which have a different format + CpuClockSpeedMHz, _ = regexp.Compile("clock\\t*: +([0-9]+.[0-9]+)MHz") + matches = CpuClockSpeedMHz.FindSubmatch(procInfo) + if len(matches) != 2 { + return 0, fmt.Errorf("could not detect clock speed from output: %q", string(procInfo)) + } + } + speed, err := strconv.ParseFloat(string(matches[1]), 64) + if err != nil { + return 0, err + } + // Convert to kHz + return uint64(speed * 1000), nil +} + +// GetMachineMemoryCapacity returns the machine's total memory from /proc/meminfo. +// Returns the total memory capacity as an int64 (number of bytes). +func GetMachineMemoryCapacity() (int64, error) { + out, err := ioutil.ReadFile("/proc/meminfo") + if err != nil { + return 0, err + } + + memoryCapacity, err := parseCapacity(out, memoryCapacityRegexp) + if err != nil { + return 0, err + } + return memoryCapacity, err +} + +// GetMachineSwapCapacity returns the machine's total swap from /proc/meminfo. +// Returns the total swap capacity as an int64 (number of bytes). +func GetMachineSwapCapacity() (int64, error) { + out, err := ioutil.ReadFile("/proc/meminfo") + if err != nil { + return 0, err + } + + swapCapacity, err := parseCapacity(out, swapCapacityRegexp) + if err != nil { + return 0, err + } + return swapCapacity, err +} + +// parseCapacity matches a Regexp in a []byte, returning the resulting value in bytes. +// Assumes that the value matched by the Regexp is in KB. 
+func parseCapacity(b []byte, r *regexp.Regexp) (int64, error) { + matches := r.FindSubmatch(b) + if len(matches) != 2 { + return -1, fmt.Errorf("failed to match regexp in output: %q", string(b)) + } + m, err := strconv.ParseInt(string(matches[1]), 10, 64) + if err != nil { + return -1, err + } + + // Convert to bytes. + return m * 1024, err +} + +func GetTopology(sysFs sysfs.SysFs, cpuinfo string) ([]info.Node, int, error) { + nodes := []info.Node{} + numCores := 0 + lastThread := -1 + lastCore := -1 + lastNode := -1 + for _, line := range strings.Split(cpuinfo, "\n") { + ok, val, err := extractValue(line, cpuRegExp) + if err != nil { + return nil, -1, fmt.Errorf("could not parse cpu info from %q: %v", line, err) + } + if ok { + thread := val + numCores++ + if lastThread != -1 { + // New cpu section. Save last one. + nodeIdx, err := addNode(&nodes, lastNode) + if err != nil { + return nil, -1, fmt.Errorf("failed to add node %d: %v", lastNode, err) + } + nodes[nodeIdx].AddThread(lastThread, lastCore) + lastCore = -1 + lastNode = -1 + } + lastThread = thread + } + ok, val, err = extractValue(line, coreRegExp) + if err != nil { + return nil, -1, fmt.Errorf("could not parse core info from %q: %v", line, err) + } + if ok { + lastCore = val + } + ok, val, err = extractValue(line, nodeRegExp) + if err != nil { + return nil, -1, fmt.Errorf("could not parse node info from %q: %v", line, err) + } + if ok { + lastNode = val + } + } + nodeIdx, err := addNode(&nodes, lastNode) + if err != nil { + return nil, -1, fmt.Errorf("failed to add node %d: %v", lastNode, err) + } + nodes[nodeIdx].AddThread(lastThread, lastCore) + if numCores < 1 { + return nil, numCores, fmt.Errorf("could not detect any cores") + } + for idx, node := range nodes { + caches, err := sysinfo.GetCacheInfo(sysFs, node.Cores[0].Threads[0]) + if err != nil { + glog.Errorf("failed to get cache information for node %d: %v", node.Id, err) + continue + } + numThreadsPerCore := len(node.Cores[0].Threads) + 
numThreadsPerNode := len(node.Cores) * numThreadsPerCore + for _, cache := range caches { + c := info.Cache{ + Size: cache.Size, + Level: cache.Level, + Type: cache.Type, + } + if cache.Cpus == numThreadsPerNode && cache.Level > 2 { + // Add a node-level cache. + nodes[idx].AddNodeCache(c) + } else if cache.Cpus == numThreadsPerCore { + // Add to each core. + nodes[idx].AddPerCoreCache(c) + } + // Ignore unknown caches. + } + } + return nodes, numCores, nil +} + +func extractValue(s string, r *regexp.Regexp) (bool, int, error) { + matches := r.FindSubmatch([]byte(s)) + if len(matches) == 2 { + val, err := strconv.ParseInt(string(matches[1]), 10, 32) + if err != nil { + return true, -1, err + } + return true, int(val), nil + } + return false, -1, nil +} + +func findNode(nodes []info.Node, id int) (bool, int) { + for i, n := range nodes { + if n.Id == id { + return true, i + } + } + return false, -1 +} + +func addNode(nodes *[]info.Node, id int) (int, error) { + var idx int + if id == -1 { + // Some VMs don't fill topology data. Export single package. + id = 0 + } + + ok, idx := findNode(*nodes, id) + if !ok { + // New node + node := info.Node{Id: id} + // Add per-node memory information. + meminfo := fmt.Sprintf("/sys/devices/system/node/node%d/meminfo", id) + out, err := ioutil.ReadFile(meminfo) + // Ignore if per-node info is not available. 
+ if err == nil { + m, err := parseCapacity(out, memoryCapacityRegexp) + if err != nil { + return -1, err + } + node.Memory = uint64(m) + } + *nodes = append(*nodes, node) + idx = len(*nodes) - 1 + } + return idx, nil +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/testdata/cpuinfo b/Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/testdata/cpuinfo similarity index 100% rename from Godeps/_workspace/src/github.com/google/cadvisor/manager/testdata/cpuinfo rename to Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/testdata/cpuinfo diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/topology_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/topology_test.go similarity index 94% rename from Godeps/_workspace/src/github.com/google/cadvisor/manager/topology_test.go rename to Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/topology_test.go index dbae6df3c83..0c3f158872d 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/manager/topology_test.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/topology_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package manager +package machine import ( "io/ioutil" @@ -38,7 +38,7 @@ func TestTopology(t *testing.T) { Cpus: 2, } sysFs.SetCacheInfo(c) - topology, numCores, err := getTopology(sysFs, string(testcpuinfo)) + topology, numCores, err := GetTopology(sysFs, string(testcpuinfo)) if err != nil { t.Errorf("failed to get topology for sample cpuinfo %s", string(testcpuinfo)) } @@ -84,7 +84,7 @@ func TestTopologyWithSimpleCpuinfo(t *testing.T) { Cpus: 1, } sysFs.SetCacheInfo(c) - topology, numCores, err := getTopology(sysFs, "processor\t: 0\n") + topology, numCores, err := GetTopology(sysFs, "processor\t: 0\n") if err != nil { t.Errorf("Expected cpuinfo with no topology data to succeed.") } @@ -110,7 +110,7 @@ func TestTopologyWithSimpleCpuinfo(t *testing.T) { } func TestTopologyEmptyCpuinfo(t *testing.T) { - _, _, err := getTopology(&fakesysfs.FakeSysFs{}, "") + _, _, err := GetTopology(&fakesysfs.FakeSysFs{}, "") if err == nil { t.Errorf("Expected empty cpuinfo to fail.") } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/version/version.go b/Godeps/_workspace/src/github.com/google/cadvisor/version/version.go index 5ea76cfcd30..97ea3e9157f 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/version/version.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/version/version.go @@ -15,4 +15,4 @@ package version // Version of cAdvisor. 
-const VERSION = "0.15.1" +const VERSION = "0.16.0" diff --git a/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/.gitignore b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/.gitignore new file mode 100644 index 00000000000..836562412fe --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/LICENSE b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/LICENSE new file mode 100644 index 00000000000..c3d4cc307d2 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Nate Finch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/README.md b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/README.md new file mode 100644 index 00000000000..9f8e698c0bc --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/README.md @@ -0,0 +1,166 @@ +# lumberjack [![GoDoc](https://godoc.org/gopkg.in/natefinch/lumberjack.v2?status.png)](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [![Build Status](https://drone.io/github.com/natefinch/lumberjack/status.png)](https://drone.io/github.com/natefinch/lumberjack/latest) [![Build status](https://ci.appveyor.com/api/projects/status/00gchpxtg4gkrt5d)](https://ci.appveyor.com/project/natefinch/lumberjack) [![Coverage Status](https://coveralls.io/repos/natefinch/lumberjack/badge.svg?branch=v2.0)](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0) + +### Lumberjack is a Go package for writing logs to rolling files. + +Package lumberjack provides a rolling logger. + +Note that this is v2.0 of lumberjack, and should be imported using gopkg.in +thusly: + + import "gopkg.in/natefinch/lumberjack.v2" + +The package name remains simply lumberjack, and the code resides at +https://github.com/natefinch/lumberjack under the v2.0 branch. + +Lumberjack is intended to be one part of a logging infrastructure. +It is not an all-in-one solution, but instead is a pluggable +component at the bottom of the logging stack that simply controls the files +to which logs are written. + +Lumberjack plays well with any logging package that can write to an +io.Writer, including the standard library's log package. + +Lumberjack assumes that only one process is writing to the output files. 
+Using the same lumberjack configuration from multiple processes on the same +machine will result in improper behavior. + + +**Example** + +To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts. + +Code: + +```go +log.SetOutput(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, //days +}) +``` + + + +## type Logger +``` go +type Logger struct { + // Filename is the file to write logs to. Backup log files will be retained + // in the same directory. It uses -lumberjack.log in + // os.TempDir() if empty. + Filename string `json:"filename" yaml:"filename"` + + // MaxSize is the maximum size in megabytes of the log file before it gets + // rotated. It defaults to 100 megabytes. + MaxSize int `json:"maxsize" yaml:"maxsize"` + + // MaxAge is the maximum number of days to retain old log files based on the + // timestamp encoded in their filename. Note that a day is defined as 24 + // hours and may not exactly correspond to calendar days due to daylight + // savings, leap seconds, etc. The default is not to remove old log files + // based on age. + MaxAge int `json:"maxage" yaml:"maxage"` + + // MaxBackups is the maximum number of old log files to retain. The default + // is to retain all old log files (though MaxAge may still cause them to get + // deleted.) + MaxBackups int `json:"maxbackups" yaml:"maxbackups"` + + // LocalTime determines if the time used for formatting the timestamps in + // backup files is the computer's local time. The default is to use UTC + // time. + LocalTime bool `json:"localtime" yaml:"localtime"` + // contains filtered or unexported fields +} +``` +Logger is an io.WriteCloser that writes to the specified filename. + +Logger opens or creates the logfile on first Write. If the file exists and +is less than MaxSize megabytes, lumberjack will open and append to that file. 
+If the file exists and its size is >= MaxSize megabytes, the file is renamed +by putting the current time in a timestamp in the name immediately before the +file's extension (or the end of the filename if there's no extension). A new +log file is then created using original filename. + +Whenever a write would cause the current log file exceed MaxSize megabytes, +the current file is closed, renamed, and a new log file created with the +original name. Thus, the filename you give Logger is always the "current" log +file. + +### Cleaning Up Old Log Files +Whenever a new logfile gets created, old log files may be deleted. The most +recent files according to the encoded timestamp will be retained, up to a +number equal to MaxBackups (or all of them if MaxBackups is 0). Any files +with an encoded timestamp older than MaxAge days are deleted, regardless of +MaxBackups. Note that the time encoded in the timestamp is the rotation +time, which may differ from the last time that file was written to. + +If MaxBackups and MaxAge are both 0, no old log files will be deleted. + + + + + + + + + + + +### func (\*Logger) Close +``` go +func (l *Logger) Close() error +``` +Close implements io.Closer, and closes the current logfile. + + + +### func (\*Logger) Rotate +``` go +func (l *Logger) Rotate() error +``` +Rotate causes Logger to close the existing log file and immediately create a +new one. This is a helper function for applications that want to initiate +rotations outside of the normal rotation rules, such as in response to +SIGHUP. After rotating, this initiates a cleanup of old log files according +to the normal rules. + +**Example** + +Example of how to rotate in response to SIGHUP. 
+ +Code: + +```go +l := &lumberjack.Logger{} +log.SetOutput(l) +c := make(chan os.Signal, 1) +signal.Notify(c, syscall.SIGHUP) + +go func() { + for { + <-c + l.Rotate() + } +}() +``` + +### func (\*Logger) Write +``` go +func (l *Logger) Write(p []byte) (n int, err error) +``` +Write implements io.Writer. If a write would cause the log file to be larger +than MaxSize, the file is closed, renamed to include a timestamp of the +current time, and a new log file is created using the original log file name. +If the length of the write is greater than MaxSize, an error is returned. + + + + + + + + + +- - - +Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/chown.go b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/chown.go new file mode 100644 index 00000000000..11d06697232 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/chown.go @@ -0,0 +1,11 @@ +// +build !linux + +package lumberjack + +import ( + "os" +) + +func chown(_ string, _ os.FileInfo) error { + return nil +} diff --git a/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/chown_linux.go b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/chown_linux.go new file mode 100644 index 00000000000..2758ec9cedd --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/chown_linux.go @@ -0,0 +1,19 @@ +package lumberjack + +import ( + "os" + "syscall" +) + +// os_Chown is a var so we can mock it out during tests. 
+var os_Chown = os.Chown + +func chown(name string, info os.FileInfo) error { + f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode()) + if err != nil { + return err + } + f.Close() + stat := info.Sys().(*syscall.Stat_t) + return os_Chown(name, int(stat.Uid), int(stat.Gid)) +} diff --git a/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/example_test.go b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/example_test.go new file mode 100644 index 00000000000..bf689fd9dd0 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/example_test.go @@ -0,0 +1,18 @@ +package lumberjack_test + +import ( + "log" + + "gopkg.in/natefinch/lumberjack.v2" +) + +// To use lumberjack with the standard library's log package, just pass it into +// the SetOutput function when your application starts. +func Example() { + log.SetOutput(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, // days + }) +} diff --git a/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/linux_test.go b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/linux_test.go new file mode 100644 index 00000000000..40f3446685c --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/linux_test.go @@ -0,0 +1,104 @@ +// +build linux + +package lumberjack + +import ( + "os" + "syscall" + "testing" +) + +func TestMaintainMode(t *testing.T) { + currentTime = fakeTime + dir := makeTempDir("TestMaintainMode", t) + defer os.RemoveAll(dir) + + filename := logFile(dir) + + mode := os.FileMode(0770) + f, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, mode) + isNil(err, t) + f.Close() + + l := &Logger{ + Filename: filename, + MaxBackups: 1, + MaxSize: 100, // megabytes + } + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), n, t) + + newFakeTime() + + err = l.Rotate() + isNil(err, t) + + filename2 := backupFile(dir) + info, err := 
os.Stat(filename) + isNil(err, t) + info2, err := os.Stat(filename2) + isNil(err, t) + equals(mode, info.Mode(), t) + equals(mode, info2.Mode(), t) +} + +func TestMaintainOwner(t *testing.T) { + fakeC := fakeChown{} + os_Chown = fakeC.Set + os_Stat = fakeStat + defer func() { + os_Chown = os.Chown + os_Stat = os.Stat + }() + currentTime = fakeTime + dir := makeTempDir("TestMaintainOwner", t) + defer os.RemoveAll(dir) + + filename := logFile(dir) + + l := &Logger{ + Filename: filename, + MaxBackups: 1, + MaxSize: 100, // megabytes + } + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), n, t) + + newFakeTime() + + err = l.Rotate() + isNil(err, t) + + equals(555, fakeC.uid, t) + equals(666, fakeC.gid, t) +} + +type fakeChown struct { + name string + uid int + gid int +} + +func (f *fakeChown) Set(name string, uid, gid int) error { + f.name = name + f.uid = uid + f.gid = gid + return nil +} + +func fakeStat(name string) (os.FileInfo, error) { + info, err := os.Stat(name) + if err != nil { + return info, err + } + stat := info.Sys().(*syscall.Stat_t) + stat.Uid = 555 + stat.Gid = 666 + return info, nil +} diff --git a/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/lumberjack.go b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/lumberjack.go new file mode 100644 index 00000000000..701444411e3 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/lumberjack.go @@ -0,0 +1,417 @@ +// Package lumberjack provides a rolling logger. +// +// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in +// thusly: +// +// import "gopkg.in/natefinch/lumberjack.v2" +// +// The package name remains simply lumberjack, and the code resides at +// https://github.com/natefinch/lumberjack under the v2.0 branch. +// +// Lumberjack is intended to be one part of a logging infrastructure. 
+// It is not an all-in-one solution, but instead is a pluggable +// component at the bottom of the logging stack that simply controls the files +// to which logs are written. +// +// Lumberjack plays well with any logging package that can write to an +// io.Writer, including the standard library's log package. +// +// Lumberjack assumes that only one process is writing to the output files. +// Using the same lumberjack configuration from multiple processes on the same +// machine will result in improper behavior. +package lumberjack + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" +) + +const ( + backupTimeFormat = "2006-01-02T15-04-05.000" + defaultMaxSize = 100 +) + +// ensure we always implement io.WriteCloser +var _ io.WriteCloser = (*Logger)(nil) + +// Logger is an io.WriteCloser that writes to the specified filename. +// +// Logger opens or creates the logfile on first Write. If the file exists and +// is less than MaxSize megabytes, lumberjack will open and append to that file. +// If the file exists and its size is >= MaxSize megabytes, the file is renamed +// by putting the current time in a timestamp in the name immediately before the +// file's extension (or the end of the filename if there's no extension). A new +// log file is then created using original filename. +// +// Whenever a write would cause the current log file exceed MaxSize megabytes, +// the current file is closed, renamed, and a new log file created with the +// original name. Thus, the filename you give Logger is always the "current" log +// file. +// +// Cleaning Up Old Log Files +// +// Whenever a new logfile gets created, old log files may be deleted. The most +// recent files according to the encoded timestamp will be retained, up to a +// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files +// with an encoded timestamp older than MaxAge days are deleted, regardless of +// MaxBackups. 
Note that the time encoded in the timestamp is the rotation +// time, which may differ from the last time that file was written to. +// +// If MaxBackups and MaxAge are both 0, no old log files will be deleted. +type Logger struct { + // Filename is the file to write logs to. Backup log files will be retained + // in the same directory. It uses -lumberjack.log in + // os.TempDir() if empty. + Filename string `json:"filename" yaml:"filename"` + + // MaxSize is the maximum size in megabytes of the log file before it gets + // rotated. It defaults to 100 megabytes. + MaxSize int `json:"maxsize" yaml:"maxsize"` + + // MaxAge is the maximum number of days to retain old log files based on the + // timestamp encoded in their filename. Note that a day is defined as 24 + // hours and may not exactly correspond to calendar days due to daylight + // savings, leap seconds, etc. The default is not to remove old log files + // based on age. + MaxAge int `json:"maxage" yaml:"maxage"` + + // MaxBackups is the maximum number of old log files to retain. The default + // is to retain all old log files (though MaxAge may still cause them to get + // deleted.) + MaxBackups int `json:"maxbackups" yaml:"maxbackups"` + + // LocalTime determines if the time used for formatting the timestamps in + // backup files is the computer's local time. The default is to use UTC + // time. + LocalTime bool `json:"localtime" yaml:"localtime"` + + size int64 + file *os.File + mu sync.Mutex +} + +var ( + // currentTime exists so it can be mocked out by tests. + currentTime = time.Now + + // os_Stat exists so it can be mocked out by tests. + os_Stat = os.Stat + + // megabyte is the conversion factor between MaxSize and bytes. It is a + // variable so tests can mock it out and not need to write megabytes of data + // to disk. + megabyte = 1024 * 1024 +) + +// Write implements io.Writer. 
If a write would cause the log file to be larger +// than MaxSize, the file is closed, renamed to include a timestamp of the +// current time, and a new log file is created using the original log file name. +// If the length of the write is greater than MaxSize, an error is returned. +func (l *Logger) Write(p []byte) (n int, err error) { + l.mu.Lock() + defer l.mu.Unlock() + + writeLen := int64(len(p)) + if writeLen > l.max() { + return 0, fmt.Errorf( + "write length %d exceeds maximum file size %d", writeLen, l.max(), + ) + } + + if l.file == nil { + if err = l.openExistingOrNew(len(p)); err != nil { + return 0, err + } + } + + if l.size+writeLen > l.max() { + if err := l.rotate(); err != nil { + return 0, err + } + } + + n, err = l.file.Write(p) + l.size += int64(n) + + return n, err +} + +// Close implements io.Closer, and closes the current logfile. +func (l *Logger) Close() error { + l.mu.Lock() + defer l.mu.Unlock() + return l.close() +} + +// close closes the file if it is open. +func (l *Logger) close() error { + if l.file == nil { + return nil + } + err := l.file.Close() + l.file = nil + return err +} + +// Rotate causes Logger to close the existing log file and immediately create a +// new one. This is a helper function for applications that want to initiate +// rotations outside of the normal rotation rules, such as in response to +// SIGHUP. After rotating, this initiates a cleanup of old log files according +// to the normal rules. +func (l *Logger) Rotate() error { + l.mu.Lock() + defer l.mu.Unlock() + return l.rotate() +} + +// rotate closes the current file, moves it aside with a timestamp in the name, +// (if it exists), opens a new file with the original filename, and then runs +// cleanup. +func (l *Logger) rotate() error { + if err := l.close(); err != nil { + return err + } + + if err := l.openNew(); err != nil { + return err + } + return l.cleanup() +} + +// openNew opens a new log file for writing, moving any old log file out of the +// way. 
This methods assumes the file has already been closed. +func (l *Logger) openNew() error { + err := os.MkdirAll(l.dir(), 0744) + if err != nil { + return fmt.Errorf("can't make directories for new logfile: %s", err) + } + + name := l.filename() + mode := os.FileMode(0644) + info, err := os_Stat(name) + if err == nil { + // Copy the mode off the old logfile. + mode = info.Mode() + // move the existing file + newname := backupName(name, l.LocalTime) + if err := os.Rename(name, newname); err != nil { + return fmt.Errorf("can't rename log file: %s", err) + } + + // this is a no-op anywhere but linux + if err := chown(name, info); err != nil { + return err + } + } + + // we use truncate here because this should only get called when we've moved + // the file ourselves. if someone else creates the file in the meantime, + // just wipe out the contents. + f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode) + if err != nil { + return fmt.Errorf("can't open new logfile: %s", err) + } + l.file = f + l.size = 0 + return nil +} + +// backupName creates a new filename from the given name, inserting a timestamp +// between the filename and the extension, using the local time if requested +// (otherwise UTC). +func backupName(name string, local bool) string { + dir := filepath.Dir(name) + filename := filepath.Base(name) + ext := filepath.Ext(filename) + prefix := filename[:len(filename)-len(ext)] + t := currentTime() + if !local { + t = t.UTC() + } + + timestamp := t.Format(backupTimeFormat) + return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext)) +} + +// openExistingOrNew opens the logfile if it exists and if the current write +// would not put it over MaxSize. If there is no such file or the write would +// put it over the MaxSize, a new file is created. 
+func (l *Logger) openExistingOrNew(writeLen int) error { + filename := l.filename() + info, err := os_Stat(filename) + if os.IsNotExist(err) { + return l.openNew() + } + if err != nil { + return fmt.Errorf("error getting log file info: %s", err) + } + + if info.Size()+int64(writeLen) >= l.max() { + return l.rotate() + } + + file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + // if we fail to open the old log file for some reason, just ignore + // it and open a new log file. + return l.openNew() + } + l.file = file + l.size = info.Size() + return nil +} + +// genFilename generates the name of the logfile from the current time. +func (l *Logger) filename() string { + if l.Filename != "" { + return l.Filename + } + name := filepath.Base(os.Args[0]) + "-lumberjack.log" + return filepath.Join(os.TempDir(), name) +} + +// cleanup deletes old log files, keeping at most l.MaxBackups files, as long as +// none of them are older than MaxAge. +func (l *Logger) cleanup() error { + if l.MaxBackups == 0 && l.MaxAge == 0 { + return nil + } + + files, err := l.oldLogFiles() + if err != nil { + return err + } + + var deletes []logInfo + + if l.MaxBackups > 0 && l.MaxBackups < len(files) { + deletes = files[l.MaxBackups:] + files = files[:l.MaxBackups] + } + if l.MaxAge > 0 { + diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge)) + + cutoff := currentTime().Add(-1 * diff) + + for _, f := range files { + if f.timestamp.Before(cutoff) { + deletes = append(deletes, f) + } + } + } + + if len(deletes) == 0 { + return nil + } + + go deleteAll(l.dir(), deletes) + + return nil +} + +func deleteAll(dir string, files []logInfo) { + // remove files on a separate goroutine + for _, f := range files { + // what am I going to do, log this? 
+ _ = os.Remove(filepath.Join(dir, f.Name())) + } +} + +// oldLogFiles returns the list of backup log files stored in the same +// directory as the current log file, sorted by ModTime +func (l *Logger) oldLogFiles() ([]logInfo, error) { + files, err := ioutil.ReadDir(l.dir()) + if err != nil { + return nil, fmt.Errorf("can't read log file directory: %s", err) + } + logFiles := []logInfo{} + + prefix, ext := l.prefixAndExt() + + for _, f := range files { + if f.IsDir() { + continue + } + name := l.timeFromName(f.Name(), prefix, ext) + if name == "" { + continue + } + t, err := time.Parse(backupTimeFormat, name) + if err == nil { + logFiles = append(logFiles, logInfo{t, f}) + } + // error parsing means that the suffix at the end was not generated + // by lumberjack, and therefore it's not a backup file. + } + + sort.Sort(byFormatTime(logFiles)) + + return logFiles, nil +} + +// timeFromName extracts the formatted time from the filename by stripping off +// the filename's prefix and extension. This prevents someone's filename from +// confusing time.parse. +func (l *Logger) timeFromName(filename, prefix, ext string) string { + if !strings.HasPrefix(filename, prefix) { + return "" + } + filename = filename[len(prefix):] + + if !strings.HasSuffix(filename, ext) { + return "" + } + filename = filename[:len(filename)-len(ext)] + return filename +} + +// max returns the maximum size in bytes of log files before rolling. +func (l *Logger) max() int64 { + if l.MaxSize == 0 { + return int64(defaultMaxSize * megabyte) + } + return int64(l.MaxSize) * int64(megabyte) +} + +// dir returns the directory for the current filename. +func (l *Logger) dir() string { + return filepath.Dir(l.filename()) +} + +// prefixAndExt returns the filename part and extension part from the Logger's +// filename. 
+func (l *Logger) prefixAndExt() (prefix, ext string) { + filename := filepath.Base(l.filename()) + ext = filepath.Ext(filename) + prefix = filename[:len(filename)-len(ext)] + "-" + return prefix, ext +} + +// logInfo is a convenience struct to return the filename and its embedded +// timestamp. +type logInfo struct { + timestamp time.Time + os.FileInfo +} + +// byFormatTime sorts by newest time formatted in the name. +type byFormatTime []logInfo + +func (b byFormatTime) Less(i, j int) bool { + return b[i].timestamp.After(b[j].timestamp) +} + +func (b byFormatTime) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b byFormatTime) Len() int { + return len(b) +} diff --git a/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/lumberjack_test.go b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/lumberjack_test.go new file mode 100644 index 00000000000..c11dc1872ff --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/lumberjack_test.go @@ -0,0 +1,690 @@ +package lumberjack + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/BurntSushi/toml" + "gopkg.in/yaml.v1" +) + +// !!!NOTE!!! +// +// Running these tests in parallel will almost certainly cause sporadic (or even +// regular) failures, because they're all messing with the same global variable +// that controls the logic's mocked time.Now. So... don't do that. + +// Since all the tests uses the time to determine filenames etc, we need to +// control the wall clock as much as possible, which means having a wall clock +// that doesn't change unless we want it to. 
+var fakeCurrentTime = time.Now() + +func fakeTime() time.Time { + return fakeCurrentTime +} + +func TestNewFile(t *testing.T) { + currentTime = fakeTime + + dir := makeTempDir("TestNewFile", t) + defer os.RemoveAll(dir) + l := &Logger{ + Filename: logFile(dir), + } + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), n, t) + existsWithLen(logFile(dir), n, t) + fileCount(dir, 1, t) +} + +func TestOpenExisting(t *testing.T) { + currentTime = fakeTime + dir := makeTempDir("TestOpenExisting", t) + defer os.RemoveAll(dir) + + filename := logFile(dir) + data := []byte("foo!") + err := ioutil.WriteFile(filename, data, 0644) + isNil(err, t) + existsWithLen(filename, len(data), t) + + l := &Logger{ + Filename: filename, + } + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), n, t) + + // make sure the file got appended + existsWithLen(filename, len(data)+n, t) + + // make sure no other files were created + fileCount(dir, 1, t) +} + +func TestWriteTooLong(t *testing.T) { + currentTime = fakeTime + megabyte = 1 + dir := makeTempDir("TestWriteTooLong", t) + defer os.RemoveAll(dir) + l := &Logger{ + Filename: logFile(dir), + MaxSize: 5, + } + defer l.Close() + b := []byte("booooooooooooooo!") + n, err := l.Write(b) + notNil(err, t) + equals(0, n, t) + equals(err.Error(), + fmt.Sprintf("write length %d exceeds maximum file size %d", len(b), l.MaxSize), t) + _, err = os.Stat(logFile(dir)) + assert(os.IsNotExist(err), t, "File exists, but should not have been created") +} + +func TestMakeLogDir(t *testing.T) { + currentTime = fakeTime + dir := time.Now().Format("TestMakeLogDir" + backupTimeFormat) + dir = filepath.Join(os.TempDir(), dir) + defer os.RemoveAll(dir) + filename := logFile(dir) + l := &Logger{ + Filename: filename, + } + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), n, t) + existsWithLen(logFile(dir), n, t) + fileCount(dir, 1, t) +} + 
+func TestDefaultFilename(t *testing.T) { + currentTime = fakeTime + dir := os.TempDir() + filename := filepath.Join(dir, filepath.Base(os.Args[0])+"-lumberjack.log") + defer os.Remove(filename) + l := &Logger{} + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + + isNil(err, t) + equals(len(b), n, t) + existsWithLen(filename, n, t) +} + +func TestAutoRotate(t *testing.T) { + currentTime = fakeTime + megabyte = 1 + + dir := makeTempDir("TestAutoRotate", t) + defer os.RemoveAll(dir) + + filename := logFile(dir) + l := &Logger{ + Filename: filename, + MaxSize: 10, + } + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), n, t) + + existsWithLen(filename, n, t) + fileCount(dir, 1, t) + + newFakeTime() + + b2 := []byte("foooooo!") + n, err = l.Write(b2) + isNil(err, t) + equals(len(b2), n, t) + + // the old logfile should be moved aside and the main logfile should have + // only the last write in it. + existsWithLen(filename, n, t) + + // the backup file will use the current fake time and have the old contents. 
+ existsWithLen(backupFile(dir), len(b), t) + + fileCount(dir, 2, t) +} + +func TestFirstWriteRotate(t *testing.T) { + currentTime = fakeTime + megabyte = 1 + dir := makeTempDir("TestFirstWriteRotate", t) + defer os.RemoveAll(dir) + + filename := logFile(dir) + l := &Logger{ + Filename: filename, + MaxSize: 10, + } + defer l.Close() + + start := []byte("boooooo!") + err := ioutil.WriteFile(filename, start, 0600) + isNil(err, t) + + newFakeTime() + + // this would make us rotate + b := []byte("fooo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), n, t) + + existsWithLen(filename, n, t) + existsWithLen(backupFile(dir), len(start), t) + + fileCount(dir, 2, t) +} + +func TestMaxBackups(t *testing.T) { + currentTime = fakeTime + megabyte = 1 + dir := makeTempDir("TestMaxBackups", t) + defer os.RemoveAll(dir) + + filename := logFile(dir) + l := &Logger{ + Filename: filename, + MaxSize: 10, + MaxBackups: 1, + } + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), n, t) + + existsWithLen(filename, n, t) + fileCount(dir, 1, t) + + newFakeTime() + + // this will put us over the max + b2 := []byte("foooooo!") + n, err = l.Write(b2) + isNil(err, t) + equals(len(b2), n, t) + + // this will use the new fake time + secondFilename := backupFile(dir) + existsWithLen(secondFilename, len(b), t) + + // make sure the old file still exists with the same size. + existsWithLen(filename, n, t) + + fileCount(dir, 2, t) + + newFakeTime() + + // this will make us rotate again + n, err = l.Write(b2) + isNil(err, t) + equals(len(b2), n, t) + + // this will use the new fake time + thirdFilename := backupFile(dir) + existsWithLen(thirdFilename, len(b2), t) + + existsWithLen(filename, n, t) + + // we need to wait a little bit since the files get deleted on a different + // goroutine. 
+ <-time.After(time.Millisecond * 10) + + // should only have two files in the dir still + fileCount(dir, 2, t) + + // second file name should still exist + existsWithLen(thirdFilename, len(b2), t) + + // should have deleted the first backup + notExist(secondFilename, t) + + // now test that we don't delete directories or non-logfile files + + newFakeTime() + + // create a file that is close to but different from the logfile name. + // It shouldn't get caught by our deletion filters. + notlogfile := logFile(dir) + ".foo" + err = ioutil.WriteFile(notlogfile, []byte("data"), 0644) + isNil(err, t) + + // Make a directory that exactly matches our log file filters... it still + // shouldn't get caught by the deletion filter since it's a directory. + notlogfiledir := backupFile(dir) + err = os.Mkdir(notlogfiledir, 0700) + isNil(err, t) + + newFakeTime() + + // this will make us rotate again + n, err = l.Write(b2) + isNil(err, t) + equals(len(b2), n, t) + + // this will use the new fake time + fourthFilename := backupFile(dir) + existsWithLen(fourthFilename, len(b2), t) + + // we need to wait a little bit since the files get deleted on a different + // goroutine. + <-time.After(time.Millisecond * 10) + + // We should have four things in the directory now - the 2 log files, the + // not log file, and the directory + fileCount(dir, 4, t) + + // third file name should still exist + existsWithLen(filename, n, t) + + existsWithLen(fourthFilename, len(b2), t) + + // should have deleted the first filename + notExist(thirdFilename, t) + + // the not-a-logfile should still exist + exists(notlogfile, t) + + // the directory + exists(notlogfiledir, t) +} + +func TestCleanupExistingBackups(t *testing.T) { + // test that if we start with more backup files than we're supposed to have + // in total, that extra ones get cleaned up when we rotate. 
+ + currentTime = fakeTime + megabyte = 1 + + dir := makeTempDir("TestCleanupExistingBackups", t) + defer os.RemoveAll(dir) + + // make 3 backup files + + data := []byte("data") + backup := backupFile(dir) + err := ioutil.WriteFile(backup, data, 0644) + isNil(err, t) + + newFakeTime() + + backup = backupFile(dir) + err = ioutil.WriteFile(backup, data, 0644) + isNil(err, t) + + newFakeTime() + + backup = backupFile(dir) + err = ioutil.WriteFile(backup, data, 0644) + isNil(err, t) + + // now create a primary log file with some data + filename := logFile(dir) + err = ioutil.WriteFile(filename, data, 0644) + isNil(err, t) + + l := &Logger{ + Filename: filename, + MaxSize: 10, + MaxBackups: 1, + } + defer l.Close() + + newFakeTime() + + b2 := []byte("foooooo!") + n, err := l.Write(b2) + isNil(err, t) + equals(len(b2), n, t) + + // we need to wait a little bit since the files get deleted on a different + // goroutine. + <-time.After(time.Millisecond * 10) + + // now we should only have 2 files left - the primary and one backup + fileCount(dir, 2, t) +} + +func TestMaxAge(t *testing.T) { + currentTime = fakeTime + megabyte = 1 + + dir := makeTempDir("TestMaxAge", t) + defer os.RemoveAll(dir) + + filename := logFile(dir) + l := &Logger{ + Filename: filename, + MaxSize: 10, + MaxAge: 1, + } + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), n, t) + + existsWithLen(filename, n, t) + fileCount(dir, 1, t) + + // two days later + newFakeTime() + + b2 := []byte("foooooo!") + n, err = l.Write(b2) + isNil(err, t) + equals(len(b2), n, t) + existsWithLen(backupFile(dir), len(b), t) + + // we need to wait a little bit since the files get deleted on a different + // goroutine. + <-time.After(10 * time.Millisecond) + + // We should still have 2 log files, since the most recent backup was just + // created. 
+ fileCount(dir, 2, t) + + existsWithLen(filename, len(b2), t) + + // we should have deleted the old file due to being too old + existsWithLen(backupFile(dir), len(b), t) + + // two days later + newFakeTime() + + b3 := []byte("foooooo!") + n, err = l.Write(b2) + isNil(err, t) + equals(len(b3), n, t) + existsWithLen(backupFile(dir), len(b2), t) + + // we need to wait a little bit since the files get deleted on a different + // goroutine. + <-time.After(10 * time.Millisecond) + + // We should have 2 log files - the main log file, and the most recent + // backup. The earlier backup is past the cutoff and should be gone. + fileCount(dir, 2, t) + + existsWithLen(filename, len(b3), t) + + // we should have deleted the old file due to being too old + existsWithLen(backupFile(dir), len(b2), t) + +} + +func TestOldLogFiles(t *testing.T) { + currentTime = fakeTime + megabyte = 1 + + dir := makeTempDir("TestOldLogFiles", t) + defer os.RemoveAll(dir) + + filename := logFile(dir) + data := []byte("data") + err := ioutil.WriteFile(filename, data, 07) + isNil(err, t) + + // This gives us a time with the same precision as the time we get from the + // timestamp in the name. 
+ t1, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat)) + isNil(err, t) + + backup := backupFile(dir) + err = ioutil.WriteFile(backup, data, 07) + isNil(err, t) + + newFakeTime() + + t2, err := time.Parse(backupTimeFormat, fakeTime().UTC().Format(backupTimeFormat)) + isNil(err, t) + + backup2 := backupFile(dir) + err = ioutil.WriteFile(backup2, data, 07) + isNil(err, t) + + l := &Logger{Filename: filename} + files, err := l.oldLogFiles() + isNil(err, t) + equals(2, len(files), t) + + // should be sorted by newest file first, which would be t2 + equals(t2, files[0].timestamp, t) + equals(t1, files[1].timestamp, t) +} + +func TestTimeFromName(t *testing.T) { + l := &Logger{Filename: "/var/log/myfoo/foo.log"} + prefix, ext := l.prefixAndExt() + val := l.timeFromName("foo-2014-05-04T14-44-33.555.log", prefix, ext) + equals("2014-05-04T14-44-33.555", val, t) + + val = l.timeFromName("foo-2014-05-04T14-44-33.555", prefix, ext) + equals("", val, t) + + val = l.timeFromName("2014-05-04T14-44-33.555.log", prefix, ext) + equals("", val, t) + + val = l.timeFromName("foo.log", prefix, ext) + equals("", val, t) +} + +func TestLocalTime(t *testing.T) { + currentTime = fakeTime + megabyte = 1 + + dir := makeTempDir("TestLocalTime", t) + defer os.RemoveAll(dir) + + l := &Logger{ + Filename: logFile(dir), + MaxSize: 10, + LocalTime: true, + } + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), n, t) + + b2 := []byte("fooooooo!") + n2, err := l.Write(b2) + isNil(err, t) + equals(len(b2), n2, t) + + existsWithLen(logFile(dir), n2, t) + existsWithLen(backupFileLocal(dir), n, t) +} + +func TestRotate(t *testing.T) { + currentTime = fakeTime + dir := makeTempDir("TestRotate", t) + defer os.RemoveAll(dir) + + filename := logFile(dir) + + l := &Logger{ + Filename: filename, + MaxBackups: 1, + MaxSize: 100, // megabytes + } + defer l.Close() + b := []byte("boo!") + n, err := l.Write(b) + isNil(err, t) + equals(len(b), 
n, t) + + existsWithLen(filename, n, t) + fileCount(dir, 1, t) + + newFakeTime() + + err = l.Rotate() + isNil(err, t) + + // we need to wait a little bit since the files get deleted on a different + // goroutine. + <-time.After(10 * time.Millisecond) + + filename2 := backupFile(dir) + existsWithLen(filename2, n, t) + existsWithLen(filename, 0, t) + fileCount(dir, 2, t) + newFakeTime() + + err = l.Rotate() + isNil(err, t) + + // we need to wait a little bit since the files get deleted on a different + // goroutine. + <-time.After(10 * time.Millisecond) + + filename3 := backupFile(dir) + existsWithLen(filename3, 0, t) + existsWithLen(filename, 0, t) + fileCount(dir, 2, t) + + b2 := []byte("foooooo!") + n, err = l.Write(b2) + isNil(err, t) + equals(len(b2), n, t) + + // this will use the new fake time + existsWithLen(filename, n, t) +} + +func TestJson(t *testing.T) { + data := []byte(` +{ + "filename": "foo", + "maxsize": 5, + "maxage": 10, + "maxbackups": 3, + "localtime": true +}`[1:]) + + l := Logger{} + err := json.Unmarshal(data, &l) + isNil(err, t) + equals("foo", l.Filename, t) + equals(5, l.MaxSize, t) + equals(10, l.MaxAge, t) + equals(3, l.MaxBackups, t) + equals(true, l.LocalTime, t) +} + +func TestYaml(t *testing.T) { + data := []byte(` +filename: foo +maxsize: 5 +maxage: 10 +maxbackups: 3 +localtime: true`[1:]) + + l := Logger{} + err := yaml.Unmarshal(data, &l) + isNil(err, t) + equals("foo", l.Filename, t) + equals(5, l.MaxSize, t) + equals(10, l.MaxAge, t) + equals(3, l.MaxBackups, t) + equals(true, l.LocalTime, t) +} + +func TestToml(t *testing.T) { + data := ` +filename = "foo" +maxsize = 5 +maxage = 10 +maxbackups = 3 +localtime = true`[1:] + + l := Logger{} + md, err := toml.Decode(data, &l) + isNil(err, t) + equals("foo", l.Filename, t) + equals(5, l.MaxSize, t) + equals(10, l.MaxAge, t) + equals(3, l.MaxBackups, t) + equals(true, l.LocalTime, t) + equals(0, len(md.Undecoded()), t) +} + +// makeTempDir creates a file with a semi-unique name in 
the OS temp directory. +// It should be based on the name of the test, to keep parallel tests from +// colliding, and must be cleaned up after the test is finished. +func makeTempDir(name string, t testing.TB) string { + dir := time.Now().Format(name + backupTimeFormat) + dir = filepath.Join(os.TempDir(), dir) + isNilUp(os.Mkdir(dir, 0777), t, 1) + return dir +} + +// existsWithLen checks that the given file exists and has the correct length. +func existsWithLen(path string, length int, t testing.TB) { + info, err := os.Stat(path) + isNilUp(err, t, 1) + equalsUp(int64(length), info.Size(), t, 1) +} + +// logFile returns the log file name in the given directory for the current fake +// time. +func logFile(dir string) string { + return filepath.Join(dir, "foobar.log") +} + +func backupFile(dir string) string { + return filepath.Join(dir, "foobar-"+fakeTime().UTC().Format(backupTimeFormat)+".log") +} + +func backupFileLocal(dir string) string { + return filepath.Join(dir, "foobar-"+fakeTime().Format(backupTimeFormat)+".log") +} + +// logFileLocal returns the log file name in the given directory for the current +// fake time using the local timezone. +func logFileLocal(dir string) string { + return filepath.Join(dir, fakeTime().Format(backupTimeFormat)) +} + +// fileCount checks that the number of files in the directory is exp. +func fileCount(dir string, exp int, t testing.TB) { + files, err := ioutil.ReadDir(dir) + isNilUp(err, t, 1) + // Make sure no other files were created. + equalsUp(exp, len(files), t, 1) +} + +// newFakeTime sets the fake "current time" to two days later. 
+func newFakeTime() { + fakeCurrentTime = fakeCurrentTime.Add(time.Hour * 24 * 2) +} + +func notExist(path string, t testing.TB) { + _, err := os.Stat(path) + assertUp(os.IsNotExist(err), t, 1, "expected to get os.IsNotExist, but instead got %v", err) +} + +func exists(path string, t testing.TB) { + _, err := os.Stat(path) + assertUp(err == nil, t, 1, "expected file to exist, but got error from os.Stat: %v", err) +} diff --git a/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/rotate_test.go b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/rotate_test.go new file mode 100644 index 00000000000..0561464ac0d --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/rotate_test.go @@ -0,0 +1,27 @@ +// +build linux + +package lumberjack_test + +import ( + "log" + "os" + "os/signal" + "syscall" + + "github.com/natefinch/lumberjack" +) + +// Example of how to rotate in response to SIGHUP. +func ExampleLogger_Rotate() { + l := &lumberjack.Logger{} + log.SetOutput(l) + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGHUP) + + go func() { + for { + <-c + l.Rotate() + } + }() +} diff --git a/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/testing_test.go b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/testing_test.go new file mode 100644 index 00000000000..8e89c083198 --- /dev/null +++ b/Godeps/_workspace/src/gopkg.in/natefinch/lumberjack.v2/testing_test.go @@ -0,0 +1,91 @@ +package lumberjack + +import ( + "fmt" + "path/filepath" + "reflect" + "runtime" + "testing" +) + +// assert will log the given message if condition is false. +func assert(condition bool, t testing.TB, msg string, v ...interface{}) { + assertUp(condition, t, 1, msg, v...) +} + +// assertUp is like assert, but used inside helper functions, to ensure that +// the file and line number reported by failures corresponds to one or more +// levels up the stack. 
+func assertUp(condition bool, t testing.TB, caller int, msg string, v ...interface{}) { + if !condition { + _, file, line, _ := runtime.Caller(caller + 1) + v = append([]interface{}{filepath.Base(file), line}, v...) + fmt.Printf("%s:%d: "+msg+"\n", v...) + t.FailNow() + } +} + +// equals tests that the two values are equal according to reflect.DeepEqual. +func equals(exp, act interface{}, t testing.TB) { + equalsUp(exp, act, t, 1) +} + +// equalsUp is like equals, but used inside helper functions, to ensure that the +// file and line number reported by failures corresponds to one or more levels +// up the stack. +func equalsUp(exp, act interface{}, t testing.TB, caller int) { + if !reflect.DeepEqual(exp, act) { + _, file, line, _ := runtime.Caller(caller + 1) + fmt.Printf("%s:%d: exp: %v (%T), got: %v (%T)\n", + filepath.Base(file), line, exp, exp, act, act) + t.FailNow() + } +} + +// isNil reports a failure if the given value is not nil. Note that values +// which cannot be nil will always fail this check. +func isNil(obtained interface{}, t testing.TB) { + isNilUp(obtained, t, 1) +} + +// isNilUp is like isNil, but used inside helper functions, to ensure that the +// file and line number reported by failures corresponds to one or more levels +// up the stack. +func isNilUp(obtained interface{}, t testing.TB, caller int) { + if !_isNil(obtained) { + _, file, line, _ := runtime.Caller(caller + 1) + fmt.Printf("%s:%d: expected nil, got: %v\n", filepath.Base(file), line, obtained) + t.FailNow() + } +} + +// notNil reports a failure if the given value is nil. +func notNil(obtained interface{}, t testing.TB) { + notNilUp(obtained, t, 1) +} + +// notNilUp is like notNil, but used inside helper functions, to ensure that the +// file and line number reported by failures corresponds to one or more levels +// up the stack. 
+func notNilUp(obtained interface{}, t testing.TB, caller int) { + if _isNil(obtained) { + _, file, line, _ := runtime.Caller(caller + 1) + fmt.Printf("%s:%d: expected non-nil, got: %v\n", filepath.Base(file), line, obtained) + t.FailNow() + } +} + +// _isNil is a helper function for isNil and notNil, and should not be used +// directly. +func _isNil(obtained interface{}) bool { + if obtained == nil { + return true + } + + switch v := reflect.ValueOf(obtained); v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + + return false +} diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index 48d730c51f1..8b361eafe6d 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -5637,6 +5637,152 @@ } ] }, + { + "path": "/api/v1/namespaces/{namespace}/pods/{name}/attach", + "description": "API at /api/v1 version v1", + "operations": [ + { + "type": "string", + "method": "GET", + "summary": "connect GET requests to attach of Pod", + "nickname": "connectGetNamespacedPodAttach", + "parameters": [ + { + "type": "boolean", + "paramType": "query", + "name": "stdin", + "description": "redirect the standard input stream of the pod for this call; defaults to false", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "stdout", + "description": "redirect the standard output stream of the pod for this call; defaults to true", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "stderr", + "description": "redirect the standard error stream of the pod for this call; defaults to true", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "tty", + "description": "allocate a terminal for this attach call; defaults to false", + "required": false, + "allowMultiple": false + }, + { + "type": "string", + "paramType": 
"query", + "name": "container", + "description": "the container in which to execute the command. Defaults to only container if there is only one container in the pod.", + "required": false, + "allowMultiple": false + }, + { + "type": "string", + "paramType": "path", + "name": "namespace", + "description": "object name and auth scope, such as for teams and projects", + "required": true, + "allowMultiple": false + }, + { + "type": "string", + "paramType": "path", + "name": "name", + "description": "name of the Pod", + "required": true, + "allowMultiple": false + } + ], + "produces": [ + "*/*" + ], + "consumes": [ + "*/*" + ] + }, + { + "type": "string", + "method": "POST", + "summary": "connect POST requests to attach of Pod", + "nickname": "connectPostNamespacedPodAttach", + "parameters": [ + { + "type": "boolean", + "paramType": "query", + "name": "stdin", + "description": "redirect the standard input stream of the pod for this call; defaults to false", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "stdout", + "description": "redirect the standard output stream of the pod for this call; defaults to true", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "stderr", + "description": "redirect the standard error stream of the pod for this call; defaults to true", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "tty", + "description": "allocate a terminal for this attach call; defaults to false", + "required": false, + "allowMultiple": false + }, + { + "type": "string", + "paramType": "query", + "name": "container", + "description": "the container in which to execute the command. 
Defaults to only container if there is only one container in the pod.", + "required": false, + "allowMultiple": false + }, + { + "type": "string", + "paramType": "path", + "name": "namespace", + "description": "object name and auth scope, such as for teams and projects", + "required": true, + "allowMultiple": false + }, + { + "type": "string", + "paramType": "path", + "name": "name", + "description": "name of the Pod", + "required": true, + "allowMultiple": false + } + ], + "produces": [ + "*/*" + ], + "consumes": [ + "*/*" + ] + } + ] + }, { "path": "/api/v1/namespaces/{namespace}/pods/{name}/binding", "description": "API at /api/v1 version v1", diff --git a/build/README.md b/build/README.md index 692ae77e64a..5a52552fe85 100644 --- a/build/README.md +++ b/build/README.md @@ -5,7 +5,8 @@ To build Kubernetes you need to have access to a Docker installation through eit ## Requirements 1. Be running Docker. 2 options supported/tested: - 1. **Mac OS X** The best way to go is to use `boot2docker`. See instructions [here](https://docs.docker.com/installation/mac/). + 1. **Mac OS X** The best way to go is to use `boot2docker`. See instructions [here](https://docs.docker.com/installation/mac/). + **Note**: You will want to set the boot2docker vm to have at least 3GB of initial memory or building will likely fail. (See: [#11852]( https://github.com/GoogleCloudPlatform/kubernetes/issues/11852)) 2. **Linux with local Docker** Install Docker according to the [instructions](https://docs.docker.com/installation/#installation) for your OS. The scripts here assume that they are using a local Docker server and that they can "reach around" docker and grab results directly from the file system. 2. Have python installed. Pretty much it is installed everywhere at this point so you can probably ignore this. 3. *Optional* For uploading your release to Google Cloud Storage, have the [Google Cloud SDK](https://developers.google.com/cloud/sdk/) installed and configured. 
diff --git a/build/mark-new-version.sh b/build/mark-new-version.sh index 2c45bc4aa43..881397106f3 100755 --- a/build/mark-new-version.sh +++ b/build/mark-new-version.sh @@ -52,6 +52,7 @@ fi release_branch="release-${VERSION_MAJOR}.${VERSION_MINOR}" current_branch=$(git rev-parse --abbrev-ref HEAD) +head_commit=$(git rev-parse --short HEAD) if [[ "${VERSION_PATCH}" != "0" ]]; then # sorry, no going back in time, pull latest from upstream @@ -93,15 +94,13 @@ echo "+++ Running ./versionize-docs" ${KUBE_ROOT}/build/versionize-docs.sh ${NEW_VERSION} git commit -am "Versioning docs and examples for ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}" -dochash=$(git log -n1 --format=%H) - VERSION_FILE="${KUBE_ROOT}/pkg/version/base.go" GIT_MINOR="${VERSION_MINOR}.${VERSION_PATCH}" echo "+++ Updating to ${NEW_VERSION}" $SED -ri -e "s/gitMajor\s+string = \"[^\"]*\"/gitMajor string = \"${VERSION_MAJOR}\"/" "${VERSION_FILE}" $SED -ri -e "s/gitMinor\s+string = \"[^\"]*\"/gitMinor string = \"${GIT_MINOR}\"/" "${VERSION_FILE}" -$SED -ri -e "s/gitVersion\s+string = \"[^\"]*\"/gitVersion string = \"$NEW_VERSION\"/" "${VERSION_FILE}" +$SED -ri -e "s/gitVersion\s+string = \"[^\"]*\"/gitVersion string = \"$NEW_VERSION-${release_branch}+\$Format:%h\$\"/" "${VERSION_FILE}" gofmt -s -w "${VERSION_FILE}" echo "+++ Committing version change" @@ -110,35 +109,30 @@ git commit -m "Kubernetes version $NEW_VERSION" echo "+++ Tagging version" git tag -a -m "Kubernetes version $NEW_VERSION" "${NEW_VERSION}" +newtag=$(git rev-parse --short HEAD) -echo "+++ Updating to ${NEW_VERSION}-dev" -$SED -ri -e "s/gitMajor\s+string = \"[^\"]*\"/gitMajor string = \"${VERSION_MAJOR}\"/" "${VERSION_FILE}" -$SED -ri -e "s/gitMinor\s+string = \"[^\"]*\"/gitMinor string = \"${GIT_MINOR}\+\"/" "${VERSION_FILE}" -$SED -ri -e "s/gitVersion\s+string = \"[^\"]*\"/gitVersion string = \"$NEW_VERSION-dev\"/" "${VERSION_FILE}" -gofmt -s -w "${VERSION_FILE}" - -echo "+++ Committing version change" -git add "${VERSION_FILE}" 
-git commit -m "Kubernetes version ${NEW_VERSION}-dev" +if [[ "${VERSION_PATCH}" == "0" ]]; then + declare -r alpha_ver="v${VERSION_MAJOR}.$((${VERSION_MINOR}+1)).0-alpha.0" + git tag -a -m "Kubernetes pre-release branch ${alpha_ver}" "${alpha_ver}" "${head_commit}" +fi echo "" echo "Success you must now:" echo "" echo "- Push the tag:" echo " git push ${push_url} v${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}" -echo " - Please note you are pushing the tag live BEFORE your PRs." -echo " You need this so the builds pick up the right tag info (and so your reviewers can see it)." -echo " If something goes wrong further down please fix the tag!" -echo " Either delete this tag and give up, fix the tag before your next PR," -echo " or find someone who can help solve the tag problem!" -echo "" if [[ "${VERSION_PATCH}" == "0" ]]; then - echo "- Send branch: ${current_branch} as a PR to ${push_url}/master" - echo " For major/minor releases, this gets the branch tag merged and changes the version numbers." + echo "- Push the alpha tag:" + echo " git push ${push_url} ${alpha_ver}" + echo "- Push the new release branch:" + echo " git push ${push_url} ${current_branch}:${release_branch}" + echo "- DO NOTHING TO MASTER. You were done with master when you pushed the alpha tag." else echo "- Send branch: ${current_branch} as a PR to ${release_branch} <-- NOTE THIS" + echo "- In the contents of the PR, include the PRs in the release:" + echo " hack/cherry_pick_list.sh ${current_branch}^1" + echo " This helps cross-link PRs to patch releases they're part of in GitHub." - echo " Get someone to review and merge that PR" + echo "- Have someone review the PR. This is a mechanical review to ensure it contains" + echo " the ${NEW_VERSION} commit, which was tagged at ${newtag}." 
fi diff --git a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml index 4c2a019bde5..5d87e5b4390 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml @@ -33,3 +33,11 @@ spec: - --sink=gcl - --poll_duration=2m - --stats_resolution=1m + volumeMounts: + - name: ssl-certs + mountPath: /etc/ssl/certs + readOnly: true + volumes: + - name: ssl-certs + hostPath: + path: "/etc/ssl/certs" diff --git a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml index 9932e6e2f38..1aaef0614ab 100644 --- a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml +++ b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml @@ -33,3 +33,11 @@ spec: - --sink=influxdb:http://monitoring-influxdb:8086 - --poll_duration=2m - --stats_resolution=1m + volumeMounts: + - name: ssl-certs + mountPath: /etc/ssl/certs + readOnly: true + volumes: + - name: ssl-certs + hostPath: + path: "/etc/ssl/certs" diff --git a/cluster/addons/dns/kube2sky/kube2sky.go b/cluster/addons/dns/kube2sky/kube2sky.go index 6a6b2679413..ea298bb8a4d 100644 --- a/cluster/addons/dns/kube2sky/kube2sky.go +++ b/cluster/addons/dns/kube2sky/kube2sky.go @@ -37,7 +37,7 @@ import ( kclientcmd "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd" kframework "github.com/GoogleCloudPlatform/kubernetes/pkg/controller/framework" kSelector "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" - tools "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/wait" etcd "github.com/coreos/go-etcd/etcd" @@ -354,7 
+354,7 @@ func newEtcdClient(etcdServer string) (*etcd.Client, error) { err error ) for attempt := 1; attempt <= maxConnectAttempts; attempt++ { - if _, err = tools.GetEtcdVersion(etcdServer); err == nil { + if _, err = etcdstorage.GetEtcdVersion(etcdServer); err == nil { break } if attempt == maxConnectAttempts { diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile index 213381e5db3..a6c745a452e 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/Makefile @@ -1,7 +1,7 @@ .PHONY: build push IMAGE = fluentd-elasticsearch -TAG = 1.6 +TAG = 1.7 build: docker build -t gcr.io/google_containers/$(IMAGE):$(TAG) . diff --git a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/td-agent.conf b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/td-agent.conf index 96d87378f96..1c744faf0da 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-image/td-agent.conf +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-image/td-agent.conf @@ -94,20 +94,13 @@ tag docker - - type elasticsearch - log_level info - include_tag_key true - host elasticsearch-logging - port 9200 - logstash_format true - flush_interval 5s - # Never wait longer than 5 minutes between retries. - max_retry_wait 300 - # Disable the limit on the number of retries (retry forever). - disable_retry_limit - ->>>>>>> Move things into a 'kube-system' namespace. 
+ + type tail + format none + path /varlog/etcd.log + pos_file /varlog/es-etcd.log.pos + tag etcd + type tail diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-image/Makefile b/cluster/addons/fluentd-gcp/fluentd-gcp-image/Makefile index fae3f59ff7e..344ddda2123 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-image/Makefile +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-image/Makefile @@ -15,7 +15,7 @@ .PHONY: kbuild kpush -TAG = 1.9 +TAG = 1.10 # Rules for building the test image for deployment to Dockerhub with user kubernetes. diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-image/google-fluentd.conf b/cluster/addons/fluentd-gcp/fluentd-gcp-image/google-fluentd.conf index 4fb6cc30034..07b0f84d9d3 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-image/google-fluentd.conf +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-image/google-fluentd.conf @@ -79,6 +79,14 @@ tag docker + + type tail + format none + path /varlog/etcd.log + pos_file /varlog/gcp-etcd.log.pos + tag etcd + + type tail format none diff --git a/cluster/saltbase/README.md b/cluster/saltbase/README.md index 7ea8c1bb7f0..f60bd429a9f 100644 --- a/cluster/saltbase/README.md +++ b/cluster/saltbase/README.md @@ -1,7 +1,7 @@ # SaltStack configuration This is the root of the SaltStack configuration for Kubernetes. 
A high -level overview for the Kubernetes SaltStack configuration can be found [in the docs tree.](../../docs/salt.md) +level overview for the Kubernetes SaltStack configuration can be found [in the docs tree.](../../docs/admin/salt.md) This SaltStack configuration currently applies to default configurations for Debian-on-GCE, Fedora-on-Vagrant, Ubuntu-on-AWS and diff --git a/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml b/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml index 264480a11e3..c758f431c86 100644 --- a/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml +++ b/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml @@ -6,7 +6,7 @@ metadata: spec: containers: - name: fluentd-elasticsearch - image: gcr.io/google_containers/fluentd-elasticsearch:1.6 + image: gcr.io/google_containers/fluentd-elasticsearch:1.7 resources: limits: cpu: 100m diff --git a/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml b/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml index 2e8087f8e47..3073e62d320 100644 --- a/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml +++ b/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml @@ -6,7 +6,7 @@ metadata: spec: containers: - name: fluentd-cloud-logging - image: gcr.io/google_containers/fluentd-gcp:1.9 + image: gcr.io/google_containers/fluentd-gcp:1.10 resources: limits: cpu: 100m diff --git a/cluster/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default index 0c9c023a628..dd0cde397aa 100644 --- a/cluster/saltbase/salt/kubelet/default +++ b/cluster/saltbase/salt/kubelet/default @@ -41,6 +41,12 @@ {% endif -%} {% set config = "--config=/etc/kubernetes/manifests" -%} + +{% set manifest_url = "" -%} +{% if grains['roles'][0] == 'kubernetes-master' and grains.cloud in ['gce'] -%} + {% set manifest_url = "--manifest-url=http://metadata.google.internal/computeMetadata/v1/instance/attributes/google-container-manifest --manifest-url-header=Metadata-Flavor:Google" -%} +{% endif -%} + {% set hostname_override = "" -%} {% if 
grains.hostname_override is defined -%} {% set hostname_override = " --hostname_override=" + grains.hostname_override -%} @@ -84,4 +90,4 @@ {% set pod_cidr = "--pod-cidr=" + grains['cbr-cidr'] %} {% endif %} -DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}}" +DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{cgroup_root}} {{system_container}} {{pod_cidr}}" diff --git a/cmd/integration/integration.go b/cmd/integration/integration.go index 7a9eef7d257..61669ac6f00 100644 --- a/cmd/integration/integration.go +++ b/cmd/integration/integration.go @@ -42,6 +42,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/nodecontroller" replicationControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/controller/replication" + explatest "github.com/GoogleCloudPlatform/kubernetes/pkg/expapi/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor" @@ -132,10 +133,14 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: apiVersion}) - etcdStorage, err := master.NewEtcdStorage(etcdClient, "", etcdtest.PathPrefix()) + etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.InterfacesFor, latest.Version, etcdtest.PathPrefix()) if 
err != nil { glog.Fatalf("Unable to get etcd storage: %v", err) } + expEtcdStorage, err := master.NewEtcdStorage(etcdClient, explatest.InterfacesFor, explatest.Version, etcdtest.PathPrefix()) + if err != nil { + glog.Fatalf("Unable to get etcd storage for experimental: %v", err) + } // Master host, port, err := net.SplitHostPort(strings.TrimLeft(apiServer.URL, "http://")) @@ -155,11 +160,13 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st // Create a master and install handlers into mux. m := master.New(&master.Config{ DatabaseStorage: etcdStorage, + ExpDatabaseStorage: expEtcdStorage, KubeletClient: fakeKubeletClient{}, EnableCoreControllers: true, EnableLogsSupport: false, EnableProfiling: true, APIPrefix: "/api", + ExpAPIPrefix: "/experimental", Authorizer: apiserver.NewAlwaysAllowAuthorizer(), AdmissionControl: admit.NewAlwaysAdmit(), ReadWritePort: portNumber, diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index b9c811d82ca..c62868b6639 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -32,12 +32,16 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/admission" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta" "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver" "github.com/GoogleCloudPlatform/kubernetes/pkg/capabilities" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider" + explatest "github.com/GoogleCloudPlatform/kubernetes/pkg/expapi/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/master" "github.com/GoogleCloudPlatform/kubernetes/pkg/master/ports" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" forked 
"github.com/GoogleCloudPlatform/kubernetes/third_party/forked/coreos/go-etcd/etcd" @@ -70,7 +74,9 @@ type APIServer struct { TLSPrivateKeyFile string CertDirectory string APIPrefix string + ExpAPIPrefix string StorageVersion string + ExpStorageVersion string CloudProvider string CloudConfigFile string EventTTL time.Duration @@ -114,6 +120,7 @@ func NewAPIServer() *APIServer { APIRate: 10.0, APIBurst: 200, APIPrefix: "/api", + ExpAPIPrefix: "/experimental", EventTTL: 1 * time.Hour, AuthorizationMode: "AlwaysAllow", AdmissionControl: "AlwaysAdmit", @@ -171,6 +178,7 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.CertDirectory, "cert-dir", s.CertDirectory, "The directory where the TLS certs are located (by default /var/run/kubernetes). "+ "If --tls-cert-file and --tls-private-key-file are provided, this flag will be ignored.") fs.StringVar(&s.APIPrefix, "api-prefix", s.APIPrefix, "The prefix for API requests on the server. Default '/api'.") + fs.StringVar(&s.ExpAPIPrefix, "experimental-prefix", s.ExpAPIPrefix, "The prefix for experimental API requests on the server. Default '/experimental'.") fs.StringVar(&s.StorageVersion, "storage-version", s.StorageVersion, "The version to store resources with. Defaults to server preferred") fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.") fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. 
Empty string for no configuration file.") @@ -216,7 +224,7 @@ func (s *APIServer) verifyClusterIPFlags() { } } -func newEtcd(etcdConfigFile string, etcdServerList util.StringList, storageVersion string, pathPrefix string) (etcdStorage tools.StorageInterface, err error) { +func newEtcd(etcdConfigFile string, etcdServerList util.StringList, interfacesFunc meta.VersionInterfacesFunc, defaultVersion, storageVersion, pathPrefix string) (etcdStorage storage.Interface, err error) { var client tools.EtcdClient if etcdConfigFile != "" { client, err = etcd.NewClientFromFile(etcdConfigFile) @@ -236,7 +244,10 @@ func newEtcd(etcdConfigFile string, etcdServerList util.StringList, storageVersi client = etcdClient } - return master.NewEtcdStorage(client, storageVersion, pathPrefix) + if storageVersion == "" { + storageVersion = defaultVersion + } + return master.NewEtcdStorage(client, interfacesFunc, storageVersion, pathPrefix) } // Run runs the specified APIServer. This should never exit. @@ -291,6 +302,10 @@ func (s *APIServer) Run(_ []string) error { disableV1 := disableAllAPIs disableV1 = !s.getRuntimeConfigValue("api/v1", !disableV1) + // "experimental/v1={true|false} allows users to enable/disable the experimental API. + // This takes preference over api/all, if specified. 
+ enableExp := s.getRuntimeConfigValue("experimental/v1", false) + // TODO: expose same flags as client.BindClientConfigFlags but for a server clientConfig := &client.Config{ Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)), @@ -301,10 +316,14 @@ func (s *APIServer) Run(_ []string) error { glog.Fatalf("Invalid server address: %v", err) } - etcdStorage, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, s.StorageVersion, s.EtcdPathPrefix) + etcdStorage, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, latest.InterfacesFor, latest.Version, s.StorageVersion, s.EtcdPathPrefix) if err != nil { glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err) } + expEtcdStorage, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, explatest.InterfacesFor, explatest.Version, s.ExpStorageVersion, s.EtcdPathPrefix) + if err != nil { + glog.Fatalf("Invalid experimental storage version or misconfigured etcd: %v", err) + } n := net.IPNet(s.ServiceClusterIPRange) @@ -359,7 +378,9 @@ func (s *APIServer) Run(_ []string) error { } } config := &master.Config{ - DatabaseStorage: etcdStorage, + DatabaseStorage: etcdStorage, + ExpDatabaseStorage: expEtcdStorage, + EventTTL: s.EventTTL, KubeletClient: kubeletClient, ServiceClusterIPRange: &n, @@ -370,6 +391,7 @@ func (s *APIServer) Run(_ []string) error { EnableProfiling: s.EnableProfiling, EnableIndex: true, APIPrefix: s.APIPrefix, + ExpAPIPrefix: s.ExpAPIPrefix, CorsAllowedOriginList: s.CorsAllowedOriginList, ReadWritePort: s.SecurePort, PublicAddress: net.IP(s.AdvertiseAddress), @@ -378,6 +400,7 @@ func (s *APIServer) Run(_ []string) error { Authorizer: authorizer, AdmissionControl: admissionController, DisableV1: disableV1, + EnableExp: enableExp, MasterServiceNamespace: s.MasterServiceNamespace, ClusterName: s.ClusterName, ExternalHost: s.ExternalHost, diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 972a9229838..37e55583d85 100644 --- a/cmd/kubelet/app/server.go +++ 
b/cmd/kubelet/app/server.go @@ -319,7 +319,7 @@ func (s *KubeletServer) Run(_ []string) error { mounter := mount.New() if s.Containerized { glog.V(2).Info("Running kubelet in containerized mode (experimental)") - mounter = &mount.NsenterMounter{} + mounter = mount.NewNsenterMounter() } var dockerExecHandler dockertools.ExecHandler diff --git a/cmd/kubernetes/kubernetes.go b/cmd/kubernetes/kubernetes.go index c664fa7c503..7ddb3013e7e 100644 --- a/cmd/kubernetes/kubernetes.go +++ b/cmd/kubernetes/kubernetes.go @@ -30,17 +30,20 @@ import ( kubeletapp "github.com/GoogleCloudPlatform/kubernetes/cmd/kubelet/app" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/nodecontroller" "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/servicecontroller" "github.com/GoogleCloudPlatform/kubernetes/pkg/controller/replication" + explatest "github.com/GoogleCloudPlatform/kubernetes/pkg/expapi/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor" kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools" "github.com/GoogleCloudPlatform/kubernetes/pkg/master" "github.com/GoogleCloudPlatform/kubernetes/pkg/service" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler" @@ -78,14 +81,19 @@ func (h *delegateHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { func runApiServer(etcdClient tools.EtcdClient, addr net.IP, port int, masterServiceNamespace string) { handler := 
delegateHandler{} - etcdStorage, err := master.NewEtcdStorage(etcdClient, "", master.DefaultEtcdPathPrefix) + etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.InterfacesFor, latest.Version, master.DefaultEtcdPathPrefix) if err != nil { glog.Fatalf("Unable to get etcd storage: %v", err) } + expEtcdStorage, err := master.NewEtcdStorage(etcdClient, explatest.InterfacesFor, explatest.Version, master.DefaultEtcdPathPrefix) + if err != nil { + glog.Fatalf("Unable to get etcd storage for experimental: %v", err) + } // Create a master and install handlers into mux. m := master.New(&master.Config{ - DatabaseStorage: etcdStorage, + DatabaseStorage: etcdStorage, + ExpDatabaseStorage: expEtcdStorage, KubeletClient: &client.HTTPKubeletClient{ Client: http.DefaultClient, Config: &client.KubeletConfig{Port: 10250}, @@ -95,6 +103,7 @@ func runApiServer(etcdClient tools.EtcdClient, addr net.IP, port int, masterServ EnableSwaggerSupport: true, EnableProfiling: *enableProfiling, APIPrefix: "/api", + ExpAPIPrefix: "/experimental", Authorizer: apiserver.NewAlwaysAllowAuthorizer(), ReadWritePort: port, @@ -167,7 +176,7 @@ func main() { defer util.FlushLogs() glog.Infof("Creating etcd client pointing to %v", *etcdServer) - etcdClient, err := tools.NewEtcdClientStartServerIfNecessary(*etcdServer) + etcdClient, err := etcdstorage.NewEtcdClientStartServerIfNecessary(*etcdServer) if err != nil { glog.Fatalf("Failed to connect to etcd: %v", err) } diff --git a/cmd/mungedocs/README.md b/cmd/mungedocs/README.md new file mode 100644 index 00000000000..7adef4c4cce --- /dev/null +++ b/cmd/mungedocs/README.md @@ -0,0 +1,22 @@ +# Documentation Mungers + +Basically this is like lint/gofmt for md docs. + +It basically does the following: +- iterate over all files in the given doc root. +- for each file split it into a slice (mungeLines) of lines (mungeLine) +- a mungeline has metadata about each line typically determined by a 'fast' regex. 
+ - metadata contains things like 'is inside a preformmatted block' + - contains a markdown header + - has a link to another file + - etc.. + - if you have a really slow regex with a lot of backtracking you might want to write a fast one to limit how often you run the slow one. +- each munger is then called in turn + - they are given the mungeLines + - they create an entirely new set of mungeLines with their modifications + - the new set is returned +- the new set is then fed into the next munger. +- in the end we might commit the end mungeLines to the file or not (--verify) + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cmd/mungedocs/README.md?pixel)]() diff --git a/cmd/mungedocs/analytics.go b/cmd/mungedocs/analytics.go index eb82b64e068..311f134c639 100644 --- a/cmd/mungedocs/analytics.go +++ b/cmd/mungedocs/analytics.go @@ -17,43 +17,42 @@ limitations under the License. package main import ( - "bytes" "fmt" - "os" - "regexp" + "strings" ) -var ( - beginMungeExp = regexp.QuoteMeta(beginMungeTag("GENERATED_ANALYTICS")) - endMungeExp = regexp.QuoteMeta(endMungeTag("GENERATED_ANALYTICS")) - analyticsExp = regexp.QuoteMeta("[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/") + - "[^?]*" + - regexp.QuoteMeta("?pixel)]()") +const analyticsMungeTag = "GENERATED_ANALYTICS" +const analyticsLinePrefix = "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/" - // Matches the analytics blurb, with or without the munge headers. - analyticsRE = regexp.MustCompile(`[\n]*` + analyticsExp + `[\n]?` + - `|` + `[\n]*` + beginMungeExp + `[^<]*` + endMungeExp) -) - -// This adds the analytics link to every .md file. 
-func checkAnalytics(fileName string, fileBytes []byte) (output []byte, err error) { - fileName = makeRepoRelative(fileName) - desired := fmt.Sprintf(` - - -`+beginMungeTag("GENERATED_ANALYTICS")+` -[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/%s?pixel)]() -`+endMungeTag("GENERATED_ANALYTICS")+` -`, fileName) - if !analyticsRE.MatchString(desired) { - fmt.Printf("%q does not match %q", analyticsRE.String(), desired) - os.Exit(1) +func updateAnalytics(fileName string, mlines mungeLines) (mungeLines, error) { + var out mungeLines + fileName, err := makeRepoRelative(fileName, fileName) + if err != nil { + return mlines, err } - //output = replaceNonPreformattedRegexp(fileBytes, analyticsRE, func(in []byte) []byte { - output = analyticsRE.ReplaceAllFunc(fileBytes, func(in []byte) []byte { - return []byte{} - }) - output = bytes.TrimRight(output, "\n") - output = append(output, []byte(desired)...) - return output, nil + + link := fmt.Sprintf(analyticsLinePrefix+"%s?pixel)]()", fileName) + insertLines := getMungeLines(link) + mlines, err = removeMacroBlock(analyticsMungeTag, mlines) + if err != nil { + return mlines, err + } + + // Remove floating analytics links not surrounded by the munge tags. 
+ for _, mline := range mlines { + if mline.preformatted || mline.header || mline.beginTag || mline.endTag { + out = append(out, mline) + continue + } + if strings.HasPrefix(mline.data, analyticsLinePrefix) { + continue + } + out = append(out, mline) + } + out = appendMacroBlock(out, analyticsMungeTag) + out, err = updateMacroBlock(out, analyticsMungeTag, insertLines) + if err != nil { + return mlines, err + } + return out, nil } diff --git a/cmd/mungedocs/analytics_test.go b/cmd/mungedocs/analytics_test.go index aeccae58f56..37db7971705 100644 --- a/cmd/mungedocs/analytics_test.go +++ b/cmd/mungedocs/analytics_test.go @@ -23,67 +23,71 @@ import ( ) func TestAnalytics(t *testing.T) { + b := beginMungeTag("GENERATED_ANALYTICS") + e := endMungeTag("GENERATED_ANALYTICS") var cases = []struct { - in string - out string + in string + expected string }{ { "aoeu", - "aoeu" + "\n" + "\n" + "\n" + - beginMungeTag("GENERATED_ANALYTICS") + "\n" + + "aoeu" + "\n" + "\n" + + b + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + - endMungeTag("GENERATED_ANALYTICS") + "\n"}, + e + "\n"}, { "aoeu" + "\n" + "\n" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()", "aoeu" + "\n" + "\n" + "\n" + - beginMungeTag("GENERATED_ANALYTICS") + "\n" + + b + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + - endMungeTag("GENERATED_ANALYTICS") + "\n"}, + e + "\n"}, { "aoeu" + "\n" + - beginMungeTag("GENERATED_ANALYTICS") + "\n" + + b + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + - endMungeTag("GENERATED_ANALYTICS") + "\n", - "aoeu" + "\n" + "\n" + "\n" + - beginMungeTag("GENERATED_ANALYTICS") + "\n" + + e + "\n", + "aoeu" + "\n" + "\n" + + b + "\n" + 
"[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + - endMungeTag("GENERATED_ANALYTICS") + "\n"}, + e + "\n"}, { "aoeu" + "\n" + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "\n" + "\n" + - beginMungeTag("GENERATED_ANALYTICS") + "\n" + + b + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + - endMungeTag("GENERATED_ANALYTICS") + "\n", - "aoeu" + "\n" + "\n" + "\n" + - beginMungeTag("GENERATED_ANALYTICS") + "\n" + + e + "\n", + "aoeu" + "\n" + "\n" + "\n" + "\n" + + b + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + - endMungeTag("GENERATED_ANALYTICS") + "\n"}, + e + "\n"}, { "prefix" + "\n" + - beginMungeTag("GENERATED_ANALYTICS") + "\n" + + b + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + - endMungeTag("GENERATED_ANALYTICS") + + e + "\n" + "suffix", - "prefix" + "\n" + "suffix" + "\n" + "\n" + "\n" + - beginMungeTag("GENERATED_ANALYTICS") + "\n" + + "prefix" + "\n" + "suffix" + "\n" + "\n" + + b + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + - endMungeTag("GENERATED_ANALYTICS") + "\n"}, + e + "\n"}, { "aoeu" + "\n" + "\n" + "\n" + - beginMungeTag("GENERATED_ANALYTICS") + "\n" + + b + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + - endMungeTag("GENERATED_ANALYTICS") + "\n", + e + "\n", "aoeu" + "\n" + "\n" + "\n" + - beginMungeTag("GENERATED_ANALYTICS") + "\n" + + b + "\n" + "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + - endMungeTag("GENERATED_ANALYTICS") + "\n"}, + e + "\n"}, } - for _, c := 
range cases { - out, err := checkAnalytics("path/to/file-name.md", []byte(c.in)) + for i, c := range cases { + in := getMungeLines(c.in) + expected := getMungeLines(c.expected) + out, err := updateAnalytics("path/to/file-name.md", in) assert.NoError(t, err) - if string(out) != c.out { - t.Errorf("Expected \n\n%v\n\n but got \n\n%v\n\n", c.out, string(out)) + if !expected.Equal(out) { + t.Errorf("Case %d Expected \n\n%v\n\n but got \n\n%v\n\n", i, expected.String(), out.String()) } } } diff --git a/cmd/mungedocs/example_syncer.go b/cmd/mungedocs/example_syncer.go index b00c385c35f..8df6a968e45 100644 --- a/cmd/mungedocs/example_syncer.go +++ b/cmd/mungedocs/example_syncer.go @@ -17,15 +17,17 @@ limitations under the License. package main import ( - "bytes" "fmt" "io/ioutil" - "path" "regexp" "strings" ) -const exampleMungeTag = "EXAMPLE" +const exampleToken = "EXAMPLE" + +const exampleLineStart = " -func syncExamples(filePath string, markdown []byte) ([]byte, error) { - // find the example syncer begin tag - header := beginMungeTag(fmt.Sprintf("%s %s", exampleMungeTag, `(([^ ])*.(yaml|json))`)) - exampleLinkRE := regexp.MustCompile(header) - lines := splitLines(markdown) - updatedMarkdown, err := updateExampleMacroBlock(filePath, lines, exampleLinkRE, endMungeTag(exampleMungeTag)) - if err != nil { - return updatedMarkdown, err +func syncExamples(filePath string, mlines mungeLines) (mungeLines, error) { + var err error + type exampleTag struct { + token string + linkText string + fileType string } - return updatedMarkdown, nil + exampleTags := []exampleTag{} + + // collect all example Tags + for _, mline := range mlines { + if mline.preformatted || !mline.beginTag { + continue + } + line := mline.data + if !strings.HasPrefix(line, exampleLineStart) { + continue + } + match := exampleMungeTagRE.FindStringSubmatch(line) + if len(match) < 4 { + err = fmt.Errorf("Found unparsable EXAMPLE munge line %v", line) + return mlines, err + } + tag := exampleTag{ + token: 
exampleToken + " " + match[1], + linkText: match[1], + fileType: match[3], + } + exampleTags = append(exampleTags, tag) + } + // update all example Tags + for _, tag := range exampleTags { + example, err := exampleContent(filePath, tag.linkText, tag.fileType) + if err != nil { + return mlines, err + } + mlines, err = updateMacroBlock(mlines, tag.token, example) + if err != nil { + return mlines, err + } + } + return mlines, nil } // exampleContent retrieves the content of the file at linkPath -func exampleContent(filePath, linkPath, fileType string) (content string, err error) { - realRoot := path.Join(*rootDir, *repoRoot) + "/" - path := path.Join(realRoot, path.Dir(filePath), linkPath) - dat, err := ioutil.ReadFile(path) +func exampleContent(filePath, linkPath, fileType string) (mungeLines, error) { + repoRel, err := makeRepoRelative(linkPath, filePath) if err != nil { - return content, err + return nil, err } + + fileRel, err := makeFileRelative(linkPath, filePath) + if err != nil { + return nil, err + } + + dat, err := ioutil.ReadFile(repoRel) + if err != nil { + return nil, err + } + // remove leading and trailing spaces and newlines trimmedFileContent := strings.TrimSpace(string(dat)) - content = fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, linkPath) - return -} - -// updateExampleMacroBlock sync the yaml/json example between begin tag and end tag -func updateExampleMacroBlock(filePath string, lines []string, beginMarkExp *regexp.Regexp, endMark string) ([]byte, error) { - var buffer bytes.Buffer - betweenBeginAndEnd := false - for _, line := range lines { - trimmedLine := strings.Trim(line, " \n") - if beginMarkExp.Match([]byte(trimmedLine)) { - if betweenBeginAndEnd { - return nil, fmt.Errorf("found second begin mark while updating macro blocks") - } - betweenBeginAndEnd = true - buffer.WriteString(line) - buffer.WriteString("\n") - match := beginMarkExp.FindStringSubmatch(line) - if len(match) < 4 { - return nil, 
fmt.Errorf("failed to parse the link in example header") - } - // match[0] is the entire expression; [1] is the link text and [3] is the file type (yaml or json). - linkText := match[1] - fileType := match[3] - example, err := exampleContent(filePath, linkText, fileType) - if err != nil { - return nil, err - } - buffer.WriteString(example) - } else if trimmedLine == endMark { - if !betweenBeginAndEnd { - return nil, fmt.Errorf("found end mark without being mark while updating macro blocks") - } - // Extra newline avoids github markdown bug where comment ends up on same line as last bullet. - buffer.WriteString("\n") - buffer.WriteString(line) - buffer.WriteString("\n") - betweenBeginAndEnd = false - } else { - if !betweenBeginAndEnd { - buffer.WriteString(line) - buffer.WriteString("\n") - } - } - } - if betweenBeginAndEnd { - return nil, fmt.Errorf("never found closing end mark while updating macro blocks") - } - return buffer.Bytes(), nil + content := fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, fileRel) + out := getMungeLines(content) + return out, nil } diff --git a/cmd/mungedocs/example_syncer_test.go b/cmd/mungedocs/example_syncer_test.go index 76b90728402..84fd8854a1e 100644 --- a/cmd/mungedocs/example_syncer_test.go +++ b/cmd/mungedocs/example_syncer_test.go @@ -35,24 +35,27 @@ spec: - containerPort: 80 ` var cases = []struct { - in string - out string + in string + expected string }{ {"", ""}, { - "\n\n", - "\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n\n", + "\n\n", + "\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n\n", }, { - "\n\n", - "\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n\n", + "\n\n", + "\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n\n", }, } + repoRoot = "" for _, c := range cases { - actual, err := syncExamples("mungedocs/filename.md", []byte(c.in)) + 
in := getMungeLines(c.in) + expected := getMungeLines(c.expected) + actual, err := syncExamples("filename.md", in) assert.NoError(t, err) - if c.out != string(actual) { - t.Errorf("Expected example \n'%v' but got \n'%v'", c.out, string(actual)) + if !expected.Equal(actual) { + t.Errorf("Expected example \n'%q' but got \n'%q'", expected.String(), actual.String()) } } } diff --git a/cmd/mungedocs/headers.go b/cmd/mungedocs/headers.go index 0f45f609423..6876a514785 100644 --- a/cmd/mungedocs/headers.go +++ b/cmd/mungedocs/headers.go @@ -19,53 +19,56 @@ package main import ( "fmt" "regexp" - "strings" ) var headerRegex = regexp.MustCompile(`^(#+)\s*(.*)$`) -var whitespaceRegex = regexp.MustCompile(`^\s*$`) -func fixHeaderLines(fileBytes []byte) []byte { - lines := splitLines(fileBytes) - out := []string{} - for i := range lines { - matches := headerRegex.FindStringSubmatch(lines[i]) - if matches == nil { - out = append(out, lines[i]) - continue - } - if i > 0 && !whitespaceRegex.Match([]byte(out[len(out)-1])) { - out = append(out, "") - } - out = append(out, fmt.Sprintf("%s %s", matches[1], matches[2])) - if i+1 < len(lines) && !whitespaceRegex.Match([]byte(lines[i+1])) { - out = append(out, "") +func fixHeaderLine(mlines mungeLines, newlines mungeLines, linenum int) mungeLines { + var out mungeLines + + mline := mlines[linenum] + line := mlines[linenum].data + + matches := headerRegex.FindStringSubmatch(line) + if matches == nil { + out = append(out, mline) + return out + } + + // There must be a blank line before the # (unless first line in file) + if linenum != 0 { + newlen := len(newlines) + if newlines[newlen-1].data != "" { + out = append(out, blankMungeLine) } } - final := strings.Join(out, "\n") - // Preserve the end of the file. 
- if len(fileBytes) > 0 && fileBytes[len(fileBytes)-1] == '\n' { - final += "\n" + + // There must be a space AFTER the ##'s + newline := fmt.Sprintf("%s %s", matches[1], matches[2]) + newmline := newMungeLine(newline) + out = append(out, newmline) + + // The next line needs to be a blank line (unless last line in file) + if len(mlines) > linenum+1 && mlines[linenum+1].data != "" { + out = append(out, blankMungeLine) } - return []byte(final) + return out } // Header lines need whitespace around them and after the #s. -func checkHeaderLines(filePath string, fileBytes []byte) ([]byte, error) { - fbs := splitByPreformatted(fileBytes) - fbs = append([]fileBlock{{false, []byte{}}}, fbs...) - fbs = append(fbs, fileBlock{false, []byte{}}) - - for i := range fbs { - block := &fbs[i] - if block.preformatted { +func updateHeaderLines(filePath string, mlines mungeLines) (mungeLines, error) { + var out mungeLines + for i, mline := range mlines { + if mline.preformatted { + out = append(out, mline) continue } - block.data = fixHeaderLines(block.data) + if !mline.header { + out = append(out, mline) + continue + } + newLines := fixHeaderLine(mlines, out, i) + out = append(out, newLines...) } - output := []byte{} - for _, block := range fbs { - output = append(output, block.data...) 
- } - return output, nil + return out, nil } diff --git a/cmd/mungedocs/headers_test.go b/cmd/mungedocs/headers_test.go index d2864377072..a73355beb55 100644 --- a/cmd/mungedocs/headers_test.go +++ b/cmd/mungedocs/headers_test.go @@ -24,8 +24,8 @@ import ( func TestHeaderLines(t *testing.T) { var cases = []struct { - in string - out string + in string + expected string }{ {"", ""}, { @@ -62,10 +62,12 @@ func TestHeaderLines(t *testing.T) { }, } for i, c := range cases { - actual, err := checkHeaderLines("filename.md", []byte(c.in)) + in := getMungeLines(c.in) + expected := getMungeLines(c.expected) + actual, err := updateHeaderLines("filename.md", in) assert.NoError(t, err) - if string(actual) != c.out { - t.Errorf("case[%d]: expected %q got %q", i, c.out, string(actual)) + if !actual.Equal(expected) { + t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String()) } } } diff --git a/cmd/mungedocs/kubectl_dash_f.go b/cmd/mungedocs/kubectl_dash_f.go index 79b42ba5bd5..c4702220d2a 100644 --- a/cmd/mungedocs/kubectl_dash_f.go +++ b/cmd/mungedocs/kubectl_dash_f.go @@ -25,29 +25,25 @@ import ( // Looks for lines that have kubectl commands with -f flags and files that // don't exist. 
-func checkKubectlFileTargets(file string, markdown []byte) ([]byte, error) { - inside := false - lines := splitLines(markdown) - errors := []string{} - for i := range lines { - if strings.HasPrefix(lines[i], "```") { - inside = !inside +func updateKubectlFileTargets(file string, mlines mungeLines) (mungeLines, error) { + var errors []string + for i, mline := range mlines { + if !mline.preformatted { + continue } - if inside { - if err := lookForKubectl(lines, i); err != nil { - errors = append(errors, err.Error()) - } + if err := lookForKubectl(mline.data, i); err != nil { + errors = append(errors, err.Error()) } } err := error(nil) if len(errors) != 0 { err = fmt.Errorf("%s", strings.Join(errors, "\n")) } - return markdown, err + return mlines, err } -func lookForKubectl(lines []string, lineNum int) error { - fields := strings.Fields(lines[lineNum]) +func lookForKubectl(line string, lineNum int) error { + fields := strings.Fields(line) for i := range fields { if fields[i] == "kubectl" { return gotKubectl(lineNum, fields, i) @@ -56,26 +52,26 @@ func lookForKubectl(lines []string, lineNum int) error { return nil } -func gotKubectl(line int, fields []string, fieldNum int) error { +func gotKubectl(lineNum int, fields []string, fieldNum int) error { for i := fieldNum + 1; i < len(fields); i++ { switch fields[i] { case "create", "update", "replace", "delete": - return gotCommand(line, fields, i) + return gotCommand(lineNum, fields, i) } } return nil } -func gotCommand(line int, fields []string, fieldNum int) error { +func gotCommand(lineNum int, fields []string, fieldNum int) error { for i := fieldNum + 1; i < len(fields); i++ { if strings.HasPrefix(fields[i], "-f") { - return gotDashF(line, fields, i) + return gotDashF(lineNum, fields, i) } } return nil } -func gotDashF(line int, fields []string, fieldNum int) error { +func gotDashF(lineNum int, fields []string, fieldNum int) error { target := "" if fields[fieldNum] == "-f" { if fieldNum+1 == len(fields) { @@ -112,9 
+108,9 @@ func gotDashF(line int, fields []string, fieldNum int) error { } // If we got here we expect the file to exist. - _, err := os.Stat(path.Join(*rootDir, *repoRoot, target)) + _, err := os.Stat(path.Join(repoRoot, target)) if os.IsNotExist(err) { - return fmt.Errorf("%d: target file %q does not exist", line, target) + return fmt.Errorf("%d: target file %q does not exist", lineNum, target) } return err } diff --git a/cmd/mungedocs/kubectl_dash_f_test.go b/cmd/mungedocs/kubectl_dash_f_test.go index c2b72343780..b6b0c243a89 100644 --- a/cmd/mungedocs/kubectl_dash_f_test.go +++ b/cmd/mungedocs/kubectl_dash_f_test.go @@ -130,9 +130,9 @@ func TestKubectlDashF(t *testing.T) { }, } for i, c := range cases { - *rootDir = "" - *repoRoot = "" - _, err := checkKubectlFileTargets("filename.md", []byte(c.in)) + repoRoot = "" + in := getMungeLines(c.in) + _, err := updateKubectlFileTargets("filename.md", in) if err != nil && c.ok { t.Errorf("case[%d]: expected success, got %v", i, err) } diff --git a/cmd/mungedocs/links.go b/cmd/mungedocs/links.go index 9cccbb2ea93..60d7b9b73bd 100644 --- a/cmd/mungedocs/links.go +++ b/cmd/mungedocs/links.go @@ -29,20 +29,20 @@ var ( // Finds markdown links of the form [foo](bar "alt-text"). linkRE = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`) // Splits the link target into link target and alt-text. - altTextRE = regexp.MustCompile(`(.*)( ".*")`) + altTextRE = regexp.MustCompile(`([^)]*)( ".*")`) ) -// checkLinks assumes fileBytes has links in markdown syntax, and verifies that -// any relative links actually point to files that exist. -func checkLinks(filePath string, fileBytes []byte) ([]byte, error) { - dir := path.Dir(filePath) - errors := []string{} - - output := replaceNonPreformattedRegexp(fileBytes, linkRE, func(in []byte) (out []byte) { - match := linkRE.FindSubmatch(in) - // match[0] is the entire expression; [1] is the visible text and [2] is the link text. 
- visibleText := string(match[1]) - linkText := string(match[2]) +func processLink(in string, filePath string) (string, error) { + var err error + out := linkRE.ReplaceAllStringFunc(in, func(in string) string { + match := linkRE.FindStringSubmatch(in) + if match == nil { + err = fmt.Errorf("Detected this line had a link, but unable to parse, %v", in) + return "" + } + // match[0] is the entire expression; + visibleText := match[1] + linkText := match[2] altText := "" if parts := altTextRE.FindStringSubmatch(linkText); parts != nil { linkText = parts[1] @@ -54,13 +54,10 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) { linkText = strings.Trim(linkText, "\n") linkText = strings.Trim(linkText, " ") - u, err := url.Parse(linkText) - if err != nil { - errors = append( - errors, - fmt.Sprintf("link %q is unparsable: %v", linkText, err), - ) - return in + u, terr := url.Parse(linkText) + if terr != nil { + err = fmt.Errorf("link %q is unparsable: %v", linkText, terr) + return "" } if u.Host != "" && u.Host != "github.com" { @@ -72,10 +69,8 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) { if u.Path != "" && !strings.HasPrefix(linkText, "TODO:") { newPath, targetExists := checkPath(filePath, path.Clean(u.Path)) if !targetExists { - errors = append( - errors, - fmt.Sprintf("%q: target not found", linkText), - ) + err = fmt.Errorf("%q: target not found", linkText) + return "" } u.Path = newPath if strings.HasPrefix(u.Path, "/") { @@ -89,11 +84,16 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) { // Make the visible text show the absolute path if it's // not nested in or beneath the current directory. 
if strings.HasPrefix(u.Path, "..") { - suggestedVisibleText = makeRepoRelative(path.Join(dir, u.Path)) + dir := path.Dir(filePath) + suggestedVisibleText, err = makeRepoRelative(path.Join(dir, u.Path), filePath) + if err != nil { + return "" + } } else { suggestedVisibleText = u.Path } - if unescaped, err := url.QueryUnescape(u.String()); err != nil { + var unescaped string + if unescaped, err = url.QueryUnescape(u.String()); err != nil { // Remove %28 type stuff, be nice to humans. // And don't fight with the toc generator. linkText = unescaped @@ -107,18 +107,37 @@ func checkLinks(filePath string, fileBytes []byte) ([]byte, error) { visibleText = suggestedVisibleText } - return []byte(fmt.Sprintf("[%s](%s)", visibleText, linkText+altText)) + return fmt.Sprintf("[%s](%s)", visibleText, linkText+altText) }) + if out == "" { + return in, err + } + return out, nil +} + +// updateLinks assumes lines has links in markdown syntax, and verifies that +// any relative links actually point to files that exist. 
+func updateLinks(filePath string, mlines mungeLines) (mungeLines, error) { + var out mungeLines + errors := []string{} + + for _, mline := range mlines { + if mline.preformatted || !mline.link { + out = append(out, mline) + continue + } + line, err := processLink(mline.data, filePath) + if err != nil { + errors = append(errors, err.Error()) + } + ml := newMungeLine(line) + out = append(out, ml) + } err := error(nil) if len(errors) != 0 { err = fmt.Errorf("%s", strings.Join(errors, "\n")) } - return output, err -} - -func makeRepoRelative(filePath string) string { - realRoot := path.Join(*rootDir, *repoRoot) + "/" - return strings.TrimPrefix(filePath, realRoot) + return out, err } // We have to append together before path.Clean will be able to tell that stuff diff --git a/cmd/mungedocs/links_test.go b/cmd/mungedocs/links_test.go new file mode 100644 index 00000000000..01889b4bfff --- /dev/null +++ b/cmd/mungedocs/links_test.go @@ -0,0 +1,76 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +var _ = fmt.Printf + +func TestBadLinks(t *testing.T) { + var cases = []struct { + in string + }{ + {"[NOTREADME](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/NOTREADME.md)"}, + {"[NOTREADME](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/NOTREADME.md)"}, + {"[NOTREADME](../NOTREADME.md)"}, + } + for _, c := range cases { + in := getMungeLines(c.in) + _, err := updateLinks("filename.md", in) + assert.Error(t, err) + } +} +func TestGoodLinks(t *testing.T) { + var cases = []struct { + in string + expected string + }{ + {"", ""}, + {"[README](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/README.md)", + "[README](README.md)"}, + {"[README](../README.md)", + "[README](README.md)"}, + {"[README](https://lwn.net)", + "[README](https://lwn.net)"}, + // _ to - + {"[README](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/devel/cli_roadmap.md)", + "[README](../../docs/devel/cli-roadmap.md)"}, + // - to _ + {"[README](../../docs/devel/api-changes.md)", + "[README](../../docs/devel/api_changes.md)"}, + + // Does this even make sense? i dunno + {"[README](/docs/README.md)", + "[README](https://github.com/docs/README.md)"}, + {"[README](/GoogleCloudPlatform/kubernetes/tree/master/docs/README.md)", + "[README](../../docs/README.md)"}, + } + for i, c := range cases { + in := getMungeLines(c.in) + expected := getMungeLines(c.expected) + actual, err := updateLinks("filename.md", in) + assert.NoError(t, err) + if !actual.Equal(expected) { + t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String()) + } + } +} diff --git a/cmd/mungedocs/mungedocs.go b/cmd/mungedocs/mungedocs.go index 4a8ca460afb..f9afe89f529 100644 --- a/cmd/mungedocs/mungedocs.go +++ b/cmd/mungedocs/mungedocs.go @@ -17,7 +17,6 @@ limitations under the License. 
package main import ( - "bytes" "errors" "fmt" "io/ioutil" @@ -30,28 +29,31 @@ import ( ) var ( - verify = flag.Bool("verify", false, "Exit with status 1 if files would have needed changes but do not change.") - rootDir = flag.String("root-dir", "", "Root directory containing documents to be processed.") - repoRoot = flag.String("repo-root", "..", `Appended to --root-dir to get the repository root. + verify = flag.Bool("verify", false, "Exit with status 1 if files would have needed changes but do not change.") + rootDir = flag.String("root-dir", "", "Root directory containing documents to be processed.") + // "repo-root" seems like a dumb name, this is the relative path (from rootDir) to get to the repoRoot + relRoot = flag.String("repo-root", "..", `Appended to --root-dir to get the repository root. It's done this way so that generally you just have to set --root-dir. Examples: * --root-dir=docs/ --repo-root=.. means the repository root is ./ * --root-dir=/usr/local/long/path/repo/docs/ --repo-root=.. means the repository root is /usr/local/long/path/repo/ * --root-dir=/usr/local/long/path/repo/docs/admin --repo-root=../.. means the repository root is /usr/local/long/path/repo/`) skipMunges = flag.String("skip-munges", "", "Comma-separated list of munges to *not* run. Available munges are: "+availableMungeList) + repoRoot string ErrChangesNeeded = errors.New("mungedocs: changes required") // All of the munge operations to perform. // TODO: allow selection from command line. (e.g., just check links in the examples directory.) 
allMunges = []munge{ + {"remove-whitespace", updateWhitespace}, {"table-of-contents", updateTOC}, {"unversioned-warning", updateUnversionedWarning}, - {"check-links", checkLinks}, - {"blank-lines-surround-preformatted", checkPreformatted}, - {"header-lines", checkHeaderLines}, - {"analytics", checkAnalytics}, - {"kubectl-dash-f", checkKubectlFileTargets}, + {"md-links", updateLinks}, + {"blank-lines-surround-preformatted", updatePreformatted}, + {"header-lines", updateHeaderLines}, + {"analytics", updateAnalytics}, + {"kubectl-dash-f", updateKubectlFileTargets}, {"sync-examples", syncExamples}, } availableMungeList = func() string { @@ -68,7 +70,7 @@ Examples: // data into a new byte array and return that. type munge struct { name string - fn func(filePath string, before []byte) (after []byte, err error) + fn func(filePath string, mlines mungeLines) (after mungeLines, err error) } type fileProcessor struct { @@ -90,12 +92,14 @@ func (f fileProcessor) visit(path string) error { return err } + mungeLines := getMungeLines(string(fileBytes)) + modificationsMade := false errFound := false filePrinted := false for _, munge := range f.munges { - after, err := munge.fn(path, fileBytes) - if err != nil || !bytes.Equal(after, fileBytes) { + after, err := munge.fn(path, mungeLines) + if err != nil || !after.Equal(mungeLines) { if !filePrinted { fmt.Printf("%s\n----\n", path) filePrinted = true @@ -110,7 +114,7 @@ func (f fileProcessor) visit(path string) error { } fmt.Println("") } - fileBytes = after + mungeLines = after } // Write out new file with any changes. @@ -119,7 +123,7 @@ func (f fileProcessor) visit(path string) error { // We're not allowed to make changes. 
return ErrChangesNeeded } - ioutil.WriteFile(path, fileBytes, 0644) + ioutil.WriteFile(path, mungeLines.Bytes(), 0644) } if errFound { return ErrChangesNeeded @@ -165,6 +169,7 @@ func wantedMunges() (filtered []munge) { } func main() { + var err error flag.Parse() if *rootDir == "" { @@ -172,11 +177,9 @@ func main() { os.Exit(1) } - // Split the root dir of "foo/docs" into "foo" and "docs". We - // chdir into "foo" and walk "docs" so the walk is always at a - // relative path. - stem, leaf := path.Split(strings.TrimRight(*rootDir, "/")) - if err := os.Chdir(stem); err != nil { + repoRoot = path.Join(*rootDir, *relRoot) + repoRoot, err = filepath.Abs(repoRoot) + if err != nil { fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) os.Exit(2) } @@ -194,7 +197,7 @@ func main() { // changes needed, exit 1 if manual changes are needed. var changesNeeded bool - err := filepath.Walk(leaf, newWalkFunc(&fp, &changesNeeded)) + err = filepath.Walk(*rootDir, newWalkFunc(&fp, &changesNeeded)) if err != nil { fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) os.Exit(2) diff --git a/cmd/mungedocs/preformatted.go b/cmd/mungedocs/preformatted.go index 515b6e3f2c8..f3864278ee0 100644 --- a/cmd/mungedocs/preformatted.go +++ b/cmd/mungedocs/preformatted.go @@ -16,40 +16,26 @@ limitations under the License. package main -import "bytes" - // Blocks of ``` need to have blank lines on both sides or they don't look // right in HTML. -func checkPreformatted(filePath string, fileBytes []byte) ([]byte, error) { - f := splitByPreformatted(fileBytes) - f = append(fileBlocks{{false, []byte{}}}, f...) 
- f = append(f, fileBlock{false, []byte{}}) - - output := []byte(nil) - for i := 1; i < len(f)-1; i++ { - prev := &f[i-1] - block := &f[i] - next := &f[i+1] - if !block.preformatted { - continue - } - neededSuffix := []byte("\n\n") - for !bytes.HasSuffix(prev.data, neededSuffix) { - prev.data = append(prev.data, '\n') - } - for !bytes.HasSuffix(block.data, neededSuffix) { - block.data = append(block.data, '\n') - if bytes.HasPrefix(next.data, []byte("\n")) { - // don't change the number of newlines unless needed. - next.data = next.data[1:] - if len(next.data) == 0 { - f = append(f[:i+1], f[i+2:]...) - } +func updatePreformatted(filePath string, mlines mungeLines) (mungeLines, error) { + var out mungeLines + inpreformat := false + for i, mline := range mlines { + if !inpreformat && mline.preformatted { + if i == 0 || out[len(out)-1].data != "" { + out = append(out, blankMungeLine) } + // start of a preformat block + inpreformat = true + } + out = append(out, mline) + if inpreformat && !mline.preformatted { + if i >= len(mlines)-2 || mlines[i+1].data != "" { + out = append(out, blankMungeLine) + } + inpreformat = false } } - for _, block := range f { - output = append(output, block.data...) - } - return output, nil + return out, nil } diff --git a/cmd/mungedocs/preformatted_test.go b/cmd/mungedocs/preformatted_test.go new file mode 100644 index 00000000000..205a89e3ad3 --- /dev/null +++ b/cmd/mungedocs/preformatted_test.go @@ -0,0 +1,57 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPreformatted(t *testing.T) { + var cases = []struct { + in string + expected string + }{ + {"", ""}, + { + "```\nbob\n```", + "\n```\nbob\n```\n\n", + }, + { + "```\nbob\n```\n```\nnotbob\n```\n", + "\n```\nbob\n```\n\n```\nnotbob\n```\n\n", + }, + { + "```bob```\n", + "```bob```\n", + }, + { + " ```\n bob\n ```", + "\n ```\n bob\n ```\n\n", + }, + } + for i, c := range cases { + in := getMungeLines(c.in) + expected := getMungeLines(c.expected) + actual, err := updatePreformatted("filename.md", in) + assert.NoError(t, err) + if !actual.Equal(expected) { + t.Errorf("case[%d]: expected %q got %q", i, c.expected, actual.String()) + } + } +} diff --git a/cmd/mungedocs/toc.go b/cmd/mungedocs/toc.go index ecb97001c7d..8a244eaf0c0 100644 --- a/cmd/mungedocs/toc.go +++ b/cmd/mungedocs/toc.go @@ -17,8 +17,6 @@ limitations under the License. package main import ( - "bufio" - "bytes" "fmt" "regexp" "strings" @@ -26,6 +24,8 @@ import ( const tocMungeTag = "GENERATED_TOC" +var r = regexp.MustCompile("[^A-Za-z0-9-]") + // inserts/updates a table of contents in markdown file. // // First, builds a ToC. @@ -33,15 +33,11 @@ const tocMungeTag = "GENERATED_TOC" // the ToC, thereby updating any previously inserted ToC. 
// // TODO(erictune): put this in own package with tests -func updateTOC(filePath string, markdown []byte) ([]byte, error) { - toc, err := buildTOC(markdown) +func updateTOC(filePath string, mlines mungeLines) (mungeLines, error) { + toc := buildTOC(mlines) + updatedMarkdown, err := updateMacroBlock(mlines, tocMungeTag, toc) if err != nil { - return nil, err - } - lines := splitLines(markdown) - updatedMarkdown, err := updateMacroBlock(lines, beginMungeTag(tocMungeTag), endMungeTag(tocMungeTag), string(toc)) - if err != nil { - return nil, err + return mlines, err } return updatedMarkdown, nil } @@ -52,24 +48,19 @@ func updateTOC(filePath string, markdown []byte) ([]byte, error) { // and builds a table of contents from those. Assumes bookmarks for those will be // like #each-word-in-heading-in-lowercases-with-dashes-instead-of-spaces. // builds the ToC. -func buildTOC(markdown []byte) ([]byte, error) { - var buffer bytes.Buffer - buffer.WriteString("\n") - scanner := bufio.NewScanner(bytes.NewReader(markdown)) - inBlockQuotes := false - for scanner.Scan() { - line := scanner.Text() - match, err := regexp.Match("^```", []byte(line)) - if err != nil { - return nil, err - } - if match { - inBlockQuotes = !inBlockQuotes + +func buildTOC(mlines mungeLines) mungeLines { + var out mungeLines + + for _, mline := range mlines { + if mline.preformatted || !mline.header { continue } - if inBlockQuotes { - continue + // Add a blank line after the munge start tag + if len(out) == 0 { + out = append(out, blankMungeLine) } + line := mline.data noSharps := strings.TrimLeft(line, "#") numSharps := len(line) - len(noSharps) heading := strings.Trim(noSharps, " \n") @@ -77,16 +68,15 @@ func buildTOC(markdown []byte) ([]byte, error) { indent := strings.Repeat(" ", numSharps-1) bookmark := strings.Replace(strings.ToLower(heading), " ", "-", -1) // remove symbols (except for -) in bookmarks - r := regexp.MustCompile("[^A-Za-z0-9-]") bookmark = r.ReplaceAllString(bookmark, "") - tocLine 
:= fmt.Sprintf("%s- [%s](#%s)\n", indent, heading, bookmark) - buffer.WriteString(tocLine) + tocLine := fmt.Sprintf("%s- [%s](#%s)", indent, heading, bookmark) + out = append(out, newMungeLine(tocLine)) } } - if err := scanner.Err(); err != nil { - return []byte{}, err + // Add a blank line before the munge end tag + if len(out) != 0 { + out = append(out, blankMungeLine) } - - return buffer.Bytes(), nil + return out } diff --git a/cmd/mungedocs/toc_test.go b/cmd/mungedocs/toc_test.go index b2f7f4d9925..7b5ddf9b817 100644 --- a/cmd/mungedocs/toc_test.go +++ b/cmd/mungedocs/toc_test.go @@ -24,37 +24,38 @@ import ( func Test_buildTOC(t *testing.T) { var cases = []struct { - in string - out string + in string + expected string }{ - {"", "\n"}, - {"Lorem ipsum\ndolor sit amet\n", "\n"}, + {"", ""}, + {"Lorem ipsum\ndolor sit amet\n", ""}, { "# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n", - "\n- [Title](#title)\n - [Section Heading](#section-heading)\n", + "\n- [Title](#title)\n - [Section Heading](#section-heading)\n\n", }, { "# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n```bash\n#!/bin/sh\n```", - "\n- [Title](#title)\n - [Section Heading](#section-heading)\n", + "\n- [Title](#title)\n - [Section Heading](#section-heading)\n\n", }, { "# Title\nLorem ipsum \n## Section Heading\n### Ok, why doesn't this work? ...add 4 *more* `symbols`!\ndolor sit amet\n", - "\n- [Title](#title)\n - [Section Heading](#section-heading)\n - [Ok, why doesn't this work? ...add 4 *more* `symbols`!](#ok-why-doesnt-this-work-add-4-more-symbols)\n", + "\n- [Title](#title)\n - [Section Heading](#section-heading)\n - [Ok, why doesn't this work? 
...add 4 *more* `symbols`!](#ok-why-doesnt-this-work-add-4-more-symbols)\n\n", }, } - for _, c := range cases { - actual, err := buildTOC([]byte(c.in)) - assert.NoError(t, err) - if c.out != string(actual) { - t.Errorf("Expected TOC '%v' but got '%v'", c.out, string(actual)) + for i, c := range cases { + in := getMungeLines(c.in) + expected := getMungeLines(c.expected) + actual := buildTOC(in) + if !expected.Equal(actual) { + t.Errorf("Case[%d] Expected TOC '%v' but got '%v'", i, expected.String(), actual.String()) } } } func Test_updateTOC(t *testing.T) { var cases = []struct { - in string - out string + in string + expected string }{ {"", ""}, { @@ -67,10 +68,12 @@ func Test_updateTOC(t *testing.T) { }, } for _, c := range cases { - actual, err := updateTOC("filename.md", []byte(c.in)) + in := getMungeLines(c.in) + expected := getMungeLines(c.expected) + actual, err := updateTOC("filename.md", in) assert.NoError(t, err) - if c.out != string(actual) { - t.Errorf("Expected TOC '%v' but got '%v'", c.out, string(actual)) + if !expected.Equal(actual) { + t.Errorf("Expected TOC '%v' but got '%v'", expected.String(), actual.String()) } } } diff --git a/cmd/mungedocs/unversioned_warning.go b/cmd/mungedocs/unversioned_warning.go index 8d04fade6cc..b86e72ebe57 100644 --- a/cmd/mungedocs/unversioned_warning.go +++ b/cmd/mungedocs/unversioned_warning.go @@ -20,10 +20,7 @@ import "fmt" const unversionedWarningTag = "UNVERSIONED_WARNING" -var beginUnversionedWarning = beginMungeTag(unversionedWarningTag) -var endUnversionedWarning = endMungeTag(unversionedWarningTag) - -const unversionedWarningFmt = ` +const unversionedWarningPre = ` WARNING The latest 1.0.x release of this document can be found -[here](http://releases.k8s.io/release-1.0/%s). +` + +const unversionedWarningFmt = `[here](http://releases.k8s.io/release-1.0/%s).` + +const unversionedWarningPost = ` Documentation for other releases can be found at [releases.k8s.io](http://releases.k8s.io). 
@@ -52,21 +53,31 @@ Documentation for other releases can be found at -- + ` -func makeUnversionedWarning(fileName string) string { - return fmt.Sprintf(unversionedWarningFmt, fileName) +func makeUnversionedWarning(fileName string) mungeLines { + insert := unversionedWarningPre + fmt.Sprintf(unversionedWarningFmt, fileName) + unversionedWarningPost + return getMungeLines(insert) } // inserts/updates a warning for unversioned docs -func updateUnversionedWarning(file string, markdown []byte) ([]byte, error) { - lines := splitLines(markdown) - if hasLine(lines, "") { +func updateUnversionedWarning(file string, mlines mungeLines) (mungeLines, error) { + file, err := makeRepoRelative(file, file) + if err != nil { + return mlines, err + } + if hasLine(mlines, "") { // No warnings on release branches - return markdown, nil + return mlines, nil } - if !hasMacroBlock(lines, beginUnversionedWarning, endUnversionedWarning) { - lines = append([]string{beginUnversionedWarning, endUnversionedWarning}, lines...) 
+ if !hasMacroBlock(mlines, unversionedWarningTag) { + mlines = prependMacroBlock(unversionedWarningTag, mlines) } - return updateMacroBlock(lines, beginUnversionedWarning, endUnversionedWarning, makeUnversionedWarning(file)) + + mlines, err = updateMacroBlock(mlines, unversionedWarningTag, makeUnversionedWarning(file)) + if err != nil { + return mlines, err + } + return mlines, nil } diff --git a/cmd/mungedocs/unversioned_warning_test.go b/cmd/mungedocs/unversioned_warning_test.go index cac4ac3fe69..4d758bd2b98 100644 --- a/cmd/mungedocs/unversioned_warning_test.go +++ b/cmd/mungedocs/unversioned_warning_test.go @@ -23,30 +23,34 @@ import ( ) func TestUnversionedWarning(t *testing.T) { - warningBlock := beginUnversionedWarning + "\n" + makeUnversionedWarning("filename.md") + "\n" + endUnversionedWarning + "\n" + beginMark := beginMungeTag(unversionedWarningTag) + endMark := endMungeTag(unversionedWarningTag) + + warningString := makeUnversionedWarning("filename.md").String() + warningBlock := beginMark + "\n" + warningString + endMark + "\n" var cases = []struct { - in string - out string + in string + expected string }{ {"", warningBlock}, { "Foo\nBar\n", - warningBlock + "Foo\nBar\n", + warningBlock + "\nFoo\nBar\n", }, { "Foo\n\nBar", "Foo\n\nBar", }, { - beginUnversionedWarning + "\n" + endUnversionedWarning + "\n", + beginMark + "\n" + endMark + "\n", warningBlock, }, { - beginUnversionedWarning + "\n" + "something\n" + endUnversionedWarning + "\n", + beginMark + "\n" + "something\n" + endMark + "\n", warningBlock, }, { - "Foo\n" + beginUnversionedWarning + "\n" + endUnversionedWarning + "\nBar\n", + "Foo\n" + beginMark + "\n" + endMark + "\nBar\n", "Foo\n" + warningBlock + "Bar\n", }, { @@ -55,10 +59,12 @@ func TestUnversionedWarning(t *testing.T) { }, } for i, c := range cases { - actual, err := updateUnversionedWarning("filename.md", []byte(c.in)) + in := getMungeLines(c.in) + expected := getMungeLines(c.expected) + actual, err := 
updateUnversionedWarning("filename.md", in) assert.NoError(t, err) - if string(actual) != c.out { - t.Errorf("case[%d]: expected %q got %q", i, c.out, string(actual)) + if !expected.Equal(actual) { + t.Errorf("case[%d]: expected %v got %v", i, expected.String(), actual.String()) } } } diff --git a/cmd/mungedocs/util.go b/cmd/mungedocs/util.go index a5fd2847a45..01ac20a5d3e 100644 --- a/cmd/mungedocs/util.go +++ b/cmd/mungedocs/util.go @@ -17,83 +17,140 @@ limitations under the License. package main import ( - "bytes" "fmt" + "path" + "path/filepath" "regexp" "strings" + "unicode" ) -// Splits a document up into a slice of lines. -func splitLines(document []byte) []string { - lines := strings.Split(string(document), "\n") - // Skip trailing empty string from Split-ing - if len(lines) > 0 && lines[len(lines)-1] == "" { - lines = lines[:len(lines)-1] - } - return lines -} - // Replaces the text between matching "beginMark" and "endMark" within the // document represented by "lines" with "insertThis". // // Delimiters should occupy own line. // Returns copy of document with modifications. 
-func updateMacroBlock(lines []string, beginMark, endMark, insertThis string) ([]byte, error) { - var buffer bytes.Buffer +func updateMacroBlock(mlines mungeLines, token string, insertThis mungeLines) (mungeLines, error) { + beginMark := beginMungeTag(token) + endMark := endMungeTag(token) + var out mungeLines betweenBeginAndEnd := false - for _, line := range lines { - trimmedLine := strings.Trim(line, " \n") - if trimmedLine == beginMark { + for _, mline := range mlines { + if mline.preformatted && !betweenBeginAndEnd { + out = append(out, mline) + continue + } + line := mline.data + if mline.beginTag && line == beginMark { if betweenBeginAndEnd { return nil, fmt.Errorf("found second begin mark while updating macro blocks") } betweenBeginAndEnd = true - buffer.WriteString(line) - buffer.WriteString("\n") - } else if trimmedLine == endMark { + out = append(out, mline) + } else if mline.endTag && line == endMark { if !betweenBeginAndEnd { - return nil, fmt.Errorf("found end mark without being mark while updating macro blocks") + return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks") } - buffer.WriteString(insertThis) - // Extra newline avoids github markdown bug where comment ends up on same line as last bullet. - buffer.WriteString("\n") - buffer.WriteString(line) - buffer.WriteString("\n") betweenBeginAndEnd = false + out = append(out, insertThis...) + out = append(out, mline) } else { if !betweenBeginAndEnd { - buffer.WriteString(line) - buffer.WriteString("\n") + out = append(out, mline) } } } if betweenBeginAndEnd { return nil, fmt.Errorf("never found closing end mark while updating macro blocks") } - return buffer.Bytes(), nil + return out, nil } // Tests that a document, represented as a slice of lines, has a line. Ignores // leading and trailing space. 
-func hasLine(lines []string, needle string) bool { - for _, line := range lines { - trimmedLine := strings.Trim(line, " \n") - if trimmedLine == needle { +func hasLine(lines mungeLines, needle string) bool { + for _, mline := range lines { + haystack := strings.TrimSpace(mline.data) + if haystack == needle { return true } } return false } +func removeMacroBlock(token string, mlines mungeLines) (mungeLines, error) { + beginMark := beginMungeTag(token) + endMark := endMungeTag(token) + var out mungeLines + betweenBeginAndEnd := false + for _, mline := range mlines { + if mline.preformatted { + out = append(out, mline) + continue + } + line := mline.data + if mline.beginTag && line == beginMark { + if betweenBeginAndEnd { + return nil, fmt.Errorf("found second begin mark while updating macro blocks") + } + betweenBeginAndEnd = true + } else if mline.endTag && line == endMark { + if !betweenBeginAndEnd { + return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks") + } + betweenBeginAndEnd = false + } else { + if !betweenBeginAndEnd { + out = append(out, mline) + } + } + } + if betweenBeginAndEnd { + return nil, fmt.Errorf("never found closing end mark while updating macro blocks") + } + return out, nil +} + +// Add a macro block to the beginning of a set of lines +func prependMacroBlock(token string, mlines mungeLines) mungeLines { + beginLine := newMungeLine(beginMungeTag(token)) + endLine := newMungeLine(endMungeTag(token)) + out := mungeLines{beginLine, endLine} + if len(mlines) > 0 && mlines[0].data != "" { + out = append(out, blankMungeLine) + } + return append(out, mlines...) 
+} + +// Add a macro block to the end of a set of lines +func appendMacroBlock(mlines mungeLines, token string) mungeLines { + beginLine := newMungeLine(beginMungeTag(token)) + endLine := newMungeLine(endMungeTag(token)) + out := mlines + if len(mlines) > 0 && mlines[len(mlines)-1].data != "" { + out = append(out, blankMungeLine) + } + return append(out, beginLine, endLine) +} + // Tests that a document, represented as a slice of lines, has a macro block. -func hasMacroBlock(lines []string, begin string, end string) bool { +func hasMacroBlock(lines mungeLines, token string) bool { + beginMark := beginMungeTag(token) + endMark := endMungeTag(token) + foundBegin := false - for _, line := range lines { - trimmedLine := strings.Trim(line, " \n") + for _, mline := range lines { + if mline.preformatted { + continue + } + if !mline.beginTag && !mline.endTag { + continue + } + line := mline.data switch { - case !foundBegin && trimmedLine == begin: + case !foundBegin && line == beginMark: foundBegin = true - case foundBegin && trimmedLine == end: + case foundBegin && line == endMark: return true } } @@ -112,72 +169,123 @@ func endMungeTag(desc string) string { return fmt.Sprintf("", desc) } -// Calls 'replace' for all sections of the document not in ``` / ``` blocks. So -// that you don't have false positives inside those blocks. -func replaceNonPreformatted(input []byte, replace func([]byte) []byte) []byte { - f := splitByPreformatted(input) - output := []byte(nil) - for _, block := range f { - if block.preformatted { - output = append(output, block.data...) - } else { - output = append(output, replace(block.data)...) 
+type mungeLine struct { + data string + preformatted bool + header bool + link bool + beginTag bool + endTag bool +} + +type mungeLines []mungeLine + +func (m1 mungeLines) Equal(m2 mungeLines) bool { + if len(m1) != len(m2) { + return false + } + for i := range m1 { + if m1[i].data != m2[i].data { + return false } } - return output + return true } -type fileBlock struct { - preformatted bool - data []byte +func (mlines mungeLines) String() string { + slice := []string{} + for _, mline := range mlines { + slice = append(slice, mline.data) + } + s := strings.Join(slice, "\n") + // We need to tack on an extra newline at the end of the file + return s + "\n" } -type fileBlocks []fileBlock +func (mlines mungeLines) Bytes() []byte { + return []byte(mlines.String()) +} var ( // Finds all preformatted block start/stops. preformatRE = regexp.MustCompile("^\\s*```") notPreformatRE = regexp.MustCompile("^\\s*```.*```") + // Is this line a header? + mlHeaderRE = regexp.MustCompile(`^#`) + // Is there a link on this line? + mlLinkRE = regexp.MustCompile(`\[[^]]*\]\([^)]*\)`) + beginTagRE = regexp.MustCompile(` + Create the development namespace using kubectl. diff --git a/docs/admin/node.md b/docs/admin/node.md index c1452826320..fa24ffd2842 100644 --- a/docs/admin/node.md +++ b/docs/admin/node.md @@ -234,7 +234,7 @@ capacity when adding a node. The Kubernetes scheduler ensures that there are enough resources for all the pods on a node. It checks that the sum of the limits of containers on the node is no greater than than the node capacity. It includes all containers started by kubelet, but not containers started directly by docker, nor -processes not in containers. +processes not in containers. If you want to explicitly reserve resources for non-Pod processes, you can create a placeholder pod. 
Use the following template: diff --git a/docs/admin/resource-quota.md b/docs/admin/resource-quota.md index 4ed9b52d30a..669a97fa13b 100644 --- a/docs/admin/resource-quota.md +++ b/docs/admin/resource-quota.md @@ -160,14 +160,14 @@ Sometimes more complex policies may be desired, such as: Such policies could be implemented using ResourceQuota as a building-block, by writing a 'controller' which watches the quota usage and adjusts the quota -hard limits of each namespace according to other signals. +hard limits of each namespace according to other signals. Note that resource quota divides up aggregate cluster resources, but it creates no restrictions around nodes: pods from several namespaces may run on the same node. ## Example -See a [detailed example for how to use resource quota](../user-guide/resourcequota/). +See a [detailed example for how to use resource quota](../user-guide/resourcequota/). ## Read More diff --git a/docs/admin/service-accounts-admin.md b/docs/admin/service-accounts-admin.md index ff4cbad808f..3fe0c85af23 100644 --- a/docs/admin/service-accounts-admin.md +++ b/docs/admin/service-accounts-admin.md @@ -56,7 +56,7 @@ for a number of reasons: - Auditing considerations for humans and service accounts may differ. - A config bundle for a complex system may include definition of various service accounts for components of that system. Because service accounts can be created - ad-hoc and have namespaced names, such config is portable. + ad-hoc and have namespaced names, such config is portable. ## Service account automation diff --git a/docs/api.md b/docs/api.md index 6e1bf443ab5..55aaa3e86e9 100644 --- a/docs/api.md +++ b/docs/api.md @@ -55,7 +55,7 @@ What constitutes a compatible change and how to change the API are detailed by t ## API versioning -To make it easier to eliminate fields or restructure resource representations, Kubernetes supports multiple API versions, each at a different API path prefix, such as `/api/v1beta3`. 
These are simply different interfaces to read and/or modify the same underlying resources. In general, all API resources are accessible via all API versions, though there may be some cases in the future where that is not true. +To make it easier to eliminate fields or restructure resource representations, Kubernetes supports multiple API versions, each at a different API path prefix, such as `/api/v1beta3`. These are simply different interfaces to read and/or modify the same underlying resources. In general, all API resources are accessible via all API versions, though there may be some cases in the future where that is not true. We chose to version at the API level rather than at the resource or field level to ensure that the API presents a clear, consistent view of system resources and behavior, and to enable controlling access to end-of-lifed and/or experimental APIs. diff --git a/docs/design/README.md b/docs/design/README.md index 62946cb6f5b..72d2c662afc 100644 --- a/docs/design/README.md +++ b/docs/design/README.md @@ -33,7 +33,7 @@ Documentation for other releases can be found at # Kubernetes Design Overview -Kubernetes is a system for managing containerized applications across multiple hosts, providing basic mechanisms for deployment, maintenance, and scaling of applications. +Kubernetes is a system for managing containerized applications across multiple hosts, providing basic mechanisms for deployment, maintenance, and scaling of applications. Kubernetes establishes robust declarative primitives for maintaining the desired state requested by the user. We see these primitives as the main value added by Kubernetes. Self-healing mechanisms, such as auto-restarting, re-scheduling, and replicating containers require active controllers, not just imperative orchestration. 
diff --git a/docs/design/admission_control_resource_quota.md b/docs/design/admission_control_resource_quota.md index c86577ac6b3..136603d2c36 100644 --- a/docs/design/admission_control_resource_quota.md +++ b/docs/design/admission_control_resource_quota.md @@ -104,7 +104,7 @@ type ResourceQuotaList struct { ## AdmissionControl plugin: ResourceQuota -The **ResourceQuota** plug-in introspects all incoming admission requests. +The **ResourceQuota** plug-in introspects all incoming admission requests. It makes decisions by evaluating the incoming object against all defined **ResourceQuota.Status.Hard** resource limits in the request namespace. If acceptance of the resource would cause the total usage of a named resource to exceed its hard limit, the request is denied. @@ -125,7 +125,7 @@ Any resource that is not part of core Kubernetes must follow the resource naming This means the resource must have a fully-qualified name (i.e. mycompany.org/shinynewresource) If the incoming request does not cause the total usage to exceed any of the enumerated hard resource limits, the plug-in will post a -**ResourceQuotaUsage** document to the server to atomically update the observed usage based on the previously read +**ResourceQuotaUsage** document to the server to atomically update the observed usage based on the previously read **ResourceQuota.ResourceVersion**. This keeps incremental usage atomically consistent, but does introduce a bottleneck (intentionally) into the system. @@ -184,7 +184,7 @@ resourcequotas 1 1 services 3 5 ``` -## More information +## More information See [resource quota document](../admin/resource-quota.md) and the [example of Resource Quota](../user-guide/resourcequota/) for more information. diff --git a/docs/design/architecture.md b/docs/design/architecture.md index f7c5517198e..5f829d684fd 100644 --- a/docs/design/architecture.md +++ b/docs/design/architecture.md @@ -47,7 +47,7 @@ Each node runs Docker, of course. 
Docker takes care of the details of downloadi ### Kubelet -The **Kubelet** manages [pods](../user-guide/pods.md) and their containers, their images, their volumes, etc. +The **Kubelet** manages [pods](../user-guide/pods.md) and their containers, their images, their volumes, etc. ### Kube-Proxy diff --git a/docs/design/event_compression.md b/docs/design/event_compression.md index bfa2c5d60a3..ce8d1ad4e08 100644 --- a/docs/design/event_compression.md +++ b/docs/design/event_compression.md @@ -49,7 +49,7 @@ Event compression should be best effort (not guaranteed). Meaning, in the worst ## Design Instead of a single Timestamp, each event object [contains](http://releases.k8s.io/HEAD/pkg/api/types.go#L1111) the following fields: - * `FirstTimestamp util.Time` + * `FirstTimestamp util.Time` * The date/time of the first occurrence of the event. * `LastTimestamp util.Time` * The date/time of the most recent occurrence of the event. diff --git a/docs/design/expansion.md b/docs/design/expansion.md index 75c748ca671..24a07f0d19f 100644 --- a/docs/design/expansion.md +++ b/docs/design/expansion.md @@ -87,7 +87,7 @@ available to subsequent expansions. ### Use Case: Variable expansion in command -Users frequently need to pass the values of environment variables to a container's command. +Users frequently need to pass the values of environment variables to a container's command. Currently, Kubernetes does not perform any expansion of variables. The workaround is to invoke a shell in the container's command and have the shell perform the substitution, or to write a wrapper script that sets up the environment and runs the command. This has a number of drawbacks: @@ -130,7 +130,7 @@ The exact syntax for variable expansion has a large impact on how users perceive feature. We considered implementing a very restrictive subset of the shell `${var}` syntax. This syntax is an attractive option on some level, because many people are familiar with it. 
However, this syntax also has a large number of lesser known features such as the ability to provide -default values for unset variables, perform inline substitution, etc. +default values for unset variables, perform inline substitution, etc. In the interest of preventing conflation of the expansion feature in Kubernetes with the shell feature, we chose a different syntax similar to the one in Makefiles, `$(var)`. We also chose not @@ -239,7 +239,7 @@ The necessary changes to implement this functionality are: `ObjectReference` and an `EventRecorder` 2. Introduce `third_party/golang/expansion` package that provides: 1. An `Expand(string, func(string) string) string` function - 2. A `MappingFuncFor(ObjectEventRecorder, ...map[string]string) string` function + 2. A `MappingFuncFor(ObjectEventRecorder, ...map[string]string) string` function 3. Make the kubelet expand environment correctly 4. Make the kubelet expand command correctly @@ -311,7 +311,7 @@ func Expand(input string, mapping func(string) string) string { #### Kubelet changes -The Kubelet should be made to correctly expand variables references in a container's environment, +The Kubelet should be made to correctly expand variables references in a container's environment, command, and args. Changes will need to be made to: 1. The `makeEnvironmentVariables` function in the kubelet; this is used by diff --git a/docs/design/namespaces.md b/docs/design/namespaces.md index da3bb2c5b0b..596f6f4389e 100644 --- a/docs/design/namespaces.md +++ b/docs/design/namespaces.md @@ -52,7 +52,7 @@ Each user community has its own: A cluster operator may create a Namespace for each unique user community. -The Namespace provides a unique scope for: +The Namespace provides a unique scope for: 1. named resources (to avoid basic naming collisions) 2. delegated management authority to trusted users @@ -142,7 +142,7 @@ type NamespaceSpec struct { A *FinalizerName* is a qualified name. 
-The API Server enforces that a *Namespace* can only be deleted from storage if and only if +The API Server enforces that a *Namespace* can only be deleted from storage if and only if it's *Namespace.Spec.Finalizers* is empty. A *finalize* operation is the only mechanism to modify the *Namespace.Spec.Finalizers* field post creation. @@ -189,12 +189,12 @@ are known to the cluster. The *namespace controller* enumerates each known resource type in that namespace and deletes it one by one. Admission control blocks creation of new resources in that namespace in order to prevent a race-condition -where the controller could believe all of a given resource type had been deleted from the namespace, +where the controller could believe all of a given resource type had been deleted from the namespace, when in fact some other rogue client agent had created new objects. Using admission control in this scenario allows each of registry implementations for the individual objects to not need to take into account Namespace life-cycle. Once all objects known to the *namespace controller* have been deleted, the *namespace controller* -executes a *finalize* operation on the namespace that removes the *kubernetes* value from +executes a *finalize* operation on the namespace that removes the *kubernetes* value from the *Namespace.Spec.Finalizers* list. If the *namespace controller* sees a *Namespace* whose *ObjectMeta.DeletionTimestamp* is set, and @@ -245,13 +245,13 @@ In etcd, we want to continue to still support efficient WATCH across namespaces. Resources that persist content in etcd will have storage paths as follows: -/{k8s_storage_prefix}/{resourceType}/{resource.Namespace}/{resource.Name} +/{k8s_storage_prefix}/{resourceType}/{resource.Namespace}/{resource.Name} This enables consumers to WATCH /registry/{resourceType} for changes across namespace of a particular {resourceType}. 
### Kubelet -The kubelet will register pod's it sources from a file or http source with a namespace associated with the +The kubelet will register pod's it sources from a file or http source with a namespace associated with the *cluster-id* ### Example: OpenShift Origin managing a Kubernetes Namespace @@ -362,7 +362,7 @@ This results in the following state: At this point, the Kubernetes *namespace controller* in its sync loop will see that the namespace has a deletion timestamp and that its list of finalizers is empty. As a result, it knows all -content associated from that namespace has been purged. It performs a final DELETE action +content associated from that namespace has been purged. It performs a final DELETE action to remove that Namespace from the storage. At this point, all content associated with that Namespace, and the Namespace itself are gone. diff --git a/docs/design/persistent-storage.md b/docs/design/persistent-storage.md index 51cfce89b52..bb2008118a9 100644 --- a/docs/design/persistent-storage.md +++ b/docs/design/persistent-storage.md @@ -41,11 +41,11 @@ Two new API kinds: A `PersistentVolume` (PV) is a storage resource provisioned by an administrator. It is analogous to a node. See [Persistent Volume Guide](../user-guide/persistent-volumes/) for how to use it. -A `PersistentVolumeClaim` (PVC) is a user's request for a persistent volume to use in a pod. It is analogous to a pod. +A `PersistentVolumeClaim` (PVC) is a user's request for a persistent volume to use in a pod. It is analogous to a pod. One new system component: -`PersistentVolumeClaimBinder` is a singleton running in master that watches all PersistentVolumeClaims in the system and binds them to the closest matching available PersistentVolume. The volume manager watches the API for newly created volumes to manage. 
+`PersistentVolumeClaimBinder` is a singleton running in master that watches all PersistentVolumeClaims in the system and binds them to the closest matching available PersistentVolume. The volume manager watches the API for newly created volumes to manage. One new volume: @@ -69,7 +69,7 @@ Cluster administrators use the API to manage *PersistentVolumes*. A custom stor PVs are system objects and, thus, have no namespace. -Many means of dynamic provisioning will be eventually be implemented for various storage types. +Many means of dynamic provisioning will be eventually be implemented for various storage types. ##### PersistentVolume API @@ -116,7 +116,7 @@ TBD #### Events -The implementation of persistent storage will not require events to communicate to the user the state of their claim. The CLI for bound claims contains a reference to the backing persistent volume. This is always present in the API and CLI, making an event to communicate the same unnecessary. +The implementation of persistent storage will not require events to communicate to the user the state of their claim. The CLI for bound claims contains a reference to the backing persistent volume. This is always present in the API and CLI, making an event to communicate the same unnecessary. Events that communicate the state of a mounted volume are left to the volume plugins. @@ -232,9 +232,9 @@ When a claim holder is finished with their data, they can delete their claim. $ kubectl delete pvc myclaim-1 ``` -The ```PersistentVolumeClaimBinder``` will reconcile this by removing the claim reference from the PV and change the PVs status to 'Released'. +The ```PersistentVolumeClaimBinder``` will reconcile this by removing the claim reference from the PV and change the PVs status to 'Released'. -Admins can script the recycling of released volumes. Future dynamic provisioners will understand how a volume should be recycled. +Admins can script the recycling of released volumes. 
Future dynamic provisioners will understand how a volume should be recycled. diff --git a/docs/design/principles.md b/docs/design/principles.md index c208fb6b468..23a20349548 100644 --- a/docs/design/principles.md +++ b/docs/design/principles.md @@ -33,7 +33,7 @@ Documentation for other releases can be found at # Design Principles -Principles to follow when extending Kubernetes. +Principles to follow when extending Kubernetes. ## API @@ -44,14 +44,14 @@ See also the [API conventions](../devel/api-conventions.md). * The control plane should be transparent -- there are no hidden internal APIs. * The cost of API operations should be proportional to the number of objects intentionally operated upon. Therefore, common filtered lookups must be indexed. Beware of patterns of multiple API calls that would incur quadratic behavior. * Object status must be 100% reconstructable by observation. Any history kept must be just an optimization and not required for correct operation. -* Cluster-wide invariants are difficult to enforce correctly. Try not to add them. If you must have them, don't enforce them atomically in master components, that is contention-prone and doesn't provide a recovery path in the case of a bug allowing the invariant to be violated. Instead, provide a series of checks to reduce the probability of a violation, and make every component involved able to recover from an invariant violation. +* Cluster-wide invariants are difficult to enforce correctly. Try not to add them. If you must have them, don't enforce them atomically in master components, that is contention-prone and doesn't provide a recovery path in the case of a bug allowing the invariant to be violated. Instead, provide a series of checks to reduce the probability of a violation, and make every component involved able to recover from an invariant violation. * Low-level APIs should be designed for control by higher-level systems. 
Higher-level APIs should be intent-oriented (think SLOs) rather than implementation-oriented (think control knobs). ## Control logic * Functionality must be *level-based*, meaning the system must operate correctly given the desired state and the current/observed state, regardless of how many intermediate state updates may have been missed. Edge-triggered behavior must be just an optimization. * Assume an open world: continually verify assumptions and gracefully adapt to external events and/or actors. Example: we allow users to kill pods under control of a replication controller; it just replaces them. -* Do not define comprehensive state machines for objects with behaviors associated with state transitions and/or "assumed" states that cannot be ascertained by observation. +* Do not define comprehensive state machines for objects with behaviors associated with state transitions and/or "assumed" states that cannot be ascertained by observation. * Don't assume a component's decisions will not be overridden or rejected, nor for the component to always understand why. For example, etcd may reject writes. Kubelet may reject pods. The scheduler may not be able to schedule pods. Retry, but back off and/or make alternative decisions. * Components should be self-healing. For example, if you must keep some state (e.g., cache) the content needs to be periodically refreshed, so that if an item does get erroneously stored or a deletion event is missed etc, it will be soon fixed, ideally on timescales that are shorter than what will attract attention from humans. * Component behavior should degrade gracefully. Prioritize actions so that the most important activities can continue to function even when overloaded and/or in states of partial failure. @@ -61,7 +61,7 @@ See also the [API conventions](../devel/api-conventions.md). * Only the apiserver should communicate with etcd/store, and not other components (scheduler, kubelet, etc.). 
* Compromising a single node shouldn't compromise the cluster. * Components should continue to do what they were last told in the absence of new instructions (e.g., due to network partition or component outage). -* All components should keep all relevant state in memory all the time. The apiserver should write through to etcd/store, other components should write through to the apiserver, and they should watch for updates made by other clients. +* All components should keep all relevant state in memory all the time. The apiserver should write through to etcd/store, other components should write through to the apiserver, and they should watch for updates made by other clients. * Watch is preferred over polling. ## Extensibility diff --git a/docs/design/resources.md b/docs/design/resources.md index 7bcce84a86c..e006d44d043 100644 --- a/docs/design/resources.md +++ b/docs/design/resources.md @@ -51,7 +51,7 @@ The resource model aims to be: A Kubernetes _resource_ is something that can be requested by, allocated to, or consumed by a pod or container. Examples include memory (RAM), CPU, disk-time, and network bandwidth. -Once resources on a node have been allocated to one pod, they should not be allocated to another until that pod is removed or exits. This means that Kubernetes schedulers should ensure that the sum of the resources allocated (requested and granted) to its pods never exceeds the usable capacity of the node. Testing whether a pod will fit on a node is called _feasibility checking_. +Once resources on a node have been allocated to one pod, they should not be allocated to another until that pod is removed or exits. This means that Kubernetes schedulers should ensure that the sum of the resources allocated (requested and granted) to its pods never exceeds the usable capacity of the node. Testing whether a pod will fit on a node is called _feasibility checking_. 
Note that the resource model currently prohibits over-committing resources; we will want to relax that restriction later. @@ -70,7 +70,7 @@ For future reference, note that some resources, such as CPU and network bandwidt ### Resource quantities -Initially, all Kubernetes resource types are _quantitative_, and have an associated _unit_ for quantities of the associated resource (e.g., bytes for memory, bytes per seconds for bandwidth, instances for software licences). The units will always be a resource type's natural base units (e.g., bytes, not MB), to avoid confusion between binary and decimal multipliers and the underlying unit multiplier (e.g., is memory measured in MiB, MB, or GB?). +Initially, all Kubernetes resource types are _quantitative_, and have an associated _unit_ for quantities of the associated resource (e.g., bytes for memory, bytes per seconds for bandwidth, instances for software licences). The units will always be a resource type's natural base units (e.g., bytes, not MB), to avoid confusion between binary and decimal multipliers and the underlying unit multiplier (e.g., is memory measured in MiB, MB, or GB?). Resource quantities can be added and subtracted: for example, a node has a fixed quantity of each resource type that can be allocated to pods/containers; once such an allocation has been made, the allocated resources cannot be made available to other pods/containers without over-committing the resources. @@ -110,7 +110,7 @@ resourceCapacitySpec: [ ``` Where: -* _total_: the total allocatable resources of a node. Initially, the resources at a given scope will bound the resources of the sum of inner scopes. +* _total_: the total allocatable resources of a node. Initially, the resources at a given scope will bound the resources of the sum of inner scopes. 
#### Notes @@ -194,7 +194,7 @@ The following are planned future extensions to the resource model, included here Because resource usage and related metrics change continuously, need to be tracked over time (i.e., historically), can be characterized in a variety of ways, and are fairly voluminous, we will not include usage in core API objects, such as [Pods](../user-guide/pods.md) and Nodes, but will provide separate APIs for accessing and managing that data. See the Appendix for possible representations of usage data, but the representation we'll use is TBD. -Singleton values for observed and predicted future usage will rapidly prove inadequate, so we will support the following structure for extended usage information: +Singleton values for observed and predicted future usage will rapidly prove inadequate, so we will support the following structure for extended usage information: ```yaml resourceStatus: [ @@ -223,7 +223,7 @@ where a `` or `` structure looks like this: ``` All parts of this structure are optional, although we strongly encourage including quantities for 50, 90, 95, 99, 99.5, and 99.9 percentiles. _[In practice, it will be important to include additional info such as the length of the time window over which the averages are calculated, the confidence level, and information-quality metrics such as the number of dropped or discarded data points.]_ -and predicted +and predicted ## Future resource types diff --git a/docs/design/secrets.md b/docs/design/secrets.md index f5793133850..3adc57af624 100644 --- a/docs/design/secrets.md +++ b/docs/design/secrets.md @@ -34,7 +34,7 @@ Documentation for other releases can be found at ## Abstract A proposal for the distribution of [secrets](../user-guide/secrets.md) (passwords, keys, etc) to the Kubelet and to -containers inside Kubernetes using a custom [volume](../user-guide/volumes.md#secrets) type. See the [secrets example](../user-guide/secrets/) for more information. 
+containers inside Kubernetes using a custom [volume](../user-guide/volumes.md#secrets) type. See the [secrets example](../user-guide/secrets/) for more information. ## Motivation @@ -117,7 +117,7 @@ which consumes this type of secret, the Kubelet may take a number of actions: 1. Expose the secret in a `.kubernetes_auth` file in a well-known location in the container's file system -2. Configure that node's `kube-proxy` to decorate HTTP requests from that pod to the +2. Configure that node's `kube-proxy` to decorate HTTP requests from that pod to the `kubernetes-master` service with the auth token, e. g. by adding a header to the request (see the [LOAS Daemon](https://github.com/GoogleCloudPlatform/kubernetes/issues/2209) proposal) @@ -146,7 +146,7 @@ We should consider what the best way to allow this is; there are a few different export MY_SECRET_ENV=MY_SECRET_VALUE The user could `source` the file at `/etc/secrets/my-secret` prior to executing the command for - the image either inline in the command or in an init script, + the image either inline in the command or in an init script, 2. Give secrets an attribute that allows users to express the intent that the platform should generate the above syntax in the file used to present a secret. The user could consume these diff --git a/docs/design/security_context.md b/docs/design/security_context.md index 03213927ece..7a80c01d2de 100644 --- a/docs/design/security_context.md +++ b/docs/design/security_context.md @@ -48,55 +48,55 @@ The problem of securing containers in Kubernetes has come up [before](https://gi ### Container isolation -In order to improve container isolation from host and other containers running on the host, containers should only be -granted the access they need to perform their work. 
To this end it should be possible to take advantage of Docker -features such as the ability to [add or remove capabilities](https://docs.docker.com/reference/run/#runtime-privilege-linux-capabilities-and-lxc-configuration) and [assign MCS labels](https://docs.docker.com/reference/run/#security-configuration) +In order to improve container isolation from host and other containers running on the host, containers should only be +granted the access they need to perform their work. To this end it should be possible to take advantage of Docker +features such as the ability to [add or remove capabilities](https://docs.docker.com/reference/run/#runtime-privilege-linux-capabilities-and-lxc-configuration) and [assign MCS labels](https://docs.docker.com/reference/run/#security-configuration) to the container process. Support for user namespaces has recently been [merged](https://github.com/docker/libcontainer/pull/304) into Docker's libcontainer project and should soon surface in Docker itself. It will make it possible to assign a range of unprivileged uids and gids from the host to each container, improving the isolation between host and container and between containers. ### External integration with shared storage -In order to support external integration with shared storage, processes running in a Kubernetes cluster -should be able to be uniquely identified by their Unix UID, such that a chain of ownership can be established. +In order to support external integration with shared storage, processes running in a Kubernetes cluster +should be able to be uniquely identified by their Unix UID, such that a chain of ownership can be established. Processes in pods will need to have consistent UID/GID/SELinux category labels in order to access shared disks. ## Constraints and Assumptions -* It is out of the scope of this document to prescribe a specific set +* It is out of the scope of this document to prescribe a specific set of constraints to isolate containers from their host. 
Different use cases need different settings. -* The concept of a security context should not be tied to a particular security mechanism or platform +* The concept of a security context should not be tied to a particular security mechanism or platform (ie. SELinux, AppArmor) * Applying a different security context to a scope (namespace or pod) requires a solution such as the one proposed for [service accounts](service_accounts.md). ## Use Cases -In order of increasing complexity, following are example use cases that would +In order of increasing complexity, following are example use cases that would be addressed with security contexts: 1. Kubernetes is used to run a single cloud application. In order to protect nodes from containers: * All containers run as a single non-root user * Privileged containers are disabled - * All containers run with a particular MCS label + * All containers run with a particular MCS label * Kernel capabilities like CHOWN and MKNOD are removed from containers - + 2. Just like case #1, except that I have more than one application running on the Kubernetes cluster. * Each application is run in its own namespace to avoid name collisions * For each application a different uid and MCS label is used - -3. Kubernetes is used as the base for a PAAS with - multiple projects, each project represented by a namespace. + +3. Kubernetes is used as the base for a PAAS with + multiple projects, each project represented by a namespace. * Each namespace is associated with a range of uids/gids on the node that - are mapped to uids/gids on containers using linux user namespaces. + are mapped to uids/gids on containers using linux user namespaces. * Certain pods in each namespace have special privileges to perform system actions such as talking back to the server for deployment, run docker builds, etc. * External NFS storage is assigned to each namespace and permissions set - using the range of uids/gids assigned to that namespace. 
+ using the range of uids/gids assigned to that namespace. ## Proposed Design @@ -109,12 +109,12 @@ to mutate Docker API calls in order to apply the security context. It is recommended that this design be implemented in two phases: -1. Implement the security context provider extension point in the Kubelet +1. Implement the security context provider extension point in the Kubelet so that a default security context can be applied on container run and creation. 2. Implement a security context structure that is part of a service account. The default context provider can then be used to apply a security context based on the service account associated with the pod. - + ### Security Context Provider The Kubelet will have an interface that points to a `SecurityContextProvider`. The `SecurityContextProvider` is invoked before creating and running a given container: @@ -137,7 +137,7 @@ type SecurityContextProvider interface { } ``` -If the value of the SecurityContextProvider field on the Kubelet is nil, the kubelet will create and run the container as it does today. +If the value of the SecurityContextProvider field on the Kubelet is nil, the kubelet will create and run the container as it does today. ### Security Context diff --git a/docs/design/simple-rolling-update.md b/docs/design/simple-rolling-update.md index d99e7b259fd..720f4cbf4e5 100644 --- a/docs/design/simple-rolling-update.md +++ b/docs/design/simple-rolling-update.md @@ -33,9 +33,9 @@ Documentation for other releases can be found at ## Simple rolling update -This is a lightweight design document for simple [rolling update](../user-guide/kubectl/kubectl_rolling-update.md) in `kubectl`. +This is a lightweight design document for simple [rolling update](../user-guide/kubectl/kubectl_rolling-update.md) in `kubectl`. -Complete execution flow can be found [here](#execution-details). See the [example of rolling update](../user-guide/update-demo/) for more information. 
+Complete execution flow can be found [here](#execution-details). See the [example of rolling update](../user-guide/update-demo/) for more information. ### Lightweight rollout diff --git a/docs/devel/api-conventions.md b/docs/devel/api-conventions.md index 8889b72133d..5a1bfe81acb 100644 --- a/docs/devel/api-conventions.md +++ b/docs/devel/api-conventions.md @@ -173,11 +173,11 @@ Objects that contain both spec and status should not contain additional top-leve ##### Typical status properties * **phase**: The phase is a simple, high-level summary of the phase of the lifecycle of an object. The phase should progress monotonically. Typical phase values are `Pending` (not yet fully physically realized), `Running` or `Active` (fully realized and active, but not necessarily operating correctly), and `Terminated` (no longer active), but may vary slightly for different types of objects. New phase values should not be added to existing objects in the future. Like other status fields, it must be possible to ascertain the lifecycle phase by observation. Additional details regarding the current phase may be contained in other fields. -* **conditions**: Conditions represent orthogonal observations of an object's current state. Objects may report multiple conditions, and new types of conditions may be added in the future. Condition status values may be `True`, `False`, or `Unknown`. Unlike the phase, conditions are not expected to be monotonic -- their values may change back and forth. A typical condition type is `Ready`, which indicates the object was believed to be fully operational at the time it was last probed. Conditions may carry additional information, such as the last probe time or last transition time. +* **conditions**: Conditions represent orthogonal observations of an object's current state. Objects may report multiple conditions, and new types of conditions may be added in the future. Condition status values may be `True`, `False`, or `Unknown`. 
Unlike the phase, conditions are not expected to be monotonic -- their values may change back and forth. A typical condition type is `Ready`, which indicates the object was believed to be fully operational at the time it was last probed. Conditions may carry additional information, such as the last probe time or last transition time. TODO(@vishh): Reason and Message. -Phases and conditions are observations and not, themselves, state machines, nor do we define comprehensive state machines for objects with behaviors associated with state transitions. The system is level-based and should assume an Open World. Additionally, new observations and details about these observations may be added over time. +Phases and conditions are observations and not, themselves, state machines, nor do we define comprehensive state machines for objects with behaviors associated with state transitions. The system is level-based and should assume an Open World. Additionally, new observations and details about these observations may be added over time. In order to preserve extensibility, in the future, we intend to explicitly convey properties that users and components care about rather than requiring those properties to be inferred from observations. @@ -376,7 +376,7 @@ Late-initializers should only make the following types of modifications: - Adding keys to maps - Adding values to arrays which have mergeable semantics (`patchStrategy:"merge"` attribute in the type definition). - + These conventions: 1. allow a user (with sufficient privilege) to override any system-default behaviors by setting the fields that would otherwise have been defaulted. diff --git a/docs/devel/api_changes.md b/docs/devel/api_changes.md index d8e20014e3f..687af00af50 100644 --- a/docs/devel/api_changes.md +++ b/docs/devel/api_changes.md @@ -309,7 +309,7 @@ a panic from the `serialization_test`. If so, look at the diff it produces (or the backtrace in case of a panic) and figure out what you forgot. 
Encode that into the fuzzer's custom fuzz functions. Hint: if you added defaults for a field, that field will need to have a custom fuzz function that ensures that the field is -fuzzed to a non-empty value. +fuzzed to a non-empty value. The fuzzer can be found in `pkg/api/testing/fuzzer.go`. diff --git a/docs/devel/collab.md b/docs/devel/collab.md index 96db64c85f3..624b3bcbb16 100644 --- a/docs/devel/collab.md +++ b/docs/devel/collab.md @@ -61,7 +61,7 @@ Maintainers will do merges of appropriately reviewed-and-approved changes during There may be discussion an even approvals granted outside of the above hours, but merges will generally be deferred. -If a PR is considered complex or controversial, the merge of that PR should be delayed to give all interested parties in all timezones the opportunity to provide feedback. Concretely, this means that such PRs should be held for 24 +If a PR is considered complex or controversial, the merge of that PR should be delayed to give all interested parties in all timezones the opportunity to provide feedback. Concretely, this means that such PRs should be held for 24 hours before merging. Of course "complex" and "controversial" are left to the judgment of the people involved, but we trust that part of being a committer is the judgment required to evaluate such things honestly, and not be motivated by your desire (or your cube-mate's desire) to get their code merged. Also see "Holds" below, any reviewer can issue a "hold" to indicate that the PR is in fact complicated or complex and deserves further review. diff --git a/docs/devel/development.md b/docs/devel/development.md index 27cb034dfd9..87b4b5d0ae6 100644 --- a/docs/devel/development.md +++ b/docs/devel/development.md @@ -99,6 +99,17 @@ git push -f origin myfeature 1. Visit http://github.com/$YOUR_GITHUB_USERNAME/kubernetes 2. Click the "Compare and pull request" button next to your "myfeature" branch. 
+### When to retain commits and when to squash + +Upon merge, all git commits should represent meaningful milestones or units of +work. Use commits to add clarity to the development and review process. + +Before merging a PR, squash any "fix review feedback", "typo", and "rebased" +sorts of commits. It is not imperative that every commit in a PR compile and +pass tests independently, but it is worth striving for. For mass automated +fixups (e.g. automated doc formatting), use one or more commits for the +changes to tooling and a final commit to apply the fixup en masse. This makes +reviews much easier. ## godep and dependency management diff --git a/docs/devel/scheduler_algorithm.md b/docs/devel/scheduler_algorithm.md index c67bcdbffc3..ab8e69ef8e2 100644 --- a/docs/devel/scheduler_algorithm.md +++ b/docs/devel/scheduler_algorithm.md @@ -44,7 +44,7 @@ The purpose of filtering the nodes is to filter out the nodes that do not meet c - `PodFitsPorts`: Check if any HostPort required by the Pod is already occupied on the node. - `PodFitsHost`: Filter out all nodes except the one specified in the PodSpec's NodeName field. - `PodSelectorMatches`: Check if the labels of the node match the labels specified in the Pod's `nodeSelector` field ([Here](../user-guide/node-selection/) is an example of how to use `nodeSelector` field). -- `CheckNodeLabelPresence`: Check if all the specified labels exist on a node or not, regardless of the value. +- `CheckNodeLabelPresence`: Check if all the specified labels exist on a node or not, regardless of the value. The details of the above predicates can be found in [plugin/pkg/scheduler/algorithm/predicates/predicates.go](http://releases.k8s.io/HEAD/plugin/pkg/scheduler/algorithm/predicates/predicates.go). All predicates mentioned above can be used in combination to perform a sophisticated filtering policy. Kubernetes uses some, but not all, of these predicates by default. 
You can see which ones are used by default in [plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go](http://releases.k8s.io/HEAD/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go). @@ -53,7 +53,7 @@ The details of the above predicates can be found in [plugin/pkg/scheduler/algori The filtered nodes are considered suitable to host the Pod, and it is often that there are more than one nodes remaining. Kubernetes prioritizes the remaining nodes to find the "best" one for the Pod. The prioritization is performed by a set of priority functions. For each remaining node, a priority function gives a score which scales from 0-10 with 10 representing for "most preferred" and 0 for "least preferred". Each priority function is weighted by a positive number and the final score of each node is calculated by adding up all the weighted scores. For example, suppose there are two priority functions, `priorityFunc1` and `priorityFunc2` with weighting factors `weight1` and `weight2` respectively, the final score of some NodeA is: finalScoreNodeA = (weight1 * priorityFunc1) + (weight2 * priorityFunc2) - + After the scores of all nodes are calculated, the node with highest score is chosen as the host of the Pod. If there are more than one nodes with equal highest scores, a random one among them is chosen. Currently, Kubernetes scheduler provides some practical priority functions, including: diff --git a/docs/devel/writing-a-getting-started-guide.md b/docs/devel/writing-a-getting-started-guide.md index 40f513bed6c..04d0d67f6a9 100644 --- a/docs/devel/writing-a-getting-started-guide.md +++ b/docs/devel/writing-a-getting-started-guide.md @@ -70,7 +70,7 @@ These guidelines say *what* to do. See the Rationale section for *why*. - Setup a cluster and run the [conformance test](development.md#conformance-testing) against it, and report the results in your PR. - Versioned distros should typically not modify or add code in `cluster/`. That is just scripts for developer - distros. 
+ distros. - When a new major or minor release of Kubernetes comes out, we may also release a new conformance test, and require a new conformance test run to earn a conformance checkmark. @@ -82,20 +82,20 @@ Just file an issue or chat us on IRC and one of the committers will link to it f These guidelines say *what* to do. See the Rationale section for *why*. - the main reason to add a new development distro is to support a new IaaS provider (VM and - network management). This means implementing a new `pkg/cloudprovider/$IAAS_NAME`. + network management). This means implementing a new `pkg/cloudprovider/$IAAS_NAME`. - Development distros should use Saltstack for Configuration Management. - development distros need to support automated cluster creation, deletion, upgrading, etc. This mean writing scripts in `cluster/$IAAS_NAME`. - all commits to the tip of this repo need to not break any of the development distros - the author of the change is responsible for making changes necessary on all the cloud-providers if the change affects any of them, and reverting the change if it breaks any of the CIs. - - a development distro needs to have an organization which owns it. This organization needs to: + - a development distro needs to have an organization which owns it. This organization needs to: - Setting up and maintaining Continuous Integration that runs e2e frequently (multiple times per day) against the Distro at head, and which notifies all devs of breakage. - being reasonably available for questions and assisting with refactoring and feature additions that affect code for their IaaS. -## Rationale +## Rationale - We want people to create Kubernetes clusters with whatever IaaS, Node OS, configuration management tools, and so on, which they are familiar with. The @@ -114,19 +114,19 @@ These guidelines say *what* to do. See the Rationale section for *why*. learning curve to understand our automated testing scripts. 
And it is considerable effort to fully automate setup and teardown of a cluster, which is needed for CI. And, not everyone has the time and money to run CI. We do not want to - discourage people from writing and sharing guides because of this. + discourage people from writing and sharing guides because of this. - Versioned distro authors are free to run their own CI and let us know if there is breakage, but we will not include them as commit hooks -- there cannot be so many commit checks that it is impossible to pass them all. - We prefer a single Configuration Management tool for development distros. If there were more than one, the core developers would have to learn multiple tools and update config in multiple places. **Saltstack** happens to be the one we picked when we started the project. We - welcome versioned distros that use any tool; there are already examples of + welcome versioned distros that use any tool; there are already examples of CoreOS Fleet, Ansible, and others. - You can still run code from head or your own branch if you use another Configuration Management tool -- you just have to do some manual steps during testing and deployment. - + diff --git a/docs/getting-started-guides/README.md b/docs/getting-started-guides/README.md index 73347626dbc..d9c279f6e09 100644 --- a/docs/getting-started-guides/README.md +++ b/docs/getting-started-guides/README.md @@ -39,7 +39,7 @@ crafting your own customized cluster. We'll guide you in picking a solution tha ## Picking the Right Solution -If you just want to "kick the tires" on Kubernetes, we recommend the [local Docker-based](docker.md) solution. +If you just want to "kick the tires" on Kubernetes, we recommend the [local Docker-based](docker.md) solution. The local Docker-based solution is one of several [Local cluster](#local-machine-solutions) solutions that are quick to set up, but are limited to running on one machine. @@ -50,9 +50,9 @@ solution is the easiest to create and maintain. 
[Turn-key cloud solutions](#turn-key-cloud-solutions) require only a few commands to create and cover a wider range of cloud providers. -[Custom solutions](#custom-solutions) require more effort to setup but cover and even +[Custom solutions](#custom-solutions) require more effort to setup but cover and even they vary from step-by-step instructions to general advice for setting up -a Kubernetes cluster from scratch. +a Kubernetes cluster from scratch. ### Local-machine Solutions @@ -117,8 +117,8 @@ These solutions are combinations of cloud provider and OS not covered by the abo - [Offline](coreos/bare_metal_offline.md) (no internet required. Uses CoreOS and Flannel) - [fedora/fedora_ansible_config.md](fedora/fedora_ansible_config.md) -- [Fedora single node](fedora/fedora_manual_config.md) -- [Fedora multi node](fedora/flannel_multi_node_cluster.md) +- [Fedora single node](fedora/fedora_manual_config.md) +- [Fedora multi node](fedora/flannel_multi_node_cluster.md) - [Centos](centos/centos_manual_config.md) - [Ubuntu](ubuntu.md) - [Docker Multi Node](docker-multinode.md) diff --git a/docs/getting-started-guides/aws-coreos.md b/docs/getting-started-guides/aws-coreos.md index ce1ef3fa135..dfb58870f6a 100644 --- a/docs/getting-started-guides/aws-coreos.md +++ b/docs/getting-started-guides/aws-coreos.md @@ -215,7 +215,7 @@ kubectl get pods Record the **Host** of the pod, which should be the private IP address. -Gather the public IP address for the worker node. +Gather the public IP address for the worker node. 
```bash aws ec2 describe-instances --filters 'Name=private-ip-address,Values=' diff --git a/docs/getting-started-guides/centos/centos_manual_config.md b/docs/getting-started-guides/centos/centos_manual_config.md index f5207203e0e..af6d02c1808 100644 --- a/docs/getting-started-guides/centos/centos_manual_config.md +++ b/docs/getting-started-guides/centos/centos_manual_config.md @@ -60,7 +60,7 @@ centos-minion = 192.168.121.65 ``` **Prepare the hosts:** - + * Create virt7-testing repo on all hosts - centos-{master,minion} with following information. ``` @@ -175,7 +175,7 @@ KUBELET_HOSTNAME="--hostname_override=centos-minion" # Add your own! KUBELET_ARGS="" -``` +``` * Start the appropriate services on node (centos-minion). diff --git a/docs/getting-started-guides/cloudstack.md b/docs/getting-started-guides/cloudstack.md index 5e687401301..a08db9866b0 100644 --- a/docs/getting-started-guides/cloudstack.md +++ b/docs/getting-started-guides/cloudstack.md @@ -68,8 +68,8 @@ Or create a `~/.cloudstack.ini` file: [cloudstack] endpoint = - key = - secret = + key = + secret = method = post We need to use the http POST method to pass the _large_ userdata to the coreOS instances. @@ -104,7 +104,7 @@ Check the tasks and templates in `roles/k8s` if you want to modify anything. 
Once the playbook as finished, it will print out the IP of the Kubernetes master: - TASK: [k8s | debug msg='k8s master IP is {{ k8s_master.default_ip }}'] ******** + TASK: [k8s | debug msg='k8s master IP is {{ k8s_master.default_ip }}'] ******** SSH to it using the key that was created and using the _core_ user and you can list the machines in your cluster: diff --git a/docs/getting-started-guides/coreos/bare_metal_offline.md b/docs/getting-started-guides/coreos/bare_metal_offline.md index 444d3cc74b1..fa537b393b4 100644 --- a/docs/getting-started-guides/coreos/bare_metal_offline.md +++ b/docs/getting-started-guides/coreos/bare_metal_offline.md @@ -59,13 +59,13 @@ Deploy a CoreOS running Kubernetes environment. This particular guild is made to ## High Level Design -1. Manage the tftp directory +1. Manage the tftp directory * /tftpboot/(coreos)(centos)(RHEL) * /tftpboot/pxelinux.0/(MAC) -> linked to Linux image config file 2. Update per install the link for pxelinux 3. Update the DHCP config to reflect the host needing deployment -4. Setup nodes to deploy CoreOS creating a etcd cluster. -5. Have no access to the public [etcd discovery tool](https://discovery.etcd.io/). +4. Setup nodes to deploy CoreOS creating a etcd cluster. +5. Have no access to the public [etcd discovery tool](https://discovery.etcd.io/). 6. Installing the CoreOS slaves to become Kubernetes nodes. ## This Guides variables @@ -115,7 +115,7 @@ To setup CentOS PXELINUX environment there is a complete [guide here](http://doc timeout 15 ONTIMEOUT local display boot.msg - + MENU TITLE Main Menu LABEL local @@ -126,7 +126,7 @@ Now you should have a working PXELINUX setup to image CoreOS nodes. You can veri ## Adding CoreOS to PXE -This section describes how to setup the CoreOS images to live alongside a pre-existing PXELINUX environment. +This section describes how to setup the CoreOS images to live alongside a pre-existing PXELINUX environment. 1. 
Find or create the TFTP root directory that everything will be based off of. * For this document we will assume `/tftpboot/` is our root directory. @@ -170,9 +170,9 @@ This section describes how to setup the CoreOS images to live alongside a pre-ex APPEND initrd=images/coreos/coreos_production_pxe_image.cpio.gz cloud-config-url=http:///pxe-cloud-config-slave.yml MENU END -This configuration file will now boot from local drive but have the option to PXE image CoreOS. +This configuration file will now boot from local drive but have the option to PXE image CoreOS. -## DHCP configuration +## DHCP configuration This section covers configuring the DHCP server to hand out our new images. In this case we are assuming that there are other servers that will boot alongside other images. @@ -186,7 +186,7 @@ This section covers configuring the DHCP server to hand out our new images. In t next-server 10.20.30.242; option broadcast-address 10.20.30.255; filename ""; - + ... # http://www.syslinux.org/wiki/index.php/PXELINUX host core_os_master { @@ -194,7 +194,7 @@ This section covers configuring the DHCP server to hand out our new images. In t option routers 10.20.30.1; fixed-address 10.20.30.40; option domain-name-servers 10.20.30.242; - filename "/pxelinux.0"; + filename "/pxelinux.0"; } host core_os_slave { hardware ethernet d0:00:67:13:0d:01; @@ -217,7 +217,7 @@ We will be specifying the node configuration later in the guide. ## Kubernetes -To deploy our configuration we need to create an `etcd` master. To do so we want to pxe CoreOS with a specific cloud-config.yml. There are two options we have here. +To deploy our configuration we need to create an `etcd` master. To do so we want to pxe CoreOS with a specific cloud-config.yml. There are two options we have here. 1. Is to template the cloud config file and programmatically create new static configs for different cluster setups. 2. Have a service discovery protocol running in our stack to do auto discovery. 
@@ -427,7 +427,7 @@ On the PXE server make and fill in the variables `vi /var/www/html/coreos/pxe-cl --logtostderr=true Restart=always RestartSec=10 - - name: kube-controller-manager.service + - name: kube-controller-manager.service command: start content: | [Unit] @@ -535,7 +535,7 @@ On the PXE server make and fill in the variables `vi /var/www/html/coreos/pxe-cl command: start content: | [Unit] - After=network-online.target + After=network-online.target Wants=network-online.target Description=flannel is an etcd backed overlay network for containers [Service] diff --git a/docs/getting-started-guides/coreos/coreos_multinode_cluster.md b/docs/getting-started-guides/coreos/coreos_multinode_cluster.md index 8b5a76d2190..4aa9fe7650b 100644 --- a/docs/getting-started-guides/coreos/coreos_multinode_cluster.md +++ b/docs/getting-started-guides/coreos/coreos_multinode_cluster.md @@ -44,7 +44,7 @@ Use the [master.yaml](cloud-configs/master.yaml) and [node.yaml](cloud-configs/n * Provision the master node * Capture the master node private IP address * Edit node.yaml -* Provision one or more worker nodes +* Provision one or more worker nodes ### AWS diff --git a/docs/getting-started-guides/docker-multinode.md b/docs/getting-started-guides/docker-multinode.md index 3cd368a0ead..60787ac7105 100644 --- a/docs/getting-started-guides/docker-multinode.md +++ b/docs/getting-started-guides/docker-multinode.md @@ -52,10 +52,7 @@ Please install Docker 1.6.2 or Docker 1.7.1. ## Prerequisites -1. You need a machine with docker installed. - -There is a [bug](https://github.com/docker/docker/issues/14106) in Docker 1.7.0 that prevents this guide from working correctly. -Please install Docker 1.6.2 or wait for Docker 1.7.1. +1. You need a machine with docker of right version installed. 
## Overview diff --git a/docs/getting-started-guides/docker-multinode/testing.md b/docs/getting-started-guides/docker-multinode/testing.md index 90bb90e4cee..1f4af152fec 100644 --- a/docs/getting-started-guides/docker-multinode/testing.md +++ b/docs/getting-started-guides/docker-multinode/testing.md @@ -79,7 +79,7 @@ curl Note that you will need run this curl command on your boot2docker VM if you are running on OS X. -### Scaling +### Scaling Now try to scale up the nginx you created before: diff --git a/docs/getting-started-guides/docker.md b/docs/getting-started-guides/docker.md index 1d744f3e7d7..17e0d0402bf 100644 --- a/docs/getting-started-guides/docker.md +++ b/docs/getting-started-guides/docker.md @@ -108,7 +108,7 @@ docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v1.0.1 ### Test it out -At this point you should have a running Kubernetes cluster. You can test this by downloading the kubectl +At this point you should have a running Kubernetes cluster. You can test this by downloading the kubectl binary ([OS X](https://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/darwin/amd64/kubectl)) ([linux](https://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/linux/amd64/kubectl)) diff --git a/docs/getting-started-guides/fedora/fedora_manual_config.md b/docs/getting-started-guides/fedora/fedora_manual_config.md index f400311027d..3b1e521566b 100644 --- a/docs/getting-started-guides/fedora/fedora_manual_config.md +++ b/docs/getting-started-guides/fedora/fedora_manual_config.md @@ -60,9 +60,9 @@ fed-node = 192.168.121.65 ``` **Prepare the hosts:** - + * Install Kubernetes on all hosts - fed-{master,node}. This will also pull in docker. Also install etcd on fed-master. This guide has been tested with kubernetes-0.18 and beyond. 
-* The [--enablerepo=update-testing](https://fedoraproject.org/wiki/QA:Updates_Testing) directive in the yum command below will ensure that the most recent Kubernetes version that is scheduled for pre-release will be installed. This should be a more recent version than the Fedora "stable" release for Kubernetes that you would get without adding the directive. +* The [--enablerepo=update-testing](https://fedoraproject.org/wiki/QA:Updates_Testing) directive in the yum command below will ensure that the most recent Kubernetes version that is scheduled for pre-release will be installed. This should be a more recent version than the Fedora "stable" release for Kubernetes that you would get without adding the directive. * If you want the very latest Kubernetes release [you can download and yum install the RPM directly from Fedora Koji](http://koji.fedoraproject.org/koji/packageinfo?packageID=19202) instead of using the yum install command below. ```sh diff --git a/docs/getting-started-guides/juju.md b/docs/getting-started-guides/juju.md index 2d580ab7c6d..1fe1bee538b 100644 --- a/docs/getting-started-guides/juju.md +++ b/docs/getting-started-guides/juju.md @@ -262,10 +262,10 @@ works with [Amazon Web Service](https://jujucharms.com/docs/stable/config-aws), [Vmware vSphere](https://jujucharms.com/docs/stable/config-vmware). If you do not see your favorite cloud provider listed many clouds can be -configured for [manual provisioning](https://jujucharms.com/docs/stable/config-manual). +configured for [manual provisioning](https://jujucharms.com/docs/stable/config-manual). The Kubernetes bundle has been tested on GCE and AWS and found to work with -version 1.0.0. +version 1.0.0. 
diff --git a/docs/getting-started-guides/logging.md b/docs/getting-started-guides/logging.md index fabcbf93df8..646b741bdca 100644 --- a/docs/getting-started-guides/logging.md +++ b/docs/getting-started-guides/logging.md @@ -74,7 +74,7 @@ spec: ``` [Download example](../../examples/blog-logging/counter-pod.yaml) - + This pod specification has one container which runs a bash script when the container is born. This script simply writes out the value of a counter and the date once per second and runs indefinitely. Let’s create the pod in the default namespace. @@ -92,7 +92,7 @@ NAME READY STATUS RESTARTS AG counter 1/1 Running 0 5m ``` -This step may take a few minutes to download the ubuntu:14.04 image during which the pod status will be shown as `Pending`. +This step may take a few minutes to download the ubuntu:14.04 image during which the pod status will be shown as `Pending`. One of the nodes is now running the counter pod: @@ -169,7 +169,7 @@ metadata: spec: containers: - name: fluentd-cloud-logging - image: gcr.io/google_containers/fluentd-gcp:1.9 + image: gcr.io/google_containers/fluentd-gcp:1.10 resources: limits: cpu: 100m @@ -192,7 +192,7 @@ spec: ``` [Download example](../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml) - + This pod specification maps the directory on the host containing the Docker log files, `/var/lib/docker/containers`, to a directory inside the container which has the same path. The pod runs one image, `gcr.io/google_containers/fluentd-gcp:1.6`, which is configured to collect the Docker log files from the logs directory and ingest them into Google Cloud Logging. One instance of this pod runs on each node of the cluster. Kubernetes will notice if this pod fails and automatically restart it. 
diff --git a/docs/getting-started-guides/scratch.md b/docs/getting-started-guides/scratch.md index b085f83c60f..5e58b9a485b 100644 --- a/docs/getting-started-guides/scratch.md +++ b/docs/getting-started-guides/scratch.md @@ -92,7 +92,7 @@ steps that existing cluster setup scripts are making. ## Designing and Preparing -### Learning +### Learning 1. You should be familiar with using Kubernetes already. We suggest you set up a temporary cluster by following one of the other Getting Started Guides. @@ -108,7 +108,7 @@ an interface for managing TCP Load Balancers, Nodes (Instances) and Networking R The interface is defined in `pkg/cloudprovider/cloud.go`. It is possible to create a custom cluster without implementing a cloud provider (for example if using bare-metal), and not all parts of the interface need to be implemented, depending -on how flags are set on various components. +on how flags are set on various components. ### Nodes @@ -220,13 +220,13 @@ all the necessary binaries. #### Selecting Images You will run docker, kubelet, and kube-proxy outside of a container, the same way you would run any system daemon, so -you just need the bare binaries. For etcd, kube-apiserver, kube-controller-manager, and kube-scheduler, +you just need the bare binaries. For etcd, kube-apiserver, kube-controller-manager, and kube-scheduler, we recommend that you run these as containers, so you need an image to be built. You have several choices for Kubernetes images: - Use images hosted on Google Container Registry (GCR): - e.g `gcr.io/google_containers/kube-apiserver:$TAG`, where `TAG` is the latest - release tag, which can be found on the [latest releases page](https://github.com/GoogleCloudPlatform/kubernetes/releases/latest). + release tag, which can be found on the [latest releases page](https://github.com/GoogleCloudPlatform/kubernetes/releases/latest). - Ensure $TAG is the same tag as the release tag you are using for kubelet and kube-proxy. - Build your own images. 
- Useful if you are using a private registry. @@ -294,7 +294,7 @@ You will end up with the following files (we will use these variables later on) #### Preparing Credentials The admin user (and any users) need: - - a token or a password to identify them. + - a token or a password to identify them. - tokens are just long alphanumeric strings, e.g. 32 chars. See - `TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)` @@ -318,7 +318,7 @@ The kubeconfig file for the administrator can be created as follows: - `kubectl config set-context $CONTEXT_NAME --cluster=$CLUSTER_NAME --user=$USER` - `kubectl config use-context $CONTEXT_NAME` -Next, make a kubeconfig file for the kubelets and kube-proxy. There are a couple of options for how +Next, make a kubeconfig file for the kubelets and kube-proxy. There are a couple of options for how many distinct files to make: 1. Use the same credential as the admin - This is simplest to setup. @@ -355,7 +355,7 @@ guide assume that there are kubeconfigs in `/var/lib/kube-proxy/kubeconfig` and ## Configuring and Installing Base Software on Nodes -This section discusses how to configure machines to be Kubernetes nodes. +This section discusses how to configure machines to be Kubernetes nodes. You should run three daemons on every node: - docker or rkt @@ -395,7 +395,7 @@ so that kube-proxy can manage iptables instead of docker. - if you are using an overlay network, consult those instructions. - `--mtu=` - may be required when using Flannel, because of the extra packet size due to udp encapsulation - - `--insecure-registry $CLUSTER_SUBNET` + - `--insecure-registry $CLUSTER_SUBNET` - to connect to a private registry, if you set one up, without using SSL. You may want to increase the number of open files for docker: @@ -412,7 +412,7 @@ installation, by following examples given in the Docker documentation. 
The minimum version required is [v0.5.6](https://github.com/coreos/rkt/releases/tag/v0.5.6). [systemd](http://www.freedesktop.org/wiki/Software/systemd/) is required on your node to run rkt. The -minimum version required to match rkt v0.5.6 is +minimum version required to match rkt v0.5.6 is [systemd 215](http://lists.freedesktop.org/archives/systemd-devel/2014-July/020903.html). [rkt metadata service](https://github.com/coreos/rkt/blob/master/Documentation/networking.md) is also required @@ -444,7 +444,7 @@ Arguments to consider: All nodes should run kube-proxy. (Running kube-proxy on a "master" node is not strictly required, but being consistent is easier.) Obtain a binary as described for -kubelet. +kubelet. Arguments to consider: - If following the HTTPS security approach: @@ -456,7 +456,7 @@ Arguments to consider: ### Networking Each node needs to be allocated its own CIDR range for pod networking. -Call this `NODE_X_POD_CIDR`. +Call this `NODE_X_POD_CIDR`. A bridge called `cbr0` needs to be created on each node. The bridge is explained further in the [networking documentation](../admin/networking.md). The bridge itself @@ -498,7 +498,7 @@ NOTE: This is environment specific. Some environments will not need any masquerading at all. Others, such as GCE, will not allow pod IPs to send traffic to the internet, but have no problem with them inside your GCE Project. -### Other +### Other - Enable auto-upgrades for your OS package manager, if desired. - Configure log rotation for all node components (e.g. using [logrotate](http://linux.die.net/man/8/logrotate)). @@ -529,7 +529,7 @@ You will need to run one or more instances of etcd. - Recommended approach: run one etcd instance, with its log written to a directory backed by durable storage (RAID, GCE PD) - Alternative: run 3 or 5 etcd instances. - - Log can be written to non-durable storage because storage is replicated. + - Log can be written to non-durable storage because storage is replicated. 
- run a single apiserver which connects to one of the etc nodes. See [cluster-troubleshooting](../admin/cluster-troubleshooting.md) for more discussion on factors affecting cluster availability. diff --git a/docs/getting-started-guides/ubuntu-calico.md b/docs/getting-started-guides/ubuntu-calico.md index 1ae8830d605..35a2736e314 100644 --- a/docs/getting-started-guides/ubuntu-calico.md +++ b/docs/getting-started-guides/ubuntu-calico.md @@ -49,7 +49,7 @@ On the Master: On each Node: - `kube-proxy` - `kube-kubelet` -- `calico-node` +- `calico-node` ## Prerequisites @@ -191,7 +191,7 @@ node-X | 192.168.X.1/24 #### Start docker on cbr0 -The Docker daemon must be started and told to use the already configured cbr0 instead of using the usual docker0, as well as disabling ip-masquerading and modification of the ip-tables. +The Docker daemon must be started and told to use the already configured cbr0 instead of using the usual docker0, as well as disabling ip-masquerading and modification of the ip-tables. 1.) 
Edit the ubuntu-15.04 docker.service for systemd at: `/lib/systemd/system/docker.service` diff --git a/docs/getting-started-guides/ubuntu.md b/docs/getting-started-guides/ubuntu.md index b6b3b0d16d8..ba5307d94dc 100644 --- a/docs/getting-started-guides/ubuntu.md +++ b/docs/getting-started-guides/ubuntu.md @@ -49,7 +49,7 @@ This document describes how to deploy Kubernetes on ubuntu nodes, including 1 Ku ## Prerequisites -*1 The nodes have installed docker version 1.2+ and bridge-utils to manipulate linux bridge* +*1 The nodes have installed docker version 1.2+ and bridge-utils to manipulate linux bridge* *2 All machines can communicate with each other, no need to connect Internet (should use private docker registry in this case)* @@ -57,7 +57,7 @@ This document describes how to deploy Kubernetes on ubuntu nodes, including 1 Ku *4 Dependencies of this guide: etcd-2.0.12, flannel-0.4.0, k8s-1.0.1, but it may work with higher versions* -*5 All the remote servers can be ssh logged in without a password by using key authentication* +*5 All the remote servers can be ssh logged in without a password by using key authentication* ### Starting a Cluster @@ -80,7 +80,7 @@ Please make sure that there are `kube-apiserver`, `kube-controller-manager`, `ku An example cluster is listed as below: -| IP Address|Role | +| IP Address|Role | |---------|------| |10.10.103.223| node | |10.10.103.162| node | @@ -112,13 +112,13 @@ The `SERVICE_CLUSTER_IP_RANGE` variable defines the Kubernetes service IP range. 172.16.0.0 - 172.31.255.255 (172.16/12 prefix) - 192.168.0.0 - 192.168.255.255 (192.168/16 prefix) + 192.168.0.0 - 192.168.255.255 (192.168/16 prefix) The `FLANNEL_NET` variable defines the IP range used for flannel overlay network, should not conflict with above `SERVICE_CLUSTER_IP_RANGE`. After all the above variables being set correctly, we can use following command in cluster/ directory to bring up the whole cluster. 
-`$ KUBERNETES_PROVIDER=ubuntu ./kube-up.sh` +`$ KUBERNETES_PROVIDER=ubuntu ./kube-up.sh` The scripts automatically scp binaries and config files to all the machines and start the k8s service on them. The only thing you need to do is to type the sudo password when promoted. The current machine name is shown below, so you will not type in the wrong password. @@ -135,9 +135,9 @@ If all things goes right, you will see the below message from console **All done !** -You can also use `kubectl` command to see if the newly created k8s is working correctly. The `kubectl` binary is under the `cluster/ubuntu/binaries` directory. You can move it into your PATH. Then you can use the below command smoothly. +You can also use `kubectl` command to see if the newly created k8s is working correctly. The `kubectl` binary is under the `cluster/ubuntu/binaries` directory. You can move it into your PATH. Then you can use the below command smoothly. -For example, use `$ kubectl get nodes` to see if all your nodes are in ready status. It may take some time for the nodes ready to use like below. +For example, use `$ kubectl get nodes` to see if all your nodes are in ready status. It may take some time for the nodes ready to use like below. ```console NAME LABELS STATUS @@ -192,19 +192,19 @@ We are working on these features which we'd like to let everybody know: #### Trouble Shooting -Generally, what this approach did is quite simple: +Generally, what this approach did is quite simple: 1. Download and copy binaries and configuration files to proper directories on every node -2. Configure `etcd` using IPs based on input from user +2. Configure `etcd` using IPs based on input from user 3. Create and start flannel network -So, if you see a problem, **check etcd configuration first** +So, if you see a problem, **check etcd configuration first** Please try: -1. Check `/var/log/upstart/etcd.log` for suspicious etcd log +1. Check `/var/log/upstart/etcd.log` for suspicious etcd log 2. 
Check `/etc/default/etcd`, as we do not have much input validation, a right config should be like: @@ -212,11 +212,11 @@ Please try: ETCD_OPTS="-name infra1 -initial-advertise-peer-urls -listen-peer-urls -initial-cluster-token etcd-cluster-1 -initial-cluster infra1=,infra2=,infra3= -initial-cluster-state new" ``` -3. You can use below command +3. You can use below command `$ KUBERNETES_PROVIDER=ubuntu ./kube-down.sh` to bring down the cluster and run `$ KUBERNETES_PROVIDER=ubuntu ./kube-up.sh` again to start again. - -4. You can also customize your own settings in `/etc/default/{component_name}` after configured success. + +4. You can also customize your own settings in `/etc/default/{component_name}` after configured success. diff --git a/docs/man/man1/.files_generated b/docs/man/man1/.files_generated index 4156725b069..76219f56480 100644 --- a/docs/man/man1/.files_generated +++ b/docs/man/man1/.files_generated @@ -1,4 +1,5 @@ kubectl-api-versions.1 +kubectl-attach.1 kubectl-cluster-info.1 kubectl-config-set-cluster.1 kubectl-config-set-context.1 diff --git a/docs/man/man1/kubectl-attach.1 b/docs/man/man1/kubectl-attach.1 new file mode 100644 index 00000000000..861cfb8b2a7 --- /dev/null +++ b/docs/man/man1/kubectl-attach.1 @@ -0,0 +1,161 @@ +.TH "KUBERNETES" "1" " kubernetes User Manuals" "Eric Paris" "Jan 2015" "" + + +.SH NAME +.PP +kubectl attach \- Attach to a running container. + + +.SH SYNOPSIS +.PP +\fBkubectl attach\fP [OPTIONS] + + +.SH DESCRIPTION +.PP +Attach to a process that is already running inside an existing container. 
+ + +.SH OPTIONS +.PP +\fB\-c\fP, \fB\-\-container\fP="" + Container name + +.PP +\fB\-h\fP, \fB\-\-help\fP=false + help for attach + +.PP +\fB\-i\fP, \fB\-\-stdin\fP=false + Pass stdin to the container + +.PP +\fB\-t\fP, \fB\-\-tty\fP=false + Stdin is a TTY + + +.SH OPTIONS INHERITED FROM PARENT COMMANDS +.PP +\fB\-\-alsologtostderr\fP=false + log to standard error as well as files + +.PP +\fB\-\-api\-version\fP="" + The API version to use when talking to the server + +.PP +\fB\-\-certificate\-authority\fP="" + Path to a cert. file for the certificate authority. + +.PP +\fB\-\-client\-certificate\fP="" + Path to a client certificate file for TLS. + +.PP +\fB\-\-client\-key\fP="" + Path to a client key file for TLS. + +.PP +\fB\-\-cluster\fP="" + The name of the kubeconfig cluster to use + +.PP +\fB\-\-context\fP="" + The name of the kubeconfig context to use + +.PP +\fB\-\-insecure\-skip\-tls\-verify\fP=false + If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure. + +.PP +\fB\-\-kubeconfig\fP="" + Path to the kubeconfig file to use for CLI requests. + +.PP +\fB\-\-log\-backtrace\-at\fP=:0 + when logging hits line file:N, emit a stack trace + +.PP +\fB\-\-log\-dir\fP="" + If non\-empty, write log files in this directory + +.PP +\fB\-\-log\-flush\-frequency\fP=5s + Maximum number of seconds between log flushes + +.PP +\fB\-\-logtostderr\fP=true + log to standard error instead of files + +.PP +\fB\-\-match\-server\-version\fP=false + Require server version to match client version + +.PP +\fB\-\-namespace\fP="" + If present, the namespace scope for this CLI request. + +.PP +\fB\-\-password\fP="" + Password for basic authentication to the API server. + +.PP +\fB\-s\fP, \fB\-\-server\fP="" + The address and port of the Kubernetes API server + +.PP +\fB\-\-stderrthreshold\fP=2 + logs at or above this threshold go to stderr + +.PP +\fB\-\-token\fP="" + Bearer token for authentication to the API server. 
+ +.PP +\fB\-\-user\fP="" + The name of the kubeconfig user to use + +.PP +\fB\-\-username\fP="" + Username for basic authentication to the API server. + +.PP +\fB\-\-v\fP=0 + log level for V logs + +.PP +\fB\-\-validate\fP=false + If true, use a schema to validate the input before sending it + +.PP +\fB\-\-vmodule\fP= + comma\-separated list of pattern=N settings for file\-filtered logging + + +.SH EXAMPLE +.PP +.RS + +.nf +// get output from running pod 123456\-7890, using the first container by default +$ kubectl attach 123456\-7890 + +// get output from ruby\-container from pod 123456\-7890 +$ kubectl attach 123456\-7890 \-c ruby\-container date + +// switch to raw terminal mode, sends stdin to 'bash' in ruby\-container from pod 123456\-780 +// and sends stdout/stderr from 'bash' back to the client +$ kubectl attach 123456\-7890 \-c ruby\-container \-i \-t + +.fi +.RE + + +.SH SEE ALSO +.PP +\fBkubectl(1)\fP, + + +.SH HISTORY +.PP +January 2015, Originally compiled by Eric Paris (eparis at redhat dot com) based on the kubernetes source material, but hopefully they have been automatically generated since! diff --git a/docs/man/man1/kubectl-describe.1 b/docs/man/man1/kubectl-describe.1 index 708a03e369a..5a4766103de 100644 --- a/docs/man/man1/kubectl-describe.1 +++ b/docs/man/man1/kubectl-describe.1 @@ -28,9 +28,9 @@ exists, it will output details for every resource that has a name prefixed with .PP Possible resources include (case insensitive): pods (po), services (svc), -replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs), -limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), -resourcequotas (quota) or secrets. +replicationcontrollers (rc), nodes (no), events (ev), limitranges (limits), +persistentvolumes (pv), persistentvolumeclaims (pvc), resourcequotas (quota), +namespaces (ns) or secrets. 
.SH OPTIONS diff --git a/docs/man/man1/kubectl-get.1 b/docs/man/man1/kubectl-get.1 index 3b198285c7d..7bbe6ac2cf6 100644 --- a/docs/man/man1/kubectl-get.1 +++ b/docs/man/man1/kubectl-get.1 @@ -19,7 +19,7 @@ Display one or many resources. Possible resources include (case insensitive): pods (po), services (svc), replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs), limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), -resourcequotas (quota) or secrets. +resourcequotas (quota), namespaces (ns), endpoints (ep) or secrets. .PP By specifying the output as 'template' and providing a Go template as the value diff --git a/docs/man/man1/kubectl-scale.1 b/docs/man/man1/kubectl-scale.1 index 03f3d12be89..81d0ea236f6 100644 --- a/docs/man/man1/kubectl-scale.1 +++ b/docs/man/man1/kubectl-scale.1 @@ -43,6 +43,10 @@ scale is sent to the server. \fB\-\-resource\-version\fP="" Precondition for resource version. Requires that the current resource version match this value in order to scale. +.PP +\fB\-\-timeout\fP=0 + The length of time to wait before giving up on a scale operation, zero means don't wait. + .SH OPTIONS INHERITED FROM PARENT COMMANDS .PP @@ -153,6 +157,9 @@ $ kubectl scale \-\-replicas=3 replicationcontrollers foo // If the replication controller named foo's current size is 2, scale foo to 3. $ kubectl scale \-\-current\-replicas=2 \-\-replicas=3 replicationcontrollers foo +// Scale multiple replication controllers. 
+$ kubectl scale \-\-replicas=5 rc/foo rc/bar + .fi .RE diff --git a/docs/man/man1/kubectl.1 b/docs/man/man1/kubectl.1 index f6969202a60..e2dd746bd6d 100644 --- a/docs/man/man1/kubectl.1 +++ b/docs/man/man1/kubectl.1 @@ -124,7 +124,7 @@ Find more information at .SH SEE ALSO .PP -\fBkubectl\-get(1)\fP, \fBkubectl\-describe(1)\fP, \fBkubectl\-create(1)\fP, \fBkubectl\-replace(1)\fP, \fBkubectl\-patch(1)\fP, \fBkubectl\-delete(1)\fP, \fBkubectl\-namespace(1)\fP, \fBkubectl\-logs(1)\fP, \fBkubectl\-rolling\-update(1)\fP, \fBkubectl\-scale(1)\fP, \fBkubectl\-exec(1)\fP, \fBkubectl\-port\-forward(1)\fP, \fBkubectl\-proxy(1)\fP, \fBkubectl\-run(1)\fP, \fBkubectl\-stop(1)\fP, \fBkubectl\-expose(1)\fP, \fBkubectl\-label(1)\fP, \fBkubectl\-config(1)\fP, \fBkubectl\-cluster\-info(1)\fP, \fBkubectl\-api\-versions(1)\fP, \fBkubectl\-version(1)\fP, +\fBkubectl\-get(1)\fP, \fBkubectl\-describe(1)\fP, \fBkubectl\-create(1)\fP, \fBkubectl\-replace(1)\fP, \fBkubectl\-patch(1)\fP, \fBkubectl\-delete(1)\fP, \fBkubectl\-namespace(1)\fP, \fBkubectl\-logs(1)\fP, \fBkubectl\-rolling\-update(1)\fP, \fBkubectl\-scale(1)\fP, \fBkubectl\-attach(1)\fP, \fBkubectl\-exec(1)\fP, \fBkubectl\-port\-forward(1)\fP, \fBkubectl\-proxy(1)\fP, \fBkubectl\-run(1)\fP, \fBkubectl\-stop(1)\fP, \fBkubectl\-expose(1)\fP, \fBkubectl\-label(1)\fP, \fBkubectl\-config(1)\fP, \fBkubectl\-cluster\-info(1)\fP, \fBkubectl\-api\-versions(1)\fP, \fBkubectl\-version(1)\fP, .SH HISTORY diff --git a/docs/proposals/apiserver_watch.md b/docs/proposals/apiserver_watch.md index ce866b6d686..5610ccbc68c 100644 --- a/docs/proposals/apiserver_watch.md +++ b/docs/proposals/apiserver_watch.md @@ -163,7 +163,7 @@ resource type. However, this watch can potentially expire at any time and reconnecting can return "too old resource version". In that case relisting is necessary. 
In such case, to avoid LIST requests coming from all watchers at the same time, we can introduce an additional etcd event type: -[EtcdResync](../../pkg/tools/etcd_watcher.go#L36) +[EtcdResync](../../pkg/storage/etcd/etcd_watcher.go#L36) Whenever reslisting will be done to refresh the internal watch to etcd, EtcdResync event will be send to all the watchers. It will contain the diff --git a/docs/proposals/autoscaling.md b/docs/proposals/autoscaling.md index 86a9a819e45..ff50aa97f6b 100644 --- a/docs/proposals/autoscaling.md +++ b/docs/proposals/autoscaling.md @@ -34,7 +34,7 @@ Documentation for other releases can be found at ## Abstract Auto-scaling is a data-driven feature that allows users to increase or decrease capacity as needed by controlling the -number of pods deployed within the system automatically. +number of pods deployed within the system automatically. ## Motivation @@ -49,7 +49,7 @@ done automatically based on statistical analysis and thresholds. * Scale verb - [1629](https://github.com/GoogleCloudPlatform/kubernetes/issues/1629) * Config conflicts - [Config](https://github.com/GoogleCloudPlatform/kubernetes/blob/c7cb991987193d4ca33544137a5cb7d0292cf7df/docs/config.md#automated-re-configuration-processes) * Rolling updates - [1353](https://github.com/GoogleCloudPlatform/kubernetes/issues/1353) - * Multiple scalable types - [1624](https://github.com/GoogleCloudPlatform/kubernetes/issues/1624) + * Multiple scalable types - [1624](https://github.com/GoogleCloudPlatform/kubernetes/issues/1624) ## Constraints and Assumptions @@ -77,7 +77,7 @@ balanced or situated behind a proxy - the data from those proxies and load balan server traffic for applications. This is the primary, but not sole, source of data for making decisions. Within Kubernetes a [kube proxy](../user-guide/services.md#ips-and-vips) -running on each node directs service requests to the underlying implementation. +running on each node directs service requests to the underlying implementation. 
While the proxy provides internal inter-pod connections, there will be L3 and L7 proxies and load balancers that manage traffic to backends. OpenShift, for instance, adds a "route" resource for defining external to internal traffic flow. @@ -87,7 +87,7 @@ data source for the number of backends. ### Scaling based on predictive analysis Scaling may also occur based on predictions of system state like anticipated load, historical data, etc. Hand in hand -with scaling based on traffic, predictive analysis may be used to determine anticipated system load and scale the application automatically. +with scaling based on traffic, predictive analysis may be used to determine anticipated system load and scale the application automatically. ### Scaling based on arbitrary data @@ -113,7 +113,7 @@ use a client/cache implementation to receive watch data from the data aggregator scaling the application. Auto-scalers are created and defined like other resources via REST endpoints and belong to the namespace just as a `ReplicationController` or `Service`. -Since an auto-scaler is a durable object it is best represented as a resource. +Since an auto-scaler is a durable object it is best represented as a resource. ```go //The auto scaler interface @@ -241,7 +241,7 @@ be specified as "when requests per second fall below 25 for 30 seconds scale the ### Data Aggregator This section has intentionally been left empty. I will defer to folks who have more experience gathering and analyzing -time series statistics. +time series statistics. Data aggregation is opaque to the auto-scaler resource. The auto-scaler is configured to use `AutoScaleThresholds` that know how to work with the underlying data in order to know if an application must be scaled up or down. Data aggregation @@ -257,7 +257,7 @@ potentially piggyback on this registry. If multiple scalable targets satisfy the `TargetSelector` criteria the auto-scaler should be configurable as to which target(s) are scaled. 
To begin with, if multiple targets are found the auto-scaler will scale the largest target up -or down as appropriate. In the future this may be more configurable. +or down as appropriate. In the future this may be more configurable. ### Interactions with a deployment @@ -266,12 +266,12 @@ there will be multiple replication controllers, with one scaling up and another auto-scaler must be aware of the entire set of capacity that backs a service so it does not fight with the deployer. `AutoScalerSpec.MonitorSelector` is what provides this ability. By using a selector that spans the entire service the auto-scaler can monitor capacity of multiple replication controllers and check that capacity against the `AutoScalerSpec.MaxAutoScaleCount` and -`AutoScalerSpec.MinAutoScaleCount` while still only targeting a specific set of `ReplicationController`s with `TargetSelector`. +`AutoScalerSpec.MinAutoScaleCount` while still only targeting a specific set of `ReplicationController`s with `TargetSelector`. In the course of a deployment it is up to the deployment orchestration to decide how to manage the labels on the replication controllers if it needs to ensure that only specific replication controllers are targeted by the auto-scaler. By default, the auto-scaler will scale the largest replication controller that meets the target label -selector criteria. +selector criteria. During deployment orchestration the auto-scaler may be making decisions to scale its target up or down. In order to prevent the scaler from fighting with a deployment process that is scaling one replication controller up and scaling another one diff --git a/docs/proposals/federation.md b/docs/proposals/federation.md index 99dbe90400e..1845e9eb610 100644 --- a/docs/proposals/federation.md +++ b/docs/proposals/federation.md @@ -31,17 +31,17 @@ Documentation for other releases can be found at -# Kubernetes Cluster Federation +# Kubernetes Cluster Federation ## (a.k.a. 
"Ubernetes") ## Requirements Analysis and Product Proposal -## _by Quinton Hoole ([quinton@google.com](mailto:quinton@google.com))_ +## _by Quinton Hoole ([quinton@google.com](mailto:quinton@google.com))_ -_Initial revision: 2015-03-05_ -_Last updated: 2015-03-09_ -This doc: [tinyurl.com/ubernetesv2](http://tinyurl.com/ubernetesv2) +_Initial revision: 2015-03-05_ +_Last updated: 2015-03-09_ +This doc: [tinyurl.com/ubernetesv2](http://tinyurl.com/ubernetesv2) Slides: [tinyurl.com/ubernetes-slides](http://tinyurl.com/ubernetes-slides) ## Introduction @@ -89,11 +89,11 @@ loosely speaking, a cluster can be thought of as running in a single data center, or cloud provider availability zone, a more precise definition is that each cluster provides: -1. a single Kubernetes API entry point, +1. a single Kubernetes API entry point, 1. a consistent, cluster-wide resource naming scheme 1. a scheduling/container placement domain 1. a service network routing domain -1. (in future) an authentication and authorization model. +1. (in future) an authentication and authorization model. 1. .... The above in turn imply the need for a relatively performant, reliable @@ -220,7 +220,7 @@ the multi-cloud provider implementation should just work for a single cloud provider). Propose high-level design catering for both, with initial implementation targeting single cloud provider only. -**Clarifying questions:** +**Clarifying questions:** **How does global external service discovery work?** In the steady state, which external clients connect to which clusters? GeoDNS or similar? What is the tolerable failover latency if a cluster goes @@ -266,8 +266,8 @@ Doing nothing (i.e. forcing users to choose between 1 and 2 on their own) is probably an OK starting point. Kubernetes autoscaling can get us to 3 at some later date. -Up to this point, this use case ("Unavailability Zones") seems materially different from all the others above. 
It does not require dynamic cross-cluster service migration (we assume that the service is already running in more than one cluster when the failure occurs). Nor does it necessarily involve cross-cluster service discovery or location affinity. As a result, I propose that we address this use case somewhat independently of the others (although I strongly suspect that it will become substantially easier once we've solved the others). - +Up to this point, this use case ("Unavailability Zones") seems materially different from all the others above. It does not require dynamic cross-cluster service migration (we assume that the service is already running in more than one cluster when the failure occurs). Nor does it necessarily involve cross-cluster service discovery or location affinity. As a result, I propose that we address this use case somewhat independently of the others (although I strongly suspect that it will become substantially easier once we've solved the others). + All of the above (regarding "Unavailibility Zones") refers primarily to already-running user-facing services, and minimizing the impact on end users of those services becoming unavailable in a given cluster. @@ -322,7 +322,7 @@ location affinity: (other than the source of YouTube videos, which is assumed to be equally remote from all clusters in this example). Each pod can be scheduled independently, in any cluster, and moved at any time. -1. **"Preferentially Coupled"**: Somewhere between Coupled and Decoupled. These applications prefer to have all of their pods located in the same cluster (e.g. for failure correlation, network latency or bandwidth cost reasons), but can tolerate being partitioned for "short" periods of time (for example while migrating the application from one cluster to another). 
Most small to medium sized LAMP stacks with not-very-strict latency goals probably fall into this category (provided that they use sane service discovery and reconnect-on-fail, which they need to do anyway to run effectively, even in a single Kubernetes cluster). +1. **"Preferentially Coupled"**: Somewhere between Coupled and Decoupled. These applications prefer to have all of their pods located in the same cluster (e.g. for failure correlation, network latency or bandwidth cost reasons), but can tolerate being partitioned for "short" periods of time (for example while migrating the application from one cluster to another). Most small to medium sized LAMP stacks with not-very-strict latency goals probably fall into this category (provided that they use sane service discovery and reconnect-on-fail, which they need to do anyway to run effectively, even in a single Kubernetes cluster). And then there's what I'll call _absolute_ location affinity. Some applications are required to run in bounded geographical or network @@ -341,7 +341,7 @@ of our users are in Western Europe, U.S. West Coast" etc). ## Cross-cluster service discovery -I propose having pods use standard discovery methods used by external clients of Kubernetes applications (i.e. DNS). DNS might resolve to a public endpoint in the local or a remote cluster. Other than Strictly Coupled applications, software should be largely oblivious of which of the two occurs. +I propose having pods use standard discovery methods used by external clients of Kubernetes applications (i.e. DNS). DNS might resolve to a public endpoint in the local or a remote cluster. Other than Strictly Coupled applications, software should be largely oblivious of which of the two occurs. _Aside:_ How do we avoid "tromboning" through an external VIP when DNS resolves to a public IP on the local cluster? Strictly speaking this would be an optimization, and probably only matters to high bandwidth, @@ -384,15 +384,15 @@ such events include: 1. 
A change of scheduling policy ("we no longer use cloud provider X"). 1. A change of resource pricing ("cloud provider Y dropped their prices - lets migrate there"). -Strictly Decoupled applications can be trivially moved, in part or in whole, one pod at a time, to one or more clusters. -For Preferentially Decoupled applications, the federation system must first locate a single cluster with sufficient capacity to accommodate the entire application, then reserve that capacity, and incrementally move the application, one (or more) resources at a time, over to the new cluster, within some bounded time period (and possibly within a predefined "maintenance" window). +Strictly Decoupled applications can be trivially moved, in part or in whole, one pod at a time, to one or more clusters. +For Preferentially Decoupled applications, the federation system must first locate a single cluster with sufficient capacity to accommodate the entire application, then reserve that capacity, and incrementally move the application, one (or more) resources at a time, over to the new cluster, within some bounded time period (and possibly within a predefined "maintenance" window). Strictly Coupled applications (with the exception of those deemed completely immovable) require the federation system to: 1. start up an entire replica application in the destination cluster 1. copy persistent data to the new application instance 1. switch traffic across -1. tear down the original application instance +1. tear down the original application instance It is proposed that support for automated migration of Strictly Coupled applications be deferred to a later date. @@ -422,11 +422,11 @@ TBD: All very hand-wavey still, but some initial thoughts to get the conversatio ## Ubernetes API -This looks a lot like the existing Kubernetes API but is explicitly multi-cluster. +This looks a lot like the existing Kubernetes API but is explicitly multi-cluster. 
-+ Clusters become first class objects, which can be registered, listed, described, deregistered etc via the API. -+ Compute resources can be explicitly requested in specific clusters, or automatically scheduled to the "best" cluster by Ubernetes (by a pluggable Policy Engine). -+ There is a federated equivalent of a replication controller type, which is multicluster-aware, and delegates to cluster-specific replication controllers as required (e.g. a federated RC for n replicas might simply spawn multiple replication controllers in different clusters to do the hard work). ++ Clusters become first class objects, which can be registered, listed, described, deregistered etc via the API. ++ Compute resources can be explicitly requested in specific clusters, or automatically scheduled to the "best" cluster by Ubernetes (by a pluggable Policy Engine). ++ There is a federated equivalent of a replication controller type, which is multicluster-aware, and delegates to cluster-specific replication controllers as required (e.g. a federated RC for n replicas might simply spawn multiple replication controllers in different clusters to do the hard work). + These federated replication controllers (and in fact all the services comprising the Ubernetes Control Plane) have to run somewhere. For high availability Ubernetes deployments, these diff --git a/docs/reporting-security-issues.md b/docs/reporting-security-issues.md index 4081d9d42fd..d2948c75912 100644 --- a/docs/reporting-security-issues.md +++ b/docs/reporting-security-issues.md @@ -33,21 +33,21 @@ Documentation for other releases can be found at # Security -If you believe you have discovered a vulnerability or a have a security incident to report, please follow the steps below. This applies to Kubernetes releases v1.0 or later. +If you believe you have discovered a vulnerability or a have a security incident to report, please follow the steps below. This applies to Kubernetes releases v1.0 or later. 
-To watch for security and major API announcements, please join our [kubernetes-announce](https://groups.google.com/forum/#!forum/kubernetes-announce) group. +To watch for security and major API announcements, please join our [kubernetes-announce](https://groups.google.com/forum/#!forum/kubernetes-announce) group. ## Reporting a security issue To report an issue, please: - Submit a bug report [here](http://goo.gl/vulnz). - Select “I want to report a technical security bug in a Google product (SQLi, XSS, etc.).” - - Select “Other” as the Application Type. + - Select “Other” as the Application Type. - Under reproduction steps, please additionally include - the words "Kubernetes Security issue" - Description of the issue - Kubernetes release (e.g. output of `kubectl version` command, which includes server version.) - - Environment setup (e.g. which "Getting Started Guide" you followed, if any; what node operating system used; what service or software creates your virtual machines, if any) + - Environment setup (e.g. which "Getting Started Guide" you followed, if any; what node operating system used; what service or software creates your virtual machines, if any) An online submission will have the fastest response; however, if you prefer email, please send mail to security@google.com. If you feel the need, please use the [PGP public key](https://services.google.com/corporate/publickey.txt) to encrypt communications. diff --git a/docs/user-guide/accessing-the-cluster.md b/docs/user-guide/accessing-the-cluster.md index 474d8dd8af8..83709c6bc96 100644 --- a/docs/user-guide/accessing-the-cluster.md +++ b/docs/user-guide/accessing-the-cluster.md @@ -150,7 +150,7 @@ There are [client libraries](../devel/client-libraries.md) for accessing the API from several languages. 
The Kubernetes project-supported [Go](http://releases.k8s.io/HEAD/pkg/client/) client library can use the same [kubeconfig file](kubeconfig-file.md) -as the kubectl CLI does to locate and authenticate to the apiserver. +as the kubectl CLI does to locate and authenticate to the apiserver. See documentation for other libraries for how they authenticate. @@ -241,7 +241,7 @@ at `https://104.197.5.247/api/v1/proxy/namespaces/kube-system/services/elasticse #### Manually constructing apiserver proxy URLs -As mentioned above, you use the `kubectl cluster-info` command to retrieve the service's proxy URL. To create proxy URLs that include service endpoints, suffixes, and parameters, you simply append to the service's proxy URL: +As mentioned above, you use the `kubectl cluster-info` command to retrieve the service's proxy URL. To create proxy URLs that include service endpoints, suffixes, and parameters, you simply append to the service's proxy URL: `http://`*`kubernetes_master_address`*`/`*`service_path`*`/`*`service_name`*`/`*`service_endpoint-suffix-parameter`* @@ -297,7 +297,7 @@ There are several different proxies you may encounter when using Kubernetes: - can be used to reach a Node, Pod, or Service - does load balancing when used to reach a Service 1. The [kube proxy](services.md#ips-and-vips): - - runs on each node + - runs on each node - proxies UDP and TCP - does not understand HTTP - provides load balancing diff --git a/docs/user-guide/application-troubleshooting.md b/docs/user-guide/application-troubleshooting.md index 416c8232497..aed40221cfd 100644 --- a/docs/user-guide/application-troubleshooting.md +++ b/docs/user-guide/application-troubleshooting.md @@ -87,7 +87,7 @@ there are insufficient resources of one type or another that prevent scheduling. your pod. 
Reasons include: * **You don't have enough resources**: You may have exhausted the supply of CPU or Memory in your cluster, in this case -you need to delete Pods, adjust resource requests, or add new nodes to your cluster. See [Compute Resources document](compute-resources.md#my-pods-are-pending-with-event-message-failedscheduling) for more information. +you need to delete Pods, adjust resource requests, or add new nodes to your cluster. See [Compute Resources document](compute-resources.md#my-pods-are-pending-with-event-message-failedscheduling) for more information. * **You are using `hostPort`**: When you bind a Pod to a `hostPort` there are a limited number of places that pod can be scheduled. In most cases, `hostPort` is unnecessary, try using a Service object to expose your Pod. If you do require @@ -100,7 +100,7 @@ If a Pod is stuck in the `Waiting` state, then it has been scheduled to a worker Again, the information from `kubectl describe ...` should be informative. The most common cause of `Waiting` pods is a failure to pull the image. There are three things to check: * Make sure that you have the name of the image correct * Have you pushed the image to the repository? -* Run a manual `docker pull ` on your machine to see if the image can be pulled. +* Run a manual `docker pull ` on your machine to see if the image can be pulled. #### My pod is crashing or otherwise unhealthy @@ -139,7 +139,7 @@ feature request on GitHub describing your use case and why these tools are insuf ### Debugging Replication Controllers Replication controllers are fairly straightforward. They can either create Pods or they can't. If they can't -create pods, then please refer to the [instructions above](#debugging-pods) to debug your pods. +create pods, then please refer to the [instructions above](#debugging-pods) to debug your pods. You can also use `kubectl describe rc ${CONTROLLER_NAME}` to introspect events related to the replication controller. 
@@ -199,11 +199,11 @@ check: * Can you connect to your pods directly? Get the IP address for the Pod, and try to connect directly to that IP * Is your application serving on the port that you configured? Kubernetes doesn't do port remapping, so if your application serves on 8080, the `containerPort` field needs to be 8080. -#### More information +#### More information -If none of the above solves your problem, follow the instructions in [Debugging Service document](debugging-services.md) to make sure that your `Service` is running, has `Endpoints`, and your `Pods` are actually serving; you have DNS working, iptables rules installed, and kube-proxy does not seem to be misbehaving. +If none of the above solves your problem, follow the instructions in [Debugging Service document](debugging-services.md) to make sure that your `Service` is running, has `Endpoints`, and your `Pods` are actually serving; you have DNS working, iptables rules installed, and kube-proxy does not seem to be misbehaving. -You may also visit [troubleshooting document](../troubleshooting.md) for more information. +You may also visit [troubleshooting document](../troubleshooting.md) for more information. diff --git a/docs/user-guide/compute-resources.md b/docs/user-guide/compute-resources.md index 00b70880885..19ffe53ff19 100644 --- a/docs/user-guide/compute-resources.md +++ b/docs/user-guide/compute-resources.md @@ -133,7 +133,7 @@ When using Docker: **TODO: document behavior for rkt** If a container exceeds its memory limit, it may be terminated. If it is restartable, it will be -restarted by kubelet, as will any other type of runtime failure. +restarted by kubelet, as will any other type of runtime failure. A container may or may not be allowed to exceed its CPU limit for extended periods of time. However, it will not be killed for excessive CPU usage. 
@@ -178,7 +178,7 @@ The [resource quota](../admin/resource-quota.md) feature can be configured to limit the total amount of resources that can be consumed. If used in conjunction with namespaces, it can prevent one team from hogging all the resources. -### My container is terminated +### My container is terminated Your container may be terminated because it's resource-starved. To check if a container is being killed because it is hitting a resource limit, call `kubectl describe pod` on the pod you are interested in: diff --git a/docs/user-guide/config-best-practices.md b/docs/user-guide/config-best-practices.md index dfe7b94ec08..f65120092bf 100644 --- a/docs/user-guide/config-best-practices.md +++ b/docs/user-guide/config-best-practices.md @@ -35,7 +35,7 @@ Documentation for other releases can be found at This document is meant to highlight and consolidate in one place configuration best practices that are introduced throughout the user-guide and getting-started documentation and examples. This is a living document so if you think of something that is not on this list but might be useful to others, please don't hesitate to file an issue or submit a PR. -1. When writing configuration, use the latest stable API version (currently v1). +1. When writing configuration, use the latest stable API version (currently v1). 1. Configuration should be stored in version control before being pushed to the cluster. This allows configuration to be quickly rolled back if needed and will aid with cluster re-creation and restoration if the worst were to happen. 1. Use YAML rather than JSON. They can be used interchangeably in almost all scenarios but YAML tends to be more user-friendly for config. 1. Group related objects together in a single file. This is often better than separate files. 
diff --git a/docs/user-guide/configuring-containers.md b/docs/user-guide/configuring-containers.md index 845a68af221..f97e4e21d75 100644 --- a/docs/user-guide/configuring-containers.md +++ b/docs/user-guide/configuring-containers.md @@ -73,7 +73,7 @@ spec: # specification of the pod’s contents The value of `metadata.name`, `hello-world`, will be the name of the pod resource created, and must be unique within the cluster, whereas `containers[0].name` is just a nickname for the container within that pod. `image` is the name of the Docker image, which Kubernetes expects to be able to pull from a registry, the [Docker Hub](https://registry.hub.docker.com/) by default. -`restartPolicy: Never` indicates that we just want to run the container once and then terminate the pod. +`restartPolicy: Never` indicates that we just want to run the container once and then terminate the pod. The [`command`](containers.md#containers-and-commands) overrides the Docker container’s `Entrypoint`. Command arguments (corresponding to Docker’s `Cmd`) may be specified using `args`, as follows: @@ -142,7 +142,7 @@ However, a shell isn’t necessary just to expand environment variables. Kuberne ## Viewing pod status -You can see the pod you created (actually all of your cluster's pods) using the `get` command. +You can see the pod you created (actually all of your cluster's pods) using the `get` command. If you’re quick, it will look as follows: @@ -199,7 +199,7 @@ $ kubectl delete pods/hello-world pods/hello-world ``` -Terminated pods aren’t currently automatically deleted, so that you can observe their final status, so be sure to clean up your dead pods. +Terminated pods aren’t currently automatically deleted, so that you can observe their final status, so be sure to clean up your dead pods. On the other hand, containers and their logs are eventually deleted automatically in order to free up disk space on the nodes. 
diff --git a/docs/user-guide/container-environment.md b/docs/user-guide/container-environment.md index 4533dc0a60f..9c1ac389e48 100644 --- a/docs/user-guide/container-environment.md +++ b/docs/user-guide/container-environment.md @@ -52,10 +52,10 @@ Documentation for other releases can be found at ## Overview -This document describes the environment for Kubelet managed containers on a Kubernetes node (kNode).  In contrast to the Kubernetes cluster API, which provides an API for creating and managing containers, the Kubernetes container environment provides the container access to information about what else is going on in the cluster.  +This document describes the environment for Kubelet managed containers on a Kubernetes node (kNode).  In contrast to the Kubernetes cluster API, which provides an API for creating and managing containers, the Kubernetes container environment provides the container access to information about what else is going on in the cluster. -This cluster information makes it possible to build applications that are *cluster aware*.   -Additionally, the Kubernetes container environment defines a series of hooks that are surfaced to optional hook handlers defined as part of individual containers.  Container hooks are somewhat analogous to operating system signals in a traditional process model.   However these hooks are designed to make it easier to build reliable, scalable cloud applications in the Kubernetes cluster.  Containers that participate in this cluster lifecycle become *cluster native*.  +This cluster information makes it possible to build applications that are *cluster aware*. +Additionally, the Kubernetes container environment defines a series of hooks that are surfaced to optional hook handlers defined as part of individual containers.  Container hooks are somewhat analogous to operating system signals in a traditional process model.   
However these hooks are designed to make it easier to build reliable, scalable cloud applications in the Kubernetes cluster.  Containers that participate in this cluster lifecycle become *cluster native*. Another important part of the container environment is the file system that is available to the container. In Kubernetes, the filesystem is a combination of an [image](images.md) and one or more [volumes](volumes.md). @@ -68,7 +68,9 @@ There are two types of information that are available within the container envir ### Container Information -Currently, the only information about the container that is available to the container is the Pod name for the pod in which the container is running.  This ID is set as the hostname of the container, and is accessible through all calls to access the hostname within the container (e.g. the hostname command, or the [gethostname][1] function call in libc).  Additionally, user-defined environment variables from the pod definition, are also available to the container, as are any environment variables specified statically in the Docker image. +Currently, the Pod name for the pod in which the container is running is set as the hostname of the container, and is accessible through all calls to access the hostname within the container (e.g. the hostname command, or the [gethostname][1] function call in libc), but this is planned to change in the future and should not be used. + +The Pod name and namespace are also available as environment variables via the [downward API](downward-api.md). Additionally, user-defined environment variables from the pod definition, are also available to the container, as are any environment variables specified statically in the Docker image. In the future, we anticipate expanding this information with richer information about the container.  Examples include available memory, number of restarts, and in general any state that you could get from the call to GET /pods on the API server. 
@@ -89,7 +91,7 @@ Services have dedicated IP address, and are also surfaced to the container via D *NB*: Container hooks are under active development, we anticipate adding additional hooks as the Kubernetes container management system evolves.* -Container hooks provide information to the container about events in its management lifecycle.  For example, immediately after a container is started, it receives a *PostStart* hook.  These hooks are broadcast *into* the container with information about the life-cycle of the container.  They are different from the events provided by Docker and other systems which are *output* from the container.  Output events provide a log of what has already happened.  Input hooks provide real-time notification about things that are happening, but no historical log.   +Container hooks provide information to the container about events in its management lifecycle.  For example, immediately after a container is started, it receives a *PostStart* hook.  These hooks are broadcast *into* the container with information about the life-cycle of the container.  They are different from the events provided by Docker and other systems which are *output* from the container.  Output events provide a log of what has already happened.  Input hooks provide real-time notification about things that are happening, but no historical log. ### Hook Details diff --git a/docs/user-guide/containers.md b/docs/user-guide/containers.md index e7a7efe733a..4156ff9fc12 100644 --- a/docs/user-guide/containers.md +++ b/docs/user-guide/containers.md @@ -48,7 +48,7 @@ we can use: Docker images have metadata associated with them that is used to store information about the image. The image author may use this to define defaults for the command and arguments to run a container when the user does not supply values. Docker calls the fields for commands and arguments -`Entrypoint` and `Cmd` respectively. 
The full details for this feature are too complicated to +`Entrypoint` and `Cmd` respectively. The full details for this feature are too complicated to describe here, mostly due to the fact that the docker API allows users to specify both of these fields as either a string array or a string and there are subtle differences in how those cases are handled. We encourage the curious to check out [docker's documentation]() for this feature. @@ -69,10 +69,10 @@ Here are examples for these rules in table format | Image `Entrypoint` | Image `Cmd` | Container `Command` | Container `Args` | Command Run | |--------------------|------------------|---------------------|--------------------|------------------| -| `[/ep-1]` | `[foo bar]` | <not set> | <not set> | `[ep-1 foo bar]` | -| `[/ep-1]` | `[foo bar]` | `[/ep-2]` | <not set> | `[ep-2]` | -| `[/ep-1]` | `[foo bar]` | <not set> | `[zoo boo]` | `[ep-1 zoo boo]` | -| `[/ep-1]` | `[foo bar]` | `[/ep-2]` | `[zoo boo]` | `[ep-2 zoo boo]` | +| `[/ep-1]` | `[foo bar]` | <not set> | <not set> | `[ep-1 foo bar]` | +| `[/ep-1]` | `[foo bar]` | `[/ep-2]` | <not set> | `[ep-2]` | +| `[/ep-1]` | `[foo bar]` | <not set> | `[zoo boo]` | `[ep-1 zoo boo]` | +| `[/ep-1]` | `[foo bar]` | `[/ep-2]` | `[zoo boo]` | `[ep-2 zoo boo]` | ## Capabilities diff --git a/docs/user-guide/debugging-services.md b/docs/user-guide/debugging-services.md index ecfac13f1d7..512f4a927d1 100644 --- a/docs/user-guide/debugging-services.md +++ b/docs/user-guide/debugging-services.md @@ -552,7 +552,7 @@ Contact us on ## More information -Visit [troubleshooting document](../troubleshooting.md) for more information. +Visit [troubleshooting document](../troubleshooting.md) for more information. 
diff --git a/docs/user-guide/docker-cli-to-kubectl.md b/docs/user-guide/docker-cli-to-kubectl.md index 4ecc73d4caf..ebdae76c707 100644 --- a/docs/user-guide/docker-cli-to-kubectl.md +++ b/docs/user-guide/docker-cli-to-kubectl.md @@ -41,6 +41,7 @@ In this doc, we introduce the Kubernetes command line to for interacting with th - [kubectl for docker users](#kubectl-for-docker-users) - [docker run](#docker-run) - [docker ps](#docker-ps) + - [docker attach](#docker-attach) - [docker exec](#docker-exec) - [docker logs](#docker-logs) - [docker stop and docker rm](#docker-stop-and-docker-rm) @@ -99,9 +100,9 @@ NAME READY STATUS RESTARTS AGE nginx-app-5jyvm 1/1 Running 0 1h ``` -#### docker exec +#### docker attach -How do I execute a command in a container? Checkout [kubectl exec](kubectl/kubectl_exec.md). +How do I attach to a process that is already running in a container? Checkout [kubectl attach](kubectl/kubectl_attach.md) With docker: @@ -109,18 +110,47 @@ With docker: $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES a9ec34d98787 nginx "nginx -g 'daemon of 8 minutes ago Up 8 minutes 0.0.0.0:80->80/tcp, 443/tcp nginx-app -$ docker exec a9ec34d98787 cat /etc/hostname -a9ec34d98787 +$ docker attach -it a9ec34d98787 +... ``` With kubectl: ```console +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +nginx-app-5jyvm 1/1 Running 0 10m +$ kubectl attach -it nginx-app-5jyvm +... + +``` + +#### docker exec + +How do I execute a command in a container? Checkout [kubectl exec](kubectl/kubectl_exec.md). 
+ +With docker: + +```console + +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +a9ec34d98787 nginx "nginx -g 'daemon of 8 minutes ago Up 8 minutes 0.0.0.0:80->80/tcp, 443/tcp nginx-app +$ docker exec a9ec34d98787 cat /etc/hostname +a9ec34d98787 + +``` + +With kubectl: + +```console + $ kubectl get po NAME READY STATUS RESTARTS AGE nginx-app-5jyvm 1/1 Running 0 10m $ kubectl exec nginx-app-5jyvm -- cat /etc/hostname nginx-app-5jyvm + ``` What about interactive commands? @@ -129,15 +159,21 @@ What about interactive commands? With docker: ```console + $ docker exec -ti a9ec34d98787 /bin/sh + # exit + ``` With kubectl: ```console + $ kubectl exec -ti nginx-app-5jyvm -- /bin/sh + # exit + ``` For more information see [Getting into containers](getting-into-containers.md). @@ -150,25 +186,31 @@ How do I follow stdout/stderr of a running process? Checkout [kubectl logs](kube With docker: ```console + $ docker logs -f a9e 192.168.9.1 - - [14/Jul/2015:01:04:02 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.35.0" "-" 192.168.9.1 - - [14/Jul/2015:01:04:03 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.35.0" "-" + ``` With kubectl: ```console + $ kubectl logs -f nginx-app-zibvs 10.240.63.110 - - [14/Jul/2015:01:09:01 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.26.0" "-" 10.240.63.110 - - [14/Jul/2015:01:09:02 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.26.0" "-" + ``` Now's a good time to mention slight difference between pods and containers; by default pods will not terminate if their processes exit. Instead it will restart the process. This is similar to the docker run option `--restart=always` with one major difference. In docker, the output for each invocation of the process is concatenated but for Kubernetes, each invokation is separate. 
To see the output from a prevoius run in Kubernetes, do this: ```console + $ kubectl logs --previous nginx-app-zibvs 10.240.63.110 - - [14/Jul/2015:01:09:01 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.26.0" "-" 10.240.63.110 - - [14/Jul/2015:01:09:02 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.26.0" "-" + ``` See [Logging](logging.md) for more information. @@ -180,6 +222,7 @@ How do I stop and delete a running process? Checkout [kubectl delete](kubectl/ku With docker ```console + $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES a9ec34d98787 nginx "nginx -g 'daemon of 22 hours ago Up 22 hours 0.0.0.0:80->80/tcp, 443/tcp nginx-app @@ -187,11 +230,13 @@ $ docker stop a9ec34d98787 a9ec34d98787 $ docker rm a9ec34d98787 a9ec34d98787 + ``` With kubectl: ```console + $ kubectl get rc nginx-app CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS nginx-app nginx-app nginx run=nginx-app 1 @@ -203,6 +248,7 @@ NAME READY STATUS RESTARTS AGE nginx-app-aualv 1/1 Running 0 16s $ kubectl get po NAME READY STATUS RESTARTS AGE + ``` Notice that we don't delete the pod directly. With kubectl we want to delete the replication controller that owns the pod. If we delete the pod directly, the replication controller will recreate the pod. @@ -218,6 +264,7 @@ How do I get the version of my client and server? 
Checkout [kubectl version](kub With docker: ```console + $ docker version Client version: 1.7.0 Client API version: 1.19 @@ -229,14 +276,17 @@ Server API version: 1.19 Go version (server): go1.4.2 Git commit (server): 0baf609 OS/Arch (server): linux/amd64 + ``` With kubectl: ```console + $ kubectl version Client Version: version.Info{Major:"0", Minor:"20.1", GitVersion:"v0.20.1", GitCommit:"", GitTreeState:"not a git tree"} Server Version: version.Info{Major:"0", Minor:"21+", GitVersion:"v0.21.1-411-g32699e873ae1ca-dirty", GitCommit:"32699e873ae1caa01812e41de7eab28df4358ee4", GitTreeState:"dirty"} + ``` #### docker info @@ -246,6 +296,7 @@ How do I get miscellaneous info about my environment and configuration? Checkout With docker: ```console + $ docker info Containers: 40 Images: 168 @@ -263,11 +314,13 @@ Total Memory: 31.32 GiB Name: k8s-is-fun.mtv.corp.google.com ID: ADUV:GCYR:B3VJ:HMPO:LNPQ:KD5S:YKFQ:76VN:IANZ:7TFV:ZBF4:BYJO WARNING: No swap limit support + ``` With kubectl: ```console + $ kubectl cluster-info Kubernetes master is running at https://108.59.85.141 KubeDNS is running at https://108.59.85.141/api/v1/proxy/namespaces/kube-system/services/kube-dns @@ -275,6 +328,7 @@ KubeUI is running at https://108.59.85.141/api/v1/proxy/namespaces/kube-system/s Grafana is running at https://108.59.85.141/api/v1/proxy/namespaces/kube-system/services/monitoring-grafana Heapster is running at https://108.59.85.141/api/v1/proxy/namespaces/kube-system/services/monitoring-heapster InfluxDB is running at https://108.59.85.141/api/v1/proxy/namespaces/kube-system/services/monitoring-influxdb + ``` diff --git a/docs/user-guide/downward-api.md b/docs/user-guide/downward-api.md index 5da6fd68846..36bc3769ed2 100644 --- a/docs/user-guide/downward-api.md +++ b/docs/user-guide/downward-api.md @@ -105,7 +105,7 @@ spec: ``` [Download example](downward-api/dapi-pod.yaml) - + Some more thorough examples: * [environment variables](environment-guide/) diff --git 
a/docs/user-guide/getting-into-containers.md b/docs/user-guide/getting-into-containers.md index 25d8567e957..5007880f445 100644 --- a/docs/user-guide/getting-into-containers.md +++ b/docs/user-guide/getting-into-containers.md @@ -53,7 +53,7 @@ NAME READY REASON RESTARTS AGE redis-master-ft9ex 1/1 Running 0 12s ``` -then we can check the environment variables of the pod, +then we can check the environment variables of the pod, ```console $ kubectl exec redis-master-ft9ex env diff --git a/docs/user-guide/images.md b/docs/user-guide/images.md index 31f9fc89124..4e59462d276 100644 --- a/docs/user-guide/images.md +++ b/docs/user-guide/images.md @@ -68,7 +68,7 @@ Credentials can be provided in several ways: - Per-cluster - automatically configured on Google Compute Engine or Google Container Engine - all pods can read the project's private registry - - Configuring Nodes to Authenticate to a Private Registry + - Configuring Nodes to Authenticate to a Private Registry - all pods can read any configured private registries - requires node configuration by cluster administrator - Pre-pulling Images @@ -77,7 +77,7 @@ Credentials can be provided in several ways: - Specifying ImagePullSecrets on a Pod - only pods which provide own keys can access the private registry Each option is described in more detail below. - + ### Using Google Container Registry @@ -101,7 +101,7 @@ with credentials for Google Container Registry. You cannot use this approach. **Note:** this approach is suitable if you can control node configuration. It will not work reliably on GCE, and any other cloud provider that does automatic node replacement. - + Docker stores keys for private registries in the `$HOME/.dockercfg` file. If you put this in the `$HOME` of `root` on a kubelet, then docker will use it. @@ -109,7 +109,7 @@ Here are the recommended steps to configuring your nodes to use a private regist example, run these on your desktop/laptop: 1. 
run `docker login [server]` for each set of credentials you want to use. 1. view `$HOME/.dockercfg` in an editor to ensure it contains just the credentials you want to use. - 1. get a list of your nodes + 1. get a list of your nodes - for example: `nodes=$(kubectl get nodes -o template --template='{{range.items}}{{.metadata.name}} {{end}}')` 1. copy your local `.dockercfg` to the home directory of root on each node. - for example: `for n in $nodes; do scp ~/.dockercfg root@$n:/root/.dockercfg; done` @@ -218,7 +218,7 @@ secrets/myregistrykey $ ``` -If you get the error message `error: no objects passed to create`, it may mean the base64 encoded string is invalid. +If you get the error message `error: no objects passed to create`, it may mean the base64 encoded string is invalid. If you get an error message like `Secret "myregistrykey" is invalid: data[.dockercfg]: invalid value ...` it means the data was successfully un-base64 encoded, but could not be parsed as a dockercfg file. diff --git a/docs/user-guide/introspection-and-debugging.md b/docs/user-guide/introspection-and-debugging.md index df5075d5afe..c6a179a1f6a 100644 --- a/docs/user-guide/introspection-and-debugging.md +++ b/docs/user-guide/introspection-and-debugging.md @@ -138,7 +138,7 @@ Lastly, you see a log of recent events related to your Pod. The system compresse ## Example: debugging Pending Pods -A common scenario that you can detect using events is when you’ve created a Pod that won’t fit on any node. For example, the Pod might request more resources than are free on any node, or it might specify a label selector that doesn’t match any nodes. Let’s say we created the previous Replication Controller with 5 replicas (instead of 2) and requesting 600 millicores instead of 500, on a four-node cluster where each (virtual) machine has 1 CPU. In that case one of the Pods will not be able to schedule. 
(Note that because of the cluster addon pods such as fluentd, skydns, etc., that run on each node, if we requested 1000 millicores then none of the Pods would be able to schedule.) +A common scenario that you can detect using events is when you’ve created a Pod that won’t fit on any node. For example, the Pod might request more resources than are free on any node, or it might specify a label selector that doesn’t match any nodes. Let’s say we created the previous Replication Controller with 5 replicas (instead of 2) and requesting 600 millicores instead of 500, on a four-node cluster where each (virtual) machine has 1 CPU. In that case one of the Pods will not be able to schedule. (Note that because of the cluster addon pods such as fluentd, skydns, etc., that run on each node, if we requested 1000 millicores then none of the Pods would be able to schedule.) ```console $ kubectl get pods diff --git a/docs/user-guide/kubectl/.files_generated b/docs/user-guide/kubectl/.files_generated index 71996881a42..73e9a345778 100644 --- a/docs/user-guide/kubectl/.files_generated +++ b/docs/user-guide/kubectl/.files_generated @@ -1,5 +1,6 @@ kubectl.md kubectl_api-versions.md +kubectl_attach.md kubectl_cluster-info.md kubectl_config.md kubectl_config_set-cluster.md diff --git a/docs/user-guide/kubectl/kubectl.md b/docs/user-guide/kubectl/kubectl.md index 5cf0e4d5c4b..5a5ce37db74 100644 --- a/docs/user-guide/kubectl/kubectl.md +++ b/docs/user-guide/kubectl/kubectl.md @@ -79,6 +79,7 @@ kubectl ### SEE ALSO * [kubectl api-versions](kubectl_api-versions.md) - Print available API versions. +* [kubectl attach](kubectl_attach.md) - Attach to a running container. 
* [kubectl cluster-info](kubectl_cluster-info.md) - Display cluster info * [kubectl config](kubectl_config.md) - config modifies kubeconfig files * [kubectl create](kubectl_create.md) - Create a resource by filename or stdin @@ -100,7 +101,7 @@ kubectl * [kubectl stop](kubectl_stop.md) - Deprecated: Gracefully shut down a resource by name or filename. * [kubectl version](kubectl_version.md) - Print the client and server version information. -###### Auto generated by spf13/cobra at 2015-07-29 09:18:59.541696918 +0000 UTC +###### Auto generated by spf13/cobra at 2015-07-30 03:45:17.319803488 +0000 UTC diff --git a/docs/user-guide/kubectl/kubectl_attach.md b/docs/user-guide/kubectl/kubectl_attach.md new file mode 100644 index 00000000000..a1e51cc6386 --- /dev/null +++ b/docs/user-guide/kubectl/kubectl_attach.md @@ -0,0 +1,108 @@ + + + + +WARNING +WARNING +WARNING +WARNING +WARNING + +

PLEASE NOTE: This document applies to the HEAD of the source tree

+ +If you are using a released version of Kubernetes, you should +refer to the docs that go with that version. + + +The latest 1.0.x release of this document can be found +[here](http://releases.k8s.io/release-1.0/docs/user-guide/kubectl/kubectl_attach.md). + +Documentation for other releases can be found at +[releases.k8s.io](http://releases.k8s.io). + +-- + + + + + +## kubectl attach + +Attach to a running container. + +### Synopsis + + +Attach to a process that is already running inside an existing container. + +``` +kubectl attach POD -c CONTAINER +``` + +### Examples + +``` +// get output from running pod 123456-7890, using the first container by default +$ kubectl attach 123456-7890 + +// get output from ruby-container from pod 123456-7890 +$ kubectl attach 123456-7890 -c ruby-container date + +// switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-7890 +// and sends stdout/stderr from 'bash' back to the client +$ kubectl attach 123456-7890 -c ruby-container -i -t +``` + +### Options + +``` + -c, --container="": Container name + -h, --help=false: help for attach + -i, --stdin=false: Pass stdin to the container + -t, --tty=false: Stdin is a TTY +``` + +### Options inherited from parent commands + +``` + --alsologtostderr=false: log to standard error as well as files + --api-version="": The API version to use when talking to the server + --certificate-authority="": Path to a cert. file for the certificate authority. + --client-certificate="": Path to a client key file for TLS. + --client-key="": Path to a client key file for TLS. + --cluster="": The name of the kubeconfig cluster to use + --context="": The name of the kubeconfig context to use + --insecure-skip-tls-verify=false: If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure. + --kubeconfig="": Path to the kubeconfig file to use for CLI requests.
+ --log-backtrace-at=:0: when logging hits line file:N, emit a stack trace + --log-dir=: If non-empty, write log files in this directory + --log-flush-frequency=5s: Maximum number of seconds between log flushes + --logtostderr=true: log to standard error instead of files + --match-server-version=false: Require server version to match client version + --namespace="": If present, the namespace scope for this CLI request. + --password="": Password for basic authentication to the API server. + -s, --server="": The address and port of the Kubernetes API server + --stderrthreshold=2: logs at or above this threshold go to stderr + --token="": Bearer token for authentication to the API server. + --user="": The name of the kubeconfig user to use + --username="": Username for basic authentication to the API server. + --v=0: log level for V logs + --validate=false: If true, use a schema to validate the input before sending it + --vmodule=: comma-separated list of pattern=N settings for file-filtered logging +``` + +### SEE ALSO + +* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager + +###### Auto generated by spf13/cobra at 2015-07-30 17:45:25.860905122 +0000 UTC + + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_attach.md?pixel)]() + diff --git a/docs/user-guide/kubectl/kubectl_describe.md b/docs/user-guide/kubectl/kubectl_describe.md index 04bd4ea79b9..054e375d02b 100644 --- a/docs/user-guide/kubectl/kubectl_describe.md +++ b/docs/user-guide/kubectl/kubectl_describe.md @@ -49,9 +49,9 @@ will first check for an exact match on RESOURCE and NAME_PREFIX. 
If no such reso exists, it will output details for every resource that has a name prefixed with NAME_PREFIX Possible resources include (case insensitive): pods (po), services (svc), -replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs), -limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), -resourcequotas (quota) or secrets. +replicationcontrollers (rc), nodes (no), events (ev), limitranges (limits), +persistentvolumes (pv), persistentvolumeclaims (pvc), resourcequotas (quota), +namespaces (ns) or secrets. ``` kubectl describe (RESOURCE NAME_PREFIX | RESOURCE/NAME) @@ -114,7 +114,7 @@ $ kubectl describe pods frontend * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-07-15 01:43:24.778753787 +0000 UTC +###### Auto generated by spf13/cobra at 2015-07-31 07:12:36.111698336 +0000 UTC diff --git a/docs/user-guide/kubectl/kubectl_get.md b/docs/user-guide/kubectl/kubectl_get.md index f347d8b4d33..eb78c905aac 100644 --- a/docs/user-guide/kubectl/kubectl_get.md +++ b/docs/user-guide/kubectl/kubectl_get.md @@ -43,7 +43,7 @@ Display one or many resources. Possible resources include (case insensitive): pods (po), services (svc), replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs), limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), -resourcequotas (quota) or secrets. +resourcequotas (quota), namespaces (ns), endpoints (ep) or secrets. By specifying the output as 'template' and providing a Go template as the value of the --template flag, you can filter the attributes of the fetched resource(s). 
@@ -125,7 +125,7 @@ $ kubectl get rc/web service/frontend pods/web-pod-13je7 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-07-15 01:43:24.778535128 +0000 UTC +###### Auto generated by spf13/cobra at 2015-07-30 08:43:29.371131796 +0000 UTC diff --git a/docs/user-guide/kubectl/kubectl_scale.md b/docs/user-guide/kubectl/kubectl_scale.md index 3e4cc344344..f0f3644bd34 100644 --- a/docs/user-guide/kubectl/kubectl_scale.md +++ b/docs/user-guide/kubectl/kubectl_scale.md @@ -57,6 +57,9 @@ $ kubectl scale --replicas=3 replicationcontrollers foo // If the replication controller named foo's current size is 2, scale foo to 3. $ kubectl scale --current-replicas=2 --replicas=3 replicationcontrollers foo + +// Scale multiple replication controllers. +$ kubectl scale --replicas=5 rc/foo rc/bar ``` ### Options @@ -67,6 +70,7 @@ $ kubectl scale --current-replicas=2 --replicas=3 replicationcontrollers foo -o, --output="": Output mode. Use "-o name" for shorter output (resource/name). --replicas=-1: The new desired number of replicas. Required. --resource-version="": Precondition for resource version. Requires that the current resource version match this value in order to scale. + --timeout=0: The length of time to wait before giving up on a scale operation, zero means don't wait. 
``` ### Options inherited from parent commands @@ -102,7 +106,7 @@ $ kubectl scale --current-replicas=2 --replicas=3 replicationcontrollers foo * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-07-14 00:11:42.956739933 +0000 UTC +###### Auto generated by spf13/cobra at 2015-07-30 08:50:55.94117889 +0000 UTC diff --git a/docs/user-guide/logging.md b/docs/user-guide/logging.md index 484ae2b0af7..49af4f527d6 100644 --- a/docs/user-guide/logging.md +++ b/docs/user-guide/logging.md @@ -59,7 +59,7 @@ spec: ``` [Download example](../../examples/blog-logging/counter-pod.yaml) - + we can run the pod: diff --git a/docs/user-guide/managing-deployments.md b/docs/user-guide/managing-deployments.md index 2e7db4e0054..99aad699316 100644 --- a/docs/user-guide/managing-deployments.md +++ b/docs/user-guide/managing-deployments.md @@ -157,7 +157,7 @@ my-nginx-svc app=nginx app=nginx 10.0.152.174 80/TCP ## Using labels effectively -The examples we’ve used so far apply at most a single label to any resource. There are many scenarios where multiple labels should be used to distinguish sets from one another. +The examples we’ve used so far apply at most a single label to any resource. There are many scenarios where multiple labels should be used to distinguish sets from one another. For instance, different applications would use different values for the `app` label, but a multi-tier application, such as the [guestbook example](../../examples/guestbook/), would additionally need to distinguish each tier. The frontend could carry the following labels: @@ -279,7 +279,7 @@ my-nginx-o0ef1 1/1 Running 0 1h At some point, you’ll eventually need to update your deployed application, typically by specifying a new image or image tag, as in the canary deployment scenario above. `kubectl` supports several update operations, each of which is applicable to different scenarios. 
-To update a service without an outage, `kubectl` supports what is called [“rolling update”](kubectl/kubectl_rolling-update.md), which updates one pod at a time, rather than taking down the entire service at the same time. See the [rolling update design document](../design/simple-rolling-update.md) and the [example of rolling update](update-demo/) for more information. +To update a service without an outage, `kubectl` supports what is called [“rolling update”](kubectl/kubectl_rolling-update.md), which updates one pod at a time, rather than taking down the entire service at the same time. See the [rolling update design document](../design/simple-rolling-update.md) and the [example of rolling update](update-demo/) for more information. Let’s say you were running version 1.7.9 of nginx: diff --git a/docs/user-guide/monitoring.md b/docs/user-guide/monitoring.md index fe71dd2a65f..67ba0420586 100644 --- a/docs/user-guide/monitoring.md +++ b/docs/user-guide/monitoring.md @@ -59,7 +59,7 @@ The Kubelet acts as a bridge between the Kubernetes master and the nodes. It man ### InfluxDB and Grafana -A Grafana setup with InfluxDB is a very popular combination for monitoring in the open source world. InfluxDB exposes an easy to use API to write and fetch time series data. Heapster is setup to use this storage backend by default on most Kubernetes clusters. A detailed setup guide can be found [here](https://github.com/GoogleCloudPlatform/heapster/blob/master/docs/influxdb.md). InfluxDB and Grafana run in Pods. The pod exposes itself as a Kubernetes service which is how Heapster discovers it. +A Grafana setup with InfluxDB is a very popular combination for monitoring in the open source world. InfluxDB exposes an easy to use API to write and fetch time series data. Heapster is setup to use this storage backend by default on most Kubernetes clusters. A detailed setup guide can be found [here](https://github.com/GoogleCloudPlatform/heapster/blob/master/docs/influxdb.md). 
InfluxDB and Grafana run in Pods. The pod exposes itself as a Kubernetes service which is how Heapster discovers it. The Grafana container serves Grafana’s UI which provides an easy to configure dashboard interface. The default dashboard for Kubernetes contains an example dashboard that monitors resource usage of the cluster and the pods inside of it. This dashboard can easily be customized and expanded. Take a look at the storage schema for InfluxDB [here](https://github.com/GoogleCloudPlatform/heapster/blob/master/docs/storage-schema.md#metrics). @@ -88,7 +88,7 @@ Here is a snapshot of the a Google Cloud Monitoring dashboard showing cluster-wi Now that you’ve learned a bit about Heapster, feel free to try it out on your own clusters! The [Heapster repository](https://github.com/GoogleCloudPlatform/heapster) is available on GitHub. It contains detailed instructions to setup Heapster and its storage backends. Heapster runs by default on most Kubernetes clusters, so you may already have it! Feedback is always welcome. Please let us know if you run into any issues. Heapster and Kubernetes developers hang out in the [#google-containers](http://webchat.freenode.net/?channels=google-containers) IRC channel on freenode.net. You can also reach us on the [google-containers Google Groups mailing list](https://groups.google.com/forum/#!forum/google-containers). 
*** -*Authors: Vishnu Kannan and Victor Marmol, Google Software Engineers.* +*Authors: Vishnu Kannan and Victor Marmol, Google Software Engineers.* *This article was originally posted in [Kubernetes blog](http://blog.kubernetes.io/2015/05/resource-usage-monitoring-kubernetes.html).* diff --git a/docs/user-guide/overview.md b/docs/user-guide/overview.md index 67862f67925..9d537ece202 100644 --- a/docs/user-guide/overview.md +++ b/docs/user-guide/overview.md @@ -35,7 +35,7 @@ Documentation for other releases can be found at Kubernetes is an open-source system for managing containerized applications across multiple hosts in a cluster. Kubernetes is intended to make deploying containerized/microservice-based applications easy but powerful. -Kubernetes provides mechanisms for application deployment, scheduling, updating, maintenance, and scaling. A key feature of Kubernetes is that it actively manages the containers to ensure that the state of the cluster continually matches the user's intentions. An operations user should be able to launch a micro-service, letting the scheduler find the right placement. We also want to improve the tools and experience for how users can roll-out applications through patterns like canary deployments. +Kubernetes provides mechanisms for application deployment, scheduling, updating, maintenance, and scaling. A key feature of Kubernetes is that it actively manages the containers to ensure that the state of the cluster continually matches the user's intentions. An operations user should be able to launch a micro-service, letting the scheduler find the right placement. We also want to improve the tools and experience for how users can roll-out applications through patterns like canary deployments. Kubernetes supports [Docker](http://www.docker.io) and [Rocket](https://coreos.com/blog/rocket/) containers, and other container image formats and container runtimes will be supported in the future. 
@@ -45,7 +45,7 @@ In Kubernetes, all containers run inside [pods](pods.md). A pod can host a singl Users can create and manage pods themselves, but Kubernetes drastically simplifies system management by allowing users to delegate two common pod-related activities: deploying multiple pod replicas based on the same pod configuration, and creating replacement pods when a pod or its machine fails. The Kubernetes API object that manages these behaviors is called a [replication controller](replication-controller.md). It defines a pod in terms of a template, that the system then instantiates as some number of pods (specified by the user). The replicated set of pods might constitute an entire application, a micro-service, or one layer in a multi-tier application. Once the pods are created, the system continually monitors their health and that of the machines they are running on; if a pod fails due to a software problem or machine failure, the replication controller automatically creates a new pod on a healthy machine, to maintain the set of pods at the desired replication level. Multiple pods from the same or different applications can share the same machine. Note that a replication controller is needed even in the case of a single non-replicated pod if the user wants it to be re-created when it or its machine fails. -Frequently it is useful to refer to a set of pods, for example to limit the set of pods on which a mutating operation should be performed, or that should be queried for status. As a general mechanism, users can attach to most Kubernetes API objects arbitrary key-value pairs called [labels](labels.md), and then use a set of label selectors (key-value queries over labels) to constrain the target of API operations. Each resource also has a map of string keys and values that can be used by external tooling to store and retrieve arbitrary metadata about this object, called [annotations](annotations.md). 
+Frequently it is useful to refer to a set of pods, for example to limit the set of pods on which a mutating operation should be performed, or that should be queried for status. As a general mechanism, users can attach to most Kubernetes API objects arbitrary key-value pairs called [labels](labels.md), and then use a set of label selectors (key-value queries over labels) to constrain the target of API operations. Each resource also has a map of string keys and values that can be used by external tooling to store and retrieve arbitrary metadata about this object, called [annotations](annotations.md). Kubernetes supports a unique [networking model](../admin/networking.md). Kubernetes encourages a flat address space and does not dynamically allocate ports, instead allowing users to select whichever ports are convenient for them. To achieve this, it allocates an IP address for each pod. diff --git a/docs/user-guide/persistent-volumes.md b/docs/user-guide/persistent-volumes.md index 0dfc11fe22b..92b58904ffe 100644 --- a/docs/user-guide/persistent-volumes.md +++ b/docs/user-guide/persistent-volumes.md @@ -65,7 +65,7 @@ Managing storage is a distinct problem from managing compute. The `PersistentVol A `PersistentVolume` (PV) is a piece of networked storage in the cluster that has been provisioned by an administrator. It is a resource in the cluster just like a node is a cluster resource. PVs are volume plugins like Volumes, but have a lifecycle independent of any individual pod that uses the PV. This API object captures the details of the implementation of the storage, be that NFS, iSCSI, or a cloud-provider-specific storage system. -A `PersistentVolumeClaim` (PVC) is a request for storage by a user. It is similar to a pod. Pods consume node resources and PVCs consume PV resources. Pods can request specific levels of resources (CPU and Memory). Claims can request specific size and access modes (e.g, can be mounted once read/write or many times read-only). 
+A `PersistentVolumeClaim` (PVC) is a request for storage by a user. It is similar to a pod. Pods consume node resources and PVCs consume PV resources. Pods can request specific levels of resources (CPU and Memory). Claims can request specific size and access modes (e.g, can be mounted once read/write or many times read-only). Please see the [detailed walkthrough with working examples](persistent-volumes/). @@ -75,7 +75,7 @@ Please see the [detailed walkthrough with working examples](persistent-volumes/) PVs are resources in the cluster. PVC are requests for those resources and also act as claim checks to the resource. The interaction between PVs and PVCs follows this lifecycle: ### Provisioning - + A cluster administrator creates some number of PVs. They carry the details of the real storage that is available for use by cluster users. They exist in the Kubernetes API and are available for consumption. ### Binding @@ -113,7 +113,7 @@ A `PersistentVolume's` reclaim policy tells the cluster what to do with the volu ## Persistent Volumes -Each PV contains a spec and status, which is the specification and status of the volume. +Each PV contains a spec and status, which is the specification and status of the volume. ```yaml diff --git a/docs/user-guide/persistent-volumes/README.md b/docs/user-guide/persistent-volumes/README.md index 572eea99496..22c48a17246 100644 --- a/docs/user-guide/persistent-volumes/README.md +++ b/docs/user-guide/persistent-volumes/README.md @@ -38,7 +38,7 @@ nginx serving content from your persistent volume. This guide assumes knowledge of Kubernetes fundamentals and that you have a cluster up and running. -See [Persistent Storage design document](../../design/persistent-storage.md) for more information. +See [Persistent Storage design document](../../design/persistent-storage.md) for more information. ## Provisioning @@ -51,7 +51,7 @@ for ease of development and testing. You'll create a local `HostPath` for this > IMPORTANT! 
For `HostPath` to work, you will need to run a single node cluster. Kubernetes does not support local storage on the host at this time. There is no guarantee your pod ends up on the correct node where the `HostPath` resides. - + ```console # This will be nginx's webroot @@ -70,7 +70,7 @@ pv0001 type=local 10737418240 RWO Available ## Requesting storage Users of Kubernetes request persistent storage for their pods. They don't know how the underlying cluster is provisioned. -They just know they can rely on their claim to storage and can manage its lifecycle independently from the many pods that may use it. +They just know they can rely on their claim to storage and can manage its lifecycle independently from the many pods that may use it. Claims must be created in the same namespace as the pods that use them. @@ -114,7 +114,7 @@ kubernetes component=apiserver,provider=kubernetes ## Next steps -You should be able to query your service endpoint and see what content nginx is serving. A "forbidden" error might mean you +You should be able to query your service endpoint and see what content nginx is serving. A "forbidden" error might mean you need to disable SELinux (setenforce 0). ```console diff --git a/docs/user-guide/pods.md b/docs/user-guide/pods.md index e9d8b680bf7..aa86404fb90 100644 --- a/docs/user-guide/pods.md +++ b/docs/user-guide/pods.md @@ -93,22 +93,22 @@ That approach would provide co-location, but would not provide most of the benef ## Durability of pods (or lack thereof) -Pods aren't intended to be treated as durable [pets](https://blog.engineyard.com/2014/pets-vs-cattle). They won't survive scheduling failures, node failures, or other evictions, such as due to lack of resources, or in the case of node maintenance. +Pods aren't intended to be treated as durable [pets](https://blog.engineyard.com/2014/pets-vs-cattle). 
They won't survive scheduling failures, node failures, or other evictions, such as due to lack of resources, or in the case of node maintenance. -In general, users shouldn't need to create pods directly. They should almost always use controllers (e.g., [replication controller](replication-controller.md)), even for singletons. Controllers provide self-healing with a cluster scope, as well as replication and rollout management. +In general, users shouldn't need to create pods directly. They should almost always use controllers (e.g., [replication controller](replication-controller.md)), even for singletons. Controllers provide self-healing with a cluster scope, as well as replication and rollout management. The use of collective APIs as the primary user-facing primitive is relatively common among cluster scheduling systems, including [Borg](https://research.google.com/pubs/pub43438.html), [Marathon](https://mesosphere.github.io/marathon/docs/rest-api.html), [Aurora](http://aurora.apache.org/documentation/latest/configuration-reference/#job-schema), and [Tupperware](http://www.slideshare.net/Docker/aravindnarayanan-facebook140613153626phpapp02-37588997). 
Pod is exposed as a primitive in order to facilitate: * scheduler and controller pluggability -* support for pod-level operations without the need to "proxy" them via controller APIs +* support for pod-level operations without the need to "proxy" them via controller APIs * decoupling of pod lifetime from controller lifetime, such as for bootstrapping * decoupling of controllers and services — the endpoint controller just watches pods * clean composition of Kubelet-level functionality with cluster-level functionality — Kubelet is effectively the "pod controller" * high-availability applications, which will expect pods to be replaced in advance of their termination and certainly in advance of deletion, such as in the case of planned evictions, image prefetching, or live pod migration [#3949](https://github.com/GoogleCloudPlatform/kubernetes/issues/3949) -The current best practice for pets is to create a replication controller with `replicas` equal to `1` and a corresponding service. If you find this cumbersome, please comment on [issue #260](https://github.com/GoogleCloudPlatform/kubernetes/issues/260). +The current best practice for pets is to create a replication controller with `replicas` equal to `1` and a corresponding service. If you find this cumbersome, please comment on [issue #260](https://github.com/GoogleCloudPlatform/kubernetes/issues/260). ## API Object diff --git a/docs/user-guide/prereqs.md b/docs/user-guide/prereqs.md index a5f40234bdd..d873f3e5a6b 100644 --- a/docs/user-guide/prereqs.md +++ b/docs/user-guide/prereqs.md @@ -33,7 +33,7 @@ Documentation for other releases can be found at # Kubernetes User Guide: Managing Applications: Prerequisites -To deploy and manage applications on Kubernetes, you’ll use the Kubernetes command-line tool, [kubectl](kubectl/kubectl.md). It lets you inspect your cluster resources, create, delete, and update components, and much more. You will use it to look at your new cluster and bring up example apps. 
+To deploy and manage applications on Kubernetes, you’ll use the Kubernetes command-line tool, [kubectl](kubectl/kubectl.md). It lets you inspect your cluster resources, create, delete, and update components, and much more. You will use it to look at your new cluster and bring up example apps. ## Installing kubectl diff --git a/docs/user-guide/production-pods.md b/docs/user-guide/production-pods.md index fc397e9cc65..2f88bda77d8 100644 --- a/docs/user-guide/production-pods.md +++ b/docs/user-guide/production-pods.md @@ -90,7 +90,7 @@ In addition to the local disk storage provided by `emptyDir`, Kubernetes support ## Distributing credentials -Many applications need credentials, such as passwords, OAuth tokens, and TLS keys, to authenticate with other applications, databases, and services. Storing these credentials in container images or environment variables is less than ideal, since the credentials can then be copied by anyone with access to the image, pod/container specification, host file system, or host Docker daemon. +Many applications need credentials, such as passwords, OAuth tokens, and TLS keys, to authenticate with other applications, databases, and services. Storing these credentials in container images or environment variables is less than ideal, since the credentials can then be copied by anyone with access to the image, pod/container specification, host file system, or host Docker daemon. Kubernetes provides a mechanism, called [*secrets*](secrets.md), that facilitates delivery of sensitive credentials to applications. A `Secret` is a simple resource containing a map of data. 
For instance, a simple secret with a username and password might look as follows: @@ -245,7 +245,7 @@ More examples can be found in our [blog article](http://blog.kubernetes.io/2015/ ## Resource management -Kubernetes’s scheduler will place applications only where they have adequate CPU and memory, but it can only do so if it knows how much [resources they require](compute-resources.md). The consequence of specifying too little CPU is that the containers could be starved of CPU if too many other containers were scheduled onto the same node. Similarly, containers could die unpredictably due to running out of memory if no memory were requested, which can be especially likely for large-memory applications. +Kubernetes’s scheduler will place applications only where they have adequate CPU and memory, but it can only do so if it knows how much [resources they require](compute-resources.md). The consequence of specifying too little CPU is that the containers could be starved of CPU if too many other containers were scheduled onto the same node. Similarly, containers could die unpredictably due to running out of memory if no memory were requested, which can be especially likely for large-memory applications. If no resource requirements are specified, a nominal amount of resources is assumed. (This default is applied via a [LimitRange](limitrange/) for the default [Namespace](namespaces.md). It can be viewed with `kubectl describe limitrange limits`.) You may explicitly specify the amount of resources required as follows: @@ -318,7 +318,7 @@ For more details (e.g., how to specify command-based probes), see the [example i Of course, nodes and applications may fail at any time, but many applications benefit from clean shutdown, such as to complete in-flight requests, when the termination of the application is deliberate. 
To support such cases, Kubernetes supports two kinds of notifications: Kubernetes will send SIGTERM to applications, which can be handled in order to effect graceful termination. SIGKILL is sent 10 seconds later if the application does not terminate sooner. -Kubernetes supports the (optional) specification of a [*pre-stop lifecycle hook*](container-environment.md#container-hooks), which will execute prior to sending SIGTERM. +Kubernetes supports the (optional) specification of a [*pre-stop lifecycle hook*](container-environment.md#container-hooks), which will execute prior to sending SIGTERM. The specification of a pre-stop hook is similar to that of probes, but without the timing-related parameters. For example: diff --git a/docs/user-guide/secrets.md b/docs/user-guide/secrets.md index 5cf9c004f44..c3124ea6451 100644 --- a/docs/user-guide/secrets.md +++ b/docs/user-guide/secrets.md @@ -36,7 +36,7 @@ Documentation for other releases can be found at Objects of type `secret` are intended to hold sensitive information, such as passwords, OAuth tokens, and ssh keys. Putting this information in a `secret` is safer and more flexible than putting it verbatim in a `pod` definition or in -a docker image. See [Secrets design document](../design/secrets.md) for more information. +a docker image. See [Secrets design document](../design/secrets.md) for more information. **Table of Contents** diff --git a/docs/user-guide/secrets/README.md b/docs/user-guide/secrets/README.md index a72b440babf..5c8a4d1b7ae 100644 --- a/docs/user-guide/secrets/README.md +++ b/docs/user-guide/secrets/README.md @@ -33,7 +33,7 @@ Documentation for other releases can be found at # Secrets example -Following this example, you will create a [secret](../secrets.md) and a [pod](../pods.md) that consumes that secret in a [volume](../volumes.md). See [Secrets design document](../../design/secrets.md) for more information. 
+Following this example, you will create a [secret](../secrets.md) and a [pod](../pods.md) that consumes that secret in a [volume](../volumes.md). See [Secrets design document](../../design/secrets.md) for more information. ## Step Zero: Prerequisites @@ -83,7 +83,7 @@ $ kubectl create -f docs/user-guide/secrets/secret-pod.yaml ``` This pod runs a binary that displays the content of one of the pieces of secret data in the secret -volume: +volume: ```console $ kubectl logs secret-test-pod diff --git a/docs/user-guide/service-accounts.md b/docs/user-guide/service-accounts.md index 76745b7cfec..13da2cd53e1 100644 --- a/docs/user-guide/service-accounts.md +++ b/docs/user-guide/service-accounts.md @@ -35,7 +35,7 @@ Documentation for other releases can be found at A service account provides an identity for processes that run in a Pod. -*This is a user introduction to Service Accounts. See also the +*This is a user introduction to Service Accounts. See also the [Cluster Admin Guide to Service Accounts](../admin/service-accounts-admin.md).* *Note: This document describes how service accounts behave in a cluster set up @@ -111,7 +111,7 @@ field of a pod to the name of the service account you wish to use. The service account has to exist at the time the pod is created, or it will be rejected. -You cannot update the service account of an already created pod. +You cannot update the service account of an already created pod. 
You can clean up the service account from this example like this: diff --git a/docs/user-guide/simple-yaml.md b/docs/user-guide/simple-yaml.md index 1c9795724e1..cf5f0c4a1fa 100644 --- a/docs/user-guide/simple-yaml.md +++ b/docs/user-guide/simple-yaml.md @@ -65,7 +65,7 @@ spec: ``` [Download example](pod.yaml) - + You can see your cluster's pods: @@ -117,7 +117,7 @@ spec: ``` [Download example](replication.yaml) - + To delete the replication controller (and the pods it created): diff --git a/docs/user-guide/ui.md b/docs/user-guide/ui.md index 158be00469a..be671dc9861 100644 --- a/docs/user-guide/ui.md +++ b/docs/user-guide/ui.md @@ -33,7 +33,7 @@ Documentation for other releases can be found at # Kubernetes User Interface -Kubernetes has a web-based user interface that displays the current cluster state graphically. +Kubernetes has a web-based user interface that displays the current cluster state graphically. ## Accessing the UI @@ -50,34 +50,34 @@ Normally, this should be taken care of automatically by the [`kube-addons.sh`](h ## Using the UI -The Kubernetes UI can be used to introspect your current cluster, such as checking how resources are used, or looking at error messages. You cannot, however, use the UI to modify your cluster. +The Kubernetes UI can be used to introspect your current cluster, such as checking how resources are used, or looking at error messages. You cannot, however, use the UI to modify your cluster. -### Node Resource Usage +### Node Resource Usage -After accessing Kubernetes UI, you'll see a homepage dynamically listing out all nodes in your current cluster, with related information including internal IP addresses, CPU usage, memory usage, and file systems usage. +After accessing Kubernetes UI, you'll see a homepage dynamically listing out all nodes in your current cluster, with related information including internal IP addresses, CPU usage, memory usage, and file systems usage. 
![Kubernetes UI home page](k8s-ui-overview.png) ### Dashboard Views -Click on the "Views" button in the top-right of the page to see other views available, which include: Explore, Pods, Nodes, Replication Controllers, Services, and Events. +Click on the "Views" button in the top-right of the page to see other views available, which include: Explore, Pods, Nodes, Replication Controllers, Services, and Events. -#### Explore View -The "Explore" view allows your to see the pods, replication controllers, and services in current cluster easily. +The "Explore" view allows you to see the pods, replication controllers, and services in current cluster easily. ![Kubernetes UI Explore View](k8s-ui-explore.png) The "Group by" dropdown list allows you to group these resources by a number of factors, such as type, name, host, etc. ![Kubernetes UI Explore View - Group by](k8s-ui-explore-groupby.png) You can also create filters by clicking on the down triangle of any listed resource instances and choose which filters you want to add. ![Kubernetes UI Explore View - Filter](k8s-ui-explore-filter.png) -To see more details of each resource instance, simply click on it. +To see more details of each resource instance, simply click on it. ![Kubernetes UI - Pod](k8s-ui-explore-poddetail.png) ### Other Views -Other views (Pods, Nodes, Replication Controllers, Services, and Events) simply list information about each type of resource. You can also click on any instance for more details. +Other views (Pods, Nodes, Replication Controllers, Services, and Events) simply list information about each type of resource. You can also click on any instance for more details. ![Kubernetes UI - Nodes](k8s-ui-nodes.png) -## More Information +## More Information For more information, see the [Kubernetes UI development document](http://releases.k8s.io/HEAD/www/README.md) in the www directory. 
diff --git a/docs/user-guide/update-demo/README.md b/docs/user-guide/update-demo/README.md index 9265a6df7eb..8baacbfa393 100644 --- a/docs/user-guide/update-demo/README.md +++ b/docs/user-guide/update-demo/README.md @@ -49,7 +49,7 @@ limitations under the License. # Rolling update example -This example demonstrates the usage of Kubernetes to perform a [rolling update](../kubectl/kubectl_rolling-update.md) on a running group of [pods](../../../docs/user-guide/pods.md). See [here](../managing-deployments.md#updating-your-application-without-a-service-outage) to understand why you need a rolling update. Also check [rolling update design document](../../design/simple-rolling-update.md) for more information. +This example demonstrates the usage of Kubernetes to perform a [rolling update](../kubectl/kubectl_rolling-update.md) on a running group of [pods](../../../docs/user-guide/pods.md). See [here](../managing-deployments.md#updating-your-application-without-a-service-outage) to understand why you need a rolling update. Also check [rolling update design document](../../design/simple-rolling-update.md) for more information. ### Step Zero: Prerequisites diff --git a/docs/user-guide/walkthrough/README.md b/docs/user-guide/walkthrough/README.md index 62f298f4976..624c87bb88c 100644 --- a/docs/user-guide/walkthrough/README.md +++ b/docs/user-guide/walkthrough/README.md @@ -166,7 +166,7 @@ spec: ``` [Download example](pod-redis.yaml) - + Notes: - The volume mount name is a reference to a specific empty dir volume. 
diff --git a/docs/user-guide/walkthrough/k8s201.md b/docs/user-guide/walkthrough/k8s201.md index 297a4f69a70..fa416d513ab 100644 --- a/docs/user-guide/walkthrough/k8s201.md +++ b/docs/user-guide/walkthrough/k8s201.md @@ -87,7 +87,7 @@ spec: ``` [Download example](pod-nginx-with-label.yaml) - + Create the labeled pod ([pod-nginx-with-label.yaml](pod-nginx-with-label.yaml)): @@ -143,7 +143,7 @@ spec: ``` [Download example](replication-controller.yaml) - + #### Replication Controller Management @@ -196,7 +196,7 @@ spec: ``` [Download example](service.yaml) - + #### Service Management @@ -312,7 +312,7 @@ spec: ``` [Download example](pod-with-http-healthcheck.yaml) - + For more information about health checking, see [Container Probes](../pod-states.md#container-probes). diff --git a/docs/user-guide/working-with-resources.md b/docs/user-guide/working-with-resources.md index 20a6a630ce8..437a7654b07 100644 --- a/docs/user-guide/working-with-resources.md +++ b/docs/user-guide/working-with-resources.md @@ -36,7 +36,7 @@ Documentation for other releases can be found at *This document is aimed at users who have worked through some of the examples, and who want to learn more about using kubectl to manage resources such as pods and services. Users who want to access the REST API directly, -and developers who want to extend the Kubernetes API should +and developers who want to extend the Kubernetes API should refer to the [api conventions](../devel/api-conventions.md) and the [api document](../api.md).* @@ -68,7 +68,7 @@ $ wc -l /tmp/original.yaml /tmp/current.yaml 60 total ``` -The resource we posted had only 9 lines, but the one we got back had 51 lines. +The resource we posted had only 9 lines, but the one we got back had 51 lines. If you `diff -u /tmp/original.yaml /tmp/current.yaml`, you can see the fields added to the pod. The system adds fields in several ways: - Some fields are added synchronously with creation of the resource and some are set asynchronously. 
diff --git a/docs/whatisk8s.md b/docs/whatisk8s.md index 524684250b2..e7c00bba21a 100644 --- a/docs/whatisk8s.md +++ b/docs/whatisk8s.md @@ -31,11 +31,11 @@ Documentation for other releases can be found at -# What is Kubernetes? +# What is Kubernetes? -Kubernetes is an open-source platform for automating deployment, scaling, and operations of application containers across clusters of hosts. +Kubernetes is an open-source platform for automating deployment, scaling, and operations of application containers across clusters of hosts. -With Kubernetes, you are able to quickly and efficiently respond to customer demand: +With Kubernetes, you are able to quickly and efficiently respond to customer demand: - Scale your applications on the fly. - Seamlessly roll out new features. @@ -54,27 +54,27 @@ The Kubernetes project was started by Google in 2014. Kubernetes builds upon a [
-Looking for reasons why you should be using [containers](http://aucouranton.com/2014/06/13/linux-containers-parallels-lxc-openvz-docker-and-more/)? +Looking for reasons why you should be using [containers](http://aucouranton.com/2014/06/13/linux-containers-parallels-lxc-openvz-docker-and-more/)? Here are some key points: -* **Application-centric management**: +* **Application-centric management**: Raises the level of abstraction from running an OS on virtual hardware to running an application on an OS using logical resources. This provides the simplicity of PaaS with the flexibility of IaaS and enables you to run much more than just [12-factor apps](http://12factor.net/). -* **Dev and Ops separation of concerns**: +* **Dev and Ops separation of concerns**: Provides separatation of build and deployment; therefore, decoupling applications from infrastructure. -* **Agile application creation and deployment**: +* **Agile application creation and deployment**: Increased ease and efficiency of container image creation compared to VM image use. -* **Continuous development, integration, and deployment**: +* **Continuous development, integration, and deployment**: Provides for reliable and frequent container image build and deployment with quick and easy rollbacks (due to image immutability). -* **Loosely coupled, distributed, elastic, liberated [micro-services](http://martinfowler.com/articles/microservices.html)**: +* **Loosely coupled, distributed, elastic, liberated [micro-services](http://martinfowler.com/articles/microservices.html)**: Applications are broken into smaller, independent pieces and can be deployed and managed dynamically -- not a fat monolithic stack running on one big single-purpose machine. -* **Environmental consistency across development, testing, and production**: +* **Environmental consistency across development, testing, and production**: Runs the same on a laptop as it does in the cloud. 
-* **Cloud and OS distribution portability**: +* **Cloud and OS distribution portability**: Runs on Ubuntu, RHEL, on-prem, or Google Container Engine, which makes sense for all environments: build, test, and production. -* **Resource isolation**: +* **Resource isolation**: Predictable application performance. -* **Resource utilization**: +* **Resource utilization**: High efficiency and density. diff --git a/examples/cassandra/README.md b/examples/cassandra/README.md index aa3d73f8df9..6183f0937c7 100644 --- a/examples/cassandra/README.md +++ b/examples/cassandra/README.md @@ -101,7 +101,7 @@ spec: ``` [Download example](cassandra-controller.yaml) - + There are a few things to note in this description. First is that we are running the ```kubernetes/cassandra``` image. This is a standard Cassandra installation on top of Debian. However it also adds a custom [```SeedProvider```](https://svn.apache.org/repos/asf/cassandra/trunk/src/java/org/apache/cassandra/locator/SeedProvider.java) to Cassandra. In Cassandra, a ```SeedProvider``` bootstraps the gossip protocol that Cassandra uses to find other nodes. The ```KubernetesSeedProvider``` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later) @@ -132,7 +132,7 @@ spec: ``` [Download example](cassandra-service.yaml) - + The important thing to note here is the ```selector```. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is ```name=cassandra```. If you look back at the Pod specification above, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service. 
@@ -242,7 +242,7 @@ spec: ``` [Download example](cassandra-controller.yaml) - + Most of this replication controller definition is identical to the Cassandra pod definition above, it simply gives the resplication controller a recipe to use when it creates new Cassandra pods. The other differentiating parts are the ```selector``` attribute which contains the controller's selector query, and the ```replicas``` attribute which specifies the desired number of replicas, in this case 1. diff --git a/examples/celery-rabbitmq/README.md b/examples/celery-rabbitmq/README.md index 2c36469df0f..04cc646a53f 100644 --- a/examples/celery-rabbitmq/README.md +++ b/examples/celery-rabbitmq/README.md @@ -35,7 +35,7 @@ Documentation for other releases can be found at ## Introduction -Celery is an asynchronous task queue based on distributed message passing. It is used to create execution units (i.e. tasks) which are then executed on one or more worker nodes, either synchronously or asynchronously. +Celery is an asynchronous task queue based on distributed message passing. It is used to create execution units (i.e. tasks) which are then executed on one or more worker nodes, either synchronously or asynchronously. Celery is implemented in Python. @@ -82,7 +82,7 @@ spec: ``` [Download example](rabbitmq-service.yaml) - + To start the service, run: @@ -127,7 +127,7 @@ spec: ``` [Download example](rabbitmq-controller.yaml) - + Running `$ kubectl create -f examples/celery-rabbitmq/rabbitmq-controller.yaml` brings up a replication controller that ensures one pod exists which is running a RabbitMQ instance. @@ -168,7 +168,7 @@ spec: ``` [Download example](celery-controller.yaml) - + There are several things to point out here... @@ -239,7 +239,7 @@ spec: ``` [Download example](flower-service.yaml) - + It is marked as external (LoadBalanced). However on many platforms you will have to add an explicit firewall rule to open port 5555. 
On GCE this can be done with: @@ -249,7 +249,7 @@ On GCE this can be done with: ``` Please remember to delete the rule after you are done with the example (on GCE: `$ gcloud compute firewall-rules delete kubernetes-minion-5555`) - + To bring up the pods, run this command `$ kubectl create -f examples/celery-rabbitmq/flower-controller.yaml`. This controller is defined as so: @@ -280,7 +280,7 @@ spec: ``` [Download example](flower-controller.yaml) - + This will bring up a new pod with Flower installed and port 5555 (Flower's default port) exposed through the service endpoint. This image uses the following command to start Flower: diff --git a/examples/elasticsearch/README.md b/examples/elasticsearch/README.md index adbab558b93..b9c9ffd04ff 100644 --- a/examples/elasticsearch/README.md +++ b/examples/elasticsearch/README.md @@ -34,7 +34,7 @@ Documentation for other releases can be found at # Elasticsearch for Kubernetes This directory contains the source for a Docker image that creates an instance -of [Elasticsearch](https://www.elastic.co/products/elasticsearch) 1.5.2 which can +of [Elasticsearch](https://www.elastic.co/products/elasticsearch) 1.5.2 which can be used to automatically form clusters when used with [replication controllers](../../docs/user-guide/replication-controller.md). This will not work with the library Elasticsearch image because multicast discovery will not find the other pod IPs needed to form a cluster. This @@ -93,7 +93,7 @@ spec: ``` [Download example](music-rc.yaml) - + The `CLUSTER_NAME` variable gives a name to the cluster and allows multiple separate clusters to exist in the same namespace. @@ -102,10 +102,10 @@ nodes that should participate in this cluster. For our example we specify `name= match all pods that have the label `name` set to the value `music-db`. 
The `NAMESPACE` variable identifies the namespace to be used to search for Elasticsearch pods and this should be the same as the namespace specified -for the replication controller (in this case `mytunes`). +for the replication controller (in this case `mytunes`). Before creating pods with the replication controller a secret containing the bearer authentication token -should be set up. +should be set up. @@ -120,7 +120,7 @@ data: ``` [Download example](apiserver-secret.yaml) - + Replace `NAMESPACE` with the actual namespace to be used and `TOKEN` with the basic64 encoded versions of the bearer token reported by `kubectl config view` e.g. @@ -163,7 +163,7 @@ replicationcontrollers/music-db ``` It's also useful to have a [service](../../docs/user-guide/services.md) with an load balancer for accessing the Elasticsearch -cluster. +cluster. @@ -186,7 +186,7 @@ spec: ``` [Download example](music-service.yaml) - + Let's create the service with an external load balancer: diff --git a/examples/glusterfs/README.md b/examples/glusterfs/README.md index f1cc96cfa46..b4ae9ddaf9a 100644 --- a/examples/glusterfs/README.md +++ b/examples/glusterfs/README.md @@ -59,7 +59,7 @@ Here is a snippet of [glusterfs-endpoints.json](glusterfs-endpoints.json), ``` -The "IP" field should be filled with the address of a node in the Glusterfs server cluster. In this example, it is fine to give any valid value (from 1 to 65535) to the "port" field. +The "IP" field should be filled with the address of a node in the Glusterfs server cluster. In this example, it is fine to give any valid value (from 1 to 65535) to the "port" field. Create the endpoints, @@ -90,11 +90,11 @@ The following *volume* spec in [glusterfs-pod.json](glusterfs-pod.json) illustra } ``` -The parameters are explained as the followings. +The parameters are explained as the followings. -- **endpoints** is endpoints name that represents a Gluster cluster configuration. 
*kubelet* is optimized to avoid mount storm, it will randomly pick one from the endpoints to mount. If this host is unresponsive, the next Gluster host in the endpoints is automatically selected. -- **path** is the Glusterfs volume name. -- **readOnly** is the boolean that sets the mountpoint readOnly or readWrite. +- **endpoints** is the endpoints name that represents a Gluster cluster configuration. *kubelet* is optimized to avoid mount storm, it will randomly pick one from the endpoints to mount. If this host is unresponsive, the next Gluster host in the endpoints is automatically selected. +- **path** is the Glusterfs volume name. +- **readOnly** is the boolean that sets the mountpoint readOnly or readWrite. Create a pod that has a container using Glusterfs volume, diff --git a/examples/guestbook-go/README.md b/examples/guestbook-go/README.md index f603baa57cf..d9a6cac1b08 100644 --- a/examples/guestbook-go/README.md +++ b/examples/guestbook-go/README.md @@ -37,16 +37,16 @@ This example shows how to build a simple multi-tier web application using Kubern If you are running a cluster in Google Container Engine (GKE), instead see the [Guestbook Example for Google Container Engine](https://cloud.google.com/container-engine/docs/tutorials/guestbook). 
-##### Table of Contents +##### Table of Contents - * [Step Zero: Prerequisites](#step-zero) - * [Step One: Create the Redis master pod](#step-one) - * [Step Two: Create the Redis master service](#step-two) - * [Step Three: Create the Redis slave pods](#step-three) - * [Step Four: Create the Redis slave service](#step-four) - * [Step Five: Create the guestbook pods](#step-five) - * [Step Six: Create the guestbook service](#step-six) - * [Step Seven: View the guestbook](#step-seven) + * [Step Zero: Prerequisites](#step-zero) + * [Step One: Create the Redis master pod](#step-one) + * [Step Two: Create the Redis master service](#step-two) + * [Step Three: Create the Redis slave pods](#step-three) + * [Step Four: Create the Redis slave service](#step-four) + * [Step Five: Create the guestbook pods](#step-five) + * [Step Six: Create the guestbook service](#step-six) + * [Step Seven: View the guestbook](#step-seven) * [Step Eight: Cleanup](#step-eight) ### Step Zero: Prerequisites @@ -64,7 +64,7 @@ Use the `examples/guestbook-go/redis-master-controller.json` file to create a [r ```console $ kubectl create -f examples/guestbook-go/redis-master-controller.json replicationcontrollers/redis-master - ``` + ``` 2. To verify that the redis-master-controller is up, list all the replication controllers in the cluster with the `kubectl get rc` command: @@ -102,7 +102,7 @@ Use the `examples/guestbook-go/redis-master-controller.json` file to create a [r ### Step Two: Create the Redis master service -A Kubernetes '[service](../../docs/user-guide/services.md)' is a named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via environment variables or DNS. +A Kubernetes '[service](../../docs/user-guide/services.md)' is a named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via environment variables or DNS. 
Services find the containers to load balance based on pod labels. The pod that you created in Step One has the label `app=redis` and `role=master`. The selector field of the service determines which pods will receive the traffic sent to the service. @@ -179,7 +179,7 @@ Just like the master, we want to have a service to proxy connections to the read services/redis-slave ``` -2. To verify that the redis-slave service is up, list all the services in the cluster with the `kubectl get services` command: +2. To verify that the redis-slave service is up, list all the services in the cluster with the `kubectl get services` command: ```console $ kubectl get services @@ -189,7 +189,7 @@ Just like the master, we want to have a service to proxy connections to the read ... ``` - Result: The service is created with labels `app=redis` and `role=slave` to identify that the pods are running the Redis slaves. + Result: The service is created with labels `app=redis` and `role=slave` to identify that the pods are running the Redis slaves. Tip: It is helpful to set labels on your services themselves--as we've done here--to make it easy to locate them later. @@ -264,7 +264,7 @@ You can now play with the guestbook that you just created by opening it in a bro If you are running Kubernetes locally, to view the guestbook, navigate to `http://localhost:3000` in your browser. * **Remote Host:** - 1. To view the guestbook on a remote host, locate the external IP of the load balancer in the **IP** column of the `kubectl get services` output. In our example, the internal IP address is `10.0.217.218` and the external IP address is `146.148.81.8` (*Note: you might need to scroll to see the IP column*). + 1. To view the guestbook on a remote host, locate the external IP of the load balancer in the **IP** column of the `kubectl get services` output. 
In our example, the internal IP address is `10.0.217.218` and the external IP address is `146.148.81.8` (*Note: you might need to scroll to see the IP column*). 2. Append port `3000` to the IP address (for example `http://146.148.81.8:3000`), and then navigate to that address in your browser. diff --git a/examples/guestbook/README.md b/examples/guestbook/README.md index 0bceeed02fc..a742e588e26 100644 --- a/examples/guestbook/README.md +++ b/examples/guestbook/README.md @@ -101,7 +101,7 @@ spec: ``` [Download example](redis-master-controller.yaml) - + Change to the `/examples/guestbook` directory if you're not already there. Create the redis master pod in your Kubernetes cluster by running: @@ -222,7 +222,7 @@ spec: ``` [Download example](redis-master-service.yaml) - + Create the service by running: @@ -296,7 +296,7 @@ spec: ``` [Download example](redis-slave-controller.yaml) - + and create the replication controller by running: @@ -347,7 +347,7 @@ spec: ``` [Download example](redis-slave-service.yaml) - + This time the selector for the service is `name=redis-slave`, because that identifies the pods running redis slaves. It may also be helpful to set labels on your service itself as we've done here to make it easy to locate them with the `kubectl get services -l "label=value"` command. @@ -398,7 +398,7 @@ spec: ``` [Download example](frontend-controller.yaml) - + Using this file, you can turn up your frontend with: @@ -501,7 +501,7 @@ spec: ``` [Download example](frontend-service.yaml) - + #### Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific) diff --git a/examples/hazelcast/README.md b/examples/hazelcast/README.md index 2e2a2f43e9d..ddb1d3ad61a 100644 --- a/examples/hazelcast/README.md +++ b/examples/hazelcast/README.md @@ -56,7 +56,7 @@ Source is freely available at: ### Simple Single Pod Hazelcast Node -In Kubernetes, the atomic unit of an application is a [_Pod_](../../docs/user-guide/pods.md). 
A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. +In Kubernetes, the atomic unit of an application is a [_Pod_](../../docs/user-guide/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. In this case, we shall not run a single Hazelcast pod, because the discovery mechanism now relies on a service definition. @@ -84,7 +84,7 @@ spec: ``` [Download example](hazelcast-service.yaml) - + The important thing to note here is the `selector`. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is `name: hazelcast`. If you look at the Replication Controller specification below, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service. @@ -139,7 +139,7 @@ spec: ``` [Download example](hazelcast-controller.yaml) - + There are a few things to note in this description. First is that we are running the `quay.io/pires/hazelcast-kubernetes` image, tag `0.5`. This is a `busybox` installation with JRE 8 Update 45. However it also adds a custom [`application`](https://github.com/pires/hazelcast-kubernetes-bootstrapper) that finds any Hazelcast nodes in the cluster and bootstraps an Hazelcast instance accordingle. The `HazelcastDiscoveryController` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later). 
diff --git a/examples/iscsi/README.md b/examples/iscsi/README.md index 7d63edba7be..357dff02090 100644 --- a/examples/iscsi/README.md +++ b/examples/iscsi/README.md @@ -38,8 +38,8 @@ Documentation for other releases can be found at If you use Fedora 21 on Kubernetes node, then first install iSCSI initiator on the node: # yum -y install iscsi-initiator-utils - - + + then edit */etc/iscsi/initiatorname.iscsi* and */etc/iscsi/iscsid.conf* to match your iSCSI target configuration. I mostly followed these [instructions](http://www.server-world.info/en/note?os=Fedora_21&p=iscsi) to setup iSCSI target. and these [instructions](http://www.server-world.info/en/note?os=Fedora_21&p=iscsi&f=2) to setup iSCSI initiator. @@ -50,7 +50,7 @@ GCE does not provide preconfigured Fedora 21 image, so I set up the iSCSI target ## Step 2. Creating the pod with iSCSI persistent storage -Once you have installed iSCSI initiator and new Kubernetes, you can create a pod based on my example *iscsi.json*. In the pod JSON, you need to provide *targetPortal* (the iSCSI target's **IP** address and *port* if not the default port 3260), target's *iqn*, *lun*, and the type of the filesystem that has been created on the lun, and *readOnly* boolean. +Once you have installed iSCSI initiator and new Kubernetes, you can create a pod based on my example *iscsi.json*. In the pod JSON, you need to provide *targetPortal* (the iSCSI target's **IP** address and *port* if not the default port 3260), target's *iqn*, *lun*, and the type of the filesystem that has been created on the lun, and *readOnly* boolean. 
**Note:** If you have followed the instructions in the links above you may have partitioned the device, the iSCSI volume plugin does not diff --git a/examples/k8petstore/README.md b/examples/k8petstore/README.md index 72754a9daaa..c2b702d60e8 100644 --- a/examples/k8petstore/README.md +++ b/examples/k8petstore/README.md @@ -43,9 +43,9 @@ This is a follow up to the [Guestbook Example](../guestbook/README.md)'s [Go imp This application will run a web server which returns REDIS records for a petstore application. It is meant to simulate and test high load on Kubernetes or any other docker based system. -If you are new to Kubernetes, and you haven't run guestbook yet, +If you are new to Kubernetes, and you haven't run guestbook yet, -you might want to stop here and go back and run guestbook app first. +you might want to stop here and go back and run guestbook app first. The guestbook tutorial will teach you a lot about the basics of Kubernetes, and we've tried not to be redundant here. @@ -61,15 +61,15 @@ This project depends on three docker images which you can build for yourself and in your dockerhub "dockerhub-name". Since these images are already published under other parties like redis, jayunit100, and so on, -so you don't need to build the images to run the app. +so you don't need to build the images to run the app. If you do want to build the images, you will need to build and push the images in this repository. -For a list of those images, see the `build-and-push` shell script - it builds and pushes all the images for you, just +For a list of those images, see the `build-and-push` shell script - it builds and pushes all the images for you, just modify the dockerhub user name in it accordingly. -## Get started with the WEBAPP +## Get started with the WEBAPP The web app is written in Go, and borrowed from the original Guestbook example by brendan burns. 
@@ -87,13 +87,13 @@ If that is all working, you can finally run `k8petstore.sh` in any Kubernetes cl The web front end provides users an interface for watching pet store transactions in real time as they occur. -To generate those transactions, you can use the bigpetstore data generator. Alternatively, you could just write a +To generate those transactions, you can use the bigpetstore data generator. Alternatively, you could just write a shell script which calls "curl localhost:3000/k8petstore/rpush/blahblahblah" over and over again :). But thats not nearly -as fun, and its not a good test of a real world scenario where payloads scale and have lots of information content. +as fun, and it's not a good test of a real world scenario where payloads scale and have lots of information content. -Similarly, you can locally run and test the data generator code, which is Java based, you can pull it down directly from +Similarly, you can locally run and test the data generator code, which is Java based, you can pull it down directly from apache bigtop. @@ -101,13 +101,13 @@ Directions for that are here : https://github.com/apache/bigtop/tree/master/bigt You will likely want to checkout the branch 2b2392bf135e9f1256bd0b930f05ae5aef8bbdcb, which is the exact commit which the current k8petstore was tested on. -## Now what? +## Now what? Once you have done the above 3 steps, you have a working, from source, locally runnable version of the k8petstore app, now, we can try to run it in Kubernetes. ## Hacking, testing, benchmarking -Once the app is running, you can access the app in your browser, you should see a chart +Once the app is running, you can access the app in your browser, you should see a chart and the k8petstore title page, as well as an indicator of transaction throughput, and so on. @@ -117,7 +117,7 @@ You can modify the HTML pages, add new REST paths to the Go app, and so on. Now that you are done hacking around on the app, you can run it in Kubernetes. 
To do this, you will want to rebuild the docker images (most likely, for the Go web-server app), but less likely for the other images which you are less likely to need to change. Then you will push those images to dockerhub. -Now, how to run the entire application in Kubernetes? +Now, how to run the entire application in Kubernetes? To simplify running this application, we have a single file, k8petstore.sh, which writes out json files on to disk. This allows us to have dynamic parameters, without needing to worry about managing multiple json files. @@ -127,13 +127,13 @@ So, to run this app in Kubernetes, simply run [The all in one k8petstore.sh shel Note that at the top of the script there are a few self explanatory parameters to set, among which the Public IPs parameter is where you can checkout the web ui (at $PUBLIC_IP:3000), which will show a plot and read outs of transaction throughput. -In the mean time, because the public IP will be deprecated in Kubernetes v1, we provide other 2 scripts k8petstore-loadbalancer.sh and k8petstore-nodeport.sh. As the names suggest, they rely on LoadBalancer and NodePort respectively. More details can be found [here](../../docs/user-guide/services.md#external-services). +In the meantime, because the public IP will be deprecated in Kubernetes v1, we provide two other scripts, k8petstore-loadbalancer.sh and k8petstore-nodeport.sh. As the names suggest, they rely on LoadBalancer and NodePort respectively. More details can be found [here](../../docs/user-guide/services.md#external-services). ## Future -In the future, we plan to add cassandra support. Redis is a fabulous in memory data store, but it is not meant for truly available and resilient storage. +In the future, we plan to add cassandra support. Redis is a fabulous in memory data store, but it is not meant for truly available and resilient storage. -Thus we plan to add another tier of queueing, which empties the REDIS transactions into a cassandra store which persists. 
+Thus we plan to add another tier of queueing, which empties the REDIS transactions into a cassandra store which persists. ## Questions diff --git a/examples/k8petstore/bps-data-generator/README.md b/examples/k8petstore/bps-data-generator/README.md index 6e47189d255..e7cb87d2ac8 100644 --- a/examples/k8petstore/bps-data-generator/README.md +++ b/examples/k8petstore/bps-data-generator/README.md @@ -35,7 +35,7 @@ Documentation for other releases can be found at This container is maintained as part of the apache bigtop project. -To create it, simply +To create it, simply `git clone https://github.com/apache/bigtop` @@ -43,7 +43,7 @@ and checkout the last exact version (will be updated periodically). `git checkout -b aNewBranch 2b2392bf135e9f1256bd0b930f05ae5aef8bbdcb` -then, cd to bigtop-bigpetstore/bigpetstore-transaction-queue, and run the docker file, i.e. +then, cd to bigtop-bigpetstore/bigpetstore-transaction-queue, and run the docker file, i.e. `Docker build -t -i jayunit100/bps-transaction-queue`. diff --git a/examples/mysql-wordpress-pd/README.md b/examples/mysql-wordpress-pd/README.md index 46d6f29b29a..968796cd846 100644 --- a/examples/mysql-wordpress-pd/README.md +++ b/examples/mysql-wordpress-pd/README.md @@ -132,7 +132,7 @@ spec: ``` [Download example](mysql.yaml) - + Note that we've defined a volume mount for `/var/lib/mysql`, and specified a volume that uses the persistent disk (`mysql-disk`) that you created. Once you've edited the file to set your database password, create the pod as follows, where `` is the path to your Kubernetes installation: @@ -187,7 +187,7 @@ spec: ``` [Download example](mysql-service.yaml) - + Start the service like this: @@ -242,7 +242,7 @@ spec: ``` [Download example](wordpress.yaml) - + Create the pod: @@ -283,7 +283,7 @@ spec: ``` [Download example](wordpress-service.yaml) - + Note the `type: LoadBalancer` setting. This will set up the wordpress service behind an external IP. Note also that we've set the service port to 80. 
We'll return to that shortly. diff --git a/examples/phabricator/README.md b/examples/phabricator/README.md index ffab31d04df..01a458e6dd8 100644 --- a/examples/phabricator/README.md +++ b/examples/phabricator/README.md @@ -99,7 +99,7 @@ To start Phabricator server use the file [`examples/phabricator/phabricator-cont ``` [Download example](phabricator-controller.json) - + Create the phabricator pod in your Kubernetes cluster by running: @@ -189,7 +189,7 @@ To automate this process and make sure that a proper host is authorized even if ``` [Download example](authenticator-controller.json) - + To create the pod run: @@ -238,7 +238,7 @@ Use the file [`examples/phabricator/phabricator-service.json`](phabricator-servi ``` [Download example](phabricator-service.json) - + To create the service run: diff --git a/examples/rbd/README.md b/examples/rbd/README.md index 17cbe6fdff1..0c5ff15e7ff 100644 --- a/examples/rbd/README.md +++ b/examples/rbd/README.md @@ -36,16 +36,16 @@ Documentation for other releases can be found at Install Ceph on the Kubernetes host. For example, on Fedora 21 # yum -y install ceph - + If you don't have a Ceph cluster, you can set up a [containerized Ceph cluster](https://github.com/rootfs/docker-ceph) - + Then get the keyring from the Ceph cluster and copy it to */etc/ceph/keyring*. Once you have installed Ceph and new Kubernetes, you can create a pod based on my examples [rbd.json](rbd.json) [rbd-with-secret.json](rbd-with-secret.json). In the pod JSON, you need to provide the following information. - *monitors*: Ceph monitors. - *pool*: The name of the RADOS pool, if not provided, default *rbd* pool is used. -- *image*: The image name that rbd has created. +- *image*: The image name that rbd has created. - *user*: The RADOS user name. If not provided, default *admin* is used. - *keyring*: The path to the keyring file. If not provided, default */etc/ceph/keyring* is used. - *secretName*: The name of the authentication secrets. 
If provided, *secretName* overrides *keyring*. Note, see below about how to create a secret. @@ -58,7 +58,7 @@ If Ceph authentication secret is provided, the secret should be first be base64 ```console # kubectl create -f examples/rbd/secret/ceph-secret.yaml -``` +``` # Get started diff --git a/examples/rethinkdb/README.md b/examples/rethinkdb/README.md index c70f4ffb31d..2efec300bec 100644 --- a/examples/rethinkdb/README.md +++ b/examples/rethinkdb/README.md @@ -130,7 +130,7 @@ We request for an external load balancer in the [admin-service.yaml](admin-servi type: LoadBalancer ``` -The external load balancer allows us to access the service from outside via an external IP, which is 104.197.19.120 in this case. +The external load balancer allows us to access the service from outside via an external IP, which is 104.197.19.120 in this case. Note that you may need to create a firewall rule to allow the traffic, assuming you are using Google Compute Engine: diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index 9558edf2255..9e41c0a17cd 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -115,6 +115,9 @@ GCE_PARALLEL_FLAKY_TESTS=( "PD" "ServiceAccounts" "Service\sendpoints\slatency" + "Service.*basic\sendpoint" + "Probing.*should\snot.*initial\sdelay" + "Service.*multiport\sendpoint" "Services.*change\sthe\stype" "Services.*functioning\sexternal\sload\sbalancer" "Services.*identically\snamed" diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index 5ece3132474..f0fe3bd1be8 100644 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -125,7 +125,6 @@ readonly KUBE_STATIC_LIBRARIES=( kube-apiserver kube-controller-manager kube-scheduler - hyperkube ) kube::golang::is_statically_linked_library() { diff --git a/hack/lib/version.sh b/hack/lib/version.sh index 1e02d7eaaf9..36c012db9b5 100644 --- a/hack/lib/version.sh +++ b/hack/lib/version.sh @@ -50,6 +50,13 @@ kube::version::get_version_vars() { # Use git describe to find the version based on annotated tags. 
if [[ -n ${KUBE_GIT_VERSION-} ]] || KUBE_GIT_VERSION=$("${git[@]}" describe --tags --abbrev=14 "${KUBE_GIT_COMMIT}^{commit}" 2>/dev/null); then + # This translates the "git describe" to an actual semver.org + # compatible semantic version that looks something like this: + # v1.1.0-alpha.0.6+84c76d1142ea4d + # + # TODO: We continue calling this "git version" because so many + # downstream consumers are expecting it there. + KUBE_GIT_VERSION=$(echo "${KUBE_GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{14\}\)$/.\1\+\2/") if [[ "${KUBE_GIT_TREE_STATE}" == "dirty" ]]; then # git describe --dirty only considers changes to existing files, but # that is problematic since new untracked .go files affect the build, @@ -57,6 +64,7 @@ kube::version::get_version_vars() { KUBE_GIT_VERSION+="-dirty" fi + # Try to match the "git describe" output to a regex to try to extract # the "major" and "minor" versions and whether this is the exact tagged # version or whether the tree is between two tagged versions. diff --git a/hack/test-cmd.sh b/hack/test-cmd.sh index 189cdab8f88..19bff6c19ed 100755 --- a/hack/test-cmd.sh +++ b/hack/test-cmd.sh @@ -90,6 +90,9 @@ CTLRMGR_PORT=${CTLRMGR_PORT:-10252} PROXY_PORT=${PROXY_PORT:-8001} PROXY_HOST=127.0.0.1 # kubectl only serves on localhost. 
+# ensure ~/.kube/config isn't loaded by tests +HOME="${KUBE_TEMP}" + # Check kubectl kube::log::status "Running kubectl with no options" "${KUBE_OUTPUT_HOSTBIN}/kubectl" @@ -614,6 +617,17 @@ __EOF__ # Post-condition: 3 replicas kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3' + ### Scale multiple replication controllers + kubectl create -f examples/guestbook/redis-master-controller.yaml "${kube_flags[@]}" + kubectl create -f examples/guestbook/redis-slave-controller.yaml "${kube_flags[@]}" + # Command + kubectl scale rc/redis-master rc/redis-slave --replicas=4 + # Post-condition: 4 replicas each + kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4' + kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4' + # Clean-up + kubectl delete rc redis-{master,slave} "${kube_flags[@]}" + ### Expose replication controller as service # Pre-condition: 3 replicas kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3' diff --git a/pkg/api/deep_copy_generated.go b/pkg/api/deep_copy_generated.go index c4e083c068e..5edf631e181 100644 --- a/pkg/api/deep_copy_generated.go +++ b/pkg/api/deep_copy_generated.go @@ -1200,6 +1200,18 @@ func deepCopy_api_Pod(in Pod, out *Pod, c *conversion.Cloner) error { return nil } +func deepCopy_api_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c *conversion.Cloner) error { + if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + func deepCopy_api_PodCondition(in PodCondition, out *PodCondition, c *conversion.Cloner) error { out.Type = in.Type out.Status = in.Status @@ -2144,6 +2156,7 @@ func init() { deepCopy_api_PersistentVolumeSpec, deepCopy_api_PersistentVolumeStatus, deepCopy_api_Pod, + deepCopy_api_PodAttachOptions, deepCopy_api_PodCondition, 
deepCopy_api_PodExecOptions, deepCopy_api_PodList, diff --git a/pkg/api/errors/etcd/etcd.go b/pkg/api/errors/etcd/etcd.go index 4a6656c74ef..3b85aaffcd3 100644 --- a/pkg/api/errors/etcd/etcd.go +++ b/pkg/api/errors/etcd/etcd.go @@ -18,14 +18,14 @@ package etcd import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" ) // InterpretGetError converts a generic etcd error on a retrieval // operation into the appropriate API error. func InterpretGetError(err error, kind, name string) error { switch { - case tools.IsEtcdNotFound(err): + case etcdstorage.IsEtcdNotFound(err): return errors.NewNotFound(kind, name) default: return err @@ -36,7 +36,7 @@ func InterpretGetError(err error, kind, name string) error { // operation into the appropriate API error. func InterpretCreateError(err error, kind, name string) error { switch { - case tools.IsEtcdNodeExist(err): + case etcdstorage.IsEtcdNodeExist(err): return errors.NewAlreadyExists(kind, name) default: return err @@ -47,7 +47,7 @@ func InterpretCreateError(err error, kind, name string) error { // operation into the appropriate API error. func InterpretUpdateError(err error, kind, name string) error { switch { - case tools.IsEtcdTestFailed(err), tools.IsEtcdNodeExist(err): + case etcdstorage.IsEtcdTestFailed(err), etcdstorage.IsEtcdNodeExist(err): return errors.NewConflict(kind, name, err) default: return err @@ -58,7 +58,7 @@ func InterpretUpdateError(err error, kind, name string) error { // operation into the appropriate API error. 
func InterpretDeleteError(err error, kind, name string) error { switch { - case tools.IsEtcdNotFound(err): + case etcdstorage.IsEtcdNotFound(err): return errors.NewNotFound(kind, name) default: return err diff --git a/pkg/api/latest/latest.go b/pkg/api/latest/latest.go index 37edae708af..c08370fdf9c 100644 --- a/pkg/api/latest/latest.go +++ b/pkg/api/latest/latest.go @@ -63,6 +63,8 @@ var RESTMapper meta.RESTMapper // userResources is a group of resources mostly used by a kubectl user var userResources = []string{"rc", "svc", "pods", "pvc"} +const importPrefix = "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + func init() { // Use the first API version in the list of registered versions as the latest. Version = registered.RegisteredVersions[0] @@ -75,28 +77,14 @@ func init() { Versions = append(Versions, versions[i]) } - mapper := meta.NewDefaultRESTMapper( - versions, - func(version string) (*meta.VersionInterfaces, bool) { - interfaces, err := InterfacesFor(version) - if err != nil { - return nil, false - } - return interfaces, true - }, - ) - // the list of kinds that are scoped at the root of the api hierarchy // if a kind is not enumerated here, it is assumed to have a namespace scope - kindToRootScope := map[string]bool{ - "Node": true, - "Minion": true, - "Namespace": true, - "PersistentVolume": true, - } - - // setup aliases for groups of resources - mapper.AddResourceAlias("all", userResources...) + rootScoped := util.NewStringSet( + "Node", + "Minion", + "Namespace", + "PersistentVolume", + ) // these kinds should be excluded from the list of resources ignoredKinds := util.NewStringSet( @@ -107,20 +95,11 @@ func init() { "PodExecOptions", "PodProxyOptions") - // enumerate all supported versions, get the kinds, and register with the mapper how to address our resources. 
- for _, version := range versions { - for kind := range api.Scheme.KnownTypes(version) { - if ignoredKinds.Has(kind) { - continue - } - scope := meta.RESTScopeNamespace - if kindToRootScope[kind] { - scope = meta.RESTScopeRoot - } - mapper.Add(scope, kind, version, false) - } - } + mapper := api.NewDefaultRESTMapper(versions, InterfacesFor, importPrefix, ignoredKinds, rootScoped) + // setup aliases for groups of resources + mapper.AddResourceAlias("all", userResources...) RESTMapper = mapper + api.RegisterRESTMapper(RESTMapper) } // InterfacesFor returns the default Codec and ResourceVersioner for a given version diff --git a/pkg/api/mapper.go b/pkg/api/mapper.go new file mode 100644 index 00000000000..4cc3dd34473 --- /dev/null +++ b/pkg/api/mapper.go @@ -0,0 +1,57 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package api + +import ( + "strings" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" +) + +var RESTMapper meta.RESTMapper + +func init() { + RESTMapper = meta.MultiRESTMapper{} +} + +func RegisterRESTMapper(m meta.RESTMapper) { + RESTMapper = append(RESTMapper.(meta.MultiRESTMapper), m) +} + +func NewDefaultRESTMapper(versions []string, interfacesFunc meta.VersionInterfacesFunc, importPathPrefix string, + ignoredKinds, rootScoped util.StringSet) *meta.DefaultRESTMapper { + + mapper := meta.NewDefaultRESTMapper(versions, interfacesFunc) + // enumerate all supported versions, get the kinds, and register with the mapper how to address our resources. + for _, version := range versions { + for kind, oType := range Scheme.KnownTypes(version) { + // TODO: Remove import path prefix check. + // We check the import path prefix because we currently stuff both "api" and "experimental" objects + // into the same group within Scheme since Scheme has no notion of groups yet. + if !strings.HasPrefix(oType.PkgPath(), importPathPrefix) || ignoredKinds.Has(kind) { + continue + } + scope := meta.RESTScopeNamespace + if rootScoped.Has(kind) { + scope = meta.RESTScopeRoot + } + mapper.Add(scope, kind, version, false) + } + } + return mapper +} diff --git a/pkg/api/meta/restmapper.go b/pkg/api/meta/restmapper.go index 3970c365b67..05f62be969d 100644 --- a/pkg/api/meta/restmapper.go +++ b/pkg/api/meta/restmapper.go @@ -83,8 +83,8 @@ type DefaultRESTMapper struct { } // VersionInterfacesFunc returns the appropriate codec, typer, and metadata accessor for a -// given api version, or false if no such api version exists. -type VersionInterfacesFunc func(apiVersion string) (*VersionInterfaces, bool) +// given api version, or an error if no such api version exists. 
+type VersionInterfacesFunc func(apiVersion string) (*VersionInterfaces, error) // NewDefaultRESTMapper initializes a mapping between Kind and APIVersion // to a resource name and back based on the objects in a runtime.Scheme @@ -226,8 +226,8 @@ func (m *DefaultRESTMapper) RESTMapping(kind string, versions ...string) (*RESTM return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", version, kind) } - interfaces, ok := m.interfacesFunc(version) - if !ok { + interfaces, err := m.interfacesFunc(version) + if err != nil { return nil, fmt.Errorf("the provided version %q has no relevant versions", version) } diff --git a/pkg/api/meta/restmapper_test.go b/pkg/api/meta/restmapper_test.go index 954f69c7591..e78e7ef232f 100644 --- a/pkg/api/meta/restmapper_test.go +++ b/pkg/api/meta/restmapper_test.go @@ -17,6 +17,7 @@ limitations under the License. package meta import ( + "errors" "testing" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" @@ -54,12 +55,14 @@ var validCodec = fakeCodec{} var validAccessor = resourceAccessor{} var validConvertor = fakeConvertor{} -func fakeInterfaces(version string) (*VersionInterfaces, bool) { - return &VersionInterfaces{Codec: validCodec, ObjectConvertor: validConvertor, MetadataAccessor: validAccessor}, true +func fakeInterfaces(version string) (*VersionInterfaces, error) { + return &VersionInterfaces{Codec: validCodec, ObjectConvertor: validConvertor, MetadataAccessor: validAccessor}, nil } -func unmatchedVersionInterfaces(version string) (*VersionInterfaces, bool) { - return nil, false +var unmatchedErr = errors.New("no version") + +func unmatchedVersionInterfaces(version string) (*VersionInterfaces, error) { + return nil, unmatchedErr } func TestRESTMapperVersionAndKindForResource(t *testing.T) { diff --git a/pkg/api/register.go b/pkg/api/register.go index e0ecfb38e18..675f9e5e618 100644 --- a/pkg/api/register.go +++ b/pkg/api/register.go @@ -59,6 +59,7 @@ func init() { 
&PersistentVolumeClaimList{}, &DeleteOptions{}, &ListOptions{}, + &PodAttachOptions{}, &PodLogOptions{}, &PodExecOptions{}, &PodProxyOptions{}, @@ -106,6 +107,7 @@ func (*PersistentVolumeClaim) IsAnAPIObject() {} func (*PersistentVolumeClaimList) IsAnAPIObject() {} func (*DeleteOptions) IsAnAPIObject() {} func (*ListOptions) IsAnAPIObject() {} +func (*PodAttachOptions) IsAnAPIObject() {} func (*PodLogOptions) IsAnAPIObject() {} func (*PodExecOptions) IsAnAPIObject() {} func (*PodProxyOptions) IsAnAPIObject() {} diff --git a/pkg/api/types.go b/pkg/api/types.go index 744b2100c17..bf46ac4f1fb 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -1517,6 +1517,27 @@ type PodLogOptions struct { Previous bool } +// PodAttachOptions is the query options to a Pod's remote attach call +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +type PodAttachOptions struct { + TypeMeta `json:",inline"` + + // Stdin if true indicates that stdin is to be redirected for the attach call + Stdin bool `json:"stdin,omitempty"` + + // Stdout if true indicates that stdout is to be redirected for the attach call + Stdout bool `json:"stdout,omitempty"` + + // Stderr if true indicates that stderr is to be redirected for the attach call + Stderr bool `json:"stderr,omitempty"` + + // TTY if true indicates that a tty will be allocated for the attach call + TTY bool `json:"tty,omitempty"` + + // Container to attach to. 
+ Container string `json:"container,omitempty"` +} + // PodExecOptions is the query options to a Pod's remote exec call type PodExecOptions struct { TypeMeta diff --git a/pkg/api/v1/conversion_generated.go b/pkg/api/v1/conversion_generated.go index 52cdbe805f6..8fa007c32f3 100644 --- a/pkg/api/v1/conversion_generated.go +++ b/pkg/api/v1/conversion_generated.go @@ -1392,6 +1392,21 @@ func convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error return nil } +func convert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *PodAttachOptions, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*api.PodAttachOptions))(in) + } + if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + func convert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *PodCondition, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.PodCondition))(in) @@ -3642,6 +3657,21 @@ func convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error return nil } +func convert_v1_PodAttachOptions_To_api_PodAttachOptions(in *PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { + if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { + defaulting.(func(*PodAttachOptions))(in) + } + if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { + return err + } + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + func convert_v1_PodCondition_To_api_PodCondition(in *PodCondition, out *api.PodCondition, s conversion.Scope) error { if defaulting, found := 
s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*PodCondition))(in) @@ -4595,6 +4625,7 @@ func init() { convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, convert_api_PersistentVolume_To_v1_PersistentVolume, + convert_api_PodAttachOptions_To_v1_PodAttachOptions, convert_api_PodCondition_To_v1_PodCondition, convert_api_PodExecOptions_To_v1_PodExecOptions, convert_api_PodList_To_v1_PodList, @@ -4707,6 +4738,7 @@ func init() { convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec, convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus, convert_v1_PersistentVolume_To_api_PersistentVolume, + convert_v1_PodAttachOptions_To_api_PodAttachOptions, convert_v1_PodCondition_To_api_PodCondition, convert_v1_PodExecOptions_To_api_PodExecOptions, convert_v1_PodList_To_api_PodList, diff --git a/pkg/api/v1/deep_copy_generated.go b/pkg/api/v1/deep_copy_generated.go index 91c9b8d3f68..3575a7c25b5 100644 --- a/pkg/api/v1/deep_copy_generated.go +++ b/pkg/api/v1/deep_copy_generated.go @@ -1203,6 +1203,18 @@ func deepCopy_v1_Pod(in Pod, out *Pod, c *conversion.Cloner) error { return nil } +func deepCopy_v1_PodAttachOptions(in PodAttachOptions, out *PodAttachOptions, c *conversion.Cloner) error { + if err := deepCopy_v1_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { + return err + } + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + func deepCopy_v1_PodCondition(in PodCondition, out *PodCondition, c *conversion.Cloner) error { out.Type = in.Type out.Status = in.Status @@ -2152,6 +2164,7 @@ func init() { deepCopy_v1_PersistentVolumeSpec, deepCopy_v1_PersistentVolumeStatus, deepCopy_v1_Pod, + deepCopy_v1_PodAttachOptions, deepCopy_v1_PodCondition, deepCopy_v1_PodExecOptions, deepCopy_v1_PodList, diff --git a/pkg/api/v1/register.go b/pkg/api/v1/register.go index 082ac87897d..e497f6ed4a3 
100644 --- a/pkg/api/v1/register.go +++ b/pkg/api/v1/register.go @@ -74,6 +74,7 @@ func addKnownTypes() { &PersistentVolumeClaimList{}, &DeleteOptions{}, &ListOptions{}, + &PodAttachOptions{}, &PodLogOptions{}, &PodExecOptions{}, &PodProxyOptions{}, @@ -121,6 +122,7 @@ func (*PersistentVolumeClaim) IsAnAPIObject() {} func (*PersistentVolumeClaimList) IsAnAPIObject() {} func (*DeleteOptions) IsAnAPIObject() {} func (*ListOptions) IsAnAPIObject() {} +func (*PodAttachOptions) IsAnAPIObject() {} func (*PodLogOptions) IsAnAPIObject() {} func (*PodExecOptions) IsAnAPIObject() {} func (*PodProxyOptions) IsAnAPIObject() {} diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index 7b115f490e1..99c92a6d5ce 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -1475,6 +1475,28 @@ type PodLogOptions struct { Previous bool `json:"previous,omitempty" description:"return previous terminated container logs; defaults to false"` } +// PodAttachOptions is the query options to a Pod's remote attach call +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +type PodAttachOptions struct { + TypeMeta `json:",inline"` + + // Stdin if true indicates that stdin is to be redirected for the attach call + Stdin bool `json:"stdin,omitempty" description:"redirect the standard input stream of the pod for this call; defaults to false"` + + // Stdout if true indicates that stdout is to be redirected for the attach call + Stdout bool `json:"stdout,omitempty" description:"redirect the standard output stream of the pod for this call; defaults to true"` + + // Stderr if true indicates that stderr is to be redirected for the attach call + Stderr bool `json:"stderr,omitempty" description:"redirect the standard error stream of the pod for this call; defaults to true"` + + // TTY if true indicates that a tty will be allocated for the attach call, this is passed through to the container runtime so the tty + // is allocated on the worker node by the container runtime. 
+ TTY bool `json:"tty,omitempty" description:"allocate a terminal for this attach call; defaults to false"` + + // Container to attach to. + Container string `json:"container,omitempty" description:"the container in which to execute the command. Defaults to only container if there is only one container in the pod."` +} + // PodExecOptions is the query options to a Pod's remote exec call type PodExecOptions struct { TypeMeta `json:",inline"` diff --git a/pkg/api/validation/schema.go b/pkg/api/validation/schema.go index 866af396e5f..daec08f1987 100644 --- a/pkg/api/validation/schema.go +++ b/pkg/api/validation/schema.go @@ -114,7 +114,7 @@ func (s *SwaggerSchema) ValidateObject(obj interface{}, apiVersion, fieldName, t if len(fieldName) > 0 { fieldName = fieldName + "." } - //handle required fields + // handle required fields for _, requiredKey := range model.Required { if _, ok := fields[requiredKey]; !ok { allErrs = append(allErrs, fmt.Errorf("field %s: is required", requiredKey)) @@ -123,10 +123,7 @@ func (s *SwaggerSchema) ValidateObject(obj interface{}, apiVersion, fieldName, t for key, value := range fields { details, ok := properties.At(key) if !ok { - glog.Infof("unknown field: %s", key) - // Some properties can be missing because of - // https://github.com/GoogleCloudPlatform/kubernetes/issues/6842. 
- glog.Info("this may be a false alarm, see https://github.com/GoogleCloudPlatform/kubernetes/issues/6842") + allErrs = append(allErrs, fmt.Errorf("found invalid field %s for %s", key, typeName)) continue } if details.Type == nil && details.Ref == nil { diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index 9b01efe127e..6ace35819d9 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -88,16 +88,7 @@ func interfacesFor(version string) (*meta.VersionInterfaces, error) { } func newMapper() *meta.DefaultRESTMapper { - return meta.NewDefaultRESTMapper( - versions, - func(version string) (*meta.VersionInterfaces, bool) { - interfaces, err := interfacesFor(version) - if err != nil { - return nil, false - } - return interfaces, true - }, - ) + return meta.NewDefaultRESTMapper(versions, interfacesFor) } func addTestTypes() { diff --git a/pkg/apiserver/authn.go b/pkg/apiserver/authn.go index eb71b372957..d40aa3eb0e8 100644 --- a/pkg/apiserver/authn.go +++ b/pkg/apiserver/authn.go @@ -22,7 +22,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/auth/authenticator" "github.com/GoogleCloudPlatform/kubernetes/pkg/auth/authenticator/bearertoken" "github.com/GoogleCloudPlatform/kubernetes/pkg/serviceaccount" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/auth/authenticator/password/passwordfile" "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/auth/authenticator/request/basicauth" @@ -32,7 +32,7 @@ import ( ) // NewAuthenticator returns an authenticator.Request or an error -func NewAuthenticator(basicAuthFile, clientCAFile, tokenFile, serviceAccountKeyFile string, serviceAccountLookup bool, storage tools.StorageInterface) (authenticator.Request, error) { +func NewAuthenticator(basicAuthFile, clientCAFile, tokenFile, 
serviceAccountKeyFile string, serviceAccountLookup bool, storage storage.Interface) (authenticator.Request, error) { var authenticators []authenticator.Request if len(basicAuthFile) > 0 { @@ -104,7 +104,7 @@ func newAuthenticatorFromTokenFile(tokenAuthFile string) (authenticator.Request, } // newServiceAccountAuthenticator returns an authenticator.Request or an error -func newServiceAccountAuthenticator(keyfile string, lookup bool, storage tools.StorageInterface) (authenticator.Request, error) { +func newServiceAccountAuthenticator(keyfile string, lookup bool, storage storage.Interface) (authenticator.Request, error) { publicKey, err := serviceaccount.ReadPublicKey(keyfile) if err != nil { return nil, err diff --git a/pkg/apiserver/errors.go b/pkg/apiserver/errors.go index e234f0472a2..503d45ac5ba 100644 --- a/pkg/apiserver/errors.go +++ b/pkg/apiserver/errors.go @@ -21,7 +21,7 @@ import ( "net/http" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" ) @@ -52,7 +52,7 @@ func errToAPIStatus(err error) *api.Status { status := http.StatusInternalServerError switch { //TODO: replace me with NewConflictErr - case tools.IsEtcdTestFailed(err): + case etcdstorage.IsEtcdTestFailed(err): status = http.StatusConflict } // Log errors that were not converted to an error status diff --git a/pkg/client/remotecommand/remotecommand.go b/pkg/client/remotecommand/remotecommand.go index 09ce4317ff3..666b7065142 100644 --- a/pkg/client/remotecommand/remotecommand.go +++ b/pkg/client/remotecommand/remotecommand.go @@ -25,6 +25,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion/queryparams" + "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" 
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/httpstream/spdy" "github.com/golang/glog" @@ -40,32 +41,74 @@ func (u *defaultUpgrader) upgrade(req *client.Request, config *client.Config) (h return req.Upgrade(config, spdy.NewRoundTripper) } -// Executor executes a command on a pod container -type Executor struct { - req *client.Request - config *client.Config - command []string - stdin io.Reader - stdout io.Writer - stderr io.Writer - tty bool +type Streamer struct { + req *client.Request + config *client.Config + stdin io.Reader + stdout io.Writer + stderr io.Writer + tty bool upgrader upgrader } +// Executor executes a command on a pod container +type Executor struct { + Streamer + command []string +} + // New creates a new RemoteCommandExecutor func New(req *client.Request, config *client.Config, command []string, stdin io.Reader, stdout, stderr io.Writer, tty bool) *Executor { return &Executor{ - req: req, - config: config, command: command, - stdin: stdin, - stdout: stdout, - stderr: stderr, - tty: tty, + Streamer: Streamer{ + req: req, + config: config, + stdin: stdin, + stdout: stdout, + stderr: stderr, + tty: tty, + }, } } +type Attach struct { + Streamer +} + +// NewAttach creates a new RemoteAttach +func NewAttach(req *client.Request, config *client.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) *Attach { + return &Attach{ + Streamer: Streamer{ + req: req, + config: config, + stdin: stdin, + stdout: stdout, + stderr: stderr, + tty: tty, + }, + } +} + +// Execute sends a remote command execution request, upgrading the +// connection and creating streams to represent stdin/stdout/stderr. Data is +// copied between these streams and the supplied stdin/stdout/stderr parameters. 
+func (e *Attach) Execute() error { + opts := api.PodAttachOptions{ + Stdin: (e.stdin != nil), + Stdout: (e.stdout != nil), + Stderr: (!e.tty && e.stderr != nil), + TTY: e.tty, + } + + if err := e.setupRequestParameters(&opts); err != nil { + return err + } + + return e.doStream() +} + // Execute sends a remote command execution request, upgrading the // connection and creating streams to represent stdin/stdout/stderr. Data is // copied between these streams and the supplied stdin/stdout/stderr parameters. @@ -78,7 +121,15 @@ func (e *Executor) Execute() error { Command: e.command, } - versioned, err := api.Scheme.ConvertToVersion(&opts, e.config.Version) + if err := e.setupRequestParameters(&opts); err != nil { + return err + } + + return e.doStream() +} + +func (e *Streamer) setupRequestParameters(obj runtime.Object) error { + versioned, err := api.Scheme.ConvertToVersion(obj, e.config.Version) if err != nil { return err } @@ -91,7 +142,10 @@ func (e *Executor) Execute() error { e.req.Param(k, vv) } } + return nil +} +func (e *Streamer) doStream() error { if e.upgrader == nil { e.upgrader = &defaultUpgrader{} } @@ -134,7 +188,7 @@ func (e *Executor) Execute() error { }() defer errorStream.Reset() - if opts.Stdin { + if e.stdin != nil { headers.Set(api.StreamType, api.StreamTypeStdin) remoteStdin, err := conn.CreateStream(headers) if err != nil { @@ -151,7 +205,7 @@ func (e *Executor) Execute() error { waitCount := 0 completedStreams := 0 - if opts.Stdout { + if e.stdout != nil { waitCount++ headers.Set(api.StreamType, api.StreamTypeStdout) remoteStdout, err := conn.CreateStream(headers) @@ -162,7 +216,7 @@ func (e *Executor) Execute() error { go cp(api.StreamTypeStdout, e.stdout, remoteStdout) } - if opts.Stderr && !e.tty { + if e.stderr != nil && !e.tty { waitCount++ headers.Set(api.StreamType, api.StreamTypeStderr) remoteStderr, err := conn.CreateStream(headers) diff --git a/pkg/client/remotecommand/remotecommand_test.go 
b/pkg/client/remotecommand/remotecommand_test.go index 72f96d44b23..9e247fe37d2 100644 --- a/pkg/client/remotecommand/remotecommand_test.go +++ b/pkg/client/remotecommand/remotecommand_test.go @@ -190,3 +190,80 @@ func TestRequestExecuteRemoteCommand(t *testing.T) { server.Close() } } + +// TODO: this test is largely cut and paste, refactor to share code +func TestRequestAttachRemoteCommand(t *testing.T) { + testCases := []struct { + Stdin string + Stdout string + Stderr string + Error string + Tty bool + }{ + { + Error: "bail", + }, + { + Stdin: "a", + Stdout: "b", + Stderr: "c", + }, + { + Stdin: "a", + Stdout: "b", + Tty: true, + }, + } + + for i, testCase := range testCases { + localOut := &bytes.Buffer{} + localErr := &bytes.Buffer{} + + server := httptest.NewServer(fakeExecServer(t, i, testCase.Stdin, testCase.Stdout, testCase.Stderr, testCase.Error, testCase.Tty)) + + url, _ := url.ParseRequestURI(server.URL) + c := client.NewRESTClient(url, "x", nil, -1, -1) + req := c.Post().Resource("testing") + + conf := &client.Config{ + Host: server.URL, + } + e := NewAttach(req, conf, strings.NewReader(testCase.Stdin), localOut, localErr, testCase.Tty) + //e.upgrader = testCase.Upgrader + err := e.Execute() + hasErr := err != nil + + if len(testCase.Error) > 0 { + if !hasErr { + t.Errorf("%d: expected an error", i) + } else { + if e, a := testCase.Error, err.Error(); !strings.Contains(a, e) { + t.Errorf("%d: expected error stream read '%v', got '%v'", i, e, a) + } + } + + server.Close() + continue + } + + if hasErr { + t.Errorf("%d: unexpected error: %v", i, err) + server.Close() + continue + } + + if len(testCase.Stdout) > 0 { + if e, a := testCase.Stdout, localOut; e != a.String() { + t.Errorf("%d: expected stdout data '%s', got '%s'", i, e, a) + } + } + + if testCase.Stderr != "" { + if e, a := testCase.Stderr, localErr; e != a.String() { + t.Errorf("%d: expected stderr data '%s', got '%s'", i, e, a) + } + } + + server.Close() + } +} diff --git 
a/pkg/cloudprovider/aws/aws_routes.go b/pkg/cloudprovider/aws/aws_routes.go index 3666fcab94a..cca1eb0b858 100644 --- a/pkg/cloudprovider/aws/aws_routes.go +++ b/pkg/cloudprovider/aws/aws_routes.go @@ -62,8 +62,13 @@ func (s *AWSCloud) ListRoutes(clusterName string) ([]*cloudprovider.Route, error continue } + instance, err := s.getInstanceById(instanceID) + if err != nil { + return nil, err + } + instanceName := orEmpty(instance.PrivateDNSName) routeName := clusterName + "-" + destinationCIDR - routes = append(routes, &cloudprovider.Route{routeName, instanceID, destinationCIDR}) + routes = append(routes, &cloudprovider.Route{routeName, instanceName, destinationCIDR}) } return routes, nil diff --git a/pkg/cloudprovider/gce/gce.go b/pkg/cloudprovider/gce/gce.go index 8b618496f9d..196b986d433 100644 --- a/pkg/cloudprovider/gce/gce.go +++ b/pkg/cloudprovider/gce/gce.go @@ -58,6 +58,7 @@ type GCECloud struct { projectID string zone string instanceID string + externalID string networkName string // Used for accessing the metadata server @@ -124,6 +125,14 @@ func getInstanceID() (string, error) { return parts[0], nil } +func getCurrentExternalID() (string, error) { + externalID, err := metadata.Get("instance/id") + if err != nil { + return "", fmt.Errorf("couldn't get external ID: %v", err) + } + return externalID, nil +} + func getNetworkName() (string, error) { result, err := metadata.Get("instance/network-interfaces/0/network") if err != nil { @@ -149,6 +158,10 @@ func newGCECloud(config io.Reader) (*GCECloud, error) { if err != nil { return nil, err } + externalID, err := getCurrentExternalID() + if err != nil { + return nil, err + } networkName, err := getNetworkName() if err != nil { return nil, err @@ -185,6 +198,7 @@ func newGCECloud(config io.Reader) (*GCECloud, error) { projectID: projectID, zone: zone, instanceID: instanceID, + externalID: externalID, networkName: networkName, metadataAccess: getMetadata, }, nil @@ -276,13 +290,19 @@ func (gce *GCECloud) 
targetPoolURL(name, region string) string { func waitForOp(op *compute.Operation, getOperation func() (*compute.Operation, error)) error { pollOp := op + consecPollFails := 0 for pollOp.Status != "DONE" { var err error - // TODO: add some backoff here. - time.Sleep(time.Second) + time.Sleep(3 * time.Second) pollOp, err = getOperation() if err != nil { - return err + if consecPollFails == 2 { + // Only bail if we've seen 3 consecutive polling errors. + return err + } + consecPollFails++ + } else { + consecPollFails = 0 } } if pollOp.Error != nil && len(pollOp.Error.Errors) > 0 { @@ -628,8 +648,16 @@ func (gce *GCECloud) NodeAddresses(_ string) ([]api.NodeAddress, error) { }, nil } +func (gce *GCECloud) isCurrentInstance(instance string) bool { + return gce.instanceID == canonicalizeInstanceName(instance) +} + // ExternalID returns the cloud provider ID of the specified instance (deprecated). func (gce *GCECloud) ExternalID(instance string) (string, error) { + // if we are asking about the current instance, just go to metadata + if gce.isCurrentInstance(instance) { + return gce.externalID, nil + } inst, err := gce.getInstanceByName(instance) if err != nil { return "", err diff --git a/pkg/expapi/deep_copy.go b/pkg/expapi/deep_copy.go new file mode 100644 index 00000000000..2becaae4d02 --- /dev/null +++ b/pkg/expapi/deep_copy.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package expapi + +func addDeepCopyFuncs() {} diff --git a/pkg/expapi/latest/latest.go b/pkg/expapi/latest/latest.go new file mode 100644 index 00000000000..628b84c00a6 --- /dev/null +++ b/pkg/expapi/latest/latest.go @@ -0,0 +1,75 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package latest + +import ( + "fmt" + "strings" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/registered" + _ "github.com/GoogleCloudPlatform/kubernetes/pkg/expapi" + "github.com/GoogleCloudPlatform/kubernetes/pkg/expapi/v1" + "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" +) + +var ( + Version string + Versions []string + + accessor = meta.NewAccessor() + Codec runtime.Codec + SelfLinker = runtime.SelfLinker(accessor) + RESTMapper meta.RESTMapper +) + +const importPrefix = "github.com/GoogleCloudPlatform/kubernetes/pkg/expapi" + +func init() { + Version = registered.RegisteredVersions[0] + Codec = runtime.CodecFor(api.Scheme, Version) + // Put the registered versions in Versions in reverse order. 
+ for i := len(registered.RegisteredVersions) - 1; i >= 0; i-- { + Versions = append(Versions, registered.RegisteredVersions[i]) + } + + // the list of kinds that are scoped at the root of the api hierarchy + // if a kind is not enumerated here, it is assumed to have a namespace scope + rootScoped := util.NewStringSet() + + ignoredKinds := util.NewStringSet() + + RESTMapper = api.NewDefaultRESTMapper(Versions, InterfacesFor, importPrefix, ignoredKinds, rootScoped) + api.RegisterRESTMapper(RESTMapper) +} + +// InterfacesFor returns the default Codec and ResourceVersioner for a given version +// string, or an error if the version is not known. +func InterfacesFor(version string) (*meta.VersionInterfaces, error) { + switch version { + case "v1": + return &meta.VersionInterfaces{ + Codec: v1.Codec, + ObjectConvertor: api.Scheme, + MetadataAccessor: accessor, + }, nil + default: + return nil, fmt.Errorf("unsupported storage version: %s (valid: %s)", version, strings.Join(Versions, ", ")) + } +} diff --git a/pkg/expapi/register.go b/pkg/expapi/register.go new file mode 100644 index 00000000000..4198ad56282 --- /dev/null +++ b/pkg/expapi/register.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package expapi + +func init() {} diff --git a/pkg/expapi/types.go b/pkg/expapi/types.go new file mode 100644 index 00000000000..f30be24d406 --- /dev/null +++ b/pkg/expapi/types.go @@ -0,0 +1,29 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +This file (together with pkg/expapi/v1/types.go) contain the experimental +types in kubernetes. These API objects are experimental, meaning that the +APIs may be broken at any time by the kubernetes team. + +DISCLAIMER: The implementation of the experimental API group itself is +a temporary one meant as a stopgap solution until kubernetes has proper +support for multiple API groups. The transition may require changes +beyond registration differences. In other words, experimental API group +support is experimental. +*/ + +package expapi diff --git a/pkg/expapi/v1/conversion.go b/pkg/expapi/v1/conversion.go new file mode 100644 index 00000000000..2f72ba4d382 --- /dev/null +++ b/pkg/expapi/v1/conversion.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +func addConversionFuncs() {} diff --git a/pkg/expapi/v1/deep_copy.go b/pkg/expapi/v1/deep_copy.go new file mode 100644 index 00000000000..af00429d520 --- /dev/null +++ b/pkg/expapi/v1/deep_copy.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +func addDeepCopyFuncs() {} diff --git a/pkg/expapi/v1/defaults.go b/pkg/expapi/v1/defaults.go new file mode 100644 index 00000000000..268770da898 --- /dev/null +++ b/pkg/expapi/v1/defaults.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +func addDefaultingFuncs() {} diff --git a/pkg/expapi/v1/register.go b/pkg/expapi/v1/register.go new file mode 100644 index 00000000000..c3ab661be26 --- /dev/null +++ b/pkg/expapi/v1/register.go @@ -0,0 +1,30 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" +) + +var Codec = runtime.CodecFor(api.Scheme, "v1") + +func init() { + addDeepCopyFuncs() + addConversionFuncs() + addDefaultingFuncs() +} diff --git a/pkg/expapi/v1/types.go b/pkg/expapi/v1/types.go new file mode 100644 index 00000000000..2a5c8ca4cba --- /dev/null +++ b/pkg/expapi/v1/types.go @@ -0,0 +1,17 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 diff --git a/pkg/kubectl/cmd/attach.go b/pkg/kubectl/cmd/attach.go new file mode 100644 index 00000000000..17e9e50290f --- /dev/null +++ b/pkg/kubectl/cmd/attach.go @@ -0,0 +1,211 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cmd + +import ( + "fmt" + "io" + "os" + "os/signal" + "syscall" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/remotecommand" + cmdutil "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/util" + "github.com/docker/docker/pkg/term" + "github.com/golang/glog" + "github.com/spf13/cobra" +) + +const ( + attach_example = `// get output from running pod 123456-7890, using the first container by default +$ kubectl attach 123456-7890 + +// get output from ruby-container from pod 123456-7890 +$ kubectl attach 123456-7890 -c ruby-container date + +// switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-780 +// and sends stdout/stderr from 'bash' back to the client +$ kubectl attach 123456-7890 -c ruby-container -i -t` +) + +func NewCmdAttach(f *cmdutil.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) *cobra.Command { + options := &AttachOptions{ + In: cmdIn, + Out: cmdOut, + Err: cmdErr, + + Attach: &DefaultRemoteAttach{}, + } + cmd := &cobra.Command{ + Use: "attach POD -c CONTAINER", + Short: "Attach to a running 
container.", + Long: "Attach to a a process that is already running inside an existing container.", + Example: attach_example, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(options.Complete(f, cmd, args)) + cmdutil.CheckErr(options.Validate()) + cmdutil.CheckErr(options.Run()) + }, + } + // TODO support UID + cmd.Flags().StringVarP(&options.ContainerName, "container", "c", "", "Container name") + cmd.Flags().BoolVarP(&options.Stdin, "stdin", "i", false, "Pass stdin to the container") + cmd.Flags().BoolVarP(&options.TTY, "tty", "t", false, "Stdin is a TTY") + return cmd +} + +// RemoteAttach defines the interface accepted by the Attach command - provided for test stubbing +type RemoteAttach interface { + Attach(req *client.Request, config *client.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error +} + +// DefaultRemoteAttach is the standard implementation of attaching +type DefaultRemoteAttach struct{} + +func (*DefaultRemoteAttach) Attach(req *client.Request, config *client.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error { + attach := remotecommand.NewAttach(req, config, stdin, stdout, stderr, tty) + return attach.Execute() +} + +// AttachOptions declare the arguments accepted by the Exec command +type AttachOptions struct { + Namespace string + PodName string + ContainerName string + Stdin bool + TTY bool + + In io.Reader + Out io.Writer + Err io.Writer + + Attach RemoteAttach + Client *client.Client + Config *client.Config +} + +// Complete verifies command line arguments and loads data from the command environment +func (p *AttachOptions) Complete(f *cmdutil.Factory, cmd *cobra.Command, argsIn []string) error { + if len(argsIn) == 0 { + return cmdutil.UsageError(cmd, "POD is required for attach") + } + if len(argsIn) > 1 { + return cmdutil.UsageError(cmd, fmt.Sprintf("expected a single argument: POD, saw %d: %s", len(argsIn), argsIn)) + } + p.PodName = argsIn[0] + + namespace, _, err := 
f.DefaultNamespace() + if err != nil { + return err + } + p.Namespace = namespace + + config, err := f.ClientConfig() + if err != nil { + return err + } + p.Config = config + + client, err := f.Client() + if err != nil { + return err + } + p.Client = client + + return nil +} + +// Validate checks that the provided attach options are specified. +func (p *AttachOptions) Validate() error { + if len(p.PodName) == 0 { + return fmt.Errorf("pod name must be specified") + } + if p.Out == nil || p.Err == nil { + return fmt.Errorf("both output and error output must be provided") + } + if p.Attach == nil || p.Client == nil || p.Config == nil { + return fmt.Errorf("client, client config, and attach must be provided") + } + return nil +} + +// Run executes a validated remote execution against a pod. +func (p *AttachOptions) Run() error { + pod, err := p.Client.Pods(p.Namespace).Get(p.PodName) + if err != nil { + return err + } + + if pod.Status.Phase != api.PodRunning { + return fmt.Errorf("pod %s is not running and cannot be attached to; current phase is %s", p.PodName, pod.Status.Phase) + } + + containerName := p.ContainerName + if len(containerName) == 0 { + glog.V(4).Infof("defaulting container name to %s", pod.Spec.Containers[0].Name) + containerName = pod.Spec.Containers[0].Name + } + + // TODO: refactor with terminal helpers from the edit utility once that is merged + var stdin io.Reader + tty := p.TTY + if p.Stdin { + stdin = p.In + if tty { + if file, ok := stdin.(*os.File); ok { + inFd := file.Fd() + if term.IsTerminal(inFd) { + oldState, err := term.SetRawTerminal(inFd) + if err != nil { + glog.Fatal(err) + } + // this handles a clean exit, where the command finished + defer term.RestoreTerminal(inFd, oldState) + + // SIGINT is handled by term.SetRawTerminal (it runs a goroutine that listens + // for SIGINT and restores the terminal before exiting) + + // this handles SIGTERM + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, syscall.SIGTERM) + go func() { 
+ <-sigChan + term.RestoreTerminal(inFd, oldState) + os.Exit(0) + }() + } else { + fmt.Fprintln(p.Err, "STDIN is not a terminal") + } + } else { + tty = false + fmt.Fprintln(p.Err, "Unable to use a TTY - input is not the right kind of file") + } + } + } + + // TODO: consider abstracting into a client invocation or client helper + req := p.Client.RESTClient.Post(). + Resource("pods"). + Name(pod.Name). + Namespace(pod.Namespace). + SubResource("attach"). + Param("container", containerName) + + return p.Attach.Attach(req, p.Config, stdin, p.Out, p.Err, tty) +} diff --git a/pkg/kubectl/cmd/attach_test.go b/pkg/kubectl/cmd/attach_test.go new file mode 100644 index 00000000000..ab2f610ca49 --- /dev/null +++ b/pkg/kubectl/cmd/attach_test.go @@ -0,0 +1,198 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cmd + +import ( + "bytes" + "fmt" + "io" + "net/http" + "testing" + + "github.com/spf13/cobra" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client" +) + +type fakeRemoteAttach struct { + req *client.Request + attachErr error +} + +func (f *fakeRemoteAttach) Attach(req *client.Request, config *client.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error { + f.req = req + return f.attachErr +} + +func TestPodAndContainerAttach(t *testing.T) { + tests := []struct { + args []string + p *AttachOptions + name string + expectError bool + expectedPod string + expectedContainer string + }{ + { + p: &AttachOptions{}, + expectError: true, + name: "empty", + }, + { + p: &AttachOptions{}, + args: []string{"foo", "bar"}, + expectError: true, + name: "too many args", + }, + { + p: &AttachOptions{}, + args: []string{"foo"}, + expectedPod: "foo", + name: "no container, no flags", + }, + { + p: &AttachOptions{ContainerName: "bar"}, + args: []string{"foo"}, + expectedPod: "foo", + expectedContainer: "bar", + name: "container in flag", + }, + } + for _, test := range tests { + f, tf, codec := NewAPIFactory() + tf.Client = &client.FakeRESTClient{ + Codec: codec, + Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { return nil, nil }), + } + tf.Namespace = "test" + tf.ClientConfig = &client.Config{} + + cmd := &cobra.Command{} + options := test.p + err := options.Complete(f, cmd, test.args) + if test.expectError && err == nil { + t.Errorf("unexpected non-error (%s)", test.name) + } + if !test.expectError && err != nil { + t.Errorf("unexpected error: %v (%s)", err, test.name) + } + if err != nil { + continue + } + if options.PodName != test.expectedPod { + t.Errorf("expected: %s, got: %s (%s)", test.expectedPod, options.PodName, test.name) + } + if options.ContainerName != test.expectedContainer { + 
t.Errorf("expected: %s, got: %s (%s)", test.expectedContainer, options.ContainerName, test.name) + } + } +} + +func TestAttach(t *testing.T) { + version := testapi.Version() + tests := []struct { + name, version, podPath, attachPath, container string + pod *api.Pod + attachErr bool + }{ + { + name: "pod attach", + version: version, + podPath: "/api/" + version + "/namespaces/test/pods/foo", + attachPath: "/api/" + version + "/namespaces/test/pods/foo/attach", + pod: attachPod(), + }, + { + name: "pod attach error", + version: version, + podPath: "/api/" + version + "/namespaces/test/pods/foo", + attachPath: "/api/" + version + "/namespaces/test/pods/foo/attach", + pod: attachPod(), + attachErr: true, + }, + } + for _, test := range tests { + f, tf, codec := NewAPIFactory() + tf.Client = &client.FakeRESTClient{ + Codec: codec, + Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) { + switch p, m := req.URL.Path, req.Method; { + case p == test.podPath && m == "GET": + body := objBody(codec, test.pod) + return &http.Response{StatusCode: 200, Body: body}, nil + default: + // Ensures no GET is performed when deleting by name + t.Errorf("%s: unexpected request: %s %#v\n%#v", test.name, req.Method, req.URL, req) + return nil, fmt.Errorf("unexpected request") + } + }), + } + tf.Namespace = "test" + tf.ClientConfig = &client.Config{Version: test.version} + bufOut := bytes.NewBuffer([]byte{}) + bufErr := bytes.NewBuffer([]byte{}) + bufIn := bytes.NewBuffer([]byte{}) + ex := &fakeRemoteAttach{} + if test.attachErr { + ex.attachErr = fmt.Errorf("attach error") + } + params := &AttachOptions{ + ContainerName: "bar", + In: bufIn, + Out: bufOut, + Err: bufErr, + Attach: ex, + } + cmd := &cobra.Command{} + if err := params.Complete(f, cmd, []string{"foo"}); err != nil { + t.Fatal(err) + } + err := params.Run() + if test.attachErr && err != ex.attachErr { + t.Errorf("%s: Unexpected exec error: %v", test.name, err) + continue + } + if !test.attachErr && err 
!= nil { + t.Errorf("%s: Unexpected error: %v", test.name, err) + continue + } + if !test.attachErr && ex.req.URL().Path != test.attachPath { + t.Errorf("%s: Did not get expected path for exec request", test.name) + continue + } + } +} + +func attachPod() *api.Pod { + return &api.Pod{ + ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "test", ResourceVersion: "10"}, + Spec: api.PodSpec{ + RestartPolicy: api.RestartPolicyAlways, + DNSPolicy: api.DNSClusterFirst, + Containers: []api.Container{ + { + Name: "bar", + }, + }, + }, + Status: api.PodStatus{ + Phase: api.PodRunning, + }, + } +} diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index f08e96514c3..b5b27ceacc5 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -95,13 +95,14 @@ __custom_func() { * pods (aka 'po') * replicationcontrollers (aka 'rc') * services (aka 'svc') - * nodes (aka 'no') * events (aka 'ev') + * nodes (aka 'no') + * namespaces (aka 'ns') * secrets - * limits - * persistentVolumes (aka 'pv') - * persistentVolumeClaims (aka 'pvc') - * quota + * persistentvolumes (aka 'pv') + * persistentvolumeclaims (aka 'pvc') + * limitranges (aka 'limits') + * resourcequotas (aka 'quota') ` ) @@ -135,6 +136,7 @@ Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`, cmds.AddCommand(NewCmdRollingUpdate(f, out)) cmds.AddCommand(NewCmdScale(f, out)) + cmds.AddCommand(NewCmdAttach(f, in, out, err)) cmds.AddCommand(NewCmdExec(f, in, out, err)) cmds.AddCommand(NewCmdPortForward(f)) cmds.AddCommand(NewCmdProxy(f, out)) diff --git a/pkg/kubectl/cmd/cmd_test.go b/pkg/kubectl/cmd/cmd_test.go index 707badf27ca..a20a119e639 100644 --- a/pkg/kubectl/cmd/cmd_test.go +++ b/pkg/kubectl/cmd/cmd_test.go @@ -18,6 +18,7 @@ package cmd import ( "bytes" + "errors" "fmt" "io" "io/ioutil" @@ -64,6 +65,15 @@ func (*internalType) IsAnAPIObject() {} func (*externalType) IsAnAPIObject() {} func (*ExternalType2) IsAnAPIObject() {} +var versionErr = errors.New("not a version") + +func 
versionErrIfFalse(b bool) error { + if b { + return nil + } + return versionErr +} + func newExternalScheme() (*runtime.Scheme, meta.RESTMapper, runtime.Codec) { scheme := runtime.NewScheme() scheme.AddKnownTypeWithName("", "Type", &internalType{}) @@ -73,12 +83,12 @@ func newExternalScheme() (*runtime.Scheme, meta.RESTMapper, runtime.Codec) { codec := runtime.CodecFor(scheme, "unlikelyversion") validVersion := testapi.Version() - mapper := meta.NewDefaultRESTMapper([]string{"unlikelyversion", validVersion}, func(version string) (*meta.VersionInterfaces, bool) { + mapper := meta.NewDefaultRESTMapper([]string{"unlikelyversion", validVersion}, func(version string) (*meta.VersionInterfaces, error) { return &meta.VersionInterfaces{ Codec: runtime.CodecFor(scheme, version), ObjectConvertor: scheme, MetadataAccessor: meta.NewAccessor(), - }, (version == validVersion || version == "unlikelyversion") + }, versionErrIfFalse(version == validVersion || version == "unlikelyversion") }) for _, version := range []string{"unlikelyversion", validVersion} { for kind := range scheme.KnownTypes(version) { diff --git a/pkg/kubectl/cmd/describe.go b/pkg/kubectl/cmd/describe.go index b3719be9383..dd075996a83 100644 --- a/pkg/kubectl/cmd/describe.go +++ b/pkg/kubectl/cmd/describe.go @@ -43,9 +43,9 @@ will first check for an exact match on RESOURCE and NAME_PREFIX. 
If no such reso exists, it will output details for every resource that has a name prefixed with NAME_PREFIX Possible resources include (case insensitive): pods (po), services (svc), -replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs), -limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), -resourcequotas (quota) or secrets.` +replicationcontrollers (rc), nodes (no), events (ev), limitranges (limits), +persistentvolumes (pv), persistentvolumeclaims (pvc), resourcequotas (quota), +namespaces (ns) or secrets.` describe_example = `// Describe a node $ kubectl describe nodes kubernetes-minion-emt8.c.myproject.internal @@ -112,7 +112,7 @@ func RunDescribe(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []s infos, err := r.Infos() if err != nil { if apierrors.IsNotFound(err) && len(args) == 2 { - return DescribeMatchingResources(mapper, typer, describer, f, cmdNamespace, args[0], args[1], out) + return DescribeMatchingResources(mapper, typer, describer, f, cmdNamespace, args[0], args[1], out, err) } return err } @@ -128,7 +128,7 @@ func RunDescribe(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []s return nil } -func DescribeMatchingResources(mapper meta.RESTMapper, typer runtime.ObjectTyper, describer kubectl.Describer, f *cmdutil.Factory, namespace, rsrc, prefix string, out io.Writer) error { +func DescribeMatchingResources(mapper meta.RESTMapper, typer runtime.ObjectTyper, describer kubectl.Describer, f *cmdutil.Factory, namespace, rsrc, prefix string, out io.Writer, originalError error) error { r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()). NamespaceParam(namespace).DefaultNamespace(). ResourceTypeOrNameArgs(true, rsrc). 
@@ -152,7 +152,7 @@ func DescribeMatchingResources(mapper meta.RESTMapper, typer runtime.ObjectTyper } } if !isFound { - return fmt.Errorf("%v %q not found", rsrc, prefix) + return originalError } return nil } diff --git a/pkg/kubectl/cmd/get.go b/pkg/kubectl/cmd/get.go index c5a67acce25..5a03c967100 100644 --- a/pkg/kubectl/cmd/get.go +++ b/pkg/kubectl/cmd/get.go @@ -35,7 +35,7 @@ const ( Possible resources include (case insensitive): pods (po), services (svc), replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs), limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), -resourcequotas (quota) or secrets. +resourcequotas (quota), namespaces (ns), endpoints (ep) or secrets. By specifying the output as 'template' and providing a Go template as the value of the --template flag, you can filter the attributes of the fetched resource(s).` @@ -100,7 +100,9 @@ func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string } if len(args) == 0 { - fmt.Fprint(out, "You must specify the type of resource to get. ", valid_resources) + fmt.Fprint(out, "You must specify the type of resource to get. ", valid_resources, ` * componentstatuses (aka 'cs') + * endpoints (aka 'ep') +`) return cmdutil.UsageError(cmd, "Required resource not specified.") } diff --git a/pkg/kubectl/cmd/scale.go b/pkg/kubectl/cmd/scale.go index 0628dee0862..14ccc01ba75 100644 --- a/pkg/kubectl/cmd/scale.go +++ b/pkg/kubectl/cmd/scale.go @@ -26,6 +26,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl" cmdutil "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/errors" ) const ( @@ -39,7 +40,10 @@ scale is sent to the server.` $ kubectl scale --replicas=3 replicationcontrollers foo // If the replication controller named foo's current size is 2, scale foo to 3. 
-$ kubectl scale --current-replicas=2 --replicas=3 replicationcontrollers foo` +$ kubectl scale --current-replicas=2 --replicas=3 replicationcontrollers foo + +// Scale multiple replication controllers. +$ kubectl scale --replicas=5 rc/foo rc/bar` ) // NewCmdScale returns a cobra command with the appropriate configuration and flags to run scale @@ -61,6 +65,7 @@ func NewCmdScale(f *cmdutil.Factory, out io.Writer) *cobra.Command { cmd.Flags().String("resource-version", "", "Precondition for resource version. Requires that the current resource version match this value in order to scale.") cmd.Flags().Int("current-replicas", -1, "Precondition for current size. Requires that the current size of the replication controller match this value in order to scale.") cmd.Flags().Int("replicas", -1, "The new desired number of replicas. Required.") + cmd.Flags().Duration("timeout", 0, "The length of time to wait before giving up on a scale operation, zero means don't wait.") cmd.MarkFlagRequired("replicas") cmdutil.AddOutputFlagsForMutation(cmd) return cmd @@ -102,10 +107,6 @@ func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri if err != nil { return err } - if len(infos) > 1 { - return fmt.Errorf("multiple resources provided: %v", args) - } - info := infos[0] scaler, err := f.Scaler(mapping) if err != nil { @@ -113,13 +114,28 @@ func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []stri } resourceVersion := cmdutil.GetFlagString(cmd, "resource-version") + if len(resourceVersion) != 0 && len(infos) > 1 { + return fmt.Errorf("cannot use --resource-version with multiple controllers") + } currentSize := cmdutil.GetFlagInt(cmd, "current-replicas") + if currentSize != -1 && len(infos) > 1 { + return fmt.Errorf("cannot use --current-replicas with multiple controllers") + } precondition := &kubectl.ScalePrecondition{currentSize, resourceVersion} retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout) - waitForReplicas := 
kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout) - if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil { - return err + var waitForReplicas *kubectl.RetryParams + if timeout := cmdutil.GetFlagDuration(cmd, "timeout"); timeout != 0 { + waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout) } - cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "scaled") - return nil + + errs := []error{} + for _, info := range infos { + if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil { + errs = append(errs, err) + continue + } + cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "scaled") + } + + return errors.NewAggregate(errs) } diff --git a/pkg/kubectl/resource/visitor.go b/pkg/kubectl/resource/visitor.go index ca79b2ded5d..3107ed16f75 100644 --- a/pkg/kubectl/resource/visitor.go +++ b/pkg/kubectl/resource/visitor.go @@ -217,7 +217,7 @@ type URLVisitor struct { func (v *URLVisitor) Visit(fn VisitorFunc) error { res, err := http.Get(v.URL.String()) if err != nil { - return fmt.Errorf("unable to access URL %q: %v\n", v.URL, err) + return err } defer res.Body.Close() if res.StatusCode != 200 { @@ -415,7 +415,7 @@ func (v *FileVisitor) Visit(fn VisitorFunc) error { } else { var err error if f, err = os.Open(v.Path); err != nil { - return fmt.Errorf("unable to open %q: %v", v.Path, err) + return err } } defer f.Close() @@ -464,12 +464,12 @@ func (v *StreamVisitor) Visit(fn VisitorFunc) error { continue } if err := ValidateSchema(ext.RawJSON, v.Schema); err != nil { - return err + return fmt.Errorf("error validating %q: %v", v.Source, err) } info, err := v.InfoForData(ext.RawJSON, v.Source) if err != nil { if v.IgnoreErrors { - fmt.Fprintf(os.Stderr, "error: could not read an encoded object from %s: %v\n", v.Source, err) + fmt.Fprintf(os.Stderr, "error: could not read an encoded 
object: %v\n", err) glog.V(4).Infof("Unreadable: %s", string(ext.RawJSON)) continue } diff --git a/pkg/kubelet/cadvisor/cadvisor_linux.go b/pkg/kubelet/cadvisor/cadvisor_linux.go index 5f253e92b71..1c1535fed8f 100644 --- a/pkg/kubelet/cadvisor/cadvisor_linux.go +++ b/pkg/kubelet/cadvisor/cadvisor_linux.go @@ -44,6 +44,8 @@ var _ Interface = new(cadvisorClient) // TODO(vmarmol): Make configurable. // The amount of time for which to keep stats in memory. const statsCacheDuration = 2 * time.Minute +const maxHousekeepingInterval = 15 * time.Second +const allowDynamicHousekeeping = true // Creates a cAdvisor and exports its API on the specified port if port > 0. func New(port uint) (Interface, error) { @@ -53,7 +55,7 @@ func New(port uint) (Interface, error) { } // Create and start the cAdvisor container manager. - m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs) + m, err := manager.New(memory.New(statsCacheDuration, nil), sysFs, maxHousekeepingInterval, allowDynamicHousekeeping) if err != nil { return nil, err } diff --git a/pkg/kubelet/dockertools/manager.go b/pkg/kubelet/dockertools/manager.go index c7a062f74c7..609345537cb 100644 --- a/pkg/kubelet/dockertools/manager.go +++ b/pkg/kubelet/dockertools/manager.go @@ -1012,6 +1012,7 @@ func (dm *DockerManager) AttachContainer(containerId string, stdin io.Reader, st InputStream: stdin, OutputStream: stdout, ErrorStream: stderr, + Stream: true, Logs: true, Stdin: stdin != nil, Stdout: stdout != nil, diff --git a/pkg/master/master.go b/pkg/master/master.go index d7cf999a78f..a66e7dd5adf 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -36,6 +36,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/admission" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/rest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1" 
"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver" @@ -43,6 +44,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/auth/authorizer" "github.com/GoogleCloudPlatform/kubernetes/pkg/auth/handlers" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" + explatest "github.com/GoogleCloudPlatform/kubernetes/pkg/expapi/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" "github.com/GoogleCloudPlatform/kubernetes/pkg/healthz" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" @@ -69,6 +71,8 @@ import ( etcdallocator "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/allocator/etcd" ipallocator "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/ipallocator" serviceaccountetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/serviceaccount/etcd" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/ui" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" @@ -87,22 +91,25 @@ const ( // Config is a structure used to configure a Master. 
type Config struct { - DatabaseStorage tools.StorageInterface - EventTTL time.Duration - MinionRegexp string - KubeletClient client.KubeletClient + DatabaseStorage storage.Interface + ExpDatabaseStorage storage.Interface + EventTTL time.Duration + MinionRegexp string + KubeletClient client.KubeletClient // allow downstream consumers to disable the core controller loops EnableCoreControllers bool EnableLogsSupport bool EnableUISupport bool // allow downstream consumers to disable swagger EnableSwaggerSupport bool - // allow v1 to be conditionally disabled + // allow api versions to be conditionally disabled DisableV1 bool + EnableExp bool // allow downstream consumers to disable the index route EnableIndex bool EnableProfiling bool APIPrefix string + ExpAPIPrefix string CorsAllowedOriginList util.StringList Authenticator authenticator.Request // TODO(roberthbailey): Remove once the server no longer supports http basic auth. @@ -179,12 +186,14 @@ type Master struct { enableSwaggerSupport bool enableProfiling bool apiPrefix string + expAPIPrefix string corsAllowedOriginList util.StringList authenticator authenticator.Request authorizer authorizer.Authorizer admissionControl admission.Interface masterCount int v1 bool + exp bool requestContextMapper api.RequestContextMapper // External host is the name that should be used in external (public internet) URLs for this master @@ -223,17 +232,14 @@ type Master struct { clock util.Clock } -// NewEtcdStorage returns a StorageInterface for the provided arguments or an error if the version +// NewEtcdStorage returns a storage.Interface for the provided arguments or an error if the version // is incorrect. 
-func NewEtcdStorage(client tools.EtcdClient, version string, prefix string) (etcdStorage tools.StorageInterface, err error) { - if version == "" { - version = latest.Version - } - versionInterfaces, err := latest.InterfacesFor(version) +func NewEtcdStorage(client tools.EtcdClient, interfacesFunc meta.VersionInterfacesFunc, version, prefix string) (etcdStorage storage.Interface, err error) { + versionInterfaces, err := interfacesFunc(version) if err != nil { return etcdStorage, err } - return tools.NewEtcdStorage(client, versionInterfaces.Codec, prefix), nil + return etcdstorage.NewEtcdStorage(client, versionInterfaces.Codec, prefix), nil } // setDefaults fills in any fields not set that are required to have valid data. @@ -335,11 +341,13 @@ func New(c *Config) *Master { enableSwaggerSupport: c.EnableSwaggerSupport, enableProfiling: c.EnableProfiling, apiPrefix: c.APIPrefix, + expAPIPrefix: c.ExpAPIPrefix, corsAllowedOriginList: c.CorsAllowedOriginList, authenticator: c.Authenticator, authorizer: c.Authorizer, admissionControl: c.AdmissionControl, v1: !c.DisableV1, + exp: c.EnableExp, requestContextMapper: c.RequestContextMapper, cacheTimeout: c.CacheTimeout, @@ -470,6 +478,7 @@ func (m *Master) init(c *Config) { // TODO: Factor out the core API registration m.storage = map[string]rest.Storage{ "pods": podStorage.Pod, + "pods/attach": podStorage.Attach, "pods/status": podStorage.Status, "pods/log": podStorage.Log, "pods/exec": podStorage.Exec, @@ -564,6 +573,16 @@ func (m *Master) init(c *Config) { requestInfoResolver := &apiserver.APIRequestInfoResolver{util.NewStringSet(strings.TrimPrefix(defaultVersion.Root, "/")), defaultVersion.Mapper} apiserver.InstallServiceErrorHandler(m.handlerContainer, requestInfoResolver, apiVersions) + if m.exp { + expVersion := m.expapi(c) + if err := expVersion.InstallREST(m.handlerContainer); err != nil { + glog.Fatalf("Unable to setup experimental api: %v", err) + } + apiserver.AddApiWebService(m.handlerContainer, c.ExpAPIPrefix, 
[]string{expVersion.Version}) + expRequestInfoResolver := &apiserver.APIRequestInfoResolver{util.NewStringSet(strings.TrimPrefix(expVersion.Root, "/")), expVersion.Mapper} + apiserver.InstallServiceErrorHandler(m.handlerContainer, expRequestInfoResolver, []string{expVersion.Version}) + } + // Register root handler. // We do not register this using restful Webservice since we do not want to surface this in api docs. // Allow master to be embedded in contexts which already have something registered at the root @@ -721,7 +740,7 @@ func (m *Master) getServersToValidate(c *Config) map[string]apiserver.Server { addr = etcdUrl.Host port = 4001 } - serversToValidate[fmt.Sprintf("etcd-%d", ix)] = apiserver.Server{Addr: addr, Port: port, Path: "/health", Validate: tools.EtcdHealthCheck} + serversToValidate[fmt.Sprintf("etcd-%d", ix)] = apiserver.Server{Addr: addr, Port: port, Path: "/health", Validate: etcdstorage.EtcdHealthCheck} } return serversToValidate } @@ -758,6 +777,30 @@ func (m *Master) api_v1() *apiserver.APIGroupVersion { return version } +// expapi returns the resources and codec for the experimental api +func (m *Master) expapi(c *Config) *apiserver.APIGroupVersion { + storage := map[string]rest.Storage{} + return &apiserver.APIGroupVersion{ + Root: m.expAPIPrefix, + + Creater: api.Scheme, + Convertor: api.Scheme, + Typer: api.Scheme, + + Mapper: explatest.RESTMapper, + Codec: explatest.Codec, + Linker: explatest.SelfLinker, + Storage: storage, + Version: explatest.Version, + + Admit: m.admissionControl, + Context: m.requestContextMapper, + + ProxyDialerFn: m.dialer, + MinRequestTimeout: m.minRequestTimeout, + } +} + // findExternalAddress returns ExternalIP of provided node with fallback to LegacyHostIP. 
func findExternalAddress(node *api.Node) (string, error) { var fallback string diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go index 16ddfcce4fa..0f4ef374870 100644 --- a/pkg/master/master_test.go +++ b/pkg/master/master_test.go @@ -21,7 +21,9 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" + explatest "github.com/GoogleCloudPlatform/kubernetes/pkg/expapi/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/registrytest" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" ) @@ -31,7 +33,8 @@ func TestGetServersToValidate(t *testing.T) { config := Config{} fakeClient := tools.NewFakeEtcdClient(t) fakeClient.Machines = []string{"http://machine1:4001", "http://machine2", "http://machine3:4003"} - config.DatabaseStorage = tools.NewEtcdStorage(fakeClient, latest.Codec, etcdtest.PathPrefix()) + config.DatabaseStorage = etcdstorage.NewEtcdStorage(fakeClient, latest.Codec, etcdtest.PathPrefix()) + config.ExpDatabaseStorage = etcdstorage.NewEtcdStorage(fakeClient, explatest.Codec, etcdtest.PathPrefix()) master.nodeRegistry = registrytest.NewMinionRegistry([]string{"node1", "node2"}, api.NodeResources{}) diff --git a/pkg/namespace/namespace_controller_test.go b/pkg/namespace/namespace_controller_test.go index 0dd480d23a1..0be0cbf4e17 100644 --- a/pkg/namespace/namespace_controller_test.go +++ b/pkg/namespace/namespace_controller_test.go @@ -21,7 +21,6 @@ import ( "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" ) @@ -138,7 +137,7 @@ func TestSyncNamespaceThatIsActive(t *testing.T) { func TestRunStop(t *testing.T) { o := 
testclient.NewObjects(api.Scheme, api.Scheme) - client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, api.RESTMapper)} nsMgr := NewNamespaceManager(client, 1*time.Second) if nsMgr.StopEverything != nil { diff --git a/pkg/registry/controller/etcd/etcd.go b/pkg/registry/controller/etcd/etcd.go index 6fa75e4b11a..75a08e1f02c 100644 --- a/pkg/registry/controller/etcd/etcd.go +++ b/pkg/registry/controller/etcd/etcd.go @@ -24,7 +24,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic" etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" ) // rest implements a RESTStorage for replication controllers against etcd @@ -37,7 +37,7 @@ type REST struct { var controllerPrefix = "/controllers" // NewREST returns a RESTStorage object that will work against replication controllers. 
-func NewREST(s tools.StorageInterface) *REST { +func NewREST(s storage.Interface) *REST { store := &etcdgeneric.Etcd{ NewFunc: func() runtime.Object { return &api.ReplicationController{} }, diff --git a/pkg/registry/controller/etcd/etcd_test.go b/pkg/registry/controller/etcd/etcd_test.go index b2c309a2f1e..3477892a86d 100644 --- a/pkg/registry/controller/etcd/etcd_test.go +++ b/pkg/registry/controller/etcd/etcd_test.go @@ -30,6 +30,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" "github.com/coreos/go-etcd/etcd" @@ -40,10 +42,10 @@ const ( FAIL ) -func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, tools.StorageInterface) { +func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true - etcdStorage := tools.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) return fakeEtcdClient, etcdStorage } @@ -624,10 +626,10 @@ func TestEtcdWatchControllersFields(t *testing.T) { }, } testEtcdActions := []string{ - tools.EtcdCreate, - tools.EtcdSet, - tools.EtcdCAS, - tools.EtcdDelete} + etcdstorage.EtcdCreate, + etcdstorage.EtcdSet, + etcdstorage.EtcdCAS, + etcdstorage.EtcdDelete} controller := &api.ReplicationController{ ObjectMeta: api.ObjectMeta{ @@ -653,7 +655,7 @@ func TestEtcdWatchControllersFields(t *testing.T) { node := &etcd.Node{ Value: string(controllerBytes), } - if action == tools.EtcdDelete { + if action == etcdstorage.EtcdDelete { prevNode = node } 
fakeClient.WaitForWatchCompletion() diff --git a/pkg/registry/endpoint/etcd/etcd.go b/pkg/registry/endpoint/etcd/etcd.go index 7ca422d742e..0068487a334 100644 --- a/pkg/registry/endpoint/etcd/etcd.go +++ b/pkg/registry/endpoint/etcd/etcd.go @@ -24,7 +24,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic" etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" ) // rest implements a RESTStorage for endpoints against etcd @@ -33,7 +33,7 @@ type REST struct { } // NewStorage returns a RESTStorage object that will work against endpoints. -func NewStorage(s tools.StorageInterface) *REST { +func NewStorage(s storage.Interface) *REST { prefix := "/services/endpoints" return &REST{ &etcdgeneric.Etcd{ diff --git a/pkg/registry/endpoint/etcd/etcd_test.go b/pkg/registry/endpoint/etcd/etcd_test.go index 45adbe7c3b9..9ad3a9cd621 100644 --- a/pkg/registry/endpoint/etcd/etcd_test.go +++ b/pkg/registry/endpoint/etcd/etcd_test.go @@ -25,6 +25,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" @@ -32,10 +34,10 @@ import ( "github.com/coreos/go-etcd/etcd" ) -func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, tools.StorageInterface) { +func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true - etcdStorage := 
tools.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) return fakeEtcdClient, etcdStorage } diff --git a/pkg/registry/etcd/etcd.go b/pkg/registry/etcd/etcd.go index 16dab56f1e3..b9acedff0cb 100644 --- a/pkg/registry/etcd/etcd.go +++ b/pkg/registry/etcd/etcd.go @@ -26,7 +26,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/endpoint" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" "github.com/GoogleCloudPlatform/kubernetes/pkg/watch" ) @@ -44,17 +44,17 @@ const ( // Registry implements BindingRegistry, ControllerRegistry, EndpointRegistry, // MinionRegistry, PodRegistry and ServiceRegistry, backed by etcd. type Registry struct { - tools.StorageInterface + storage.Interface pods pod.Registry endpoints endpoint.Registry } // NewRegistry creates an etcd registry. -func NewRegistry(storage tools.StorageInterface, pods pod.Registry, endpoints endpoint.Registry) *Registry { +func NewRegistry(storage storage.Interface, pods pod.Registry, endpoints endpoint.Registry) *Registry { registry := &Registry{ - StorageInterface: storage, - pods: pods, - endpoints: endpoints, + Interface: storage, + pods: pods, + endpoints: endpoints, } return registry } @@ -158,7 +158,7 @@ func (r *Registry) UpdateService(ctx api.Context, svc *api.Service) (*api.Servic // WatchServices begins watching for new, changed, or deleted service configurations. 
func (r *Registry) WatchServices(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - version, err := tools.ParseWatchResourceVersion(resourceVersion, "service") + version, err := storage.ParseWatchResourceVersion(resourceVersion, "service") if err != nil { return nil, err } @@ -171,10 +171,10 @@ func (r *Registry) WatchServices(ctx api.Context, label labels.Selector, field f return nil, err } // TODO: use generic.SelectionPredicate - return r.Watch(key, version, tools.Everything) + return r.Watch(key, version, storage.Everything) } if field.Empty() { - return r.WatchList(makeServiceListKey(ctx), version, tools.Everything) + return r.WatchList(makeServiceListKey(ctx), version, storage.Everything) } return nil, fmt.Errorf("only the 'name' and default (everything) field selectors are supported") } diff --git a/pkg/registry/etcd/etcd_test.go b/pkg/registry/etcd/etcd_test.go index 8771273ac07..57ad7bc1016 100644 --- a/pkg/registry/etcd/etcd_test.go +++ b/pkg/registry/etcd/etcd_test.go @@ -31,6 +31,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod" podetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" @@ -38,13 +39,13 @@ import ( ) func NewTestEtcdRegistry(client tools.EtcdClient) *Registry { - storage := tools.NewEtcdStorage(client, latest.Codec, etcdtest.PathPrefix()) + storage := etcdstorage.NewEtcdStorage(client, latest.Codec, etcdtest.PathPrefix()) registry := NewRegistry(storage, nil, nil) return registry } func NewTestEtcdRegistryWithPods(client tools.EtcdClient) *Registry { - etcdStorage := tools.NewEtcdStorage(client, latest.Codec, etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(client, 
latest.Codec, etcdtest.PathPrefix()) podStorage := podetcd.NewStorage(etcdStorage, nil) endpointStorage := endpointetcd.NewStorage(etcdStorage) registry := NewRegistry(etcdStorage, pod.NewRegistry(podStorage.Pod), endpoint.NewRegistry(endpointStorage)) diff --git a/pkg/registry/event/registry.go b/pkg/registry/event/registry.go index 3856dbd9c6c..c3025fd2bab 100644 --- a/pkg/registry/event/registry.go +++ b/pkg/registry/event/registry.go @@ -21,7 +21,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic" etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" ) // registry implements custom changes to generic.Etcd. @@ -31,7 +31,7 @@ type registry struct { // NewEtcdRegistry returns a registry which will store Events in the given // EtcdStorage. ttl is the time that Events will be retained by the system. 
-func NewEtcdRegistry(s tools.StorageInterface, ttl uint64) generic.Registry { +func NewEtcdRegistry(s storage.Interface, ttl uint64) generic.Registry { prefix := "/events" return registry{ Etcd: &etcdgeneric.Etcd{ diff --git a/pkg/registry/event/registry_test.go b/pkg/registry/event/registry_test.go index 7ea78814ff9..c54fbad9e20 100644 --- a/pkg/registry/event/registry_test.go +++ b/pkg/registry/event/registry_test.go @@ -26,6 +26,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic" etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" @@ -39,7 +40,7 @@ func NewTestEventEtcdRegistry(t *testing.T) (*tools.FakeEtcdClient, generic.Regi f := tools.NewFakeEtcdClient(t) f.TestIndex = true - s := tools.NewEtcdStorage(f, testapi.Codec(), etcdtest.PathPrefix()) + s := etcdstorage.NewEtcdStorage(f, testapi.Codec(), etcdtest.PathPrefix()) return f, NewEtcdRegistry(s, testTTL) } diff --git a/pkg/registry/generic/etcd/etcd.go b/pkg/registry/generic/etcd/etcd.go index 21db4d79ce2..2429c340b5b 100644 --- a/pkg/registry/generic/etcd/etcd.go +++ b/pkg/registry/generic/etcd/etcd.go @@ -29,7 +29,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/watch" @@ -103,7 +103,7 @@ type Etcd struct { ReturnDeletedObject bool // Used for all etcd access functions - Storage tools.StorageInterface + 
Storage storage.Interface } // NamespaceKeyRootFunc is the default function for constructing etcd paths to resource directories enforcing namespace rules. @@ -282,7 +282,7 @@ func (e *Etcd) Update(ctx api.Context, obj runtime.Object) (runtime.Object, bool // TODO: expose TTL creating := false out := e.NewFunc() - err = e.Storage.GuaranteedUpdate(key, out, true, func(existing runtime.Object, res tools.ResponseMeta) (runtime.Object, *uint64, error) { + err = e.Storage.GuaranteedUpdate(key, out, true, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) { version, err := e.Storage.Versioner().ObjectResourceVersion(existing) if err != nil { return nil, nil, err @@ -455,7 +455,7 @@ func (e *Etcd) Watch(ctx api.Context, label labels.Selector, field fields.Select // WatchPredicate starts a watch for the items that m matches. func (e *Etcd) WatchPredicate(ctx api.Context, m generic.Matcher, resourceVersion string) (watch.Interface, error) { - version, err := tools.ParseWatchResourceVersion(resourceVersion, e.EndpointName) + version, err := storage.ParseWatchResourceVersion(resourceVersion, e.EndpointName) if err != nil { return nil, err } diff --git a/pkg/registry/generic/etcd/etcd_test.go b/pkg/registry/generic/etcd/etcd_test.go index 85e08d39a1a..7b0b9603269 100644 --- a/pkg/registry/generic/etcd/etcd_test.go +++ b/pkg/registry/generic/etcd/etcd_test.go @@ -26,6 +26,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" @@ -69,7 +70,7 @@ func hasCreated(t *testing.T, pod *api.Pod) func(runtime.Object) bool { func NewTestGenericEtcdRegistry(t *testing.T) 
(*tools.FakeEtcdClient, *Etcd) { f := tools.NewFakeEtcdClient(t) f.TestIndex = true - s := tools.NewEtcdStorage(f, testapi.Codec(), etcdtest.PathPrefix()) + s := etcdstorage.NewEtcdStorage(f, testapi.Codec(), etcdtest.PathPrefix()) strategy := &testRESTStrategy{api.Scheme, api.SimpleNameGenerator, true, false, true} podPrefix := "/pods" return f, &Etcd{ diff --git a/pkg/registry/limitrange/registry.go b/pkg/registry/limitrange/registry.go index 45fd524d2eb..b6efa9e6fef 100644 --- a/pkg/registry/limitrange/registry.go +++ b/pkg/registry/limitrange/registry.go @@ -21,7 +21,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic" etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" ) // registry implements custom changes to generic.Etcd. @@ -30,7 +30,7 @@ type registry struct { } // NewEtcdRegistry returns a registry which will store LimitRange in the given storage -func NewEtcdRegistry(s tools.StorageInterface) generic.Registry { +func NewEtcdRegistry(s storage.Interface) generic.Registry { prefix := "/limitranges" return registry{ Etcd: &etcdgeneric.Etcd{ diff --git a/pkg/registry/limitrange/registry_test.go b/pkg/registry/limitrange/registry_test.go index 900da79d09a..ea5d6020a6a 100644 --- a/pkg/registry/limitrange/registry_test.go +++ b/pkg/registry/limitrange/registry_test.go @@ -27,6 +27,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic" etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" @@ 
-37,7 +38,7 @@ import ( func NewTestLimitRangeEtcdRegistry(t *testing.T) (*tools.FakeEtcdClient, generic.Registry) { f := tools.NewFakeEtcdClient(t) f.TestIndex = true - s := tools.NewEtcdStorage(f, testapi.Codec(), etcdtest.PathPrefix()) + s := etcdstorage.NewEtcdStorage(f, testapi.Codec(), etcdtest.PathPrefix()) return f, NewEtcdRegistry(s) } diff --git a/pkg/registry/minion/etcd/etcd.go b/pkg/registry/minion/etcd/etcd.go index 9c8bd7c73e3..48388ed6ed5 100644 --- a/pkg/registry/minion/etcd/etcd.go +++ b/pkg/registry/minion/etcd/etcd.go @@ -26,7 +26,7 @@ import ( etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/minion" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" ) type REST struct { @@ -49,7 +49,7 @@ func (r *StatusREST) Update(ctx api.Context, obj runtime.Object) (runtime.Object } // NewStorage returns a RESTStorage object that will work against nodes. 
-func NewStorage(s tools.StorageInterface, connection client.ConnectionInfoGetter) (*REST, *StatusREST) { +func NewStorage(s storage.Interface, connection client.ConnectionInfoGetter) (*REST, *StatusREST) { prefix := "/minions" store := &etcdgeneric.Etcd{ NewFunc: func() runtime.Object { return &api.Node{} }, diff --git a/pkg/registry/minion/etcd/etcd_test.go b/pkg/registry/minion/etcd/etcd_test.go index 6fe9ed47239..884c4fa0271 100644 --- a/pkg/registry/minion/etcd/etcd_test.go +++ b/pkg/registry/minion/etcd/etcd_test.go @@ -29,6 +29,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" @@ -47,10 +49,10 @@ func (fakeConnectionInfoGetter) GetConnectionInfo(host string) (string, uint, ht return "http", 12345, nil, nil } -func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, tools.StorageInterface) { +func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true - etcdStorage := tools.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) return fakeEtcdClient, etcdStorage } diff --git a/pkg/registry/namespace/etcd/etcd.go b/pkg/registry/namespace/etcd/etcd.go index aad15116686..e5c77f6ca2c 100644 --- a/pkg/registry/namespace/etcd/etcd.go +++ b/pkg/registry/namespace/etcd/etcd.go @@ -28,7 +28,7 @@ import ( etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/namespace" 
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" ) @@ -49,7 +49,7 @@ type FinalizeREST struct { } // NewStorage returns a RESTStorage object that will work against namespaces -func NewStorage(s tools.StorageInterface) (*REST, *StatusREST, *FinalizeREST) { +func NewStorage(s storage.Interface) (*REST, *StatusREST, *FinalizeREST) { prefix := "/namespaces" store := &etcdgeneric.Etcd{ NewFunc: func() runtime.Object { return &api.Namespace{} }, diff --git a/pkg/registry/namespace/etcd/etcd_test.go b/pkg/registry/namespace/etcd/etcd_test.go index c1cbbd9fe67..864210e1924 100644 --- a/pkg/registry/namespace/etcd/etcd_test.go +++ b/pkg/registry/namespace/etcd/etcd_test.go @@ -26,6 +26,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/namespace" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" @@ -33,14 +35,14 @@ import ( "github.com/coreos/go-etcd/etcd" ) -func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, tools.StorageInterface) { +func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true - etcdStorage := tools.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) return fakeEtcdClient, etcdStorage } -func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient, tools.StorageInterface) { +func newStorage(t 
*testing.T) (*REST, *tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient, s := newEtcdStorage(t) storage, _, _ := NewStorage(s) return storage, fakeEtcdClient, s diff --git a/pkg/registry/persistentvolume/etcd/etcd.go b/pkg/registry/persistentvolume/etcd/etcd.go index dcb4c305641..acb3362280e 100644 --- a/pkg/registry/persistentvolume/etcd/etcd.go +++ b/pkg/registry/persistentvolume/etcd/etcd.go @@ -26,7 +26,7 @@ import ( etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/persistentvolume" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" ) // rest implements a RESTStorage for persistentvolumes against etcd @@ -35,7 +35,7 @@ type REST struct { } // NewREST returns a RESTStorage object that will work against PersistentVolume objects. -func NewStorage(s tools.StorageInterface) (*REST, *StatusREST) { +func NewStorage(s storage.Interface) (*REST, *StatusREST) { prefix := "/persistentvolumes" store := &etcdgeneric.Etcd{ NewFunc: func() runtime.Object { return &api.PersistentVolume{} }, diff --git a/pkg/registry/persistentvolume/etcd/etcd_test.go b/pkg/registry/persistentvolume/etcd/etcd_test.go index b1ea67d7913..bfd71f0e1da 100644 --- a/pkg/registry/persistentvolume/etcd/etcd_test.go +++ b/pkg/registry/persistentvolume/etcd/etcd_test.go @@ -27,6 +27,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/registrytest" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" @@ -38,10 +40,10 @@ 
type testRegistry struct { *registrytest.GenericRegistry } -func newStorage(t *testing.T) (*REST, *StatusREST, *tools.FakeEtcdClient, tools.StorageInterface) { +func newStorage(t *testing.T) (*REST, *StatusREST, *tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true - etcdStorage := tools.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) storage, statusStorage := NewStorage(etcdStorage) return storage, statusStorage, fakeEtcdClient, etcdStorage } diff --git a/pkg/registry/persistentvolumeclaim/etcd/etcd.go b/pkg/registry/persistentvolumeclaim/etcd/etcd.go index 43c43b0e546..384601976e9 100644 --- a/pkg/registry/persistentvolumeclaim/etcd/etcd.go +++ b/pkg/registry/persistentvolumeclaim/etcd/etcd.go @@ -24,7 +24,7 @@ import ( etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/persistentvolumeclaim" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" ) // rest implements a RESTStorage for persistentvolumeclaims against etcd @@ -33,7 +33,7 @@ type REST struct { } // NewREST returns a RESTStorage object that will work against PersistentVolumeClaim objects. 
-func NewStorage(s tools.StorageInterface) (*REST, *StatusREST) { +func NewStorage(s storage.Interface) (*REST, *StatusREST) { prefix := "/persistentvolumeclaims" store := &etcdgeneric.Etcd{ NewFunc: func() runtime.Object { return &api.PersistentVolumeClaim{} }, diff --git a/pkg/registry/persistentvolumeclaim/etcd/etcd_test.go b/pkg/registry/persistentvolumeclaim/etcd/etcd_test.go index 3325abc448c..6f298d5a739 100644 --- a/pkg/registry/persistentvolumeclaim/etcd/etcd_test.go +++ b/pkg/registry/persistentvolumeclaim/etcd/etcd_test.go @@ -27,6 +27,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/registrytest" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" @@ -38,10 +40,10 @@ type testRegistry struct { *registrytest.GenericRegistry } -func newStorage(t *testing.T) (*REST, *StatusREST, *tools.FakeEtcdClient, tools.StorageInterface) { +func newStorage(t *testing.T) (*REST, *StatusREST, *tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true - etcdStorage := tools.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) storage, statusStorage := NewStorage(etcdStorage) return storage, statusStorage, fakeEtcdClient, etcdStorage } diff --git a/pkg/registry/pod/etcd/etcd.go b/pkg/registry/pod/etcd/etcd.go index bb7b4e2f716..1c8391bda26 100644 --- a/pkg/registry/pod/etcd/etcd.go +++ b/pkg/registry/pod/etcd/etcd.go @@ -35,7 +35,7 @@ import ( genericrest "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/rest" 
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/fielderrors" ) @@ -47,6 +47,7 @@ type PodStorage struct { Log *LogREST Proxy *ProxyREST Exec *ExecREST + Attach *AttachREST PortForward *PortForwardREST } @@ -56,7 +57,7 @@ type REST struct { } // NewStorage returns a RESTStorage object that will work against pods. -func NewStorage(s tools.StorageInterface, k client.ConnectionInfoGetter) PodStorage { +func NewStorage(s storage.Interface, k client.ConnectionInfoGetter) PodStorage { prefix := "/pods" store := &etcdgeneric.Etcd{ NewFunc: func() runtime.Object { return &api.Pod{} }, @@ -96,6 +97,7 @@ func NewStorage(s tools.StorageInterface, k client.ConnectionInfoGetter) PodStor Log: &LogREST{store: store, kubeletConn: k}, Proxy: &ProxyREST{store: store}, Exec: &ExecREST{store: store, kubeletConn: k}, + Attach: &AttachREST{store: store, kubeletConn: k}, PortForward: &PortForwardREST{store: store, kubeletConn: k}, } } @@ -143,7 +145,7 @@ func (r *BindingREST) setPodHostAndAnnotations(ctx api.Context, podID, oldMachin if err != nil { return nil, err } - err = r.store.Storage.GuaranteedUpdate(podKey, &api.Pod{}, false, tools.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { + err = r.store.Storage.GuaranteedUpdate(podKey, &api.Pod{}, false, storage.SimpleUpdate(func(obj runtime.Object) (runtime.Object, error) { pod, ok := obj.(*api.Pod) if !ok { return nil, fmt.Errorf("unexpected object: %#v", obj) @@ -284,6 +286,43 @@ func (r *ProxyREST) Connect(ctx api.Context, id string, opts runtime.Object) (re // Support both GET and POST methods. Over time, we want to move all clients to start using POST and then stop supporting GET. 
var upgradeableMethods = []string{"GET", "POST"} +// AttachREST implements the attach subresource for a Pod +type AttachREST struct { + store *etcdgeneric.Etcd + kubeletConn client.ConnectionInfoGetter +} + +// Implement Connecter +var _ = rest.Connecter(&AttachREST{}) + +// New creates a new Pod object +func (r *AttachREST) New() runtime.Object { + return &api.Pod{} +} + +// Connect returns a handler for the pod attach proxy +func (r *AttachREST) Connect(ctx api.Context, name string, opts runtime.Object) (rest.ConnectHandler, error) { + attachOpts, ok := opts.(*api.PodAttachOptions) + if !ok { + return nil, fmt.Errorf("Invalid options object: %#v", opts) + } + location, transport, err := pod.AttachLocation(r.store, r.kubeletConn, ctx, name, attachOpts) + if err != nil { + return nil, err + } + return genericrest.NewUpgradeAwareProxyHandler(location, transport, true), nil +} + +// NewConnectOptions returns the versioned object that represents attach parameters +func (r *AttachREST) NewConnectOptions() (runtime.Object, bool, string) { + return &api.PodAttachOptions{}, false, "" +} + +// ConnectMethods returns the methods supported by attach +func (r *AttachREST) ConnectMethods() []string { + return upgradeableMethods +} + // ExecREST implements the exec subresource for a Pod type ExecREST struct { store *etcdgeneric.Etcd diff --git a/pkg/registry/pod/etcd/etcd_test.go b/pkg/registry/pod/etcd/etcd_test.go index e89ab1513f7..e8999e63a68 100644 --- a/pkg/registry/pod/etcd/etcd_test.go +++ b/pkg/registry/pod/etcd/etcd_test.go @@ -33,6 +33,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/securitycontext" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" 
"github.com/GoogleCloudPlatform/kubernetes/pkg/util" @@ -40,14 +42,14 @@ import ( "github.com/coreos/go-etcd/etcd" ) -func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, tools.StorageInterface) { +func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true - etcdStorage := tools.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) return fakeEtcdClient, etcdStorage } -func newStorage(t *testing.T) (*REST, *BindingREST, *StatusREST, *tools.FakeEtcdClient, tools.StorageInterface) { +func newStorage(t *testing.T) (*REST, *BindingREST, *StatusREST, *tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient, etcdStorage := newEtcdStorage(t) storage := NewStorage(etcdStorage, nil) return storage.Pod, storage.Binding, storage.Status, fakeEtcdClient, etcdStorage diff --git a/pkg/registry/pod/rest.go b/pkg/registry/pod/rest.go index d35e8302eda..e5bcd5807db 100644 --- a/pkg/registry/pod/rest.go +++ b/pkg/registry/pod/rest.go @@ -188,7 +188,6 @@ func ResourceLocation(getter ResourceGetter, ctx api.Context, id string) (*url.U // LogLocation returns a the log URL for a pod container. If opts.Container is blank // and only one container is present in the pod, that container is used. 
func LogLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, ctx api.Context, name string, opts *api.PodLogOptions) (*url.URL, http.RoundTripper, error) { - pod, err := getPod(getter, ctx, name) if err != nil { return nil, nil, err @@ -228,17 +227,62 @@ func LogLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, ct return loc, nodeTransport, nil } +func streamParams(params url.Values, opts runtime.Object) error { + switch opts := opts.(type) { + case *api.PodExecOptions: + if opts.Stdin { + params.Add(api.ExecStdinParam, "1") + } + if opts.Stdout { + params.Add(api.ExecStdoutParam, "1") + } + if opts.Stderr { + params.Add(api.ExecStderrParam, "1") + } + if opts.TTY { + params.Add(api.ExecTTYParam, "1") + } + for _, c := range opts.Command { + params.Add("command", c) + } + case *api.PodAttachOptions: + if opts.Stdin { + params.Add(api.ExecStdinParam, "1") + } + if opts.Stdout { + params.Add(api.ExecStdoutParam, "1") + } + if opts.Stderr { + params.Add(api.ExecStderrParam, "1") + } + if opts.TTY { + params.Add(api.ExecTTYParam, "1") + } + default: + return fmt.Errorf("Unknown object for streaming: %v", opts) + } + return nil +} + +// AttachLocation returns the attach URL for a pod container. If opts.Container is blank +// and only one container is present in the pod, that container is used. +func AttachLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, ctx api.Context, name string, opts *api.PodAttachOptions) (*url.URL, http.RoundTripper, error) { + return streamLocation(getter, connInfo, ctx, name, opts, opts.Container, "attach") +} + // ExecLocation returns the exec URL for a pod container. If opts.Container is blank // and only one container is present in the pod, that container is used. 
func ExecLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, ctx api.Context, name string, opts *api.PodExecOptions) (*url.URL, http.RoundTripper, error) { + return streamLocation(getter, connInfo, ctx, name, opts, opts.Container, "exec") +} +func streamLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, ctx api.Context, name string, opts runtime.Object, container, path string) (*url.URL, http.RoundTripper, error) { pod, err := getPod(getter, ctx, name) if err != nil { return nil, nil, err } // Try to figure out a container - container := opts.Container if container == "" { if len(pod.Spec.Containers) == 1 { container = pod.Spec.Containers[0].Name @@ -256,25 +300,13 @@ func ExecLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, c return nil, nil, err } params := url.Values{} - if opts.Stdin { - params.Add(api.ExecStdinParam, "1") - } - if opts.Stdout { - params.Add(api.ExecStdoutParam, "1") - } - if opts.Stderr { - params.Add(api.ExecStderrParam, "1") - } - if opts.TTY { - params.Add(api.ExecTTYParam, "1") - } - for _, c := range opts.Command { - params.Add("command", c) + if err := streamParams(params, opts); err != nil { + return nil, nil, err } loc := &url.URL{ Scheme: nodeScheme, Host: fmt.Sprintf("%s:%d", nodeHost, nodePort), - Path: fmt.Sprintf("/exec/%s/%s/%s", pod.Namespace, name, container), + Path: fmt.Sprintf("/%s/%s/%s/%s", path, pod.Namespace, name, container), RawQuery: params.Encode(), } return loc, nodeTransport, nil @@ -282,7 +314,6 @@ func ExecLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, c // PortForwardLocation returns a the port-forward URL for a pod. 
func PortForwardLocation(getter ResourceGetter, connInfo client.ConnectionInfoGetter, ctx api.Context, name string) (*url.URL, http.RoundTripper, error) { - pod, err := getPod(getter, ctx, name) if err != nil { return nil, nil, err diff --git a/pkg/registry/podtemplate/etcd/etcd.go b/pkg/registry/podtemplate/etcd/etcd.go index 4140c0702a1..96d49695b9a 100644 --- a/pkg/registry/podtemplate/etcd/etcd.go +++ b/pkg/registry/podtemplate/etcd/etcd.go @@ -24,7 +24,7 @@ import ( etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/podtemplate" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" ) // rest implements a RESTStorage for pod templates against etcd @@ -33,7 +33,7 @@ type REST struct { } // NewREST returns a RESTStorage object that will work against pod templates. -func NewREST(s tools.StorageInterface) *REST { +func NewREST(s storage.Interface) *REST { prefix := "/podtemplates" store := etcdgeneric.Etcd{ NewFunc: func() runtime.Object { return &api.PodTemplate{} }, diff --git a/pkg/registry/podtemplate/etcd/etcd_test.go b/pkg/registry/podtemplate/etcd/etcd_test.go index 0ac4adf5f22..f6277c9ae82 100644 --- a/pkg/registry/podtemplate/etcd/etcd_test.go +++ b/pkg/registry/podtemplate/etcd/etcd_test.go @@ -22,14 +22,16 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/rest/resttest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" ) -func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, tools.StorageInterface) { +func newEtcdStorage(t 
*testing.T) (*tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true - etcdStorage := tools.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) return fakeEtcdClient, etcdStorage } diff --git a/pkg/registry/resourcequota/etcd/etcd.go b/pkg/registry/resourcequota/etcd/etcd.go index 9b5b1c9eba5..11225fcf0a1 100644 --- a/pkg/registry/resourcequota/etcd/etcd.go +++ b/pkg/registry/resourcequota/etcd/etcd.go @@ -24,7 +24,7 @@ import ( etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/resourcequota" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" ) // rest implements a RESTStorage for resourcequotas against etcd @@ -33,7 +33,7 @@ type REST struct { } // NewStorage returns a RESTStorage object that will work against ResourceQuota objects. 
-func NewStorage(s tools.StorageInterface) (*REST, *StatusREST) { +func NewStorage(s storage.Interface) (*REST, *StatusREST) { prefix := "/resourcequotas" store := &etcdgeneric.Etcd{ NewFunc: func() runtime.Object { return &api.ResourceQuota{} }, diff --git a/pkg/registry/resourcequota/etcd/etcd_test.go b/pkg/registry/resourcequota/etcd/etcd_test.go index c7b7d5f16e3..1b195d50a6b 100644 --- a/pkg/registry/resourcequota/etcd/etcd_test.go +++ b/pkg/registry/resourcequota/etcd/etcd_test.go @@ -31,6 +31,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/resourcequota" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" @@ -38,14 +40,14 @@ import ( "github.com/coreos/go-etcd/etcd" ) -func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, tools.StorageInterface) { +func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true - etcdStorage := tools.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, latest.Codec, etcdtest.PathPrefix()) return fakeEtcdClient, etcdStorage } -func newStorage(t *testing.T) (*REST, *StatusREST, *tools.FakeEtcdClient, tools.StorageInterface) { +func newStorage(t *testing.T) (*REST, *StatusREST, *tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient, h := newEtcdStorage(t) storage, statusStorage := NewStorage(h) return storage, statusStorage, fakeEtcdClient, h diff --git a/pkg/registry/secret/etcd/etcd.go b/pkg/registry/secret/etcd/etcd.go index c9d6b0d7ca6..6d678c351e1 100644 --- 
a/pkg/registry/secret/etcd/etcd.go +++ b/pkg/registry/secret/etcd/etcd.go @@ -24,7 +24,7 @@ import ( etcdgeneric "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/secret" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" ) // REST implements a RESTStorage for secrets against etcd @@ -33,7 +33,7 @@ type REST struct { } // NewStorage returns a registry which will store Secret in the given etcdStorage -func NewStorage(s tools.StorageInterface) *REST { +func NewStorage(s storage.Interface) *REST { prefix := "/secrets" store := &etcdgeneric.Etcd{ diff --git a/pkg/registry/secret/etcd/etcd_test.go b/pkg/registry/secret/etcd/etcd_test.go index 83047bdc95b..6a32e5dee67 100644 --- a/pkg/registry/secret/etcd/etcd_test.go +++ b/pkg/registry/secret/etcd/etcd_test.go @@ -22,14 +22,16 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/rest/resttest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" ) -func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, tools.StorageInterface) { +func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true - etcdStorage := tools.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) return fakeEtcdClient, etcdStorage } diff --git a/pkg/registry/service/allocator/etcd/etcd.go b/pkg/registry/service/allocator/etcd/etcd.go 
index 924391a83ed..57d45721b6b 100644 --- a/pkg/registry/service/allocator/etcd/etcd.go +++ b/pkg/registry/service/allocator/etcd/etcd.go @@ -27,7 +27,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/allocator" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" ) var ( @@ -42,7 +43,7 @@ type Etcd struct { lock sync.Mutex alloc allocator.Snapshottable - storage tools.StorageInterface + storage storage.Interface last string baseKey string @@ -55,7 +56,7 @@ var _ service.RangeRegistry = &Etcd{} // NewEtcd returns an allocator that is backed by Etcd and can manage // persisting the snapshot state of allocation after each allocation is made. -func NewEtcd(alloc allocator.Snapshottable, baseKey string, kind string, storage tools.StorageInterface) *Etcd { +func NewEtcd(alloc allocator.Snapshottable, baseKey string, kind string, storage storage.Interface) *Etcd { return &Etcd{ alloc: alloc, storage: storage, @@ -141,7 +142,7 @@ func (e *Etcd) Release(item int) error { // tryUpdate performs a read-update to persist the latest snapshot state of allocation. 
func (e *Etcd) tryUpdate(fn func() error) error { err := e.storage.GuaranteedUpdate(e.baseKey, &api.RangeAllocation{}, true, - tools.SimpleUpdate(func(input runtime.Object) (output runtime.Object, err error) { + storage.SimpleUpdate(func(input runtime.Object) (output runtime.Object, err error) { existing := input.(*api.RangeAllocation) if len(existing.ResourceVersion) == 0 { return nil, fmt.Errorf("cannot allocate resources of type %s at this time", e.kind) @@ -171,7 +172,7 @@ func (e *Etcd) Refresh() (*api.RangeAllocation, error) { existing := &api.RangeAllocation{} if err := e.storage.Get(e.baseKey, existing, false); err != nil { - if tools.IsEtcdNotFound(err) { + if etcdstorage.IsEtcdNotFound(err) { return nil, nil } return nil, etcderr.InterpretGetError(err, e.kind, "") @@ -198,7 +199,7 @@ func (e *Etcd) CreateOrUpdate(snapshot *api.RangeAllocation) error { last := "" err := e.storage.GuaranteedUpdate(e.baseKey, &api.RangeAllocation{}, true, - tools.SimpleUpdate(func(input runtime.Object) (output runtime.Object, err error) { + storage.SimpleUpdate(func(input runtime.Object) (output runtime.Object, err error) { existing := input.(*api.RangeAllocation) switch { case len(snapshot.ResourceVersion) != 0 && len(existing.ResourceVersion) != 0: diff --git a/pkg/registry/service/allocator/etcd/etcd_test.go b/pkg/registry/service/allocator/etcd/etcd_test.go index a3d3c48b19b..a3ff425db5e 100644 --- a/pkg/registry/service/allocator/etcd/etcd_test.go +++ b/pkg/registry/service/allocator/etcd/etcd_test.go @@ -26,14 +26,16 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/allocator" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" ) -func 
newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, tools.StorageInterface) { +func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true - etcdStorage := tools.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) return fakeEtcdClient, etcdStorage } diff --git a/pkg/registry/service/ipallocator/etcd/etcd_test.go b/pkg/registry/service/ipallocator/etcd/etcd_test.go index 9c7620ecf5d..e85d9119b86 100644 --- a/pkg/registry/service/ipallocator/etcd/etcd_test.go +++ b/pkg/registry/service/ipallocator/etcd/etcd_test.go @@ -29,14 +29,16 @@ import ( allocator_etcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/allocator/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/ipallocator" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" ) -func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, tools.StorageInterface) { +func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true - etcdStorage := tools.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) return fakeEtcdClient, etcdStorage } diff --git a/pkg/registry/serviceaccount/etcd/etcd.go b/pkg/registry/serviceaccount/etcd/etcd.go index c160c3be8fa..98e3bed8567 100644 --- a/pkg/registry/serviceaccount/etcd/etcd.go +++ b/pkg/registry/serviceaccount/etcd/etcd.go @@ -24,7 +24,7 @@ import ( etcdgeneric 
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/serviceaccount" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" ) // REST implements a RESTStorage for service accounts against etcd @@ -35,7 +35,7 @@ type REST struct { const Prefix = "/serviceaccounts" // NewStorage returns a RESTStorage object that will work against service accounts objects. -func NewStorage(s tools.StorageInterface) *REST { +func NewStorage(s storage.Interface) *REST { store := &etcdgeneric.Etcd{ NewFunc: func() runtime.Object { return &api.ServiceAccount{} }, NewListFunc: func() runtime.Object { return &api.ServiceAccountList{} }, diff --git a/pkg/registry/serviceaccount/etcd/etcd_test.go b/pkg/registry/serviceaccount/etcd/etcd_test.go index 3eef9c627d2..9f74985bd6f 100644 --- a/pkg/registry/serviceaccount/etcd/etcd_test.go +++ b/pkg/registry/serviceaccount/etcd/etcd_test.go @@ -22,14 +22,16 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/rest/resttest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + etcdstorage "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" ) -func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, tools.StorageInterface) { +func newEtcdStorage(t *testing.T) (*tools.FakeEtcdClient, storage.Interface) { fakeEtcdClient := tools.NewFakeEtcdClient(t) fakeEtcdClient.TestIndex = true - etcdStorage := tools.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) + etcdStorage := etcdstorage.NewEtcdStorage(fakeEtcdClient, testapi.Codec(), etcdtest.PathPrefix()) return fakeEtcdClient, etcdStorage } diff --git 
a/pkg/serviceaccount/tokengetter.go b/pkg/serviceaccount/tokengetter.go index 869cc92211e..458f9ab6c43 100644 --- a/pkg/serviceaccount/tokengetter.go +++ b/pkg/serviceaccount/tokengetter.go @@ -23,7 +23,7 @@ import ( secretetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/secret/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/serviceaccount" serviceaccountetcd "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/serviceaccount/etcd" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" ) // ServiceAccountTokenGetter defines functions to retrieve a named service account and secret @@ -73,7 +73,7 @@ func (r *registryGetter) GetSecret(namespace, name string) (*api.Secret, error) // NewGetterFromStorageInterface returns a ServiceAccountTokenGetter that // uses the specified storage to retrieve service accounts and secrets. -func NewGetterFromStorageInterface(storage tools.StorageInterface) ServiceAccountTokenGetter { +func NewGetterFromStorageInterface(storage storage.Interface) ServiceAccountTokenGetter { return NewGetterFromRegistries( serviceaccount.NewRegistry(serviceaccountetcd.NewStorage(storage)), secret.NewRegistry(secretetcd.NewStorage(storage)), diff --git a/pkg/storage/doc.go b/pkg/storage/doc.go new file mode 100644 index 00000000000..dca0d5b7096 --- /dev/null +++ b/pkg/storage/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Interfaces for database-related operations. +package storage diff --git a/pkg/tools/etcd_object.go b/pkg/storage/etcd/api_object_versioner.go similarity index 88% rename from pkg/tools/etcd_object.go rename to pkg/storage/etcd/api_object_versioner.go index eda9532be62..41a605f33ed 100644 --- a/pkg/tools/etcd_object.go +++ b/pkg/storage/etcd/api_object_versioner.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package tools +package etcd import ( "strconv" @@ -22,6 +22,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" ) @@ -29,7 +30,7 @@ import ( // for objects that have an embedded ObjectMeta or ListMeta field. type APIObjectVersioner struct{} -// UpdateObject implements StorageVersioner +// UpdateObject implements Versioner func (a APIObjectVersioner) UpdateObject(obj runtime.Object, expiration *time.Time, resourceVersion uint64) error { objectMeta, err := api.ObjectMetaFor(obj) if err != nil { @@ -46,7 +47,7 @@ func (a APIObjectVersioner) UpdateObject(obj runtime.Object, expiration *time.Ti return nil } -// UpdateList implements StorageVersioner +// UpdateList implements Versioner func (a APIObjectVersioner) UpdateList(obj runtime.Object, resourceVersion uint64) error { listMeta, err := api.ListMetaFor(obj) if err != nil || listMeta == nil { @@ -60,7 +61,7 @@ func (a APIObjectVersioner) UpdateList(obj runtime.Object, resourceVersion uint6 return nil } -// ObjectResourceVersion implements StorageVersioner +// ObjectResourceVersion implements Versioner func (a APIObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) { meta, err := api.ObjectMetaFor(obj) if err != nil { @@ -73,5 
+74,5 @@ func (a APIObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, e return strconv.ParseUint(version, 10, 64) } -// APIObjectVersioner implements StorageVersioner -var _ StorageVersioner = APIObjectVersioner{} +// APIObjectVersioner implements Versioner +var _ storage.Versioner = APIObjectVersioner{} diff --git a/pkg/tools/etcd_object_test.go b/pkg/storage/etcd/api_object_versioner_test.go similarity index 99% rename from pkg/tools/etcd_object_test.go rename to pkg/storage/etcd/api_object_versioner_test.go index 6675fe35013..3e3129ce1ab 100644 --- a/pkg/tools/etcd_object_test.go +++ b/pkg/storage/etcd/api_object_versioner_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package tools +package etcd import ( "testing" diff --git a/pkg/storage/etcd/doc.go b/pkg/storage/etcd/doc.go new file mode 100644 index 00000000000..44a2b9d4450 --- /dev/null +++ b/pkg/storage/etcd/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package etcd diff --git a/pkg/tools/etcd_helper.go b/pkg/storage/etcd/etcd_helper.go similarity index 92% rename from pkg/tools/etcd_helper.go rename to pkg/storage/etcd/etcd_helper.go index 730faf3a617..53a9e302e77 100644 --- a/pkg/tools/etcd_helper.go +++ b/pkg/storage/etcd/etcd_helper.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package tools +package etcd import ( "errors" @@ -27,6 +27,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/metrics" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/watch" @@ -35,7 +37,7 @@ import ( "github.com/golang/glog" ) -func NewEtcdStorage(client EtcdClient, codec runtime.Codec, prefix string) StorageInterface { +func NewEtcdStorage(client tools.EtcdClient, codec runtime.Codec, prefix string) storage.Interface { return &etcdHelper{ client: client, codec: codec, @@ -46,13 +48,13 @@ func NewEtcdStorage(client EtcdClient, codec runtime.Codec, prefix string) Stora } } -// etcdHelper is the reference implementation of StorageInterface. +// etcdHelper is the reference implementation of storage.Interface. type etcdHelper struct { - client EtcdClient + client tools.EtcdClient codec runtime.Codec copier runtime.ObjectCopier // optional, has to be set to perform any atomic operations - versioner StorageVersioner + versioner storage.Versioner // prefix for all etcd keys pathPrefix string @@ -70,17 +72,17 @@ func init() { metrics.Register() } -// Implements StorageInterface. +// Implements storage.Interface. func (h *etcdHelper) Backends() []string { return h.client.GetCluster() } -// Implements StorageInterface. 
-func (h *etcdHelper) Versioner() StorageVersioner { +// Implements storage.Interface. +func (h *etcdHelper) Versioner() storage.Versioner { return h.versioner } -// Implements StorageInterface. +// Implements storage.Interface. func (h *etcdHelper) Create(key string, obj, out runtime.Object, ttl uint64) error { key = h.prefixEtcdKey(key) data, err := h.codec.Encode(obj) @@ -108,7 +110,7 @@ func (h *etcdHelper) Create(key string, obj, out runtime.Object, ttl uint64) err return err } -// Implements StorageInterface. +// Implements storage.Interface. func (h *etcdHelper) Set(key string, obj, out runtime.Object, ttl uint64) error { var response *etcd.Response data, err := h.codec.Encode(obj) @@ -149,7 +151,7 @@ func (h *etcdHelper) Set(key string, obj, out runtime.Object, ttl uint64) error return err } -// Implements StorageInterface. +// Implements storage.Interface. func (h *etcdHelper) Delete(key string, out runtime.Object) error { key = h.prefixEtcdKey(key) if _, err := conversion.EnforcePtr(out); err != nil { @@ -168,7 +170,7 @@ func (h *etcdHelper) Delete(key string, out runtime.Object) error { return err } -// Implements StorageInterface. +// Implements storage.Interface. func (h *etcdHelper) RecursiveDelete(key string, recursive bool) error { key = h.prefixEtcdKey(key) startTime := time.Now() @@ -177,23 +179,23 @@ func (h *etcdHelper) RecursiveDelete(key string, recursive bool) error { return err } -// Implements StorageInterface. -func (h *etcdHelper) Watch(key string, resourceVersion uint64, filter FilterFunc) (watch.Interface, error) { +// Implements storage.Interface. +func (h *etcdHelper) Watch(key string, resourceVersion uint64, filter storage.FilterFunc) (watch.Interface, error) { key = h.prefixEtcdKey(key) w := newEtcdWatcher(false, nil, filter, h.codec, h.versioner, nil, h) go w.etcdWatch(h.client, key, resourceVersion) return w, nil } -// Implements StorageInterface. 
-func (h *etcdHelper) WatchList(key string, resourceVersion uint64, filter FilterFunc) (watch.Interface, error) { +// Implements storage.Interface. +func (h *etcdHelper) WatchList(key string, resourceVersion uint64, filter storage.FilterFunc) (watch.Interface, error) { key = h.prefixEtcdKey(key) w := newEtcdWatcher(true, exceptKey(key), filter, h.codec, h.versioner, nil, h) go w.etcdWatch(h.client, key, resourceVersion) return w, nil } -// Implements StorageInterface. +// Implements storage.Interface. func (h *etcdHelper) Get(key string, objPtr runtime.Object, ignoreNotFound bool) error { key = h.prefixEtcdKey(key) _, _, _, err := h.bodyAndExtractObj(key, objPtr, ignoreNotFound) @@ -244,7 +246,7 @@ func (h *etcdHelper) extractObj(response *etcd.Response, inErr error, objPtr run return body, node, err } -// Implements StorageInterface. +// Implements storage.Interface. func (h *etcdHelper) GetToList(key string, listObj runtime.Object) error { trace := util.NewTrace("GetToList " + getTypeName(listObj)) listPtr, err := runtime.GetItemsPtr(listObj) @@ -318,7 +320,7 @@ func (h *etcdHelper) decodeNodeList(nodes []*etcd.Node, slicePtr interface{}) er return nil } -// Implements StorageInterface. +// Implements storage.Interface. func (h *etcdHelper) List(key string, listObj runtime.Object) error { trace := util.NewTrace("List " + getTypeName(listObj)) defer trace.LogIfLong(time.Second) @@ -364,18 +366,8 @@ func (h *etcdHelper) listEtcdNode(key string) ([]*etcd.Node, uint64, error) { return result.Node.Nodes, result.EtcdIndex, nil } -type SimpleEtcdUpdateFunc func(runtime.Object) (runtime.Object, error) - -// SimpleUpdateFunc converts SimpleEtcdUpdateFunc into EtcdUpdateFunc -func SimpleUpdate(fn SimpleEtcdUpdateFunc) StorageUpdateFunc { - return func(input runtime.Object, _ ResponseMeta) (runtime.Object, *uint64, error) { - out, err := fn(input) - return out, nil, err - } -} - -// Implements StorageInterface. 
-func (h *etcdHelper) GuaranteedUpdate(key string, ptrToType runtime.Object, ignoreNotFound bool, tryUpdate StorageUpdateFunc) error { +// Implements storage.Interface. +func (h *etcdHelper) GuaranteedUpdate(key string, ptrToType runtime.Object, ignoreNotFound bool, tryUpdate storage.UpdateFunc) error { v, err := conversion.EnforcePtr(ptrToType) if err != nil { // Panic is appropriate, because this is a programming error. @@ -388,7 +380,7 @@ func (h *etcdHelper) GuaranteedUpdate(key string, ptrToType runtime.Object, igno if err != nil { return err } - meta := ResponseMeta{} + meta := storage.ResponseMeta{} if node != nil { meta.TTL = node.TTL if node.Expiration != nil { diff --git a/pkg/tools/etcd_helper_test.go b/pkg/storage/etcd/etcd_helper_test.go similarity index 91% rename from pkg/tools/etcd_helper_test.go rename to pkg/storage/etcd/etcd_helper_test.go index 32bcf3d2f92..4fa597fb0e5 100644 --- a/pkg/tools/etcd_helper_test.go +++ b/pkg/storage/etcd/etcd_helper_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package tools +package etcd import ( "errors" @@ -34,6 +34,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" "github.com/coreos/go-etcd/etcd" "github.com/stretchr/testify/assert" @@ -65,7 +67,7 @@ func init() { ) } -func newEtcdHelper(client EtcdClient, codec runtime.Codec, prefix string) etcdHelper { +func newEtcdHelper(client tools.EtcdClient, codec runtime.Codec, prefix string) etcdHelper { return *NewEtcdStorage(client, codec, prefix).(*etcdHelper) } @@ -75,7 +77,7 @@ func TestIsEtcdNotFound(t *testing.T) { t.Errorf("Expected %#v to return %v, but it did not", err, isNotFound) } } - try(EtcdErrorNotFound, true) + try(tools.EtcdErrorNotFound, true) try(&etcd.EtcdError{ErrorCode: 101}, false) try(nil, false) try(fmt.Errorf("some other kind of error"), false) @@ -90,10 +92,10 @@ func getEncodedPod(name string) string { } func TestList(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") - fakeClient.Data[key] = EtcdResponseWithError{ + fakeClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ EtcdIndex: 10, Node: &etcd.Node{ @@ -160,10 +162,10 @@ func TestList(t *testing.T) { // TestListAcrossDirectories ensures that the client excludes directories and flattens tree-response - simulates cross-namespace query func TestListAcrossDirectories(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") - fakeClient.Data[key] = 
EtcdResponseWithError{ + fakeClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ EtcdIndex: 10, Node: &etcd.Node{ @@ -243,10 +245,10 @@ func TestListAcrossDirectories(t *testing.T) { } func TestListExcludesDirectories(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") - fakeClient.Data[key] = EtcdResponseWithError{ + fakeClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ EtcdIndex: 10, Node: &etcd.Node{ @@ -314,7 +316,7 @@ func TestListExcludesDirectories(t *testing.T) { } func TestGet(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") expect := api.Pod{ @@ -336,10 +338,10 @@ func TestGet(t *testing.T) { } func TestGetNotFoundErr(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) key1 := etcdtest.AddPrefix("/some/key") - fakeClient.Data[key1] = EtcdResponseWithError{ + fakeClient.Data[key1] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: nil, }, @@ -348,13 +350,13 @@ func TestGetNotFoundErr(t *testing.T) { }, } key2 := etcdtest.AddPrefix("/some/key2") - fakeClient.Data[key2] = EtcdResponseWithError{ + fakeClient.Data[key2] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: nil, }, } key3 := etcdtest.AddPrefix("/some/key3") - fakeClient.Data[key3] = EtcdResponseWithError{ + fakeClient.Data[key3] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: "", @@ -380,7 +382,7 @@ func TestGetNotFoundErr(t *testing.T) { func TestCreate(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} - fakeClient := NewFakeEtcdClient(t) + fakeClient := 
tools.NewFakeEtcdClient(t) helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) returnedObj := &api.Pod{} err := helper.Create("/some/key", obj, returnedObj, 5) @@ -406,7 +408,7 @@ func TestCreate(t *testing.T) { func TestCreateNilOutParam(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) err := helper.Create("/some/key", obj, nil, 5) if err != nil { @@ -416,7 +418,7 @@ func TestCreateNilOutParam(t *testing.T) { func TestSet(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) returnedObj := &api.Pod{} err := helper.Set("/some/key", obj, returnedObj, 5) @@ -443,7 +445,7 @@ func TestSet(t *testing.T) { func TestSetFailCAS(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}} - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) fakeClient.CasErr = fakeClient.NewError(123) helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) err := helper.Set("/some/key", obj, nil, 5) @@ -454,11 +456,11 @@ func TestSetFailCAS(t *testing.T) { func TestSetWithVersion(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}} - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) fakeClient.TestIndex = true helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") - fakeClient.Data[key] = EtcdResponseWithError{ + fakeClient.Data[key] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(testapi.Codec(), obj), @@ -491,7 +493,7 @@ func TestSetWithVersion(t *testing.T) { 
func TestSetWithoutResourceVersioner(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) helper.versioner = nil returnedObj := &api.Pod{} @@ -519,7 +521,7 @@ func TestSetWithoutResourceVersioner(t *testing.T) { func TestSetNilOutParam(t *testing.T) { obj := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) helper := newEtcdHelper(fakeClient, testapi.Codec(), etcdtest.PathPrefix()) helper.versioner = nil err := helper.Set("/some/key", obj, nil, 3) @@ -529,7 +531,7 @@ func TestSetNilOutParam(t *testing.T) { } func TestGuaranteedUpdate(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) fakeClient.TestIndex = true helper := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") @@ -537,7 +539,7 @@ func TestGuaranteedUpdate(t *testing.T) { // Create a new node. fakeClient.ExpectNotFoundGet(key) obj := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1} - err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { + err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { return obj, nil })) if err != nil { @@ -556,7 +558,7 @@ func TestGuaranteedUpdate(t *testing.T) { // Update an existing node. 
callbackCalled := false objUpdate := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 2} - err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { + err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { callbackCalled = true if in.(*TestResource).Value != 1 { @@ -584,7 +586,7 @@ func TestGuaranteedUpdate(t *testing.T) { } func TestGuaranteedUpdateTTL(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) fakeClient.TestIndex = true helper := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") @@ -592,7 +594,7 @@ func TestGuaranteedUpdateTTL(t *testing.T) { // Create a new node. fakeClient.ExpectNotFoundGet(key) obj := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1} - err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { + err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) { if res.TTL != 0 { t.Fatalf("unexpected response meta: %#v", res) } @@ -618,7 +620,7 @@ func TestGuaranteedUpdateTTL(t *testing.T) { // Update an existing node. 
callbackCalled := false objUpdate := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 2} - err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { + err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) { if res.TTL != 10 { t.Fatalf("unexpected response meta: %#v", res) } @@ -650,7 +652,7 @@ func TestGuaranteedUpdateTTL(t *testing.T) { // Update an existing node and change ttl callbackCalled = false objUpdate = &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 3} - err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { + err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, func(in runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) { if res.TTL != 10 { t.Fatalf("unexpected response meta: %#v", res) } @@ -685,7 +687,7 @@ func TestGuaranteedUpdateTTL(t *testing.T) { } func TestGuaranteedUpdateNoChange(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) fakeClient.TestIndex = true helper := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") @@ -693,7 +695,7 @@ func TestGuaranteedUpdateNoChange(t *testing.T) { // Create a new node. 
fakeClient.ExpectNotFoundGet(key) obj := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1} - err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { + err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { return obj, nil })) if err != nil { @@ -703,7 +705,7 @@ func TestGuaranteedUpdateNoChange(t *testing.T) { // Update an existing node with the same data callbackCalled := false objUpdate := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1} - err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { + err = helper.GuaranteedUpdate("/some/key", &TestResource{}, true, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { fakeClient.Err = errors.New("should not be called") callbackCalled = true return objUpdate, nil @@ -717,7 +719,7 @@ func TestGuaranteedUpdateNoChange(t *testing.T) { } func TestGuaranteedUpdateKeyNotFound(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) fakeClient.TestIndex = true helper := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) key := etcdtest.AddPrefix("/some/key") @@ -726,7 +728,7 @@ func TestGuaranteedUpdateKeyNotFound(t *testing.T) { fakeClient.ExpectNotFoundGet(key) obj := &TestResource{ObjectMeta: api.ObjectMeta{Name: "foo"}, Value: 1} - f := SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { + f := storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { return obj, nil }) @@ -744,7 +746,7 @@ func TestGuaranteedUpdateKeyNotFound(t *testing.T) { } func TestGuaranteedUpdate_CreateCollision(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) fakeClient.TestIndex = true helper := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) key := 
etcdtest.AddPrefix("/some/key") @@ -763,7 +765,7 @@ func TestGuaranteedUpdate_CreateCollision(t *testing.T) { defer wgDone.Done() firstCall := true - err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { + err := helper.GuaranteedUpdate("/some/key", &TestResource{}, true, storage.SimpleUpdate(func(in runtime.Object) (runtime.Object, error) { defer func() { firstCall = false }() if firstCall { @@ -842,7 +844,7 @@ func TestGetEtcdVersion_NotListening(t *testing.T) { } func TestPrefixEtcdKey(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) prefix := path.Join("/", etcdtest.PathPrefix()) helper := newEtcdHelper(fakeClient, testapi.Codec(), prefix) diff --git a/pkg/tools/etcd_util.go b/pkg/storage/etcd/etcd_util.go similarity index 83% rename from pkg/tools/etcd_util.go rename to pkg/storage/etcd/etcd_util.go index ebcab56ded9..c5f0231d05a 100644 --- a/pkg/tools/etcd_util.go +++ b/pkg/storage/etcd/etcd_util.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package tools +package etcd import ( "encoding/json" @@ -23,41 +23,42 @@ import ( "net/http" "os/exec" - "github.com/coreos/go-etcd/etcd" + "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + goetcd "github.com/coreos/go-etcd/etcd" "github.com/golang/glog" ) // IsEtcdNotFound returns true iff err is an etcd not found error. func IsEtcdNotFound(err error) bool { - return isEtcdErrorNum(err, EtcdErrorCodeNotFound) + return isEtcdErrorNum(err, tools.EtcdErrorCodeNotFound) } // IsEtcdNodeExist returns true iff err is an etcd node aleady exist error. func IsEtcdNodeExist(err error) bool { - return isEtcdErrorNum(err, EtcdErrorCodeNodeExist) + return isEtcdErrorNum(err, tools.EtcdErrorCodeNodeExist) } // IsEtcdTestFailed returns true iff err is an etcd write conflict. 
func IsEtcdTestFailed(err error) bool { - return isEtcdErrorNum(err, EtcdErrorCodeTestFailed) + return isEtcdErrorNum(err, tools.EtcdErrorCodeTestFailed) } // IsEtcdWatchStoppedByUser returns true iff err is a client triggered stop. func IsEtcdWatchStoppedByUser(err error) bool { - return etcd.ErrWatchStoppedByUser == err + return goetcd.ErrWatchStoppedByUser == err } // isEtcdErrorNum returns true iff err is an etcd error, whose errorCode matches errorCode func isEtcdErrorNum(err error, errorCode int) bool { - etcdError, ok := err.(*etcd.EtcdError) + etcdError, ok := err.(*goetcd.EtcdError) return ok && etcdError != nil && etcdError.ErrorCode == errorCode } // etcdErrorIndex returns the index associated with the error message and whether the // index was available. func etcdErrorIndex(err error) (uint64, bool) { - if etcdError, ok := err.(*etcd.EtcdError); ok { + if etcdError, ok := err.(*goetcd.EtcdError); ok { return etcdError.Index, true } return 0, false @@ -90,7 +91,7 @@ func startEtcd() (*exec.Cmd, error) { return cmd, nil } -func NewEtcdClientStartServerIfNecessary(server string) (EtcdClient, error) { +func NewEtcdClientStartServerIfNecessary(server string) (tools.EtcdClient, error) { _, err := GetEtcdVersion(server) if err != nil { glog.Infof("Failed to find etcd, attempting to start.") @@ -101,7 +102,7 @@ func NewEtcdClientStartServerIfNecessary(server string) (EtcdClient, error) { } servers := []string{server} - return etcd.NewClient(servers), nil + return goetcd.NewClient(servers), nil } type etcdHealth struct { diff --git a/pkg/tools/etcd_watcher.go b/pkg/storage/etcd/etcd_watcher.go similarity index 86% rename from pkg/tools/etcd_watcher.go rename to pkg/storage/etcd/etcd_watcher.go index ab1ac597b81..403709712bb 100644 --- a/pkg/tools/etcd_watcher.go +++ b/pkg/storage/etcd/etcd_watcher.go @@ -14,18 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package tools +package etcd import ( - "strconv" "sync" "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" - "github.com/GoogleCloudPlatform/kubernetes/pkg/util/fielderrors" "github.com/GoogleCloudPlatform/kubernetes/pkg/watch" "github.com/coreos/go-etcd/etcd" @@ -41,31 +40,6 @@ const ( EtcdDelete = "delete" ) -// FilterFunc is a predicate which takes an API object and returns true -// iff the object should remain in the set. -type FilterFunc func(obj runtime.Object) bool - -// Everything is a FilterFunc which accepts all objects. -func Everything(runtime.Object) bool { - return true -} - -// ParseWatchResourceVersion takes a resource version argument and converts it to -// the etcd version we should pass to helper.Watch(). Because resourceVersion is -// an opaque value, the default watch behavior for non-zero watch is to watch -// the next value (if you pass "1", you will see updates from "2" onwards). -func ParseWatchResourceVersion(resourceVersion, kind string) (uint64, error) { - if resourceVersion == "" || resourceVersion == "0" { - return 0, nil - } - version, err := strconv.ParseUint(resourceVersion, 10, 64) - if err != nil { - // TODO: Does this need to be a ValidationErrorList? I can't convince myself it does. - return 0, errors.NewInvalid(kind, "", fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("resourceVersion", resourceVersion, err.Error())}) - } - return version + 1, nil -} - // TransformFunc attempts to convert an object to another object for use with a watcher. 
type TransformFunc func(runtime.Object) (runtime.Object, error) @@ -82,12 +56,12 @@ func exceptKey(except string) includeFunc { // etcdWatcher converts a native etcd watch to a watch.Interface. type etcdWatcher struct { encoding runtime.Codec - versioner StorageVersioner + versioner storage.Versioner transform TransformFunc list bool // If we're doing a recursive watch, should be true. include includeFunc - filter FilterFunc + filter storage.FilterFunc etcdIncoming chan *etcd.Response etcdError chan error @@ -110,7 +84,7 @@ const watchWaitDuration = 100 * time.Millisecond // newEtcdWatcher returns a new etcdWatcher; if list is true, watch sub-nodes. If you provide a transform // and a versioner, the versioner must be able to handle the objects that transform creates. -func newEtcdWatcher(list bool, include includeFunc, filter FilterFunc, encoding runtime.Codec, versioner StorageVersioner, transform TransformFunc, cache etcdCache) *etcdWatcher { +func newEtcdWatcher(list bool, include includeFunc, filter storage.FilterFunc, encoding runtime.Codec, versioner storage.Versioner, transform TransformFunc, cache etcdCache) *etcdWatcher { w := &etcdWatcher{ encoding: encoding, versioner: versioner, @@ -142,7 +116,7 @@ func newEtcdWatcher(list bool, include includeFunc, filter FilterFunc, encoding // etcdWatch calls etcd's Watch function, and handles any errors. Meant to be called // as a goroutine. 
-func (w *etcdWatcher) etcdWatch(client EtcdClient, key string, resourceVersion uint64) { +func (w *etcdWatcher) etcdWatch(client tools.EtcdClient, key string, resourceVersion uint64) { defer util.HandleCrash() defer close(w.etcdError) if resourceVersion == 0 { @@ -160,7 +134,7 @@ func (w *etcdWatcher) etcdWatch(client EtcdClient, key string, resourceVersion u } // etcdGetInitialWatchState turns an etcd Get request into a watch equivalent -func etcdGetInitialWatchState(client EtcdClient, key string, recursive bool, incoming chan<- *etcd.Response) (resourceVersion uint64, err error) { +func etcdGetInitialWatchState(client tools.EtcdClient, key string, recursive bool, incoming chan<- *etcd.Response) (resourceVersion uint64, err error) { resp, err := client.Get(key, false, recursive) if err != nil { if !IsEtcdNotFound(err) { diff --git a/pkg/tools/etcd_watcher_test.go b/pkg/storage/etcd/etcd_watcher_test.go similarity index 87% rename from pkg/tools/etcd_watcher_test.go rename to pkg/storage/etcd/etcd_watcher_test.go index a6e96129941..5edfde8d69b 100644 --- a/pkg/tools/etcd_watcher_test.go +++ b/pkg/storage/etcd/etcd_watcher_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package tools +package etcd import ( "fmt" @@ -22,9 +22,10 @@ import ( "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" "github.com/GoogleCloudPlatform/kubernetes/pkg/watch" "github.com/coreos/go-etcd/etcd" @@ -165,7 +166,7 @@ func TestWatchInterpretations(t *testing.T) { } func TestWatchInterpretation_ResponseNotSet(t *testing.T) { - w := newEtcdWatcher(false, nil, Everything, codec, versioner, nil, &fakeEtcdCache{}) + w := newEtcdWatcher(false, nil, storage.Everything, codec, versioner, nil, &fakeEtcdCache{}) w.emit = func(e watch.Event) { t.Errorf("Unexpected emit: %v", e) } @@ -179,7 +180,7 @@ func TestWatchInterpretation_ResponseNotSet(t *testing.T) { func TestWatchInterpretation_ResponseNoNode(t *testing.T) { actions := []string{"create", "set", "compareAndSwap", "delete"} for _, action := range actions { - w := newEtcdWatcher(false, nil, Everything, codec, versioner, nil, &fakeEtcdCache{}) + w := newEtcdWatcher(false, nil, storage.Everything, codec, versioner, nil, &fakeEtcdCache{}) w.emit = func(e watch.Event) { t.Errorf("Unexpected emit: %v", e) } @@ -193,7 +194,7 @@ func TestWatchInterpretation_ResponseNoNode(t *testing.T) { func TestWatchInterpretation_ResponseBadData(t *testing.T) { actions := []string{"create", "set", "compareAndSwap", "delete"} for _, action := range actions { - w := newEtcdWatcher(false, nil, Everything, codec, versioner, nil, &fakeEtcdCache{}) + w := newEtcdWatcher(false, nil, storage.Everything, codec, versioner, nil, &fakeEtcdCache{}) w.emit = func(e watch.Event) { t.Errorf("Unexpected emit: %v", e) } @@ -215,12 +216,12 @@ func TestWatchInterpretation_ResponseBadData(t 
*testing.T) { func TestWatchEtcdError(t *testing.T) { codec := latest.Codec - fakeClient := NewFakeEtcdClient(t) - fakeClient.expectNotFoundGetSet["/some/key"] = struct{}{} + fakeClient := tools.NewFakeEtcdClient(t) + fakeClient.ExpectNotFoundGet("/some/key") fakeClient.WatchImmediateError = fmt.Errorf("immediate error") h := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) - watching, err := h.Watch("/some/key", 4, Everything) + watching, err := h.Watch("/some/key", 4, storage.Everything) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -244,13 +245,13 @@ func TestWatchEtcdError(t *testing.T) { func TestWatch(t *testing.T) { codec := latest.Codec - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) key := "/some/key" prefixedKey := etcdtest.AddPrefix(key) - fakeClient.expectNotFoundGetSet[prefixedKey] = struct{}{} + fakeClient.ExpectNotFoundGet(prefixedKey) h := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) - watching, err := h.Watch(key, 0, Everything) + watching, err := h.Watch(key, 0, storage.Everything) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -322,13 +323,13 @@ func TestWatchEtcdState(t *testing.T) { Endpoints []api.EndpointSubset } testCases := map[string]struct { - Initial map[string]EtcdResponseWithError + Initial map[string]tools.EtcdResponseWithError Responses []*etcd.Response From uint64 Expected []*T }{ "from not found": { - Initial: map[string]EtcdResponseWithError{}, + Initial: map[string]tools.EtcdResponseWithError{}, Responses: []*etcd.Response{ { Action: "create", @@ -373,7 +374,7 @@ func TestWatchEtcdState(t *testing.T) { }, }, "from initial state": { - Initial: map[string]EtcdResponseWithError{ + Initial: map[string]tools.EtcdResponseWithError{ prefixedKey: { R: &etcd.Response{ Action: "get", @@ -419,13 +420,13 @@ func TestWatchEtcdState(t *testing.T) { } for k, testCase := range testCases { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) for 
key, value := range testCase.Initial { fakeClient.Data[key] = value } h := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) - watching, err := h.Watch(baseKey, testCase.From, Everything) + watching, err := h.Watch(baseKey, testCase.From, storage.Everything) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -456,12 +457,12 @@ func TestWatchFromZeroIndex(t *testing.T) { pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} testCases := map[string]struct { - Response EtcdResponseWithError + Response tools.EtcdResponseWithError ExpectedVersion string ExpectedType watch.EventType }{ "get value created": { - EtcdResponseWithError{ + tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(codec, pod), @@ -476,7 +477,7 @@ func TestWatchFromZeroIndex(t *testing.T) { watch.Added, }, "get value modified": { - EtcdResponseWithError{ + tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Value: runtime.EncodeOrDie(codec, pod), @@ -493,13 +494,13 @@ func TestWatchFromZeroIndex(t *testing.T) { } for k, testCase := range testCases { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) key := "/some/key" prefixedKey := etcdtest.AddPrefix(key) fakeClient.Data[prefixedKey] = testCase.Response h := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) - watching, err := h.Watch(key, 0, Everything) + watching, err := h.Watch(key, 0, storage.Everything) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -534,8 +535,8 @@ func TestWatchListFromZeroIndex(t *testing.T) { pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}} key := "/some/key" prefixedKey := etcdtest.AddPrefix(key) - fakeClient := NewFakeEtcdClient(t) - fakeClient.Data[prefixedKey] = EtcdResponseWithError{ + fakeClient := tools.NewFakeEtcdClient(t) + fakeClient.Data[prefixedKey] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: &etcd.Node{ Dir: true, @@ -560,7 +561,7 @@ func TestWatchListFromZeroIndex(t 
*testing.T) { } h := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) - watching, err := h.WatchList(key, 0, Everything) + watching, err := h.WatchList(key, 0, storage.Everything) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -597,10 +598,10 @@ func TestWatchListIgnoresRootKey(t *testing.T) { key := "/some/key" prefixedKey := etcdtest.AddPrefix(key) - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) h := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) - watching, err := h.WatchList(key, 1, Everything) + watching, err := h.WatchList(key, 1, storage.Everything) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -639,10 +640,10 @@ func TestWatchListIgnoresRootKey(t *testing.T) { } func TestWatchFromNotFound(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) key := "/some/key" prefixedKey := etcdtest.AddPrefix(key) - fakeClient.Data[prefixedKey] = EtcdResponseWithError{ + fakeClient.Data[prefixedKey] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: nil, }, @@ -653,7 +654,7 @@ func TestWatchFromNotFound(t *testing.T) { } h := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) - watching, err := h.Watch(key, 0, Everything) + watching, err := h.Watch(key, 0, storage.Everything) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -666,10 +667,10 @@ func TestWatchFromNotFound(t *testing.T) { } func TestWatchFromOtherError(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) key := "/some/key" prefixedKey := etcdtest.AddPrefix(key) - fakeClient.Data[prefixedKey] = EtcdResponseWithError{ + fakeClient.Data[prefixedKey] = tools.EtcdResponseWithError{ R: &etcd.Response{ Node: nil, }, @@ -680,7 +681,7 @@ func TestWatchFromOtherError(t *testing.T) { } h := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) - watching, err := h.Watch(key, 0, Everything) + watching, err := h.Watch(key, 0, storage.Everything) 
if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -708,15 +709,15 @@ func TestWatchFromOtherError(t *testing.T) { } func TestWatchPurposefulShutdown(t *testing.T) { - fakeClient := NewFakeEtcdClient(t) + fakeClient := tools.NewFakeEtcdClient(t) h := newEtcdHelper(fakeClient, codec, etcdtest.PathPrefix()) key := "/some/key" prefixedKey := etcdtest.AddPrefix(key) - fakeClient.expectNotFoundGetSet[prefixedKey] = struct{}{} + fakeClient.ExpectNotFoundGet(prefixedKey) // Test purposeful shutdown - watching, err := h.Watch(key, 0, Everything) + watching, err := h.Watch(key, 0, storage.Everything) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -732,38 +733,3 @@ func TestWatchPurposefulShutdown(t *testing.T) { t.Errorf("An injected error did not cause a graceful shutdown") } } - -func TestEtcdParseWatchResourceVersion(t *testing.T) { - testCases := []struct { - Version string - Kind string - ExpectVersion uint64 - Err bool - }{ - {Version: "", ExpectVersion: 0}, - {Version: "a", Err: true}, - {Version: " ", Err: true}, - {Version: "1", ExpectVersion: 2}, - {Version: "10", ExpectVersion: 11}, - } - for _, testCase := range testCases { - version, err := ParseWatchResourceVersion(testCase.Version, testCase.Kind) - switch { - case testCase.Err: - if err == nil { - t.Errorf("%s: unexpected non-error", testCase.Version) - continue - } - if !errors.IsInvalid(err) { - t.Errorf("%s: unexpected error: %v", testCase.Version, err) - continue - } - case !testCase.Err && err != nil: - t.Errorf("%s: unexpected error: %v", testCase.Version, err) - continue - } - if version != testCase.ExpectVersion { - t.Errorf("%s: expected version %d but was %d", testCase.Version, testCase.ExpectVersion, version) - } - } -} diff --git a/pkg/storage/interfaces.go b/pkg/storage/interfaces.go new file mode 100644 index 00000000000..36829f99b7c --- /dev/null +++ b/pkg/storage/interfaces.go @@ -0,0 +1,149 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "time" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/watch" +) + +// Versioner abstracts setting and retrieving metadata fields from database response +// onto the object or list. +type Versioner interface { + // UpdateObject sets storage metadata into an API object. Returns an error if the object + // cannot be updated correctly. May return nil if the requested object does not need metadata + // from database. + UpdateObject(obj runtime.Object, expiration *time.Time, resourceVersion uint64) error + // UpdateList sets the resource version into an API list object. Returns an error if the object + // cannot be updated correctly. May return nil if the requested object does not need metadata + // from database. + UpdateList(obj runtime.Object, resourceVersion uint64) error + // ObjectResourceVersion returns the resource version (for persistence) of the specified object. + // Should return an error if the specified object does not have a persistable version. + ObjectResourceVersion(obj runtime.Object) (uint64, error) +} + +// ResponseMeta contains information about the database metadata that is associated with +// an object. It abstracts the actual underlying objects to prevent coupling with concrete +// database and to improve testability. +type ResponseMeta struct { + // TTL is the time to live of the node that contained the returned object.
It may be + // zero or negative in some cases (objects may be expired after the requested + // expiration time due to server lag). + TTL int64 + // Expiration is the time at which the node that contained the returned object will expire and be deleted. + // This can be nil if there is no expiration time set for the node. + Expiration *time.Time + // The resource version of the node that contained the returned object. + ResourceVersion uint64 +} + +// FilterFunc is a predicate which takes an API object and returns true +// iff the object should remain in the set. +type FilterFunc func(obj runtime.Object) bool + +// Everything is a FilterFunc which accepts all objects. +func Everything(runtime.Object) bool { + return true +} + +// Pass an UpdateFunc to Interface.GuaranteedUpdate to make an update +// that is guaranteed to succeed. +// See the comment for GuaranteedUpdate for more details. +type UpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Object, ttl *uint64, err error) + +// Interface offers a common interface for object marshaling/unmarshaling operations and +// hides all the storage-related operations behind it. +type Interface interface { + // Returns list of servers addresses of the underlying database. + // TODO: This method is used only in a single place. Consider refactoring and getting rid + // of this method from the interface. + Backends() []string + + // Returns Versioner associated with this interface. + Versioner() Versioner + + // Create adds a new object at a key unless it already exists. 'ttl' is time-to-live + // in seconds (0 means forever). If no error is returned and out is not nil, out will be + // set to the read value from database. + Create(key string, obj, out runtime.Object, ttl uint64) error + + // Set marshals obj via json and stores in database under key. Will do an atomic update + // if obj's ResourceVersion field is set. 'ttl' is time-to-live in seconds (0 means forever).
+ // If no error is returned and out is not nil, out will be set to the read value from database. + Set(key string, obj, out runtime.Object, ttl uint64) error + + // Delete removes the specified key and returns the value that existed at that spot. + Delete(key string, out runtime.Object) error + + // RecursiveDelete removes the specified key. + // TODO: Get rid of this method and use Delete() instead. + RecursiveDelete(key string, recursive bool) error + + // Watch begins watching the specified key. Events are decoded into API objects, + // and any items passing 'filter' are sent down to returned watch.Interface. + // resourceVersion may be used to specify what version to begin watching + // (e.g. reconnecting without missing any updates). + Watch(key string, resourceVersion uint64, filter FilterFunc) (watch.Interface, error) + + // WatchList begins watching the specified key's items. Items are decoded into API + // objects and any item passing 'filter' are sent down to returned watch.Interface. + // resourceVersion may be used to specify what version to begin watching + // (e.g. reconnecting without missing any updates). + WatchList(key string, resourceVersion uint64, filter FilterFunc) (watch.Interface, error) + + // Get unmarshals json found at key into objPtr. On a not found error, will either + // return a zero object of the requested type, or an error, depending on ignoreNotFound. + // Treats empty responses and nil response nodes exactly like a not found error. + Get(key string, objPtr runtime.Object, ignoreNotFound bool) error + + // GetToList unmarshals json found at key and opaque it into *List api object + // (an object that satisfies the runtime.IsList definition). + GetToList(key string, listObj runtime.Object) error + + // List unmarshalls jsons found at directory defined by key and opaque them + // into *List api object (an object that satisfies runtime.IsList definition). 
+ List(key string, listObj runtime.Object) error + + // GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'ptrToType') + // retrying the update until success if there is index conflict. + // Note that object passed to tryUpdate may change across invocations of tryUpdate() if + // other writers are simultaneously updating it, so tryUpdate() needs to take into account + // the current contents of the object when deciding how the update object should look. + // + // Example: + // + // s := /* implementation of Interface */ + // err := s.GuaranteedUpdate( + // "myKey", &MyType{}, true, + // func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { + // // Before each invocation of the user defined function, "input" is reset to + // // current contents for "myKey" in database. + // curr := input.(*MyType) // Guaranteed to succeed. + // + // // Make the modification + // curr.Counter++ + // + // // Return the modified object - return an error to stop iterating. Return + // // a uint64 to alter the TTL on the object, or nil to keep it the same value. + // return curr, nil, nil + // } + // }) + GuaranteedUpdate(key string, ptrToType runtime.Object, ignoreNotFound bool, tryUpdate UpdateFunc) error +} diff --git a/pkg/storage/util.go b/pkg/storage/util.go new file mode 100644 index 00000000000..e95735e2ac0 --- /dev/null +++ b/pkg/storage/util.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "strconv" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" + "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/fielderrors" +) + +type SimpleUpdateFunc func(runtime.Object) (runtime.Object, error) + +// SimpleUpdateFunc converts SimpleUpdateFunc into UpdateFunc +func SimpleUpdate(fn SimpleUpdateFunc) UpdateFunc { + return func(input runtime.Object, _ ResponseMeta) (runtime.Object, *uint64, error) { + out, err := fn(input) + return out, nil, err + } +} + +// ParseWatchResourceVersion takes a resource version argument and converts it to +// the etcd version we should pass to helper.Watch(). Because resourceVersion is +// an opaque value, the default watch behavior for non-zero watch is to watch +// the next value (if you pass "1", you will see updates from "2" onwards). +func ParseWatchResourceVersion(resourceVersion, kind string) (uint64, error) { + if resourceVersion == "" || resourceVersion == "0" { + return 0, nil + } + version, err := strconv.ParseUint(resourceVersion, 10, 64) + if err != nil { + // TODO: Does this need to be a ValidationErrorList? I can't convince myself it does. + return 0, errors.NewInvalid(kind, "", fielderrors.ValidationErrorList{fielderrors.NewFieldInvalid("resourceVersion", resourceVersion, err.Error())}) + } + return version + 1, nil +} diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go new file mode 100644 index 00000000000..4445b6c1db3 --- /dev/null +++ b/pkg/storage/util_test.go @@ -0,0 +1,58 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "testing" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" +) + +func TestEtcdParseWatchResourceVersion(t *testing.T) { + testCases := []struct { + Version string + Kind string + ExpectVersion uint64 + Err bool + }{ + {Version: "", ExpectVersion: 0}, + {Version: "a", Err: true}, + {Version: " ", Err: true}, + {Version: "1", ExpectVersion: 2}, + {Version: "10", ExpectVersion: 11}, + } + for _, testCase := range testCases { + version, err := ParseWatchResourceVersion(testCase.Version, testCase.Kind) + switch { + case testCase.Err: + if err == nil { + t.Errorf("%s: unexpected non-error", testCase.Version) + continue + } + if !errors.IsInvalid(err) { + t.Errorf("%s: unexpected error: %v", testCase.Version, err) + continue + } + case !testCase.Err && err != nil: + t.Errorf("%s: unexpected error: %v", testCase.Version, err) + continue + } + if version != testCase.ExpectVersion { + t.Errorf("%s: expected version %d but was %d", testCase.Version, testCase.ExpectVersion, version) + } + } +} diff --git a/pkg/tools/fake_etcd_client.go b/pkg/tools/fake_etcd_client.go index 45232f5d8b9..bf4b7fdd1f4 100644 --- a/pkg/tools/fake_etcd_client.go +++ b/pkg/tools/fake_etcd_client.go @@ -281,7 +281,8 @@ func (f *FakeEtcdClient) Delete(key string, recursive bool) (*etcd.Response, err Index: f.ChangeIndex, } } - if IsEtcdNotFound(existing.E) { + etcdError, ok := existing.E.(*etcd.EtcdError) + if ok && etcdError != nil && etcdError.ErrorCode == EtcdErrorCodeNotFound { f.DeletedKeys = append(f.DeletedKeys, key) return existing.R, existing.E } 
diff --git a/pkg/tools/interfaces.go b/pkg/tools/interfaces.go index 589a0ca3435..02569cb28b5 100644 --- a/pkg/tools/interfaces.go +++ b/pkg/tools/interfaces.go @@ -17,11 +17,6 @@ limitations under the License. package tools import ( - "time" - - "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/watch" - "github.com/coreos/go-etcd/etcd" ) @@ -51,119 +46,3 @@ type EtcdClient interface { // the etcd client interface which doesn't, and it doesn't seem worth it to wrap the api. Watch(prefix string, waitIndex uint64, recursive bool, receiver chan *etcd.Response, stop chan bool) (*etcd.Response, error) } - -// StorageVersioner abstracts setting and retrieving metadata fields from the etcd response onto the object -// or list. -type StorageVersioner interface { - // UpdateObject sets etcd storage metadata into an API object. Returns an error if the object - // cannot be updated correctly. May return nil if the requested object does not need metadata - // from etcd. - UpdateObject(obj runtime.Object, expiration *time.Time, resourceVersion uint64) error - // UpdateList sets the resource version into an API list object. Returns an error if the object - // cannot be updated correctly. May return nil if the requested object does not need metadata - // from etcd. - UpdateList(obj runtime.Object, resourceVersion uint64) error - // ObjectResourceVersion returns the resource version (for persistence) of the specified object. - // Should return an error if the specified object does not have a persistable version. - ObjectResourceVersion(obj runtime.Object) (uint64, error) -} - -// ResponseMeta contains information about the etcd metadata that is associated with -// an object. It abstracts the actual underlying objects to prevent coupling with etcd -// and to improve testability. -type ResponseMeta struct { - // TTL is the time to live of the node that contained the returned object. 
It may be - // zero or negative in some cases (objects may be expired after the requested - // expiration time due to server lag). - TTL int64 - // Expiration is the time at which the node that contained the returned object will expire and be deleted. - // This can be nil if there is no expiration time set for the node. - Expiration *time.Time - // The resource version of the node that contained the returned object. - ResourceVersion uint64 -} - -// Pass an StorageUpdateFunc to StorageInterface.GuaranteedUpdate to make an update -// that is guaranteed to succeed. -// See the comment for GuaranteedUpdate for more details. -type StorageUpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Object, ttl *uint64, err error) - -// StorageInterface offers a common interface for object marshaling/unmarshling operations and -// hids all the storage-related operations behind it. -type StorageInterface interface { - // Returns list of servers addresses of the underyling database. - // TODO: This method is used only in a single place. Consider refactoring and getting rid - // of this method from the interface. - Backends() []string - - // Returns StorageVersioner associated with this interface. - Versioner() StorageVersioner - - // Create adds a new object at a key unless it already exists. 'ttl' is time-to-live - // in seconds (0 means forever). If no error is returned and out is not nil, out will be - // set to the read value from etcd. - Create(key string, obj, out runtime.Object, ttl uint64) error - - // Set marshals obj via json and stores in etcd under key. Will do an atomic update - // if obj's ResourceVersion field is set. 'ttl' is time-to-live in seconds (0 means forever). - // If no error is returned and out is not nil, out will be set to the read value from etcd. - Set(key string, obj, out runtime.Object, ttl uint64) error - - // Delete removes the specified key and returns the value that existed at that spot. 
- Delete(key string, out runtime.Object) error - - // RecursiveDelete removes the specified key. - // TODO: Get rid of this method and use Delete() instead. - RecursiveDelete(key string, recursive bool) error - - // Watch begins watching the specified key. Events are decoded into API objects, - // and any items passing 'filter' are sent down to returned watch.Interface. - // resourceVersion may be used to specify what version to begin watching - // (e.g. reconnecting without missing any updates). - Watch(key string, resourceVersion uint64, filter FilterFunc) (watch.Interface, error) - - // WatchList begins watching the specified key's items. Items are decoded into API - // objects and any item passing 'filter' are sent down to returned watch.Interface. - // resourceVersion may be used to specify what version to begin watching - // (e.g. reconnecting without missing any updates). - WatchList(key string, resourceVersion uint64, filter FilterFunc) (watch.Interface, error) - - // Get unmarshals json found at key into objPtr. On a not found error, will either - // return a zero object of the requested type, or an error, depending on ignoreNotFound. - // Treats empty responses and nil response nodes exactly like a not found error. - Get(key string, objPtr runtime.Object, ignoreNotFound bool) error - - // GetToList unmarshals json found at key and opaque it into *List api object - // (an object that satisfies the runtime.IsList definition). - GetToList(key string, listObj runtime.Object) error - - // List unmarshalls jsons found at directory defined by key and opaque them - // into *List api object (an object that satisfies runtime.IsList definition). - List(key string, listObj runtime.Object) error - - // GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'ptrToType') - // retrying the update until success if there is etcd index conflict. 
- // Note that object passed to tryUpdate may change acress incovations of tryUpdate() if - // other writers are simultanously updateing it, to tryUpdate() needs to take into account - // the current contents of the object when deciding how the update object should look. - // - // Exmaple: - // - // s := /* implementation of StorageInterface */ - // err := s.GuaranteedUpdate( - // "myKey", &MyType{}, true, - // func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) { - // // Before each incovation of the user defined function, "input" is reset to - // // etcd's current contents for "myKey". - // curr := input.(*MyType) // Guaranteed to succeed. - // - // // Make the modification - // curr.Counter++ - // - // // Return the modified object - return an error to stop iterating. Return - // // a uint64 to alter the TTL on the object, or nil to keep it the same value. - // return cur, nil, nil - // } - // }) - GuaranteedUpdate(key string, ptrToType runtime.Object, ignoreNotFound bool, tryUpdate StorageUpdateFunc) error -} diff --git a/pkg/util/jsonpath/doc.go b/pkg/util/jsonpath/doc.go new file mode 100644 index 00000000000..6bdf4ac59a5 --- /dev/null +++ b/pkg/util/jsonpath/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package jsonpath is a template engine using jsonpath syntax, +// which can be seen at http://goessner.net/articles/JsonPath/. 
+// In addition, it has {range} {end} function to iterate list and slice. +package jsonpath diff --git a/pkg/util/jsonpath/jsonpath.go b/pkg/util/jsonpath/jsonpath.go new file mode 100644 index 00000000000..af5d560e14c --- /dev/null +++ b/pkg/util/jsonpath/jsonpath.go @@ -0,0 +1,472 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strconv" + + "github.com/GoogleCloudPlatform/kubernetes/third_party/golang/template" +) + +type JSONPath struct { + name string + parser *Parser + stack [][]reflect.Value //push and pop values in different scopes + cur []reflect.Value //current scope values + beginRange int + inRange int + endRange int +} + +func New(name string) *JSONPath { + return &JSONPath{ + name: name, + beginRange: 0, + inRange: 0, + endRange: 0, + } +} + +// Parse parse the given template, return error +func (j *JSONPath) Parse(text string) (err error) { + j.parser, err = Parse(j.name, text) + return +} + +// Execute bounds data into template and write the result +func (j *JSONPath) Execute(wr io.Writer, data interface{}) error { + if j.parser == nil { + return fmt.Errorf("%s is an incomplete jsonpath template", j.name) + } + + j.cur = []reflect.Value{reflect.ValueOf(data)} + nodes := j.parser.Root.Nodes + for i := 0; i < len(nodes); i++ { + node := nodes[i] + results, err := j.walk(j.cur, node) + if err != nil { + return err + } + + //encounter an 
end node, break the current block + if j.endRange > 0 && j.endRange <= j.inRange { + j.endRange -= 1 + break + } + //encounter a range node, start a range loop + if j.beginRange > 0 { + j.beginRange -= 1 + j.inRange += 1 + for k, value := range results { + j.parser.Root.Nodes = nodes[i+1:] + if k == len(results)-1 { + j.inRange -= 1 + } + err := j.Execute(wr, value.Interface()) + if err != nil { + return err + } + + } + break + } + err = j.PrintResults(wr, results) + if err != nil { + return err + } + } + return nil +} + +// PrintResults write the results into writer +func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error { + for i, r := range results { + text, err := j.evalToText(r) + if err != nil { + return err + } + if i != len(results)-1 { + text = append(text, ' ') + } + if _, err = wr.Write(text); err != nil { + return err + } + } + return nil +} + +// walk visits tree rooted at the given node in DFS order +func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) { + switch node := node.(type) { + case *ListNode: + return j.evalList(value, node) + case *TextNode: + return []reflect.Value{reflect.ValueOf(string(node.Text))}, nil + case *FieldNode: + return j.evalField(value, node) + case *ArrayNode: + return j.evalArray(value, node) + case *FilterNode: + return j.evalFilter(value, node) + case *IntNode: + return j.evalInt(value, node) + case *FloatNode: + return j.evalFloat(value, node) + case *WildcardNode: + return j.evalWildcard(value, node) + case *RecursiveNode: + return j.evalRecursive(value, node) + case *UnionNode: + return j.evalUnion(value, node) + case *IdentifierNode: + return j.evalIdentifier(value, node) + default: + return value, fmt.Errorf("unexpected Node %v", node) + } +} + +// evalInt evaluates IntNode +func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) { + result := make([]reflect.Value, len(input)) + for i := range input { + result[i] = 
reflect.ValueOf(node.Value) + } + return result, nil +} + +// evalFloat evaluates FloatNode +func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) { + result := make([]reflect.Value, len(input)) + for i := range input { + result[i] = reflect.ValueOf(node.Value) + } + return result, nil +} + +// evalList evaluates ListNode +func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) { + var err error + curValue := value + for _, node := range node.Nodes { + curValue, err = j.walk(curValue, node) + if err != nil { + return curValue, err + } + } + return curValue, nil +} + +// evalIdentifier evaluates IdentifierNode +func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) { + results := []reflect.Value{} + switch node.Name { + case "range": + j.stack = append(j.stack, j.cur) + j.beginRange += 1 + results = input + case "end": + if j.endRange < j.inRange { //inside a loop, break the current block + j.endRange += 1 + break + } + // the loop is about to end, pop value and continue the following execution + if len(j.stack) > 0 { + j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1] + } else { + return results, fmt.Errorf("not in range, nothing to end") + } + default: + return input, fmt.Errorf("unrecongnized identifier %v", node.Name) + } + return results, nil +} + +// evalArray evaluates ArrayNode +func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) { + result := []reflect.Value{} + for _, value := range input { + if value.Kind() == reflect.Interface { + value = reflect.ValueOf(value.Interface()) + } + if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { + return input, fmt.Errorf("%v is not array or slice", value) + } + params := node.Params + if !params[0].Known { + params[0].Value = 0 + } + if params[0].Value < 0 { + params[0].Value += value.Len() + } + if !params[1].Known { + 
params[1].Value = value.Len() + } + + if params[1].Value < 0 { + params[1].Value += value.Len() + } + + if !params[2].Known { + value = value.Slice(params[0].Value, params[1].Value) + } else { + value = value.Slice3(params[0].Value, params[1].Value, params[2].Value) + } + for i := 0; i < value.Len(); i++ { + result = append(result, value.Index(i)) + } + } + return result, nil +} + +// evalUnion evaluates UnionNode +func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) { + result := []reflect.Value{} + for _, listNode := range node.Nodes { + temp, err := j.evalList(input, listNode) + if err != nil { + return input, err + } + result = append(result, temp...) + } + return result, nil +} + +// evalField evaluates filed of struct or key of map. +func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) { + results := []reflect.Value{} + for _, value := range input { + var result reflect.Value + if value.Kind() == reflect.Interface { + value = reflect.ValueOf(value.Interface()) + } + if value.Kind() == reflect.Struct { + result = value.FieldByName(node.Value) + } else if value.Kind() == reflect.Map { + result = value.MapIndex(reflect.ValueOf(node.Value)) + } + if result.IsValid() { + results = append(results, result) + } + } + if len(results) == 0 { + return results, fmt.Errorf("%s is not found", node.Value) + } + return results, nil +} + +// evalWildcard extract all contents of the given value +func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) { + results := []reflect.Value{} + for _, value := range input { + kind := value.Kind() + if kind == reflect.Struct { + for i := 0; i < value.NumField(); i++ { + results = append(results, value.Field(i)) + } + } else if kind == reflect.Map { + for _, key := range value.MapKeys() { + results = append(results, value.MapIndex(key)) + } + } else if kind == reflect.Array || kind == reflect.Slice || kind == 
reflect.String { + for i := 0; i < value.Len(); i++ { + results = append(results, value.Index(i)) + } + } + } + return results, nil +} + +// evalRecursive visit the given value recursively and push all of them to result +func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) { + result := []reflect.Value{} + for _, value := range input { + results := []reflect.Value{} + kind := value.Kind() + if kind == reflect.Struct { + for i := 0; i < value.NumField(); i++ { + results = append(results, value.Field(i)) + } + } else if kind == reflect.Map { + for _, key := range value.MapKeys() { + results = append(results, value.MapIndex(key)) + } + } else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String { + for i := 0; i < value.Len(); i++ { + results = append(results, value.Index(i)) + } + } + if len(results) != 0 { + result = append(result, value) + output, err := j.evalRecursive(results, node) + if err != nil { + return result, err + } + result = append(result, output...) 
+ } + } + return result, nil +} + +// evalFilter filter array according to FilterNode +func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) { + results := []reflect.Value{} + for _, value := range input { + if value.Kind() == reflect.Interface { + value = reflect.ValueOf(value.Interface()) + } + if value.Kind() != reflect.Array && value.Kind() != reflect.Slice { + return input, fmt.Errorf("%v is not array or slice", value) + } + for i := 0; i < value.Len(); i++ { + temp := []reflect.Value{value.Index(i)} + lefts, err := j.evalList(temp, node.Left) + + //case exists + if node.Operator == "exists" { + if len(lefts) > 0 { + results = append(results, value.Index(i)) + } + continue + } + + if err != nil { + return input, err + } + + var left, right interface{} + if len(lefts) != 1 { + return input, fmt.Errorf("can only compare one element at a time") + } + left = lefts[0].Interface() + + rights, err := j.evalList(temp, node.Right) + if err != nil { + return input, err + } + if len(rights) != 1 { + return input, fmt.Errorf("can only compare one element at a time") + } + right = rights[0].Interface() + + pass := false + switch node.Operator { + case "<": + pass, err = template.Less(left, right) + case ">": + pass, err = template.Greater(left, right) + case "==": + pass, err = template.Equal(left, right) + case "!=": + pass, err = template.NotEqual(left, right) + case "<=": + pass, err = template.LessEqual(left, right) + case ">=": + pass, err = template.GreaterEqual(left, right) + default: + return results, fmt.Errorf("unrecognized filter operator %s", node.Operator) + } + if err != nil { + return results, err + } + if pass { + results = append(results, value.Index(i)) + } + } + } + return results, nil +} + +// evalToText translates reflect value to corresponding text +func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) { + if v.Kind() == reflect.Interface { + v = reflect.ValueOf(v.Interface()) + } + var buffer 
bytes.Buffer + switch v.Kind() { + case reflect.Invalid: + //pass + case reflect.Ptr: + text, err := j.evalToText(reflect.Indirect(v)) + if err != nil { + return nil, err + } + buffer.Write(text) + case reflect.Bool: + if variable := v.Bool(); variable { + buffer.WriteString("True") + } else { + buffer.WriteString("False") + } + case reflect.Float32: + buffer.WriteString(strconv.FormatFloat(v.Float(), 'f', -1, 32)) + case reflect.Float64: + buffer.WriteString(strconv.FormatFloat(v.Float(), 'f', -1, 64)) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + buffer.WriteString(strconv.FormatInt(v.Int(), 10)) + case reflect.String: + buffer.WriteString(v.String()) + case reflect.Array, reflect.Slice: + buffer.WriteString("[") + for i := 0; i < v.Len(); i++ { + text, err := j.evalToText(v.Index(i)) + if err != nil { + return nil, err + } + buffer.Write(text) + if i != v.Len()-1 { + buffer.WriteString(", ") + } + } + buffer.WriteString("]") + case reflect.Struct: + buffer.WriteString("{") + for i := 0; i < v.NumField(); i++ { + text, err := j.evalToText(v.Field(i)) + if err != nil { + return nil, err + } + pair := fmt.Sprintf("%s: %s", v.Type().Field(i).Name, text) + buffer.WriteString(pair) + if i != v.NumField()-1 { + buffer.WriteString(", ") + } + } + buffer.WriteString("}") + case reflect.Map: + buffer.WriteString("{") + for i, key := range v.MapKeys() { + text, err := j.evalToText(v.MapIndex(key)) + if err != nil { + return nil, err + } + pair := fmt.Sprintf("%s: %s", key, text) + buffer.WriteString(pair) + if i != len(v.MapKeys())-1 { + buffer.WriteString(", ") + } + } + buffer.WriteString("}") + + default: + return nil, fmt.Errorf("%v is not printable", v.Kind()) + } + return buffer.Bytes(), nil +} diff --git a/pkg/util/jsonpath/jsonpath_test.go b/pkg/util/jsonpath/jsonpath_test.go new file mode 100644 index 00000000000..78cebc92f0c --- /dev/null +++ b/pkg/util/jsonpath/jsonpath_test.go @@ -0,0 +1,214 @@ +/* +Copyright 2015 The 
Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import ( + "bytes" + "encoding/json" + "testing" +) + +type jsonpathTest struct { + name string + template string + input interface{} + expect string +} + +func testJSONPath(tests []jsonpathTest, t *testing.T) { + for _, test := range tests { + j := New(test.name) + err := j.Parse(test.template) + if err != nil { + t.Errorf("in %s, parse %s error %v", test.name, test.template, err) + } + buf := new(bytes.Buffer) + err = j.Execute(buf, test.input) + if err != nil { + t.Errorf("in %s, execute error %v", test.name, err) + } + out := buf.String() + if out != test.expect { + t.Errorf(`in %s, expect to get "%s", got "%s"`, test.name, test.expect, out) + } + } +} + +func testFailJSONPath(tests []jsonpathTest, t *testing.T) { + for _, test := range tests { + j := New(test.name) + err := j.Parse(test.template) + if err != nil { + t.Errorf("in %s, parse %s error %v", test.name, test.template, err) + } + buf := new(bytes.Buffer) + err = j.Execute(buf, test.input) + var out string + if err == nil { + out = "nil" + } else { + out = err.Error() + } + if out != test.expect { + t.Errorf("in %s, expect to get error %s, got %s", test.name, test.expect, out) + } + } +} + +func TestStructInput(t *testing.T) { + type book struct { + Category string + Author string + Title string + Price float32 + } + + type bicycle struct { + Color string + Price float32 + } + + type store struct { + Book []book 
+ Bicycle bicycle + Name string + Labels map[string]int + } + + storeData := store{ + Name: "jsonpath", + Book: []book{ + {"reference", "Nigel Rees", "Sayings of the Centurey", 8.95}, + {"fiction", "Evelyn Waugh", "Sword of Honour", 12.99}, + {"fiction", "Herman Melville", "Moby Dick", 8.99}, + }, + Bicycle: bicycle{"red", 19.95}, + Labels: map[string]int{ + "engieer": 10, + "web/html": 15, + "k8s-app": 20, + }, + } + + storeTests := []jsonpathTest{ + {"plain", "hello jsonpath", nil, "hello jsonpath"}, + {"recursive", "{..}", []int{1, 2, 3}, "[1, 2, 3]"}, + {"filter", "{[?(@<5)]}", []int{2, 6, 3, 7}, "2 3"}, + {"quote", `{"{"}`, nil, "{"}, + {"union", "{[1,3,4]}", []int{0, 1, 2, 3, 4}, "1 3 4"}, + {"array", "{[0:2]}", []string{"Monday", "Tudesday"}, "Monday Tudesday"}, + {"variable", "hello {.Name}", storeData, "hello jsonpath"}, + {"dict/", "{.Labels.web/html}", storeData, "15"}, + {"dict-", "{.Labels.k8s-app}", storeData, "20"}, + {"nest", "{.Bicycle.Color}", storeData, "red"}, + {"allarray", "{.Book[*].Author}", storeData, "Nigel Rees Evelyn Waugh Herman Melville"}, + {"allfileds", "{.Bicycle.*}", storeData, "red 19.95"}, + {"recurfileds", "{..Price}", storeData, "8.95 12.99 8.99 19.95"}, + {"lastarray", "{.Book[-1:]}", storeData, + "{Category: fiction, Author: Herman Melville, Title: Moby Dick, Price: 8.99}"}, + {"recurarray", "{..Book[2]}", storeData, + "{Category: fiction, Author: Herman Melville, Title: Moby Dick, Price: 8.99}"}, + } + testJSONPath(storeTests, t) + + failStoreTests := []jsonpathTest{ + {"invalid identfier", "{hello}", storeData, "unrecongnized identifier hello"}, + {"nonexistent field", "{.hello}", storeData, "hello is not found"}, + {"invalid array", "{.Labels[0]}", storeData, " is not array or slice"}, + {"invalid filter operator", "{.Book[?(@.Price<>10)]}", storeData, "unrecognized filter operator <>"}, + {"redundent end", "{range .Labels.*}{@}{end}{end}", storeData, "not in range, nothing to end"}, + } + testFailJSONPath(failStoreTests, 
t) +} + +func TestJSONInput(t *testing.T) { + var pointsJSON = []byte(`[ + {"id": "i1", "x":4, "y":-5}, + {"id": "i2", "x":-2, "y":-5, "z":1}, + {"id": "i3", "x": 8, "y": 3 }, + {"id": "i4", "x": -6, "y": -1 }, + {"id": "i5", "x": 0, "y": 2, "z": 1 }, + {"id": "i6", "x": 1, "y": 4 } + ]`) + var pointsData interface{} + err := json.Unmarshal(pointsJSON, &pointsData) + if err != nil { + t.Error(err) + } + pointsTests := []jsonpathTest{ + {"exists filter", "{[?(@.z)].id}", pointsData, "i2 i5"}, + {"bracket key", "{[0]['id']}", pointsData, "i1"}, + } + testJSONPath(pointsTests, t) +} + +// TestKubenates tests some use cases from kubenates +func TestKubenates(t *testing.T) { + var input = []byte(`{ + "kind": "List", + "items":[ + { + "kind":"None", + "metadata":{"name":"127.0.0.1"}, + "status":{ + "capacity":{"cpu":"4"}, + "addresses":[{"type": "LegacyHostIP", "address":"127.0.0.1"}] + } + }, + { + "kind":"None", + "metadata":{"name":"127.0.0.2"}, + "status":{ + "capacity":{"cpu":"8"}, + "addresses":[ + {"type": "LegacyHostIP", "address":"127.0.0.2"}, + {"type": "another", "address":"127.0.0.3"} + ] + } + } + ], + "users":[ + { + "name": "myself", + "user": {} + }, + { + "name": "e2e", + "user": {"username": "admin", "password": "secret"} + } + ] + }`) + var nodesData interface{} + err := json.Unmarshal(input, &nodesData) + if err != nil { + t.Error(err) + } + nodesTests := []jsonpathTest{ + {"range item", "{range .items[*]}{.metadata.name}, {end}{.kind}", nodesData, `127.0.0.1, 127.0.0.2, List`}, + {"range addresss", "{.items[*].status.addresses[*].address}", nodesData, + `127.0.0.1 127.0.0.2 127.0.0.3`}, + {"double range", "{range .items[*]}{range .status.addresses[*]}{.address}, {end}{end}", nodesData, + `127.0.0.1, 127.0.0.2, 127.0.0.3, `}, + {"item name", "{.items[*].metadata.name}", nodesData, `127.0.0.1 127.0.0.2`}, + {"union nodes capacity", "{.items[*]['metadata.name', 'status.capacity']}", nodesData, + `127.0.0.1 127.0.0.2 {cpu: 4} {cpu: 8}`}, + {"range nodes 
capacity", "{range .items[*]}[{.metadata.name}, {.status.capacity}] {end}", nodesData, + `[127.0.0.1, {cpu: 4}] [127.0.0.2, {cpu: 8}] `}, + {"user password", `{.users[?(@.name=="e2e")].user.password}`, nodesData, "secret"}, + } + testJSONPath(nodesTests, t) +} diff --git a/pkg/util/jsonpath/node.go b/pkg/util/jsonpath/node.go new file mode 100644 index 00000000000..bdbf0d38d22 --- /dev/null +++ b/pkg/util/jsonpath/node.go @@ -0,0 +1,239 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import "fmt" + +// NodeType identifies the type of a parse tree node. +type NodeType int + +// Type returns itself and provides an easy default implementation +func (t NodeType) Type() NodeType { + return t +} + +func (t NodeType) String() string { + return NodeTypeName[t] +} + +const ( + NodeText NodeType = iota + NodeArray + NodeList + NodeField + NodeIdentifier + NodeFilter + NodeInt + NodeFloat + NodeWildcard + NodeRecursive + NodeUnion +) + +var NodeTypeName = map[NodeType]string{ + NodeText: "NodeText", + NodeArray: "NodeArray", + NodeList: "NodeList", + NodeField: "NodeField", + NodeIdentifier: "NodeIdentifier", + NodeFilter: "NodeFilter", + NodeInt: "NodeInt", + NodeFloat: "NodeFloat", + NodeWildcard: "NodeWildcard", + NodeRecursive: "NodeRecursive", + NodeUnion: "NodeUnion", +} + +type Node interface { + Type() NodeType + String() string +} + +// ListNode holds a sequence of nodes. 
+type ListNode struct { + NodeType + Nodes []Node // The element nodes in lexical order. +} + +func newList() *ListNode { + return &ListNode{NodeType: NodeList} +} + +func (l *ListNode) append(n Node) { + l.Nodes = append(l.Nodes, n) +} + +func (l *ListNode) String() string { + return fmt.Sprintf("%s", l.Type()) +} + +// TextNode holds plain text. +type TextNode struct { + NodeType + Text []byte // The text; may span newlines. +} + +func newText(text string) *TextNode { + return &TextNode{NodeType: NodeText, Text: []byte(text)} +} + +func (t *TextNode) String() string { + return fmt.Sprintf("%s: %s", t.Type(), t.Text) +} + +// FieldNode holds filed of struct +type FieldNode struct { + NodeType + Value string +} + +func newField(value string) *FieldNode { + return &FieldNode{NodeType: NodeField, Value: value} +} + +func (f *FieldNode) String() string { + return fmt.Sprintf("%s: %s", f.Type(), f.Value) +} + +// IdentifierNode holds an identifier +type IdentifierNode struct { + NodeType + Name string +} + +func newIdentifier(value string) *IdentifierNode { + return &IdentifierNode{ + NodeType: NodeIdentifier, + Name: value, + } +} + +func (f *IdentifierNode) String() string { + return fmt.Sprintf("%s: %s", f.Type(), f.Name) +} + +// ParamsEntry holds param information for ArrayNode +type ParamsEntry struct { + Value int + Known bool //whether the value is known when parse it +} + +// ArrayNode holds start, end, step information for array index selection +type ArrayNode struct { + NodeType + Params [3]ParamsEntry //start, end, step +} + +func newArray(params [3]ParamsEntry) *ArrayNode { + return &ArrayNode{ + NodeType: NodeArray, + Params: params, + } +} + +func (a *ArrayNode) String() string { + return fmt.Sprintf("%s: %v", a.Type(), a.Params) +} + +// FilterNode holds operand and operator information for filter +type FilterNode struct { + NodeType + Left *ListNode + Right *ListNode + Operator string +} + +func newFilter(left, right *ListNode, operator string) 
*FilterNode { + return &FilterNode{ + NodeType: NodeFilter, + Left: left, + Right: right, + Operator: operator, + } +} + +func (f *FilterNode) String() string { + return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right) +} + +// IntNode holds integer value +type IntNode struct { + NodeType + Value int +} + +func newInt(num int) *IntNode { + return &IntNode{NodeType: NodeInt, Value: num} +} + +func (i *IntNode) String() string { + return fmt.Sprintf("%s: %d", i.Type(), i.Value) +} + +// FloatNode holds float value +type FloatNode struct { + NodeType + Value float64 +} + +func newFloat(num float64) *FloatNode { + return &FloatNode{NodeType: NodeFloat, Value: num} +} + +func (i *FloatNode) String() string { + return fmt.Sprintf("%s: %f", i.Type(), i.Value) +} + +// WildcardNode means a wildcard +type WildcardNode struct { + NodeType +} + +func newWildcard() *WildcardNode { + return &WildcardNode{NodeType: NodeWildcard} +} + +func (i *WildcardNode) String() string { + return fmt.Sprintf("%s", i.Type()) +} + +// RecursiveNode means a recursive descent operator +type RecursiveNode struct { + NodeType +} + +func newRecursive() *RecursiveNode { + return &RecursiveNode{NodeType: NodeRecursive} +} + +func (r *RecursiveNode) String() string { + return fmt.Sprintf("%s", r.Type()) +} + +// UnionNode is union of ListNode +type UnionNode struct { + NodeType + Nodes []*ListNode +} + +func newUnion(nodes []*ListNode) *UnionNode { + return &UnionNode{NodeType: NodeUnion, Nodes: nodes} +} + +func (u *UnionNode) String() string { + return fmt.Sprintf("%s", u.Type()) +} diff --git a/pkg/util/jsonpath/parser.go b/pkg/util/jsonpath/parser.go new file mode 100644 index 00000000000..e80034c5872 --- /dev/null +++ b/pkg/util/jsonpath/parser.go @@ -0,0 +1,419 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import ( + "fmt" + "regexp" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +const eof = -1 + +const ( + leftDelim = "{" + rightDelim = "}" +) + +type Parser struct { + Name string + Root *ListNode + input string + cur *ListNode + pos int + start int + width int +} + +// Parse parsed the given text and return a node Parser. +// If an error is encountered, parsing stops and an empty +// Parser is returned with the error +func Parse(name, text string) (*Parser, error) { + p := NewParser(name) + err := p.Parse(text) + if err != nil { + p = nil + } + return p, err +} + +func NewParser(name string) *Parser { + return &Parser{ + Name: name, + } +} + +// parseAction parsed the expression inside delimiter +func parseAction(name, text string) (*Parser, error) { + p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim)) + p.Root = p.Root.Nodes[0].(*ListNode) + return p, err +} + +func (p *Parser) Parse(text string) error { + p.input = text + p.Root = newList() + p.pos = 0 + return p.parseText(p.Root) +} + +// consumeText return the parsed text since last cosumeText +func (p *Parser) consumeText() string { + value := p.input[p.start:p.pos] + p.start = p.pos + return value +} + +// next returns the next rune in the input. +func (p *Parser) next() rune { + if int(p.pos) >= len(p.input) { + p.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(p.input[p.pos:]) + p.width = w + p.pos += p.width + return r +} + +// peek returns but does not consume the next rune in the input. 
+func (p *Parser) peek() rune { + r := p.next() + p.backup() + return r +} + +// backup steps back one rune. Can only be called once per call of next. +func (p *Parser) backup() { + p.pos -= p.width +} + +func (p *Parser) parseText(cur *ListNode) error { + for { + if strings.HasPrefix(p.input[p.pos:], leftDelim) { + if p.pos > p.start { + cur.append(newText(p.consumeText())) + } + return p.parseLeftDelim(cur) + } + if p.next() == eof { + break + } + } + // Correctly reached EOF. + if p.pos > p.start { + cur.append(newText(p.consumeText())) + } + return nil +} + +// parseLeftDelim scans the left delimiter, which is known to be present. +func (p *Parser) parseLeftDelim(cur *ListNode) error { + p.pos += len(leftDelim) + p.consumeText() + newNode := newList() + cur.append(newNode) + cur = newNode + return p.parseInsideAction(cur) +} + +func (p *Parser) parseInsideAction(cur *ListNode) error { + prefixMap := map[string]func(*ListNode) error{ + rightDelim: p.parseRightDelim, + "[?(": p.parseFilter, + "..": p.parseRecursive, + } + for prefix, parseFunc := range prefixMap { + if strings.HasPrefix(p.input[p.pos:], prefix) { + return parseFunc(cur) + } + } + + switch r := p.next(); { + case r == eof || isEndOfLine(r): + return fmt.Errorf("unclosed action") + case r == ' ': + p.consumeText() + case r == '@': //the current object, just pass it + p.consumeText() + case r == '[': + return p.parseArray(cur) + case r == '"': + return p.parseQuote(cur) + case r == '.': + return p.parseField(cur) + case r == '+' || r == '-' || unicode.IsDigit(r): + p.backup() + return p.parseNumber(cur) + case isAlphaNumeric(r): + p.backup() + return p.parseIdentifier(cur) + default: + return fmt.Errorf("unrecognized charactor in action: %#U", r) + } + return p.parseInsideAction(cur) +} + +// parseRightDelim scans the right delimiter, which is known to be present. 
+func (p *Parser) parseRightDelim(cur *ListNode) error { + p.pos += len(rightDelim) + p.consumeText() + cur = p.Root + return p.parseText(cur) +} + +// parseIdentifier scans build-in keywords, like "range" "end" +func (p *Parser) parseIdentifier(cur *ListNode) error { + var r rune + for { + r = p.next() + if isTerminator(r) { + p.backup() + break + } + } + value := p.consumeText() + cur.append(newIdentifier(value)) + return p.parseInsideAction(cur) +} + +// parseRecursive scans the recursive desent operator .. +func (p *Parser) parseRecursive(cur *ListNode) error { + p.pos += len("..") + p.consumeText() + cur.append(newRecursive()) + if r := p.peek(); isAlphaNumeric(r) { + return p.parseField(cur) + } + return p.parseInsideAction(cur) +} + +// parseNumber scans number +func (p *Parser) parseNumber(cur *ListNode) error { + r := p.peek() + if r == '+' || r == '-' { + r = p.next() + } + for { + r = p.next() + if r != '.' && !unicode.IsDigit(r) { + p.backup() + break + } + } + value := p.consumeText() + i, err := strconv.Atoi(value) + if err == nil { + cur.append(newInt(i)) + return p.parseInsideAction(cur) + } + d, err := strconv.ParseFloat(value, 64) + if err == nil { + cur.append(newFloat(d)) + return p.parseInsideAction(cur) + } + return fmt.Errorf("cannot parse number %s", value) +} + +// parseArray scans array index selection +func (p *Parser) parseArray(cur *ListNode) error { +Loop: + for { + switch p.next() { + case eof, '\n': + return fmt.Errorf("unterminated array") + case ']': + break Loop + } + } + text := p.consumeText() + text = string(text[1 : len(text)-1]) + if text == "*" { + text = ":" + } + + //union operator + strs := strings.Split(text, ",") + if len(strs) > 1 { + union := []*ListNode{} + for _, str := range strs { + parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " "))) + if err != nil { + return err + } + union = append(union, parser.Root) + } + cur.append(newUnion(union)) + return p.parseInsideAction(cur) + } + + // 
dict key + reg := regexp.MustCompile(`^'([^']*)'$`) + value := reg.FindStringSubmatch(text) + if value != nil { + parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1])) + if err != nil { + return err + } + for _, node := range parser.Root.Nodes { + cur.append(node) + } + return p.parseInsideAction(cur) + } + + //slice operator + reg = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:[\d]*)?$`) + value = reg.FindStringSubmatch(text) + if value == nil { + return fmt.Errorf("invalid array index %s", text) + } + value = value[1:] + params := [3]ParamsEntry{} + for i := 0; i < 3; i++ { + if value[i] != "" { + if i > 0 { + value[i] = value[i][1:] + } + if i > 0 && value[i] == "" { + params[i].Known = false + } else { + var err error + params[i].Known = true + params[i].Value, err = strconv.Atoi(value[i]) + if err != nil { + return fmt.Errorf("array index %s is not a number", params[i].Value) + } + } + } else { + if i == 1 { + params[i].Known = true + params[i].Value = params[0].Value + 1 + } else { + params[i].Known = false + params[i].Value = 0 + } + } + } + cur.append(newArray(params)) + return p.parseInsideAction(cur) +} + +// parseFilter scans filter inside array selection +func (p *Parser) parseFilter(cur *ListNode) error { + p.pos += len("[?(") + p.consumeText() +Loop: + for { + switch p.next() { + case eof, '\n': + return fmt.Errorf("unterminated filter") + case ')': + break Loop + } + } + if p.next() != ']' { + return fmt.Errorf("unclosed array expect ]") + } + reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`) + text := p.consumeText() + text = string(text[:len(text)-2]) + value := reg.FindStringSubmatch(text) + if value == nil { + parser, err := parseAction("text", text) + if err != nil { + return err + } + cur.append(newFilter(parser.Root, newList(), "exists")) + } else { + leftParser, err := parseAction("left", value[1]) + if err != nil { + return err + } + rightParser, err := parseAction("right", value[3]) + if err != nil { + return err + } + 
cur.append(newFilter(leftParser.Root, rightParser.Root, value[2])) + } + return p.parseInsideAction(cur) +} + +// parseQuote scans array index selection +func (p *Parser) parseQuote(cur *ListNode) error { +Loop: + for { + switch p.next() { + case eof, '\n': + return fmt.Errorf("unterminated quoted string") + case '"': + break Loop + } + } + value := p.consumeText() + cur.append(newText(value[1 : len(value)-1])) + return p.parseInsideAction(cur) +} + +// parseField scans a field until a terminator +func (p *Parser) parseField(cur *ListNode) error { + p.consumeText() + var r rune + for { + r = p.next() + if isTerminator(r) { + p.backup() + break + } + } + value := p.consumeText() + if value == "*" { + cur.append(newWildcard()) + } else { + cur.append(newField(value)) + } + return p.parseInsideAction(cur) +} + +// isTerminator reports whether the input is at valid termination character to appear after an identifier. +func isTerminator(r rune) bool { + if isSpace(r) || isEndOfLine(r) { + return true + } + switch r { + case eof, '.', ',', '[', ']', '$', '@', '{', '}': + return true + } + return false +} + +// isSpace reports whether r is a space character. +func isSpace(r rune) bool { + return r == ' ' || r == '\t' +} + +// isEndOfLine reports whether r is an end-of-line character. +func isEndOfLine(r rune) bool { + return r == '\r' || r == '\n' +} + +// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. +func isAlphaNumeric(r rune) bool { + return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) +} diff --git a/pkg/util/jsonpath/parser_test.go b/pkg/util/jsonpath/parser_test.go new file mode 100644 index 00000000000..29083dd6c44 --- /dev/null +++ b/pkg/util/jsonpath/parser_test.go @@ -0,0 +1,128 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package jsonpath + +import ( + "testing" +) + +type parserTest struct { + name string + text string + nodes []Node +} + +var parserTests = []parserTest{ + {"plain", `hello jsonpath`, []Node{newText("hello jsonpath")}}, + {"variable", `hello {.jsonpath}`, + []Node{newText("hello "), newList(), newField("jsonpath")}}, + {"arrayfiled", `hello {['jsonpath']}`, + []Node{newText("hello "), newList(), newField("jsonpath")}}, + {"quote", `{"{"}`, []Node{newList(), newText("{")}}, + {"array", `{[1:3]}`, []Node{newList(), + newArray([3]ParamsEntry{{1, true}, {3, true}, {0, false}})}}, + {"allarray", `{.book[*].author}`, + []Node{newList(), newField("book"), + newArray([3]ParamsEntry{{0, false}, {0, false}, {0, false}}), newField("author")}}, + {"wildcard", `{.bicycle.*}`, + []Node{newList(), newField("bicycle"), newWildcard()}}, + {"filter", `{[?(@.price<3)]}`, + []Node{newList(), newFilter(newList(), newList(), "<"), + newList(), newField("price"), newList(), newInt(3)}}, + {"recursive", `{..}`, []Node{newList(), newRecursive()}}, + {"recurField", `{..price}`, + []Node{newList(), newRecursive(), newField("price")}}, + {"arraydict", `{['book.price']}`, []Node{newList(), + newField("book"), newField("price"), + }}, + {"union", `{['bicycle.price', 3, 'book.price']}`, []Node{newList(), newUnion([]*ListNode{}), + newList(), newField("bicycle"), newField("price"), + newList(), newArray([3]ParamsEntry{{3, true}, {4, true}, {0, false}}), + newList(), newField("book"), newField("price"), + }}, + {"range", `{range .items}{.name},{end}`, []Node{ + newList(), newIdentifier("range"), 
newField("items"), + newList(), newField("name"), newText(","), + newList(), newIdentifier("end"), + }}, +} + +func collectNode(nodes []Node, cur Node) []Node { + nodes = append(nodes, cur) + switch cur.Type() { + case NodeList: + for _, node := range cur.(*ListNode).Nodes { + nodes = collectNode(nodes, node) + } + case NodeFilter: + nodes = collectNode(nodes, cur.(*FilterNode).Left) + nodes = collectNode(nodes, cur.(*FilterNode).Right) + case NodeUnion: + for _, node := range cur.(*UnionNode).Nodes { + nodes = collectNode(nodes, node) + } + } + return nodes +} + +func TestParser(t *testing.T) { + for _, test := range parserTests { + parser, err := Parse(test.name, test.text) + if err != nil { + t.Errorf("parse %s error %v", test.name, err) + } + result := collectNode([]Node{}, parser.Root)[1:] + if len(result) != len(test.nodes) { + t.Errorf("in %s, expect to get %d nodes, got %d nodes", test.name, len(test.nodes), len(result)) + t.Error(result) + } + for i, expect := range test.nodes { + if result[i].String() != expect.String() { + t.Errorf("in %s, %dth node, expect %v, got %v", test.name, i, expect, result[i]) + } + } + } +} + +type failParserTest struct { + name string + text string + err string +} + +func TestFailParser(t *testing.T) { + failParserTests := []failParserTest{ + {"unclosed action", "{.hello", "unclosed action"}, + {"unrecognized charactor", "{*}", "unrecognized charactor in action: U+002A '*'"}, + {"invalid number", "{+12.3.0}", "cannot parse number +12.3.0"}, + {"unterminated array", "{[1}", "unterminated array"}, + {"invalid index", "{[::-1]}", "invalid array index ::-1"}, + {"unterminated filter", "{[?(.price]}", "unterminated filter"}, + } + for _, test := range failParserTests { + _, err := Parse(test.name, test.text) + var out string + if err == nil { + out = "nil" + } else { + out = err.Error() + } + if out != test.err { + t.Errorf("in %s, expect to get error %v, got %v", test.name, test.err, out) + } + } +} diff --git 
a/pkg/util/mount/nsenter_mount.go b/pkg/util/mount/nsenter_mount.go index 0d5dae381fe..a8a2fff961c 100644 --- a/pkg/util/mount/nsenter_mount.go +++ b/pkg/util/mount/nsenter_mount.go @@ -19,6 +19,7 @@ limitations under the License. package mount import ( + "os" "path/filepath" "strings" @@ -39,14 +40,48 @@ import ( // performed in the host's mount namespace do not propagate out to the // bind-mount in this docker version. // 2. The host's root filesystem must be available at /rootfs -// 3. The nsenter binary must be at /nsenter in the container's filesystem. +// 3. The nsenter binary must be on the Kubelet process' PATH in the container's +// filesystem. // 4. The Kubelet process must have CAP_SYS_ADMIN (required by nsenter); at // the present, this effectively means that the kubelet is running in a // privileged container. +// 5. The volume path used by the Kubelet must be the same inside and outside +// the container and be writable by the container (to initialize volume) +// contents. TODO: remove this requirement. +// 6. 
The host image must have mount, findmnt, and umount binaries in /bin, +// /usr/sbin, or /usr/bin // // For more information about mount propagation modes, see: // https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt -type NsenterMounter struct{} +type NsenterMounter struct { + // a map of commands to their paths on the host filesystem + paths map[string]string +} + +func NewNsenterMounter() *NsenterMounter { + m := &NsenterMounter{ + paths: map[string]string{ + "mount": "", + "findmnt": "", + "umount": "", + }, + } + // search for the mount command in other locations besides /usr/bin + for binary := range m.paths { + // default to root + m.paths[binary] = filepath.Join("/", binary) + for _, path := range []string{"/bin", "/usr/sbin", "/usr/bin"} { + binPath := filepath.Join(hostRootFsPath, path, binary) + if _, err := os.Stat(binPath); err != nil { + continue + } + m.paths[binary] = binPath + break + } + // TODO: error, so that the kubelet can stop if the mounts don't exist + } + return m +} // NsenterMounter implements mount.Interface var _ = Interface(&NsenterMounter{}) @@ -54,30 +89,30 @@ var _ = Interface(&NsenterMounter{}) const ( hostRootFsPath = "/rootfs" hostProcMountsPath = "/rootfs/proc/mounts" - nsenterPath = "/nsenter" + nsenterPath = "nsenter" ) // Mount runs mount(8) in the host's root mount namespace. 
Aside from this // aspect, Mount has the same semantics as the mounter returned by mount.New() -func (*NsenterMounter) Mount(source string, target string, fstype string, options []string) error { +func (n *NsenterMounter) Mount(source string, target string, fstype string, options []string) error { bind, bindRemountOpts := isBind(options) if bind { - err := doNsenterMount(source, target, fstype, []string{"bind"}) + err := n.doNsenterMount(source, target, fstype, []string{"bind"}) if err != nil { return err } - return doNsenterMount(source, target, fstype, bindRemountOpts) + return n.doNsenterMount(source, target, fstype, bindRemountOpts) } - return doNsenterMount(source, target, fstype, options) + return n.doNsenterMount(source, target, fstype, options) } // doNsenterMount nsenters the host's mount namespace and performs the // requested mount. -func doNsenterMount(source, target, fstype string, options []string) error { +func (n *NsenterMounter) doNsenterMount(source, target, fstype string, options []string) error { glog.V(5).Infof("nsenter Mounting %s %s %s %v", source, target, fstype, options) - args := makeNsenterArgs(source, target, fstype, options) + args := n.makeNsenterArgs(source, target, fstype, options) glog.V(5).Infof("Mount command: %v %v", nsenterPath, args) exec := exec.New() @@ -91,10 +126,11 @@ func doNsenterMount(source, target, fstype string, options []string) error { // makeNsenterArgs makes a list of argument to nsenter in order to do the // requested mount. 
-func makeNsenterArgs(source, target, fstype string, options []string) []string { +func (n *NsenterMounter) makeNsenterArgs(source, target, fstype string, options []string) []string { nsenterArgs := []string{ "--mount=/rootfs/proc/1/ns/mnt", - "/usr/bin/mount", + "--", + n.absHostPath("mount"), } args := makeMountArgs(source, target, fstype, options) @@ -103,10 +139,11 @@ func makeNsenterArgs(source, target, fstype string, options []string) []string { } // Unmount runs umount(8) in the host's mount namespace. -func (*NsenterMounter) Unmount(target string) error { +func (n *NsenterMounter) Unmount(target string) error { args := []string{ "--mount=/rootfs/proc/1/ns/mnt", - "/usr/bin/umount", + "--", + n.absHostPath("umount"), target, } @@ -127,13 +164,13 @@ func (*NsenterMounter) List() ([]MountPoint, error) { // IsMountPoint determines whether a path is a mountpoint by calling findmnt // in the host's root mount namespace. -func (*NsenterMounter) IsMountPoint(file string) (bool, error) { +func (n *NsenterMounter) IsMountPoint(file string) (bool, error) { file, err := filepath.Abs(file) if err != nil { return false, err } - args := []string{"--mount=/rootfs/proc/1/ns/mnt", "/usr/bin/findmnt", "-o", "target", "--noheadings", "--target", file} + args := []string{"--mount=/rootfs/proc/1/ns/mnt", "--", n.absHostPath("findmnt"), "-o", "target", "--noheadings", "--target", file} glog.V(5).Infof("findmnt command: %v %v", nsenterPath, args) exec := exec.New() @@ -151,3 +188,11 @@ func (*NsenterMounter) IsMountPoint(file string) (bool, error) { return false, nil } + +func (n *NsenterMounter) absHostPath(command string) string { + path, ok := n.paths[command] + if !ok { + return command + } + return path +} diff --git a/pkg/util/mount/nsenter_mount_unsupported.go b/pkg/util/mount/nsenter_mount_unsupported.go index 3fb5d3113f5..e47854735af 100644 --- a/pkg/util/mount/nsenter_mount_unsupported.go +++ b/pkg/util/mount/nsenter_mount_unsupported.go @@ -20,6 +20,10 @@ package mount 
type NsenterMounter struct{} +func NewNsenterMounter() *NsenterMounter { + return &NsenterMounter{} +} + var _ = Interface(&NsenterMounter{}) func (*NsenterMounter) Mount(source string, target string, fstype string, options []string) error { diff --git a/pkg/version/.gitattributes b/pkg/version/.gitattributes new file mode 100644 index 00000000000..7e349eff60b --- /dev/null +++ b/pkg/version/.gitattributes @@ -0,0 +1 @@ +base.go export-subst diff --git a/pkg/version/base.go b/pkg/version/base.go index 8452a9200f1..b055bed4dee 100644 --- a/pkg/version/base.go +++ b/pkg/version/base.go @@ -23,21 +23,35 @@ package version // version for ad-hoc builds (e.g. `go build`) that cannot get the version // information from git. // -// The "-dev" suffix in the version info indicates that fact, and it means the -// current build is from a version greater that version. For example, v0.7-dev -// means version > 0.7 and < 0.8. (There's exceptions to this rule, see -// docs/releasing.md for more details.) +// If you are looking at these fields in the git tree, they look +// strange. They are modified on the fly by the build process. The +// in-tree values are dummy values used for "git archive", which also +// works for GitHub tar downloads. // -// When releasing a new Kubernetes version, this file should be updated to -// reflect the new version, and then a git annotated tag (using format vX.Y -// where X == Major version and Y == Minor version) should be created to point -// to the commit that updates pkg/version/base.go - +// When releasing a new Kubernetes version, this file is updated by +// build/mark_new_version.sh to reflect the new version, and then a +// git annotated tag (using format vX.Y where X == Major version and Y +// == Minor version) is created to point to the commit that updates +// pkg/version/base.go var ( - // TODO: Deprecate gitMajor and gitMinor, use only gitVersion instead. 
- gitMajor string = "1" // major version, always numeric - gitMinor string = "0.0+" // minor version, numeric possibly followed by "+" - gitVersion string = "v1.0.0-dev" // version from git, output of $(git describe) - gitCommit string = "" // sha1 from git, output of $(git rev-parse HEAD) + // TODO: Deprecate gitMajor and gitMinor, use only gitVersion + // instead. First step in deprecation, keep the fields but make + // them irrelevant. (Next we'll take it out, which may muck with + // scripts consuming the kubectl version output - but most of + // these should be looking at gitVersion already anyways.) + gitMajor string = "" // major version, always numeric + gitMinor string = "" // minor version, numeric possibly followed by "+" + + // semantic version, dervied by build scripts (see + // https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/design/versioning.md + // for a detailed discussion of this field) + // + // TODO: This field is still called "gitVersion" for legacy + // reasons. For prerelease versions, the build metadata on the + // semantic version is a git hash, but the version itself is no + // longer the direct output of "git describe", but a slight + // translation to be semver compliant. 
+ gitVersion string = "v0.0.0-master+$Format:%h$" + gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" ) diff --git a/pkg/volume/aws_ebs/aws_ebs.go b/pkg/volume/aws_ebs/aws_ebs.go index b8b60a7c33c..b2342357a04 100644 --- a/pkg/volume/aws_ebs/aws_ebs.go +++ b/pkg/volume/aws_ebs/aws_ebs.go @@ -94,18 +94,19 @@ func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec, partition = strconv.Itoa(ebs.Partition) } - return &awsElasticBlockStore{ - podUID: podUID, - volName: spec.Name, - volumeID: volumeID, + return &awsElasticBlockStoreBuilder{ + awsElasticBlockStore: &awsElasticBlockStore{ + podUID: podUID, + volName: spec.Name, + volumeID: volumeID, + manager: manager, + mounter: mounter, + plugin: plugin, + }, fsType: fsType, partition: partition, readOnly: readOnly, - manager: manager, - mounter: mounter, - diskMounter: &awsSafeFormatAndMount{mounter, exec.New()}, - plugin: plugin, - }, nil + diskMounter: &awsSafeFormatAndMount{mounter, exec.New()}}, nil } func (plugin *awsElasticBlockStorePlugin) NewCleaner(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) { @@ -114,22 +115,21 @@ func (plugin *awsElasticBlockStorePlugin) NewCleaner(volName string, podUID type } func (plugin *awsElasticBlockStorePlugin) newCleanerInternal(volName string, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Cleaner, error) { - return &awsElasticBlockStore{ - podUID: podUID, - volName: volName, - manager: manager, - mounter: mounter, - diskMounter: &awsSafeFormatAndMount{mounter, exec.New()}, - plugin: plugin, - }, nil + return &awsElasticBlockStoreCleaner{&awsElasticBlockStore{ + podUID: podUID, + volName: volName, + manager: manager, + mounter: mounter, + plugin: plugin, + }}, nil } // Abstract interface to PD operations. 
type ebsManager interface { // Attaches the disk to the kubelet's host machine. - AttachAndMountDisk(ebs *awsElasticBlockStore, globalPDPath string) error + AttachAndMountDisk(b *awsElasticBlockStoreBuilder, globalPDPath string) error // Detaches the disk from the kubelet's host machine. - DetachDisk(ebs *awsElasticBlockStore) error + DetachDisk(c *awsElasticBlockStoreCleaner) error } // awsElasticBlockStore volumes are disk resources provided by Google Compute Engine @@ -139,23 +139,15 @@ type awsElasticBlockStore struct { podUID types.UID // Unique id of the PD, used to find the disk resource in the provider. volumeID string - // Filesystem type, optional. - fsType string - // Specifies the partition to mount - partition string - // Specifies whether the disk will be attached as read-only. - readOnly bool // Utility interface that provides API calls to the provider to attach/detach disks. manager ebsManager // Mounter interface that provides system calls to mount the global path to the pod local path. mounter mount.Interface - // diskMounter provides the interface that is used to mount the actual block device. - diskMounter mount.Interface - plugin *awsElasticBlockStorePlugin + plugin *awsElasticBlockStorePlugin } func detachDiskLogError(ebs *awsElasticBlockStore) { - err := ebs.manager.DetachDisk(ebs) + err := ebs.manager.DetachDisk(&awsElasticBlockStoreCleaner{ebs}) if err != nil { glog.Warningf("Failed to detach disk: %v (%v)", ebs, err) } @@ -175,15 +167,29 @@ func (ebs *awsElasticBlockStore) getVolumeProvider() (aws_cloud.Volumes, error) return volumes, nil } +type awsElasticBlockStoreBuilder struct { + *awsElasticBlockStore + // Filesystem type, optional. + fsType string + // Specifies the partition to mount + partition string + // Specifies whether the disk will be attached as read-only. + readOnly bool + // diskMounter provides the interface that is used to mount the actual block device. 
+ diskMounter mount.Interface +} + +var _ volume.Builder = &awsElasticBlockStoreBuilder{} + // SetUp attaches the disk and bind mounts to the volume path. -func (ebs *awsElasticBlockStore) SetUp() error { - return ebs.SetUpAt(ebs.GetPath()) +func (b *awsElasticBlockStoreBuilder) SetUp() error { + return b.SetUpAt(b.GetPath()) } // SetUpAt attaches the disk and bind mounts to the volume path. -func (ebs *awsElasticBlockStore) SetUpAt(dir string) error { +func (b *awsElasticBlockStoreBuilder) SetUpAt(dir string) error { // TODO: handle failed mounts here. - mountpoint, err := ebs.mounter.IsMountPoint(dir) + mountpoint, err := b.mounter.IsMountPoint(dir) glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, mountpoint, err) if err != nil && !os.IsNotExist(err) { return err @@ -192,35 +198,35 @@ func (ebs *awsElasticBlockStore) SetUpAt(dir string) error { return nil } - globalPDPath := makeGlobalPDPath(ebs.plugin.host, ebs.volumeID) - if err := ebs.manager.AttachAndMountDisk(ebs, globalPDPath); err != nil { + globalPDPath := makeGlobalPDPath(b.plugin.host, b.volumeID) + if err := b.manager.AttachAndMountDisk(b, globalPDPath); err != nil { return err } if err := os.MkdirAll(dir, 0750); err != nil { // TODO: we should really eject the attach/detach out into its own control loop. - detachDiskLogError(ebs) + detachDiskLogError(b.awsElasticBlockStore) return err } // Perform a bind mount to the full path to allow duplicate mounts of the same PD. 
options := []string{"bind"} - if ebs.readOnly { + if b.readOnly { options = append(options, "ro") } - err = ebs.mounter.Mount(globalPDPath, dir, "", options) + err = b.mounter.Mount(globalPDPath, dir, "", options) if err != nil { - mountpoint, mntErr := ebs.mounter.IsMountPoint(dir) + mountpoint, mntErr := b.mounter.IsMountPoint(dir) if mntErr != nil { glog.Errorf("isMountpoint check failed: %v", mntErr) return err } if mountpoint { - if mntErr = ebs.mounter.Unmount(dir); mntErr != nil { + if mntErr = b.mounter.Unmount(dir); mntErr != nil { glog.Errorf("Failed to unmount: %v", mntErr) return err } - mountpoint, mntErr := ebs.mounter.IsMountPoint(dir) + mountpoint, mntErr := b.mounter.IsMountPoint(dir) if mntErr != nil { glog.Errorf("isMountpoint check failed: %v", mntErr) return err @@ -233,15 +239,15 @@ func (ebs *awsElasticBlockStore) SetUpAt(dir string) error { } os.Remove(dir) // TODO: we should really eject the attach/detach out into its own control loop. - detachDiskLogError(ebs) + detachDiskLogError(b.awsElasticBlockStore) return err } return nil } -func (pd *awsElasticBlockStore) IsReadOnly() bool { - return pd.readOnly +func (b *awsElasticBlockStoreBuilder) IsReadOnly() bool { + return b.readOnly } func makeGlobalPDPath(host volume.VolumeHost, volumeID string) string { @@ -274,16 +280,22 @@ func (ebs *awsElasticBlockStore) GetPath() string { return ebs.plugin.host.GetPodVolumeDir(ebs.podUID, util.EscapeQualifiedNameForDisk(name), ebs.volName) } +type awsElasticBlockStoreCleaner struct { + *awsElasticBlockStore +} + +var _ volume.Cleaner = &awsElasticBlockStoreCleaner{} + // Unmounts the bind mount, and detaches the disk only if the PD // resource was the last reference to that disk on the kubelet. 
-func (ebs *awsElasticBlockStore) TearDown() error { - return ebs.TearDownAt(ebs.GetPath()) +func (c *awsElasticBlockStoreCleaner) TearDown() error { + return c.TearDownAt(c.GetPath()) } // Unmounts the bind mount, and detaches the disk only if the PD // resource was the last reference to that disk on the kubelet. -func (ebs *awsElasticBlockStore) TearDownAt(dir string) error { - mountpoint, err := ebs.mounter.IsMountPoint(dir) +func (c *awsElasticBlockStoreCleaner) TearDownAt(dir string) error { + mountpoint, err := c.mounter.IsMountPoint(dir) if err != nil { glog.V(2).Info("Error checking if mountpoint ", dir, ": ", err) return err @@ -293,7 +305,7 @@ func (ebs *awsElasticBlockStore) TearDownAt(dir string) error { return os.Remove(dir) } - refs, err := mount.GetMountRefs(ebs.mounter, dir) + refs, err := mount.GetMountRefs(c.mounter, dir) if err != nil { glog.V(2).Info("Error getting mountrefs for ", dir, ": ", err) return err @@ -302,27 +314,27 @@ func (ebs *awsElasticBlockStore) TearDownAt(dir string) error { glog.Warning("Did not find pod-mount for ", dir, " during tear-down") } // Unmount the bind-mount inside this pod - if err := ebs.mounter.Unmount(dir); err != nil { + if err := c.mounter.Unmount(dir); err != nil { glog.V(2).Info("Error unmounting dir ", dir, ": ", err) return err } // If len(refs) is 1, then all bind mounts have been removed, and the // remaining reference is the global mount. It is safe to detach. if len(refs) == 1 { - // ebs.volumeID is not initially set for volume-cleaners, so set it here. - ebs.volumeID, err = getVolumeIDFromGlobalMount(ebs.plugin.host, refs[0]) + // c.volumeID is not initially set for volume-cleaners, so set it here. 
+ c.volumeID, err = getVolumeIDFromGlobalMount(c.plugin.host, refs[0]) if err != nil { glog.V(2).Info("Could not determine volumeID from mountpoint ", refs[0], ": ", err) return err } - if err := ebs.manager.DetachDisk(ebs); err != nil { - glog.V(2).Info("Error detaching disk ", ebs.volumeID, ": ", err) + if err := c.manager.DetachDisk(&awsElasticBlockStoreCleaner{c.awsElasticBlockStore}); err != nil { + glog.V(2).Info("Error detaching disk ", c.volumeID, ": ", err) return err } } else { glog.V(2).Infof("Found multiple refs; won't detach EBS volume: %v", refs) } - mountpoint, mntErr := ebs.mounter.IsMountPoint(dir) + mountpoint, mntErr := c.mounter.IsMountPoint(dir) if mntErr != nil { glog.Errorf("isMountpoint check failed: %v", mntErr) return err diff --git a/pkg/volume/aws_ebs/aws_ebs_test.go b/pkg/volume/aws_ebs/aws_ebs_test.go index 97bbc9b6b0b..ffe09b03d9d 100644 --- a/pkg/volume/aws_ebs/aws_ebs_test.go +++ b/pkg/volume/aws_ebs/aws_ebs_test.go @@ -76,8 +76,8 @@ type fakePDManager struct{} // TODO(jonesdl) To fully test this, we could create a loopback device // and mount that instead. 
-func (fake *fakePDManager) AttachAndMountDisk(pd *awsElasticBlockStore, globalPDPath string) error { - globalPath := makeGlobalPDPath(pd.plugin.host, pd.volumeID) +func (fake *fakePDManager) AttachAndMountDisk(b *awsElasticBlockStoreBuilder, globalPDPath string) error { + globalPath := makeGlobalPDPath(b.plugin.host, b.volumeID) err := os.MkdirAll(globalPath, 0750) if err != nil { return err @@ -85,8 +85,8 @@ func (fake *fakePDManager) AttachAndMountDisk(pd *awsElasticBlockStore, globalPD return nil } -func (fake *fakePDManager) DetachDisk(pd *awsElasticBlockStore) error { - globalPath := makeGlobalPDPath(pd.plugin.host, pd.volumeID) +func (fake *fakePDManager) DetachDisk(c *awsElasticBlockStoreCleaner) error { + globalPath := makeGlobalPDPath(c.plugin.host, c.volumeID) err := os.RemoveAll(globalPath) if err != nil { return err @@ -206,3 +206,32 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) { t.Errorf("Expected true for builder.IsReadOnly") } } + +func TestBuilderAndCleanerTypeAssert(t *testing.T) { + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) + + plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs") + if err != nil { + t.Errorf("Can't find the plugin by name") + } + spec := &api.Volume{ + Name: "vol1", + VolumeSource: api.VolumeSource{ + AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{ + VolumeID: "pd", + FSType: "ext4", + }, + }, + } + + builder, err := plug.(*awsElasticBlockStorePlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{}) + if _, ok := builder.(volume.Cleaner); ok { + t.Errorf("Volume Builder can be type-assert to Cleaner") + } + + cleaner, err := plug.(*awsElasticBlockStorePlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{}) + if _, ok := cleaner.(volume.Builder); ok { + t.Errorf("Volume Cleaner can be type-assert to Builder") + } 
+} diff --git a/pkg/volume/aws_ebs/aws_util.go b/pkg/volume/aws_ebs/aws_util.go index a23c1c35835..5d1b0e6cda0 100644 --- a/pkg/volume/aws_ebs/aws_util.go +++ b/pkg/volume/aws_ebs/aws_util.go @@ -31,17 +31,17 @@ type AWSDiskUtil struct{} // Attaches a disk specified by a volume.AWSElasticBlockStore to the current kubelet. // Mounts the disk to it's global path. -func (util *AWSDiskUtil) AttachAndMountDisk(pd *awsElasticBlockStore, globalPDPath string) error { - volumes, err := pd.getVolumeProvider() +func (util *AWSDiskUtil) AttachAndMountDisk(b *awsElasticBlockStoreBuilder, globalPDPath string) error { + volumes, err := b.getVolumeProvider() if err != nil { return err } - devicePath, err := volumes.AttachDisk("", pd.volumeID, pd.readOnly) + devicePath, err := volumes.AttachDisk("", b.volumeID, b.readOnly) if err != nil { return err } - if pd.partition != "" { - devicePath = devicePath + pd.partition + if b.partition != "" { + devicePath = devicePath + b.partition } //TODO(jonesdl) There should probably be better method than busy-waiting here. numTries := 0 @@ -61,7 +61,7 @@ func (util *AWSDiskUtil) AttachAndMountDisk(pd *awsElasticBlockStore, globalPDPa } // Only mount the PD globally once. 
- mountpoint, err := pd.mounter.IsMountPoint(globalPDPath) + mountpoint, err := b.mounter.IsMountPoint(globalPDPath) if err != nil { if os.IsNotExist(err) { if err := os.MkdirAll(globalPDPath, 0750); err != nil { @@ -73,11 +73,11 @@ func (util *AWSDiskUtil) AttachAndMountDisk(pd *awsElasticBlockStore, globalPDPa } } options := []string{} - if pd.readOnly { + if b.readOnly { options = append(options, "ro") } if !mountpoint { - err = pd.diskMounter.Mount(devicePath, globalPDPath, pd.fsType, options) + err = b.diskMounter.Mount(devicePath, globalPDPath, b.fsType, options) if err != nil { os.Remove(globalPDPath) return err @@ -87,10 +87,10 @@ func (util *AWSDiskUtil) AttachAndMountDisk(pd *awsElasticBlockStore, globalPDPa } // Unmounts the device and detaches the disk from the kubelet's host machine. -func (util *AWSDiskUtil) DetachDisk(pd *awsElasticBlockStore) error { +func (util *AWSDiskUtil) DetachDisk(c *awsElasticBlockStoreCleaner) error { // Unmount the global PD mount, which should be the only one. 
- globalPDPath := makeGlobalPDPath(pd.plugin.host, pd.volumeID) - if err := pd.mounter.Unmount(globalPDPath); err != nil { + globalPDPath := makeGlobalPDPath(c.plugin.host, c.volumeID) + if err := c.mounter.Unmount(globalPDPath); err != nil { glog.V(2).Info("Error unmount dir ", globalPDPath, ": ", err) return err } @@ -99,13 +99,13 @@ func (util *AWSDiskUtil) DetachDisk(pd *awsElasticBlockStore) error { return err } // Detach the disk - volumes, err := pd.getVolumeProvider() + volumes, err := c.getVolumeProvider() if err != nil { - glog.V(2).Info("Error getting volume provider for volumeID ", pd.volumeID, ": ", err) + glog.V(2).Info("Error getting volume provider for volumeID ", c.volumeID, ": ", err) return err } - if err := volumes.DetachDisk("", pd.volumeID); err != nil { - glog.V(2).Info("Error detaching disk ", pd.volumeID, ": ", err) + if err := volumes.DetachDisk("", c.volumeID); err != nil { + glog.V(2).Info("Error detaching disk ", c.volumeID, ": ", err) return err } return nil diff --git a/pkg/volume/persistent_claim/persistent_claim_test.go b/pkg/volume/persistent_claim/persistent_claim_test.go index b18db424621..a325f9e8b10 100644 --- a/pkg/volume/persistent_claim/persistent_claim_test.go +++ b/pkg/volume/persistent_claim/persistent_claim_test.go @@ -23,7 +23,6 @@ import ( "testing" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient" "github.com/GoogleCloudPlatform/kubernetes/pkg/types" @@ -238,7 +237,7 @@ func TestNewBuilder(t *testing.T) { o := testclient.NewObjects(api.Scheme, api.Scheme) o.Add(item.pv) o.Add(item.claim) - client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, api.RESTMapper)} plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(testProbeVolumePlugins(), 
newTestHost(t, client)) @@ -295,7 +294,7 @@ func TestNewBuilderClaimNotBound(t *testing.T) { o := testclient.NewObjects(api.Scheme, api.Scheme) o.Add(pv) o.Add(claim) - client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, api.RESTMapper)} plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(testProbeVolumePlugins(), newTestHost(t, client)) diff --git a/pkg/volumeclaimbinder/persistent_volume_claim_binder_test.go b/pkg/volumeclaimbinder/persistent_volume_claim_binder_test.go index 858d26ad667..1a8548ef347 100644 --- a/pkg/volumeclaimbinder/persistent_volume_claim_binder_test.go +++ b/pkg/volumeclaimbinder/persistent_volume_claim_binder_test.go @@ -23,7 +23,6 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume" @@ -32,7 +31,7 @@ import ( func TestRunStop(t *testing.T) { o := testclient.NewObjects(api.Scheme, api.Scheme) - client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, api.RESTMapper)} binder := NewPersistentVolumeClaimBinder(client, 1*time.Second) if len(binder.stopChannels) != 0 { @@ -119,7 +118,7 @@ func TestExampleObjects(t *testing.T) { t.Fatal(err) } - client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, api.RESTMapper)} if reflect.TypeOf(scenario.expected) == reflect.TypeOf(&api.PersistentVolumeClaim{}) { pvc, err := client.PersistentVolumeClaims("ns").Get("doesntmatter") @@ -179,7 +178,7 @@ func TestBindingWithExamples(t *testing.T) { t.Fatal(err) } - 
client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, api.RESTMapper)} pv, err := client.PersistentVolumes().Get("any") pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimRecycle @@ -282,7 +281,7 @@ func TestMissingFromIndex(t *testing.T) { t.Fatal(err) } - client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, api.RESTMapper)} pv, err := client.PersistentVolumes().Get("any") if err != nil { diff --git a/plugin/pkg/admission/namespace/autoprovision/admission.go b/plugin/pkg/admission/namespace/autoprovision/admission.go index 334ffa21008..4ce1e04a3ce 100644 --- a/plugin/pkg/admission/namespace/autoprovision/admission.go +++ b/plugin/pkg/admission/namespace/autoprovision/admission.go @@ -22,7 +22,6 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/admission" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache" @@ -48,11 +47,11 @@ type provision struct { } func (p *provision) Admit(a admission.Attributes) (err error) { - defaultVersion, kind, err := latest.RESTMapper.VersionAndKindForResource(a.GetResource()) + defaultVersion, kind, err := api.RESTMapper.VersionAndKindForResource(a.GetResource()) if err != nil { return admission.NewForbidden(a, err) } - mapping, err := latest.RESTMapper.RESTMapping(kind, defaultVersion) + mapping, err := api.RESTMapper.RESTMapping(kind, defaultVersion) if err != nil { return admission.NewForbidden(a, err) } diff --git a/plugin/pkg/admission/namespace/exists/admission.go b/plugin/pkg/admission/namespace/exists/admission.go index 
12dfd48177f..eab01285671 100644 --- a/plugin/pkg/admission/namespace/exists/admission.go +++ b/plugin/pkg/admission/namespace/exists/admission.go @@ -23,7 +23,6 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/admission" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache" @@ -49,11 +48,11 @@ type exists struct { } func (e *exists) Admit(a admission.Attributes) (err error) { - defaultVersion, kind, err := latest.RESTMapper.VersionAndKindForResource(a.GetResource()) + defaultVersion, kind, err := api.RESTMapper.VersionAndKindForResource(a.GetResource()) if err != nil { return admission.NewForbidden(a, err) } - mapping, err := latest.RESTMapper.RESTMapping(kind, defaultVersion) + mapping, err := api.RESTMapper.RESTMapping(kind, defaultVersion) if err != nil { return admission.NewForbidden(a, err) } diff --git a/plugin/pkg/admission/namespace/lifecycle/admission.go b/plugin/pkg/admission/namespace/lifecycle/admission.go index 16b70bebf09..9c6049a58e0 100644 --- a/plugin/pkg/admission/namespace/lifecycle/admission.go +++ b/plugin/pkg/admission/namespace/lifecycle/admission.go @@ -23,7 +23,6 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/admission" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache" @@ -59,11 +58,11 @@ func (l *lifecycle) Admit(a admission.Attributes) (err error) { return nil } - defaultVersion, kind, err := latest.RESTMapper.VersionAndKindForResource(a.GetResource()) + defaultVersion, kind, err := 
api.RESTMapper.VersionAndKindForResource(a.GetResource()) if err != nil { return admission.NewForbidden(a, err) } - mapping, err := latest.RESTMapper.RESTMapping(kind, defaultVersion) + mapping, err := api.RESTMapper.RESTMapping(kind, defaultVersion) if err != nil { return admission.NewForbidden(a, err) } diff --git a/plugin/pkg/scheduler/algorithm/listers.go b/plugin/pkg/scheduler/algorithm/listers.go index 50ecc996b08..66edac545b6 100644 --- a/plugin/pkg/scheduler/algorithm/listers.go +++ b/plugin/pkg/scheduler/algorithm/listers.go @@ -66,7 +66,7 @@ type ServiceLister interface { // FakeServiceLister implements ServiceLister on []api.Service for test purposes. type FakeServiceLister []api.Service -// FakeServiceLister returns api.ServiceList, the list of all services. +// List returns api.ServiceList, the list of all services. func (f FakeServiceLister) List() (api.ServiceList, error) { return api.ServiceList{Items: f}, nil } @@ -91,3 +91,39 @@ func (f FakeServiceLister) GetPodServices(pod *api.Pod) (services []api.Service, return } + +// ControllerLister interface represents anything that can produce a list of ReplicationController; the list is consumed by a scheduler. +type ControllerLister interface { + // Lists all the replication controllers + List() ([]api.ReplicationController, error) + // Gets the services for the given pod + GetPodControllers(*api.Pod) ([]api.ReplicationController, error) +} + +// FakeControllerLister implements ControllerLister on []api.ReplicationController for test purposes. +type FakeControllerLister []api.ReplicationController + +// List returns []api.ReplicationController, the list of all ReplicationControllers. 
+func (f FakeControllerLister) List() ([]api.ReplicationController, error) { + return f, nil +} + +// GetPodControllers gets the ReplicationControllers that have the selector that match the labels on the given pod +func (f FakeControllerLister) GetPodControllers(pod *api.Pod) (controllers []api.ReplicationController, err error) { + var selector labels.Selector + + for _, controller := range f { + if controller.Namespace != pod.Namespace { + continue + } + selector = labels.Set(controller.Spec.Selector).AsSelector() + if selector.Matches(labels.Set(pod.Labels)) { + controllers = append(controllers, controller) + } + } + if len(controllers) == 0 { + err = fmt.Errorf("Could not find Replication Controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return +} diff --git a/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go b/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go index 08fcfaf12c2..253095a4b09 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go @@ -132,7 +132,7 @@ func TestZeroLimit(t *testing.T) { // This should match the configuration in defaultPriorities() in // plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go if you want // to test what's actually in production. 
- []algorithm.PriorityConfig{{Function: LeastRequestedPriority, Weight: 1}, {Function: BalancedResourceAllocation, Weight: 1}, {Function: NewServiceSpreadPriority(algorithm.FakeServiceLister([]api.Service{})), Weight: 1}}, + []algorithm.PriorityConfig{{Function: LeastRequestedPriority, Weight: 1}, {Function: BalancedResourceAllocation, Weight: 1}, {Function: NewSelectorSpreadPriority(algorithm.FakeServiceLister([]api.Service{}), algorithm.FakeControllerLister([]api.ReplicationController{})), Weight: 1}}, algorithm.FakeMinionLister(api.NodeList{Items: test.nodes})) if err != nil { t.Errorf("unexpected error: %v", err) diff --git a/plugin/pkg/scheduler/algorithm/priorities/service_spreading.go b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go similarity index 69% rename from plugin/pkg/scheduler/algorithm/priorities/service_spreading.go rename to plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go index 663f638ea19..0a37739f4c0 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/service_spreading.go +++ b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go @@ -23,36 +23,49 @@ import ( "github.com/golang/glog" ) -type ServiceSpread struct { - serviceLister algorithm.ServiceLister +type SelectorSpread struct { + serviceLister algorithm.ServiceLister + controllerLister algorithm.ControllerLister } -func NewServiceSpreadPriority(serviceLister algorithm.ServiceLister) algorithm.PriorityFunction { - serviceSpread := &ServiceSpread{ - serviceLister: serviceLister, +func NewSelectorSpreadPriority(serviceLister algorithm.ServiceLister, controllerLister algorithm.ControllerLister) algorithm.PriorityFunction { + selectorSpread := &SelectorSpread{ + serviceLister: serviceLister, + controllerLister: controllerLister, } - return serviceSpread.CalculateSpreadPriority + return selectorSpread.CalculateSpreadPriority } -// CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service -// on the same 
machine. -func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) { +// CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service or replication controller. It counts number of pods that run under +// Services or RCs as the pod being scheduled and tries to minimize the number of conflicts. I.e. pushes scheduler towards a Node where there's a smallest number of +// pods which match the same selectors of Services and RCs as current pod. +func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) { var maxCount int - var nsServicePods []*api.Pod + var nsPods []*api.Pod + selectors := make([]labels.Selector, 0) services, err := s.serviceLister.GetPodServices(pod) if err == nil { - // just use the first service and get the other pods within the service - // TODO: a separate predicate can be created that tries to handle all services for the pod - selector := labels.SelectorFromSet(services[0].Spec.Selector) - pods, err := podLister.List(selector) + for _, service := range services { + selectors = append(selectors, labels.SelectorFromSet(service.Spec.Selector)) + } + } + controllers, err := s.controllerLister.GetPodControllers(pod) + if err == nil { + for _, controller := range controllers { + selectors = append(selectors, labels.SelectorFromSet(controller.Spec.Selector)) + } + } + + if len(selectors) > 0 { + pods, err := podLister.List(labels.Everything()) if err != nil { return nil, err } // consider only the pods that belong to the same namespace for _, nsPod := range pods { if nsPod.Namespace == pod.Namespace { - nsServicePods = append(nsServicePods, nsPod) + nsPods = append(nsPods, nsPod) } } } @@ -63,12 +76,21 @@ func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorith } counts := 
map[string]int{} - if len(nsServicePods) > 0 { - for _, pod := range nsServicePods { - counts[pod.Spec.NodeName]++ - // Compute the maximum number of pods hosted on any minion - if counts[pod.Spec.NodeName] > maxCount { - maxCount = counts[pod.Spec.NodeName] + if len(nsPods) > 0 { + for _, pod := range nsPods { + matches := false + for _, selector := range selectors { + if selector.Matches(labels.Set(pod.ObjectMeta.Labels)) { + matches = true + break + } + } + if matches { + counts[pod.Spec.NodeName]++ + // Compute the maximum number of pods hosted on any minion + if counts[pod.Spec.NodeName] > maxCount { + maxCount = counts[pod.Spec.NodeName] + } } } } @@ -84,7 +106,7 @@ func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorith } result = append(result, algorithm.HostPriority{Host: minion.Name, Score: int(fScore)}) glog.V(10).Infof( - "%v -> %v: ServiceSpreadPriority, Score: (%d)", pod.Name, minion.Name, int(fScore), + "%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, minion.Name, int(fScore), ) } return result, nil diff --git a/plugin/pkg/scheduler/algorithm/priorities/service_spreading_test.go b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go similarity index 79% rename from plugin/pkg/scheduler/algorithm/priorities/service_spreading_test.go rename to plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go index 1e5a2660b17..369c1fbc6ef 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/service_spreading_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go @@ -25,7 +25,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm" ) -func TestServiceSpreadPriority(t *testing.T) { +func TestSelectorSpreadPriority(t *testing.T) { labels1 := map[string]string{ "foo": "bar", "baz": "blah", @@ -44,6 +44,7 @@ func TestServiceSpreadPriority(t *testing.T) { pod *api.Pod pods []*api.Pod nodes []string + rcs []api.ReplicationController services 
[]api.Service expectedList algorithm.HostPriorityList test string @@ -158,11 +159,65 @@ func TestServiceSpreadPriority(t *testing.T) { expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}}, test: "service with partial pod label matches", }, + { + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{ + {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + }, + nodes: []string{"machine1", "machine2"}, + services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, + rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, + // "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels 1. This means that we assume that we want to + // do spreading between all pods. The result should be exactly as above. + expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}}, + test: "service with partial pod label matches with service and replication controller", + }, + { + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}}}, + pods: []*api.Pod{ + {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + }, + nodes: []string{"machine1", "machine2"}, + services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, + rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, + // Taken together Service and Replication Controller should match all Pods, hence result should be equal to one above. 
+ expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}}, + test: "disjoined service and replication controller should be treated equally", + }, + { + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{ + {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + }, + nodes: []string{"machine1", "machine2"}, + rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, + // Both Nodes have one pod from the given RC, hence both get 0 score. + expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}}, + test: "Replication controller with partial pod label matches", + }, + { + pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*api.Pod{ + {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + }, + nodes: []string{"machine1", "machine2"}, + rcs: []api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}}, + expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 5}}, + test: "Replication controller with partial pod label matches", + }, } for _, test := range tests { - serviceSpread := ServiceSpread{serviceLister: algorithm.FakeServiceLister(test.services)} - list, err := serviceSpread.CalculateSpreadPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(makeNodeList(test.nodes))) + selectorSpread := SelectorSpread{serviceLister: algorithm.FakeServiceLister(test.services), controllerLister: algorithm.FakeControllerLister(test.rcs)} + list, err := selectorSpread.CalculateSpreadPriority(test.pod, algorithm.FakePodLister(test.pods), 
algorithm.FakeMinionLister(makeNodeList(test.nodes))) if err != nil { t.Errorf("unexpected error: %v", err) } diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go index 1546412b78a..8d9cc9be9ce 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -67,10 +67,10 @@ func defaultPriorities() util.StringSet { factory.RegisterPriorityFunction("BalancedResourceAllocation", priorities.BalancedResourceAllocation, 1), // spreads pods by minimizing the number of pods (belonging to the same service) on the same minion. factory.RegisterPriorityConfigFactory( - "ServiceSpreadingPriority", + "SelectorSpreadPriority", factory.PriorityConfigFactory{ Function: func(args factory.PluginFactoryArgs) algorithm.PriorityFunction { - return priorities.NewServiceSpreadPriority(args.ServiceLister) + return priorities.NewSelectorSpreadPriority(args.ServiceLister, args.ControllerLister) }, Weight: 1, }, diff --git a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go index b5faa39c4d5..a1485d1c7e8 100644 --- a/plugin/pkg/scheduler/factory/factory.go +++ b/plugin/pkg/scheduler/factory/factory.go @@ -58,6 +58,8 @@ type ConfigFactory struct { NodeLister *cache.StoreToNodeLister // a means to list all services ServiceLister *cache.StoreToServiceLister + // a means to list all controllers + ControllerLister *cache.StoreToReplicationControllerLister // Close this to stop all reflectors StopEverything chan struct{} @@ -75,9 +77,10 @@ func NewConfigFactory(client *client.Client) *ConfigFactory { PodQueue: cache.NewFIFO(cache.MetaNamespaceKeyFunc), ScheduledPodLister: &cache.StoreToPodLister{}, // Only nodes in the "Ready" condition with status == "True" are schedulable - NodeLister: &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}, - ServiceLister: 
&cache.StoreToServiceLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}, - StopEverything: make(chan struct{}), + NodeLister: &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}, + ServiceLister: &cache.StoreToServiceLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}, + ControllerLister: &cache.StoreToReplicationControllerLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}, + StopEverything: make(chan struct{}), } modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{c.PodQueue}, c.ScheduledPodLister) c.modeler = modeler @@ -160,8 +163,9 @@ func (f *ConfigFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSet) (*scheduler.Config, error) { glog.V(2).Infof("creating scheduler with fit predicates '%v' and priority functions '%v", predicateKeys, priorityKeys) pluginArgs := PluginFactoryArgs{ - PodLister: f.PodLister, - ServiceLister: f.ServiceLister, + PodLister: f.PodLister, + ServiceLister: f.ServiceLister, + ControllerLister: f.ControllerLister, // All fit predicates only need to consider schedulable nodes. NodeLister: f.NodeLister.NodeCondition(api.NodeReady, api.ConditionTrue), NodeInfo: f.NodeLister, @@ -187,10 +191,15 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSe cache.NewReflector(f.createMinionLW(), &api.Node{}, f.NodeLister.Store, 0).RunUntil(f.StopEverything) // Watch and cache all service objects. Scheduler needs to find all pods - // created by the same service, so that it can spread them correctly. + // created by the same services or ReplicationControllers, so that it can spread them correctly. // Cache this locally. cache.NewReflector(f.createServiceLW(), &api.Service{}, f.ServiceLister.Store, 0).RunUntil(f.StopEverything) + // Watch and cache all ReplicationController objects. 
Scheduler needs to find all pods + // created by the same services or ReplicationControllers, so that it can spread them correctly. + // Cache this locally. + cache.NewReflector(f.createControllerLW(), &api.ReplicationController{}, f.ControllerLister.Store, 0).RunUntil(f.StopEverything) + r := rand.New(rand.NewSource(time.Now().UnixNano())) algo := scheduler.NewGenericScheduler(predicateFuncs, priorityConfigs, f.PodLister, r) @@ -254,6 +263,11 @@ func (factory *ConfigFactory) createServiceLW() *cache.ListWatch { return cache.NewListWatchFromClient(factory.Client, "services", api.NamespaceAll, parseSelectorOrDie("")) } +// Returns a cache.ListWatch that gets all changes to controllers. +func (factory *ConfigFactory) createControllerLW() *cache.ListWatch { + return cache.NewListWatchFromClient(factory.Client, "replicationControllers", api.NamespaceAll, parseSelectorOrDie("")) +} + func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue *cache.FIFO) func(pod *api.Pod, err error) { return func(pod *api.Pod, err error) { if err == scheduler.ErrNoNodesAvailable { diff --git a/plugin/pkg/scheduler/factory/plugins.go b/plugin/pkg/scheduler/factory/plugins.go index 78d206db4e0..16d34d8027d 100644 --- a/plugin/pkg/scheduler/factory/plugins.go +++ b/plugin/pkg/scheduler/factory/plugins.go @@ -35,6 +35,7 @@ import ( type PluginFactoryArgs struct { algorithm.PodLister algorithm.ServiceLister + algorithm.ControllerLister NodeLister algorithm.MinionLister NodeInfo predicates.NodeInfo } diff --git a/test/e2e/examples.go b/test/e2e/examples.go index 1b8045b2e5d..60bfb248609 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -34,9 +34,10 @@ import ( ) const ( - podListTimeout = time.Minute - serverStartTimeout = podStartTimeout + 3*time.Minute - dnsReadyTimeout = time.Minute + podListTimeout = time.Minute + serverStartTimeout = podStartTimeout + 3*time.Minute + dnsReadyTimeout = time.Minute + endpointRegisterTimeout = time.Minute ) const 
queryDnsPythonTemplate string = ` @@ -161,7 +162,7 @@ var _ = Describe("Examples e2e", func() { forEachPod(c, ns, "component", "flower", func(pod api.Pod) { // Do nothing. just wait for it to be up and running. }) - content, err := makeHttpRequestToService(c, ns, "flower-service", "/") + content, err := makeHttpRequestToService(c, ns, "flower-service", "/", endpointRegisterTimeout) Expect(err).NotTo(HaveOccurred()) if !strings.Contains(content, "Celery Flower") { Failf("Flower HTTP request failed") @@ -392,7 +393,7 @@ var _ = Describe("Examples e2e", func() { err := waitForPodRunningInNamespace(c, "rethinkdb-admin", ns) Expect(err).NotTo(HaveOccurred()) checkDbInstances() - content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/") + content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", endpointRegisterTimeout) Expect(err).NotTo(HaveOccurred()) if !strings.Contains(content, "RethinkDB Administration Console") { Failf("RethinkDB console is not running") @@ -526,15 +527,22 @@ var _ = Describe("Examples e2e", func() { }) }) -func makeHttpRequestToService(c *client.Client, ns, service, path string) (string, error) { - result, err := c.Get(). - Prefix("proxy"). - Namespace(ns). - Resource("services"). - Name(service). - Suffix(path). - Do(). - Raw() +func makeHttpRequestToService(c *client.Client, ns, service, path string, timeout time.Duration) (string, error) { + var result []byte + var err error + for t := time.Now(); time.Since(t) < timeout; time.Sleep(poll) { + result, err = c.Get(). + Prefix("proxy"). + Namespace(ns). + Resource("services"). + Name(service). + Suffix(path). + Do(). 
+ Raw() + if err != nil { + break + } + } return string(result), err } diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go index 29f069ee6d1..3a64135db65 100644 --- a/test/e2e/kubectl.go +++ b/test/e2e/kubectl.go @@ -104,10 +104,10 @@ var _ = Describe("Kubectl client", func() { runKubectl("create", "-f", nautilusPath, fmt.Sprintf("--namespace=%v", ns)) validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) By("scaling down the replication controller") - runKubectl("scale", "rc", "update-demo-nautilus", "--replicas=1", fmt.Sprintf("--namespace=%v", ns)) + runKubectl("scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) validateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) By("scaling up the replication controller") - runKubectl("scale", "rc", "update-demo-nautilus", "--replicas=2", fmt.Sprintf("--namespace=%v", ns)) + runKubectl("scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns)) validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns) }) diff --git a/test/integration/etcd_tools_test.go b/test/integration/etcd_tools_test.go index 258b20787be..f28fda0eb58 100644 --- a/test/integration/etcd_tools_test.go +++ b/test/integration/etcd_tools_test.go @@ -25,7 +25,8 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" "github.com/GoogleCloudPlatform/kubernetes/pkg/watch" "github.com/GoogleCloudPlatform/kubernetes/test/integration/framework" @@ 
-33,7 +34,7 @@ import ( func TestSet(t *testing.T) { client := framework.NewEtcdClient() - etcdStorage := tools.NewEtcdStorage(client, testapi.Codec(), "") + etcdStorage := etcd.NewEtcdStorage(client, testapi.Codec(), "") framework.WithEtcdKey(func(key string) { testObject := api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: "foo"}} if err := etcdStorage.Set(key, &testObject, nil, 0); err != nil { @@ -56,7 +57,7 @@ func TestSet(t *testing.T) { func TestGet(t *testing.T) { client := framework.NewEtcdClient() - etcdStorage := tools.NewEtcdStorage(client, testapi.Codec(), "") + etcdStorage := etcd.NewEtcdStorage(client, testapi.Codec(), "") framework.WithEtcdKey(func(key string) { testObject := api.ServiceAccount{ObjectMeta: api.ObjectMeta{Name: "foo"}} coded, err := testapi.Codec().Encode(&testObject) @@ -81,7 +82,7 @@ func TestGet(t *testing.T) { func TestWatch(t *testing.T) { client := framework.NewEtcdClient() - etcdStorage := tools.NewEtcdStorage(client, testapi.Codec(), etcdtest.PathPrefix()) + etcdStorage := etcd.NewEtcdStorage(client, testapi.Codec(), etcdtest.PathPrefix()) framework.WithEtcdKey(func(key string) { key = etcdtest.AddPrefix(key) resp, err := client.Set(key, runtime.EncodeOrDie(testapi.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}), 0) @@ -91,7 +92,7 @@ func TestWatch(t *testing.T) { expectedVersion := resp.Node.ModifiedIndex // watch should load the object at the current index - w, err := etcdStorage.Watch(key, 0, tools.Everything) + w, err := etcdStorage.Watch(key, 0, storage.Everything) if err != nil { t.Fatalf("Unexpected error: %v", err) } diff --git a/test/integration/framework/etcd_utils.go b/test/integration/framework/etcd_utils.go index a46bd7bac2d..c3f8bd0c209 100644 --- a/test/integration/framework/etcd_utils.go +++ b/test/integration/framework/etcd_utils.go @@ -22,9 +22,10 @@ import ( "fmt" "math/rand" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" 
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/master" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" "github.com/coreos/go-etcd/etcd" "github.com/golang/glog" @@ -41,8 +42,8 @@ func NewEtcdClient() *etcd.Client { return etcd.NewClient([]string{}) } -func NewEtcdStorage() (tools.StorageInterface, error) { - return master.NewEtcdStorage(NewEtcdClient(), testapi.Version(), etcdtest.PathPrefix()) +func NewEtcdStorage() (storage.Interface, error) { + return master.NewEtcdStorage(NewEtcdClient(), latest.InterfacesFor, testapi.Version(), etcdtest.PathPrefix()) } func RequireEtcd() { diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go index 719972a126a..eb08d72e4ea 100644 --- a/test/integration/framework/master_utils.go +++ b/test/integration/framework/master_utils.go @@ -26,16 +26,18 @@ import ( "time" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/client/record" "github.com/GoogleCloudPlatform/kubernetes/pkg/controller/replication" + explatest "github.com/GoogleCloudPlatform/kubernetes/pkg/expapi/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" "github.com/GoogleCloudPlatform/kubernetes/pkg/master" - "github.com/GoogleCloudPlatform/kubernetes/pkg/tools" + "github.com/GoogleCloudPlatform/kubernetes/pkg/storage" "github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcdtest" 
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/admission/admit" "github.com/golang/glog" @@ -72,7 +74,7 @@ type MasterComponents struct { // Used to stop master components individually, and via MasterComponents.Stop once sync.Once // Kubernetes etcd storage, has embedded etcd client - EtcdStorage tools.StorageInterface + EtcdStorage storage.Interface } // Config is a struct of configuration directives for NewMasterComponents. @@ -119,28 +121,36 @@ func NewMasterComponents(c *Config) *MasterComponents { } // startMasterOrDie starts a kubernetes master and an httpserver to handle api requests -func startMasterOrDie(masterConfig *master.Config) (*master.Master, *httptest.Server, tools.StorageInterface) { +func startMasterOrDie(masterConfig *master.Config) (*master.Master, *httptest.Server, storage.Interface) { var m *master.Master s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { m.Handler.ServeHTTP(w, req) })) - var etcdStorage tools.StorageInterface + var etcdStorage storage.Interface var err error if masterConfig == nil { - etcdStorage, err = master.NewEtcdStorage(NewEtcdClient(), "", etcdtest.PathPrefix()) + etcdClient := NewEtcdClient() + etcdStorage, err = master.NewEtcdStorage(etcdClient, latest.InterfacesFor, latest.Version, etcdtest.PathPrefix()) if err != nil { glog.Fatalf("Failed to create etcd storage for master %v", err) } + expEtcdStorage, err := master.NewEtcdStorage(etcdClient, explatest.InterfacesFor, explatest.Version, etcdtest.PathPrefix()) + if err != nil { + glog.Fatalf("Failed to create etcd storage for master %v", err) + } + masterConfig = &master.Config{ - DatabaseStorage: etcdStorage, - KubeletClient: client.FakeKubeletClient{}, - EnableLogsSupport: false, - EnableProfiling: true, - EnableUISupport: false, - APIPrefix: "/api", - Authorizer: apiserver.NewAlwaysAllowAuthorizer(), - AdmissionControl: admit.NewAlwaysAdmit(), + DatabaseStorage: etcdStorage, + ExpDatabaseStorage: expEtcdStorage, + 
KubeletClient: client.FakeKubeletClient{}, + EnableLogsSupport: false, + EnableProfiling: true, + EnableUISupport: false, + APIPrefix: "/api", + ExpAPIPrefix: "/experimental", + Authorizer: apiserver.NewAlwaysAllowAuthorizer(), + AdmissionControl: admit.NewAlwaysAdmit(), } } else { etcdStorage = masterConfig.DatabaseStorage @@ -258,20 +268,28 @@ func StartPods(numPods int, host string, restClient *client.Client) error { // TODO: Merge this into startMasterOrDie. func RunAMaster(t *testing.T) (*master.Master, *httptest.Server) { - etcdStorage, err := master.NewEtcdStorage(NewEtcdClient(), testapi.Version(), etcdtest.PathPrefix()) + etcdClient := NewEtcdClient() + etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.InterfacesFor, testapi.Version(), etcdtest.PathPrefix()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + expEtcdStorage, err := master.NewEtcdStorage(etcdClient, explatest.InterfacesFor, explatest.Version, etcdtest.PathPrefix()) if err != nil { t.Fatalf("unexpected error: %v", err) } m := master.New(&master.Config{ - DatabaseStorage: etcdStorage, - KubeletClient: client.FakeKubeletClient{}, - EnableLogsSupport: false, - EnableProfiling: true, - EnableUISupport: false, - APIPrefix: "/api", - Authorizer: apiserver.NewAlwaysAllowAuthorizer(), - AdmissionControl: admit.NewAlwaysAdmit(), + DatabaseStorage: etcdStorage, + ExpDatabaseStorage: expEtcdStorage, + KubeletClient: client.FakeKubeletClient{}, + EnableLogsSupport: false, + EnableProfiling: true, + EnableUISupport: false, + APIPrefix: "/api", + ExpAPIPrefix: "/experimental", + EnableExp: true, + Authorizer: apiserver.NewAlwaysAllowAuthorizer(), + AdmissionControl: admit.NewAlwaysAdmit(), }) s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { diff --git a/test/integration/master_test.go b/test/integration/master_test.go new file mode 100644 index 00000000000..dd63c6b4881 --- /dev/null +++ b/test/integration/master_test.go @@ -0,0 +1,39 @@ +// 
+build integration,!no-etcd + +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "net/http" + "testing" + + "github.com/GoogleCloudPlatform/kubernetes/test/integration/framework" +) + +func TestExperimentalPrefix(t *testing.T) { + _, s := framework.RunAMaster(t) + defer s.Close() + + resp, err := http.Get(s.URL + "/experimental/") + if err != nil { + t.Fatalf("unexpected error getting experimental prefix: %v", err) + } + if resp.StatusCode != http.StatusOK { + t.Fatalf("got status %v instead of 200 OK", resp.StatusCode) + } +} diff --git a/test/integration/service_account_test.go b/test/integration/service_account_test.go index b768f0e53ec..a45fd760f2f 100644 --- a/test/integration/service_account_test.go +++ b/test/integration/service_account_test.go @@ -33,6 +33,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/auth/authenticator" "github.com/GoogleCloudPlatform/kubernetes/pkg/auth/authenticator/bearertoken" @@ -340,7 +341,7 @@ func startServiceAccountTestServer(t *testing.T) (*client.Client, client.Config, deleteAllEtcdKeys() // Etcd - etcdStorage, err := master.NewEtcdStorage(newEtcdClient(), testapi.Version(), etcdtest.PathPrefix()) + 
etcdStorage, err := master.NewEtcdStorage(newEtcdClient(), latest.InterfacesFor, testapi.Version(), etcdtest.PathPrefix()) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/test/integration/utils.go b/test/integration/utils.go index f193abaa470..4a7363071c8 100644 --- a/test/integration/utils.go +++ b/test/integration/utils.go @@ -25,6 +25,7 @@ import ( "net/http/httptest" "testing" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi" "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" @@ -67,7 +68,7 @@ func deleteAllEtcdKeys() { } func runAMaster(t *testing.T) (*master.Master, *httptest.Server) { - etcdStorage, err := master.NewEtcdStorage(newEtcdClient(), testapi.Version(), etcdtest.PathPrefix()) + etcdStorage, err := master.NewEtcdStorage(newEtcdClient(), latest.InterfacesFor, testapi.Version(), etcdtest.PathPrefix()) if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/third_party/golang/template/exec.go b/third_party/golang/template/exec.go new file mode 100644 index 00000000000..11fef63f486 --- /dev/null +++ b/third_party/golang/template/exec.go @@ -0,0 +1,89 @@ +package template +import ( + "reflect" + "fmt" +) + + +var ( + errorType = reflect.TypeOf((*error)(nil)).Elem() + fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() +) + + +// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. +// We indirect through pointers and empty interfaces (only) because +// non-empty interfaces have methods we might need. 
+func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { + for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { + if v.IsNil() { + return v, true + } + if v.Kind() == reflect.Interface && v.NumMethod() > 0 { + break + } + } + return v, false +} + +// printableValue returns the, possibly indirected, interface value inside v that +// is best for a call to formatted printer. +func printableValue(v reflect.Value) (interface{}, bool) { + if v.Kind() == reflect.Ptr { + v, _ = indirect(v) // fmt.Fprint handles nil. + } + if !v.IsValid() { + return "", true + } + + if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { + if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { + v = v.Addr() + } else { + switch v.Kind() { + case reflect.Chan, reflect.Func: + return nil, false + } + } + } + return v.Interface(), true +} + +// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. +func canBeNil(typ reflect.Type) bool { + switch typ.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return true + } + return false +} + +// isTrue reports whether the value is 'true', in the sense of not the zero of its type, +// and whether the value has a meaningful truth value. +func isTrue(val reflect.Value) (truth, ok bool) { + if !val.IsValid() { + // Something like var x interface{}, never set. It's a form of nil. 
+ return false, true + } + switch val.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + truth = val.Len() > 0 + case reflect.Bool: + truth = val.Bool() + case reflect.Complex64, reflect.Complex128: + truth = val.Complex() != 0 + case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: + truth = !val.IsNil() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + truth = val.Int() != 0 + case reflect.Float32, reflect.Float64: + truth = val.Float() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + truth = val.Uint() != 0 + case reflect.Struct: + truth = true // Struct values are always true. + default: + return + } + return truth, true +} \ No newline at end of file diff --git a/third_party/golang/template/funcs.go b/third_party/golang/template/funcs.go new file mode 100644 index 00000000000..27a008b0a7e --- /dev/null +++ b/third_party/golang/template/funcs.go @@ -0,0 +1,599 @@ +//This package is copied from Go library text/template. +//The original private functions eq, ge, gt, le, lt, and ne +//are exported as public functions. +package template + +import ( + "bytes" + "errors" + "fmt" + "io" + "net/url" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +var Equal = eq +var GreaterEqual = ge +var Greater = gt +var LessEqual = le +var Less = lt +var NotEqual = ne + +// FuncMap is the type of the map defining the mapping from names to functions. +// Each function must have either a single return value, or two return values of +// which the second has type error. In that case, if the second (error) +// return value evaluates to non-nil during execution, execution terminates and +// Execute returns that error. 
+type FuncMap map[string]interface{} + +var builtins = FuncMap{ + "and": and, + "call": call, + "html": HTMLEscaper, + "index": index, + "js": JSEscaper, + "len": length, + "not": not, + "or": or, + "print": fmt.Sprint, + "printf": fmt.Sprintf, + "println": fmt.Sprintln, + "urlquery": URLQueryEscaper, + + // Comparisons + "eq": eq, // == + "ge": ge, // >= + "gt": gt, // > + "le": le, // <= + "lt": lt, // < + "ne": ne, // != +} + +var builtinFuncs = createValueFuncs(builtins) + +// createValueFuncs turns a FuncMap into a map[string]reflect.Value +func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { + m := make(map[string]reflect.Value) + addValueFuncs(m, funcMap) + return m +} + +// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. +func addValueFuncs(out map[string]reflect.Value, in FuncMap) { + for name, fn := range in { + v := reflect.ValueOf(fn) + if v.Kind() != reflect.Func { + panic("value for " + name + " not a function") + } + if !goodFunc(v.Type()) { + panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) + } + out[name] = v + } +} + +// AddFuncs adds to values the functions in funcs. It does no checking of the input - +// call addValueFuncs first. +func addFuncs(out, in FuncMap) { + for name, fn := range in { + out[name] = fn + } +} + +// goodFunc checks that the function or method has the right result signature. +func goodFunc(typ reflect.Type) bool { + // We allow functions with 1 result or 2 results where the second is an error. + switch { + case typ.NumOut() == 1: + return true + case typ.NumOut() == 2 && typ.Out(1) == errorType: + return true + } + return false +} + +// findFunction looks for a function in the template, and global map. +func findFunction(name string) (reflect.Value, bool) { + if fn := builtinFuncs[name]; fn.IsValid() { + return fn, true + } + return reflect.Value{}, false +} + +// Indexing. 
+ +// index returns the result of indexing its first argument by the following +// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each +// indexed item must be a map, slice, or array. +func index(item interface{}, indices ...interface{}) (interface{}, error) { + v := reflect.ValueOf(item) + for _, i := range indices { + index := reflect.ValueOf(i) + var isNil bool + if v, isNil = indirect(v); isNil { + return nil, fmt.Errorf("index of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.String: + var x int64 + switch index.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x = index.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x = int64(index.Uint()) + default: + return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) + } + if x < 0 || x >= int64(v.Len()) { + return nil, fmt.Errorf("index out of range: %d", x) + } + v = v.Index(int(x)) + case reflect.Map: + if !index.IsValid() { + index = reflect.Zero(v.Type().Key()) + } + if !index.Type().AssignableTo(v.Type().Key()) { + return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) + } + if x := v.MapIndex(index); x.IsValid() { + v = x + } else { + v = reflect.Zero(v.Type().Elem()) + } + default: + return nil, fmt.Errorf("can't index item of type %s", v.Type()) + } + } + return v.Interface(), nil +} + +// Length + +// length returns the length of the item, with an error if it has no defined length. +func length(item interface{}) (int, error) { + v, isNil := indirect(reflect.ValueOf(item)) + if isNil { + return 0, fmt.Errorf("len of nil pointer") + } + switch v.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: + return v.Len(), nil + } + return 0, fmt.Errorf("len of type %s", v.Type()) +} + +// Function invocation + +// call returns the result of evaluating the first argument as a function. 
+// The function must return 1 result, or 2 results, the second of which is an error. +func call(fn interface{}, args ...interface{}) (interface{}, error) { + v := reflect.ValueOf(fn) + typ := v.Type() + if typ.Kind() != reflect.Func { + return nil, fmt.Errorf("non-function of type %s", typ) + } + if !goodFunc(typ) { + return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) + } + numIn := typ.NumIn() + var dddType reflect.Type + if typ.IsVariadic() { + if len(args) < numIn-1 { + return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) + } + dddType = typ.In(numIn - 1).Elem() + } else { + if len(args) != numIn { + return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) + } + } + argv := make([]reflect.Value, len(args)) + for i, arg := range args { + value := reflect.ValueOf(arg) + // Compute the expected type. Clumsy because of variadics. + var argType reflect.Type + if !typ.IsVariadic() || i < numIn-1 { + argType = typ.In(i) + } else { + argType = dddType + } + if !value.IsValid() && canBeNil(argType) { + value = reflect.Zero(argType) + } + if !value.Type().AssignableTo(argType) { + return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) + } + argv[i] = value + } + result := v.Call(argv) + if len(result) == 2 && !result[1].IsNil() { + return result[0].Interface(), result[1].Interface().(error) + } + return result[0].Interface(), nil +} + +// Boolean logic. + +func truth(a interface{}) bool { + t, _ := isTrue(reflect.ValueOf(a)) + return t +} + +// and computes the Boolean AND of its arguments, returning +// the first false argument it encounters, or the last argument. 
+func and(arg0 interface{}, args ...interface{}) interface{} { + if !truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if !truth(arg0) { + break + } + } + return arg0 +} + +// or computes the Boolean OR of its arguments, returning +// the first true argument it encounters, or the last argument. +func or(arg0 interface{}, args ...interface{}) interface{} { + if truth(arg0) { + return arg0 + } + for i := range args { + arg0 = args[i] + if truth(arg0) { + break + } + } + return arg0 +} + +// not returns the Boolean negation of its argument. +func not(arg interface{}) (truth bool) { + truth, _ = isTrue(reflect.ValueOf(arg)) + return !truth +} + +// Comparison. + +// TODO: Perhaps allow comparison between signed and unsigned integers. + +var ( + errBadComparisonType = errors.New("invalid type for comparison") + errBadComparison = errors.New("incompatible types for comparison") + errNoComparison = errors.New("missing argument for comparison") +) + +type kind int + +const ( + invalidKind kind = iota + boolKind + complexKind + intKind + floatKind + integerKind + stringKind + uintKind +) + +func basicKind(v reflect.Value) (kind, error) { + switch v.Kind() { + case reflect.Bool: + return boolKind, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intKind, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintKind, nil + case reflect.Float32, reflect.Float64: + return floatKind, nil + case reflect.Complex64, reflect.Complex128: + return complexKind, nil + case reflect.String: + return stringKind, nil + } + return invalidKind, errBadComparisonType +} + +// eq evaluates the comparison a == b || a == c || ... 
+func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { + v1 := reflect.ValueOf(arg1) + k1, err := basicKind(v1) + if err != nil { + return false, err + } + if len(arg2) == 0 { + return false, errNoComparison + } + for _, arg := range arg2 { + v2 := reflect.ValueOf(arg) + k2, err := basicKind(v2) + if err != nil { + return false, err + } + truth := false + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. + switch { + case k1 == intKind && k2 == uintKind: + truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() + case k1 == uintKind && k2 == intKind: + truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) + default: + return false, errBadComparison + } + } else { + switch k1 { + case boolKind: + truth = v1.Bool() == v2.Bool() + case complexKind: + truth = v1.Complex() == v2.Complex() + case floatKind: + truth = v1.Float() == v2.Float() + case intKind: + truth = v1.Int() == v2.Int() + case stringKind: + truth = v1.String() == v2.String() + case uintKind: + truth = v1.Uint() == v2.Uint() + default: + panic("invalid kind") + } + } + if truth { + return true, nil + } + } + return false, nil +} + +// ne evaluates the comparison a != b. +func ne(arg1, arg2 interface{}) (bool, error) { + // != is the inverse of ==. + equal, err := eq(arg1, arg2) + return !equal, err +} + +// lt evaluates the comparison a < b. +func lt(arg1, arg2 interface{}) (bool, error) { + v1 := reflect.ValueOf(arg1) + k1, err := basicKind(v1) + if err != nil { + return false, err + } + v2 := reflect.ValueOf(arg2) + k2, err := basicKind(v2) + if err != nil { + return false, err + } + truth := false + if k1 != k2 { + // Special case: Can compare integer values regardless of type's sign. 
+ switch { + case k1 == intKind && k2 == uintKind: + truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint() + case k1 == uintKind && k2 == intKind: + truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int()) + default: + return false, errBadComparison + } + } else { + switch k1 { + case boolKind, complexKind: + return false, errBadComparisonType + case floatKind: + truth = v1.Float() < v2.Float() + case intKind: + truth = v1.Int() < v2.Int() + case stringKind: + truth = v1.String() < v2.String() + case uintKind: + truth = v1.Uint() < v2.Uint() + default: + panic("invalid kind") + } + } + return truth, nil +} + +// le evaluates the comparison <= b. +func le(arg1, arg2 interface{}) (bool, error) { + // <= is < or ==. + lessThan, err := lt(arg1, arg2) + if lessThan || err != nil { + return lessThan, err + } + return eq(arg1, arg2) +} + +// gt evaluates the comparison a > b. +func gt(arg1, arg2 interface{}) (bool, error) { + // > is the inverse of <=. + lessOrEqual, err := le(arg1, arg2) + if err != nil { + return false, err + } + return !lessOrEqual, nil +} + +// ge evaluates the comparison a >= b. +func ge(arg1, arg2 interface{}) (bool, error) { + // >= is the inverse of <. + lessThan, err := lt(arg1, arg2) + if err != nil { + return false, err + } + return !lessThan, nil +} + +// HTML escaping. + +var ( + htmlQuot = []byte(""") // shorter than """ + htmlApos = []byte("'") // shorter than "'" and apos was not in HTML until HTML5 + htmlAmp = []byte("&") + htmlLt = []byte("<") + htmlGt = []byte(">") +) + +// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b. 
+func HTMLEscape(w io.Writer, b []byte) { + last := 0 + for i, c := range b { + var html []byte + switch c { + case '"': + html = htmlQuot + case '\'': + html = htmlApos + case '&': + html = htmlAmp + case '<': + html = htmlLt + case '>': + html = htmlGt + default: + continue + } + w.Write(b[last:i]) + w.Write(html) + last = i + 1 + } + w.Write(b[last:]) +} + +// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s. +func HTMLEscapeString(s string) string { + // Avoid allocation if we can. + if strings.IndexAny(s, `'"&<>`) < 0 { + return s + } + var b bytes.Buffer + HTMLEscape(&b, []byte(s)) + return b.String() +} + +// HTMLEscaper returns the escaped HTML equivalent of the textual +// representation of its arguments. +func HTMLEscaper(args ...interface{}) string { + return HTMLEscapeString(evalArgs(args)) +} + +// JavaScript escaping. + +var ( + jsLowUni = []byte(`\u00`) + hex = []byte("0123456789ABCDEF") + + jsBackslash = []byte(`\\`) + jsApos = []byte(`\'`) + jsQuot = []byte(`\"`) + jsLt = []byte(`\x3C`) + jsGt = []byte(`\x3E`) +) + +// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b. +func JSEscape(w io.Writer, b []byte) { + last := 0 + for i := 0; i < len(b); i++ { + c := b[i] + + if !jsIsSpecial(rune(c)) { + // fast path: nothing to do + continue + } + w.Write(b[last:i]) + + if c < utf8.RuneSelf { + // Quotes, slashes and angle brackets get quoted. + // Control characters get written as \u00XX. + switch c { + case '\\': + w.Write(jsBackslash) + case '\'': + w.Write(jsApos) + case '"': + w.Write(jsQuot) + case '<': + w.Write(jsLt) + case '>': + w.Write(jsGt) + default: + w.Write(jsLowUni) + t, b := c>>4, c&0x0f + w.Write(hex[t : t+1]) + w.Write(hex[b : b+1]) + } + } else { + // Unicode rune. 
+ r, size := utf8.DecodeRune(b[i:]) + if unicode.IsPrint(r) { + w.Write(b[i : i+size]) + } else { + fmt.Fprintf(w, "\\u%04X", r) + } + i += size - 1 + } + last = i + 1 + } + w.Write(b[last:]) +} + +// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s. +func JSEscapeString(s string) string { + // Avoid allocation if we can. + if strings.IndexFunc(s, jsIsSpecial) < 0 { + return s + } + var b bytes.Buffer + JSEscape(&b, []byte(s)) + return b.String() +} + +func jsIsSpecial(r rune) bool { + switch r { + case '\\', '\'', '"', '<', '>': + return true + } + return r < ' ' || utf8.RuneSelf <= r +} + +// JSEscaper returns the escaped JavaScript equivalent of the textual +// representation of its arguments. +func JSEscaper(args ...interface{}) string { + return JSEscapeString(evalArgs(args)) +} + +// URLQueryEscaper returns the escaped value of the textual representation of +// its arguments in a form suitable for embedding in a URL query. +func URLQueryEscaper(args ...interface{}) string { + return url.QueryEscape(evalArgs(args)) +} + +// evalArgs formats the list of arguments into a string. It is therefore equivalent to +// fmt.Sprint(args...) +// except that each argument is indirected (if a pointer), as required, +// using the same rules as the default string evaluation during template +// execution. +func evalArgs(args []interface{}) string { + ok := false + var s string + // Fast path for simple common case. + if len(args) == 1 { + s, ok = args[0].(string) + } + if !ok { + for i, arg := range args { + a, ok := printableValue(reflect.ValueOf(arg)) + if ok { + args[i] = a + } // else left fmt do its thing + } + s = fmt.Sprint(args...) + } + return s +}