fixed conflict with the current upstream master
Commit 2a6546cada

.travis.yml (35 changes)
@@ -5,36 +5,21 @@ language: go

matrix:
  include:
    - go: 1.4
      env:
        - KUBE_TEST_API_VERSIONS="v1"
          KUBE_TEST_ETCD_PREFIXES="registry"
    - go: 1.3
      env:
        - KUBE_TEST_API_VERSIONS="v1beta3"
          KUBE_TEST_ETCD_PREFIXES="kubernetes.io/registry"

install:
  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
  - go get github.com/mattn/goveralls
  - go get github.com/tools/godep
  - ./hack/travis/install-etcd.sh
  - ./hack/build-go.sh

script:
  - ./hack/verify-gofmt.sh
  - ./hack/verify-boilerplate.sh
  - ./hack/verify-description.sh
  - ./hack/travis/install-std-race.sh
  - ./hack/build-go.sh
  - GOPATH=$PWD/Godeps/_workspace:$GOPATH go install ./...
  - PATH=$HOME/gopath/bin:./third_party/etcd:$PATH ./hack/verify-gendocs.sh
  - PATH=$HOME/gopath/bin:./third_party/etcd:$PATH ./hack/verify-swagger-spec.sh

before_script:
  - npm install karma karma-junit-reporter karma-phantomjs-launcher karma-jasmine

script:
  - KUBE_RACE="-race" KUBE_COVER="y" KUBE_GOVERALLS_BIN="$HOME/gopath/bin/goveralls" KUBE_TIMEOUT='-timeout 300s' KUBE_COVERPROCS=8 KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS}" KUBE_TEST_ETCD_PREFIXES="${KUBE_TEST_ETCD_PREFIXES}" ./hack/test-go.sh -- -p=2
  - node_modules/karma/bin/karma start www/master/karma.conf.js --single-run --browsers PhantomJS
  - PATH=$HOME/gopath/bin:./third_party/etcd:$PATH ./hack/test-cmd.sh
  - PATH=$HOME/gopath/bin:./third_party/etcd:$PATH KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS}" KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=4 LOG_LEVEL=4 ./hack/test-integration.sh
  - PATH=$HOME/gopath/bin:./third_party/etcd:$PATH ./hack/test-update-storage-objects.sh
  - PATH=$GOPATH/bin:$PATH ./hack/verify-generated-conversions.sh
  - PATH=$GOPATH/bin:$PATH ./hack/verify-generated-deep-copies.sh
  - PATH=$GOPATH/bin:./third_party/etcd:$PATH ./hack/verify-gendocs.sh
  - PATH=$GOPATH/bin:./third_party/etcd:$PATH ./hack/verify-swagger-spec.sh
  - godep go test ./cmd/mungedocs

notifications:
  irc: "chat.freenode.net#google-containers"
  irc: "chat.freenode.net#kubernetes-dev"
@@ -1,10 +1,10 @@

# Kubernetes Overview

See the [user overview](docs/overview.md) for an introduction to Kubernetes and its core concepts.
See the [user guide overview](docs/user-guide/overview.md) for an introduction to Kubernetes and its core concepts.

See the [design overview](docs/design) for an overview of the system design.

See the [API overview](docs/api.md) and [conventions](docs/api-conventions.md) for an overview of the API design.
See the [API overview](docs/api.md) and [conventions](docs/devel/api-conventions.md) for an overview of the API design.
Godeps/Godeps.json (8 changes, generated)

@@ -195,7 +195,7 @@
		},
		{
			"ImportPath": "github.com/elazarl/go-bindata-assetfs",
			"Rev": "ae4665cf2d188c65764c73fe4af5378acc549510"
			"Rev": "c57a80f1ab2ad67bafa83f5fd0b4c2ecbd253dd5"
		},
		{
			"ImportPath": "github.com/emicklei/go-restful",
@@ -496,15 +496,15 @@
		},
		{
			"ImportPath": "github.com/stretchr/testify/assert",
			"Rev": "7e4a149930b09fe4c2b134c50ce637457ba6e966"
			"Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff"
		},
		{
			"ImportPath": "github.com/stretchr/testify/mock",
			"Rev": "7e4a149930b09fe4c2b134c50ce637457ba6e966"
			"Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff"
		},
		{
			"ImportPath": "github.com/stretchr/testify/require",
			"Rev": "7e4a149930b09fe4c2b134c50ce637457ba6e966"
			"Rev": "089c7181b8c728499929ff09b62d3fdd8df8adff"
		},
		{
			"ImportPath": "github.com/syndtr/gocapability/capability",
Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/README.md (38 changes, generated, vendored)

@@ -1,16 +1,44 @@
go-bindata-http
===============
# go-bindata-assetfs

Serve embedded files from [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) with `net/http`.

[GoDoc](http://godoc.org/github.com/elazarl/go-bindata-assetfs)

After running
### Installation

    $ go-bindata data/...
Install with

Use
    $ go get github.com/jteeuwen/go-bindata/...
    $ go get github.com/elazarl/go-bindata-assetfs/...

### Creating embedded data

Usage is identical to [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) usage,
instead of running `go-bindata` run `go-bindata-assetfs`.

The tool will create a `bindata_assetfs.go` file, which contains the embedded data.

A typical use case is

    $ go-bindata-assetfs data/...

### Using assetFS in your code

The generated file provides an `assetFS()` function that returns a `http.Filesystem`
wrapping the embedded files. What you usually want to do is:

    http.Handle("/", http.FileServer(assetFS()))

This would run an HTTP server serving the embedded files.

## Without running binary tool

You can always just run the `go-bindata` tool, and then

use

    import "github.com/elazarl/go-bindata-assetfs"
    ...
    http.Handle("/",
        http.FileServer(
            &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"}))
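For context, a minimal sketch of how the generated helper wires into a server. It assumes `go-bindata-assetfs data/...` has been run in the same package, so the generated `assetFS()` (and its `Asset`/`AssetDir` tables) exist; the listen address is arbitrary:

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	// assetFS() comes from the generated bindata_assetfs.go and wraps
	// the embedded files in an http.FileSystem.
	http.Handle("/", http.FileServer(assetFS()))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```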
Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/assetfs.go (12 changes, generated, vendored)

@@ -3,7 +3,6 @@ package assetfs
import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
@@ -13,6 +12,10 @@ import (
	"time"
)

var (
	fileTimestamp = time.Now()
)

// FakeFile implements os.FileInfo interface for a given path and size
type FakeFile struct {
	// Path is the path of this file
@@ -37,7 +40,7 @@ func (f *FakeFile) Mode() os.FileMode {
}

func (f *FakeFile) ModTime() time.Time {
	return time.Unix(0, 0)
	return fileTimestamp
}

func (f *FakeFile) Size() int64 {
@@ -70,6 +73,10 @@ func (f *AssetFile) Readdir(count int) ([]os.FileInfo, error) {
	return nil, errors.New("not a directory")
}

func (f *AssetFile) Size() int64 {
	return f.FakeFile.Size()
}

func (f *AssetFile) Stat() (os.FileInfo, error) {
	return f, nil
}
@@ -98,7 +105,6 @@ func NewAssetDirectory(name string, children []string, fs *AssetFS) *AssetDirect
}

func (f *AssetDirectory) Readdir(count int) ([]os.FileInfo, error) {
	fmt.Println(f, count)
	if count <= 0 {
		return f.Children, nil
	}
Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go (62 changes, generated, vendored, new file)

@@ -0,0 +1,62 @@
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"os"
	"os/exec"
)

const bindatafile = "bindata.go"

func main() {
	if _, err := exec.LookPath("go-bindata"); err != nil {
		fmt.Println("Cannot find go-bindata executable in path")
		fmt.Println("Maybe you need: go get github.com/elazarl/go-bindata-assetfs/...")
		os.Exit(1)
	}
	cmd := exec.Command("go-bindata", os.Args[1:]...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
	in, err := os.Open(bindatafile)
	if err != nil {
		fmt.Fprintln(os.Stderr, "Cannot read", bindatafile, err)
		return
	}
	out, err := os.Create("bindata_assetfs.go")
	if err != nil {
		fmt.Fprintln(os.Stderr, "Cannot write 'bindata_assetfs.go'", err)
		return
	}
	r := bufio.NewReader(in)
	done := false
	for line, isPrefix, err := r.ReadLine(); err == nil; line, isPrefix, err = r.ReadLine() {
		line = append(line, '\n')
		if _, err := out.Write(line); err != nil {
			fmt.Fprintln(os.Stderr, "Cannot write to 'bindata_assetfs.go'", err)
			return
		}
		if !done && !isPrefix && bytes.HasPrefix(line, []byte("import (")) {
			fmt.Fprintln(out, "\t\"github.com/elazarl/go-bindata-assetfs\"")
			done = true
		}
	}
	fmt.Fprintln(out, `
func assetFS() *assetfs.AssetFS {
	for k := range _bintree.Children {
		return &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: k}
	}
	panic("unreachable")
}`)
	// Close files BEFORE remove calls (don't use defer).
	in.Close()
	out.Close()
	if err := os.Remove(bindatafile); err != nil {
		fmt.Fprintln(os.Stderr, "Cannot remove", bindatafile, err)
	}
}
Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go (7 changes, generated, vendored)

@@ -84,6 +84,11 @@ func CallerInfo() []string {
			return nil
		}

		// This is a huge edge case, but it will panic if this is the case, see #180
		if file == "<autogenerated>" {
			break
		}

		parts := strings.Split(file, "/")
		dir := parts[len(parts)-2]
		file = parts[len(parts)-1]
@@ -296,7 +301,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
	}

	if !success {
		Fail(t, "Expected not to be nil.", msgAndArgs...)
		Fail(t, "Expected value not to be nil.", msgAndArgs...)
	}

	return success
Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions_test.go (22 changes, generated, vendored)

@@ -2,6 +2,7 @@ package assert

import (
	"errors"
	"io"
	"math"
	"regexp"
	"testing"
@@ -789,3 +790,24 @@ func TestRegexp(t *testing.T) {
		True(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str))
	}
}

func testAutogeneratedFunction() {
	defer func() {
		if err := recover(); err == nil {
			panic("did not panic")
		}
		CallerInfo()
	}()
	t := struct {
		io.Closer
	}{}
	var c io.Closer
	c = t
	c.Close()
}

func TestCallerInfoWithAutogeneratedFunctions(t *testing.T) {
	NotPanics(t, func() {
		testAutogeneratedFunction()
	})
}
README.md (95 changes)

@@ -2,7 +2,7 @@

[](https://godoc.org/github.com/GoogleCloudPlatform/kubernetes) [](https://travis-ci.org/GoogleCloudPlatform/kubernetes) [](https://coveralls.io/r/GoogleCloudPlatform/kubernetes)

### I am ...
### Are you ...
* Interested in learning more about using Kubernetes? Please see our user-facing documentation on [kubernetes.io](http://kubernetes.io)
* Interested in hacking on the core Kubernetes code base? Keep reading!

@@ -25,71 +25,78 @@ Kubernetes builds upon a [decade and a half of experience at Google running prod
### Kubernetes can run anywhere!
However, initial development was done on GCE and so our instructions and scripts are built around that. If you make it work on other infrastructure please let us know and contribute instructions/code.

### Kubernetes is in pre-production beta!
While the concepts and architecture in Kubernetes represent years of experience designing and building large-scale cluster managers at Google, the Kubernetes project is still under heavy development. Expect bugs, design and API changes as we bring it to a stable, production product over the coming year.
### Kubernetes is ready for Production!
With the [1.0.1 release](https://github.com/GoogleCloudPlatform/kubernetes/releases/tag/v1.0.1) Kubernetes is ready to serve your production workloads.

## Concepts

Kubernetes works with the following concepts:

**Clusters** are the compute resources on top of which your containers are built. Kubernetes can run anywhere! See the [Getting Started Guides](docs/getting-started-guides) for instructions for a variety of services.
[**Cluster**](docs/admin/README.md)
: A cluster is a set of physical or virtual machines and other infrastructure resources used by Kubernetes to run your applications. Kubernetes can run anywhere! See the [Getting Started Guides](docs/getting-started-guides) for instructions for a variety of services.

**Pods** are a colocated group of Docker containers with shared volumes. They're the smallest deployable units that can be created, scheduled, and managed with Kubernetes. Pods can be created individually, but it's recommended that you use a replication controller even if creating a single pod. [More about pods](docs/pods.md).
[**Node**](docs/admin/node.md)
: A node is a physical or virtual machine running Kubernetes, onto which pods can be scheduled.

**Replication controllers** manage the lifecycle of pods. They ensure that a specified number of pods are running
at any given time, by creating or killing pods as required. [More about replication controllers](docs/replication-controller.md).
[**Pod**](docs/user-guide/pods.md)
: Pods are a colocated group of application containers with shared volumes. They're the smallest deployable units that can be created, scheduled, and managed with Kubernetes. Pods can be created individually, but it's recommended that you use a replication controller even if creating a single pod.

**Services** provide a single, stable name and address for a set of pods.
They act as basic load balancers. [More about services](docs/services.md).
[**Replication controller**](docs/user-guide/replication-controller.md)
: Replication controllers manage the lifecycle of pods. They ensure that a specified number of pods are running
at any given time, by creating or killing pods as required.

**Labels** are used to organize and select groups of objects based on key:value pairs. [More about labels](docs/labels.md).
[**Service**](docs/user-guide/services.md)
: Services provide a single, stable name and address for a set of pods.
They act as basic load balancers.

[**Label**](docs/user-guide/labels.md)
: Labels are used to organize and select groups of objects based on key:value pairs.

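To make these concepts concrete, an illustrative v1 pod manifest tying a label to a single-container pod (all names and the image are placeholders):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app: nginx        # key:value label used to organize and select pods
spec:
  containers:
  - name: nginx
    image: nginx      # placeholder container image
    ports:
    - containerPort: 80
```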
## Documentation

Kubernetes documentation is organized into several categories.

- **Getting Started Guides**
  - for people who want to create a kubernetes cluster
  - in [docs/getting-started-guides](docs/getting-started-guides)
- **User Documentation**
- **Getting started guides**
  - for people who want to create a Kubernetes cluster
  - in [Creating a Kubernetes Cluster](docs/getting-started-guides/README.md)
  - for people who want to port Kubernetes to a new environment
  - in [Getting Started from Scratch](docs/getting-started-guides/scratch.md)
- **User documentation**
  - for people who want to run programs on an existing Kubernetes cluster
  - in the [Kubernetes User Guide: Managing Applications](docs/user-guide/README.md)
  - the [Kubectl Command Line Interface](docs/user-guide/kubectl/kubectl.md) is a detailed reference on
    the `kubectl` CLI
  - [User FAQ](https://github.com/GoogleCloudPlatform/kubernetes/wiki/User-FAQ)
  - in [docs](docs/overview.md)
  - for people who want to run programs on kubernetes
  - describes current features of the system (with brief mentions of planned features)
- **Developer Documentation**
  - in [docs/devel](docs/devel)
  - for people who want to contribute code to kubernetes
  - covers development conventions
  - explains current architecture and project plans
- **Service Documentation**
  - in [docs/services.md](docs/services.md)
  - [Service FAQ](https://github.com/GoogleCloudPlatform/kubernetes/wiki/Services-FAQ)
  - for people who are interested in how Services work
  - details of ```kube-proxy``` iptables
  - how to wire services to the external internet
- **API documentation**
  - in [the API doc](docs/api.md)
  - and automatically generated API documentation served by the master
- **Design Documentation**
  - in [docs/design](docs/design)
  - for people who want to understand the design choices made
  - describes tradeoffs, alternative designs
  - descriptions of planned features that are too long for a github issue.
- **Walkthroughs and Examples**
  - in [examples](/examples)
  - Hands on introduction and example config files
- **Cluster administrator documentation**
  - for people who want to create a Kubernetes cluster and administer it
  - in the [Kubernetes Cluster Admin Guide](docs/admin/README.md)
- **Developer and API documentation**
  - for people who want to write programs that access the Kubernetes API, write plugins
    or extensions, or modify the core Kubernetes code
  - in the [Kubernetes Developer Guide](docs/devel/README.md)
  - see also [notes on the API](docs/api.md)
  - see also the [API object documentation](http://kubernetes.io/third_party/swagger-ui/), a
    detailed description of all fields found in the core API objects
- **Walkthroughs and examples**
  - hands-on introduction and example config files
  - in the [user guide](docs/user-guide/README.md#quick-walkthrough)
  - in the [docs/examples directory](examples/)
- **Contributions from the Kubernetes community**
  - in the [docs/contrib directory](contrib/)
- **Design documentation and design proposals**
  - for people who want to understand the design of Kubernetes, and feature proposals
  - design docs in the [Kubernetes Design Overview](docs/design/README.md) and the [docs/design directory](docs/design/)
  - proposals in the [docs/proposals directory](docs/proposals/)
- **Wiki/FAQ**
  - in [wiki](https://github.com/GoogleCloudPlatform/kubernetes/wiki)
  - includes a number of [Kubernetes community-contributed recipes](/contrib/recipes)
  - in the [wiki](https://github.com/GoogleCloudPlatform/kubernetes/wiki)
  - troubleshooting information in the [troubleshooting guide](docs/troubleshooting.md)

## Community, discussion and support

If you have questions or want to start contributing please reach out. We don't bite!

The Kubernetes team is hanging out on IRC on the [#google-containers channel on freenode.net](http://webchat.freenode.net/?channels=google-containers). This client may be overloaded from time to time. If this happens you can use any [IRC client out there](http://en.wikipedia.org/wiki/Comparison_of_Internet_Relay_Chat_clients) to talk to us.

We also have the [google-containers Google Groups mailing list](https://groups.google.com/forum/#!forum/google-containers) for questions and discussion as well as the [kubernetes-announce mailing list](https://groups.google.com/forum/#!forum/kubernetes-announce) for important announcements (low-traffic, no chatter).
Please see the [troubleshooting guide](docs/troubleshooting.md), or how to [get more help.](docs/troubleshooting.md#getting-help)

If you are a company and are looking for a more formal engagement with Google around Kubernetes and containers at Google as a whole, please fill out [this form](https://docs.google.com/a/google.com/forms/d/1_RfwC8LZU4CKe4vKq32x5xpEJI5QZ-j0ShGmZVv9cm4/viewform) and we'll be in touch.
@@ -1,10 +1,6 @@
{
  "swaggerVersion": "1.2",
  "apis": [
    {
      "path": "/api/v1beta3",
      "description": "API at /api/v1beta3 version v1beta3"
    },
    {
      "path": "/api/v1",
      "description": "API at /api/v1 version v1"
File diff suppressed because it is too large.
@@ -99,7 +99,7 @@ echo " with the ${KUBE_RELEASE_VERSION} tag. Mark it as a pre-release."
echo " 3) Upload the ${KUBE_BUILD_DIR}/kubernetes.tar.gz to GitHub"
echo " 4) Use this template for the release:"
echo ""
echo "## [Documentation](http://releases.k8s.io/${KUBE_RELEASE_VERSION}/docs)"
echo "## [Documentation](http://releases.k8s.io/${KUBE_RELEASE_VERSION}/docs/README.md)"
echo "## [Examples](http://releases.k8s.io/${KUBE_RELEASE_VERSION}/examples)"
echo "## Changes since <last release> (last PR <last PR>)"
echo ""
build/common.sh (3 changes, mode changed: normal file → executable file)

@@ -332,7 +332,7 @@ function kube::build::source_targets() {
    api
    build
    cmd
    docs/getting-started-guides
    docs
    examples
    Godeps/_workspace/src
    Godeps/Godeps.json
@@ -770,6 +770,7 @@ function kube::release::package_full_tarball() {
  cp -R "${KUBE_ROOT}/third_party/htpasswd" "${release_stage}/third_party/htpasswd"

  cp -R "${KUBE_ROOT}/examples" "${release_stage}/"
  cp -R "${KUBE_ROOT}/docs" "${release_stage}/"
  cp "${KUBE_ROOT}/README.md" "${release_stage}/"
  cp "${KUBE_ROOT}/LICENSE" "${release_stage}/"
  cp "${KUBE_ROOT}/Vagrantfile" "${release_stage}/"
@@ -89,22 +89,8 @@ if ! ($SED --version 2>&1 | grep -q GNU); then
  echo "!!! GNU sed is required. If on OS X, use 'brew install gnu-sed'."
fi

echo "+++ Versioning documentation and examples"

# Update the docs to match this version.
DOCS_TO_EDIT=(docs/README.md examples/README.md)
for DOC in "${DOCS_TO_EDIT[@]}"; do
  $SED -ri \
    -e '/<!-- BEGIN STRIP_FOR_RELEASE -->/,/<!-- END STRIP_FOR_RELEASE -->/d' \
    -e "s/HEAD/${NEW_VERSION}/" \
    "${DOC}"
done

# Update API descriptions to match this version.
$SED -ri -e "s|(releases.k8s.io)/[^/]*|\1/${NEW_VERSION}|" pkg/api/v[0-9]*/types.go

${KUBE_ROOT}/hack/run-gendocs.sh
${KUBE_ROOT}/hack/update-swagger-spec.sh
echo "+++ Running ./versionize-docs"
${KUBE_ROOT}/build/versionize-docs.sh ${NEW_VERSION}
git commit -am "Versioning docs and examples for ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}"

dochash=$(git log -n1 --format=%H)
@@ -135,57 +121,24 @@ echo "+++ Committing version change"
git add "${VERSION_FILE}"
git commit -m "Kubernetes version ${NEW_VERSION}-dev"

echo "+++ Constructing backmerge branches"

function return_to_kansas {
  git checkout -f "${current_branch}"
}
trap return_to_kansas EXIT

backmerge="v${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-merge-to-master"
backmergetmp="${backmerge}-tmp-$(date +%s)"

# Now we create a temporary branch to revert the doc commit, then
# create the backmerge branch for the convenience of the user.
git checkout -b "${backmergetmp}"
git revert "${dochash}" --no-edit
git checkout -b "${backmerge}" "${fetch_remote}/master"
git merge -s recursive -X ours "${backmergetmp}" -m "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH} merge to master"

git checkout "${current_branch}"
git branch -D "${backmergetmp}"

echo ""
echo "Success! You must now:"
echo ""
echo "- Push the tag:"
echo "  git push ${push_url} v${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}"
echo "  - Please note you are pushing the tag live BEFORE your PRs."
echo "    You need this so the builds pick up the right tag info."
echo "    You need this so the builds pick up the right tag info (and so your reviewers can see it)."
echo "    If something goes wrong further down please fix the tag!"
echo "    Either delete this tag and give up, fix the tag before your next PR,"
echo "    or find someone who can help solve the tag problem!"
echo ""

if [[ "${VERSION_PATCH}" != "0" ]]; then
  echo "- Send branch: ${current_branch} as a PR to ${release_branch} <-- NOTE THIS"
  echo "- Get someone to review and merge that PR"
  echo ""
fi

echo "- I created the branch ${backmerge} for you. What I don't know is if this is"
echo "  the latest version. If it is, AND ONLY IF IT IS, submit this branch as a pull"
echo "  request to master:"
echo ""
echo "  git push <personal> ${backmerge}"
echo ""
echo "  and get someone to approve that PR. I know this branch looks odd. The purpose of this"
echo "  branch is to get the tag for the version onto master for things like 'git describe'."
echo ""
echo "  IF THIS IS NOT THE LATEST VERSION YOU WILL CAUSE TIME TO GO BACKWARDS. DON'T DO THAT, PLEASE."
echo ""

if [[ "${VERSION_PATCH}" == "0" ]]; then
  echo "- Push the new release branch"
  echo "- Send branch: ${current_branch} as a PR to ${push_url}/master"
  echo "  For major/minor releases, this gets the branch tag merged and changes the version numbers."
  echo "- Push the new release branch:"
  echo "  git push ${push_url} ${current_branch}:${release_branch}"
else
  echo "- Send branch: ${current_branch} as a PR to ${release_branch} <-- NOTE THIS"
  echo "  Get someone to review and merge that PR"
fi
build/versionize-docs.sh (75 changes, new executable file)

@@ -0,0 +1,75 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Updates the docs to be ready to be used as release docs for a particular
# version.
# Example usage:
#   ./versionize-docs.sh v1.0.1

set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..

NEW_VERSION=${1-}

if [ "$#" -lt 1 ]; then
  echo "Usage: versionize-docs <release-version>"
  exit 1
fi

SED=sed
if which gsed &>/dev/null; then
  SED=gsed
fi
if ! ($SED --version 2>&1 | grep -q GNU); then
  echo "!!! GNU sed is required. If on OS X, use 'brew install gnu-sed'."
  exit 1
fi

echo "+++ Versioning documentation and examples"

# Update the docs to match this version.
HTML_PREVIEW_PREFIX="https://htmlpreview.github.io/\?https://github.com/GoogleCloudPlatform/kubernetes"

md_dirs=(docs examples)
mdfiles=()
for dir in "${md_dirs[@]}"; do
  mdfiles+=($( find "${dir}" -name "*.md" -type f ))
done
for doc in "${mdfiles[@]}"; do
  $SED -ri \
    -e '/<!-- BEGIN STRIP_FOR_RELEASE -->/,/<!-- END STRIP_FOR_RELEASE -->/d' \
    -e "s|(releases.k8s.io)/[^/]+|\1/${NEW_VERSION}|" \
    "${doc}"

  # Replace /HEAD in html preview links with /NEW_VERSION.
  $SED -ri -e "s|(${HTML_PREVIEW_PREFIX}/HEAD)|${HTML_PREVIEW_PREFIX}/${NEW_VERSION}|" "${doc}"

  is_versioned_tag="<!-- BEGIN MUNGE: IS_VERSIONED -->
<!-- TAG IS_VERSIONED -->
<!-- END MUNGE: IS_VERSIONED -->"
  if ! grep -q "${is_versioned_tag}" "${doc}"; then
    echo -e "\n\n${is_versioned_tag}\n\n" >> "${doc}"
  fi
done

# Update API descriptions to match this version.
$SED -ri -e "s|(releases.k8s.io)/[^/]+|\1/${NEW_VERSION}|" pkg/api/v[0-9]*/types.go

${KUBE_ROOT}/hack/run-gendocs.sh
${KUBE_ROOT}/hack/update-swagger-spec.sh
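As a usage sketch (the version tag comes from the script's own example; run from the repository root):

```sh
# Rewrite releases.k8s.io links and HEAD preview links to point at v1.0.1.
./build/versionize-docs.sh v1.0.1
```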
@@ -21,7 +21,7 @@ manifests on the master server. But still, users are discouraged from doing it
on their own - they should rather wait for a new release of
Kubernetes that will also contain new versions of add-ons.

Each add-on must specify the following label: ````kubernetes.io/cluster-service: true````.
Each add-on must specify the following label: ```kubernetes.io/cluster-service: true```.
Yaml files that do not define this label will be ignored.

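For illustration, the required label as it would appear in an add-on manifest's metadata (this matches the skydns replication controller later in this commit):

```yaml
metadata:
  labels:
    # Add-ons without this label are ignored.
    kubernetes.io/cluster-service: "true"
```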
The naming convention for Replication Controllers is
@@ -74,8 +74,8 @@ Supported environments offer the following config flags, which are used at
cluster turn-up to create the SkyDNS pods and configure the kubelets. For
example, see `cluster/gce/config-default.sh`.

```shell
ENABLE_CLUSTER_DNS=true
```sh
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
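The switch to the `${VAR:-default}` form is what makes the flag overridable from the caller's environment at turn-up time; a sketch, assuming the usual kube-up.sh entry point:

```sh
# Bring the cluster up without the DNS add-on by overriding the default.
KUBE_ENABLE_CLUSTER_DNS=false ./cluster/kube-up.sh
```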
@@ -111,13 +111,12 @@ func (ks *kube2sky) writeSkyRecord(subdomain string, data string) error {
}

// Generates skydns records for a headless service.
func (ks *kube2sky) newHeadlessService(subdomain string, service *kapi.Service, isNewStyleFormat bool) error {
func (ks *kube2sky) newHeadlessService(subdomain string, service *kapi.Service) error {
	// Create an A record for every pod in the service.
	// This record must be periodically updated.
	// Format is as follows:
	// For a service x, with pods a and b create DNS records,
	// a.x.ns.domain. and, b.x.ns.domain.
	// TODO: Handle multi-port services.
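	// Illustrative sketch (hypothetical names): for a headless service "db"
	// in namespace "default" under domain "cluster.local", pods at 10.0.0.1
	// and 10.0.0.2 yield A records roughly like
	//   <label1>.db.default.svc.cluster.local. -> 10.0.0.1
	//   <label2>.db.default.svc.cluster.local. -> 10.0.0.2
	// where each <label> is a hash of the record value (see
	// generateRecordsForHeadlessService below).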
	ks.mlock.Lock()
	defer ks.mlock.Unlock()
	key, err := kcache.MetaNamespaceKeyFunc(service)
@@ -133,7 +132,7 @@ func (ks *kube2sky) newHeadlessService(subdomain string, service *kapi.Service,
		return nil
	}
	if e, ok := e.(*kapi.Endpoints); ok {
		return ks.generateRecordsForHeadlessService(subdomain, e, service, isNewStyleFormat)
		return ks.generateRecordsForHeadlessService(subdomain, e, service)
	}
	return nil
}
@@ -148,7 +147,7 @@ func getSkyMsg(ip string, port int) *skymsg.Service {
	}
}

func (ks *kube2sky) generateRecordsForHeadlessService(subdomain string, e *kapi.Endpoints, svc *kapi.Service, isNewStyleFormat bool) error {
func (ks *kube2sky) generateRecordsForHeadlessService(subdomain string, e *kapi.Endpoints, svc *kapi.Service) error {
	for idx := range e.Subsets {
		for subIdx := range e.Subsets[idx].Addresses {
			b, err := json.Marshal(getSkyMsg(e.Subsets[idx].Addresses[subIdx].IP, 0))
@@ -163,15 +162,13 @@ func (ks *kube2sky) generateRecordsForHeadlessService(subdomain string, e *kapi.
			if err := ks.writeSkyRecord(recordKey, recordValue); err != nil {
				return err
			}
			if isNewStyleFormat {
				for portIdx := range e.Subsets[idx].Ports {
					endpointPort := &e.Subsets[idx].Ports[portIdx]
					portSegment := buildPortSegmentString(endpointPort.Name, endpointPort.Protocol)
					if portSegment != "" {
						err := ks.generateSRVRecord(subdomain, portSegment, recordLabel, recordKey, endpointPort.Port)
						if err != nil {
							return err
						}
			for portIdx := range e.Subsets[idx].Ports {
				endpointPort := &e.Subsets[idx].Ports[portIdx]
				portSegment := buildPortSegmentString(endpointPort.Name, endpointPort.Protocol)
				if portSegment != "" {
					err := ks.generateSRVRecord(subdomain, portSegment, recordLabel, recordKey, endpointPort.Port)
					if err != nil {
						return err
					}
				}
			}
@@ -200,7 +197,7 @@ func (ks *kube2sky) getServiceFromEndpoints(e *kapi.Endpoints) (*kapi.Service, e
	return nil, fmt.Errorf("got a non service object in services store %v", obj)
}

func (ks *kube2sky) addDNSUsingEndpoints(subdomain string, e *kapi.Endpoints, isNewStyleFormat bool) error {
func (ks *kube2sky) addDNSUsingEndpoints(subdomain string, e *kapi.Endpoints) error {
	ks.mlock.Lock()
	defer ks.mlock.Unlock()
	svc, err := ks.getServiceFromEndpoints(e)
@@ -215,41 +212,29 @@ func (ks *kube2sky) addDNSUsingEndpoints(subdomain string, e *kapi.Endpoints, is
	if err := ks.removeDNS(subdomain); err != nil {
		return err
	}
	return ks.generateRecordsForHeadlessService(subdomain, e, svc, isNewStyleFormat)
	return ks.generateRecordsForHeadlessService(subdomain, e, svc)
}

func (ks *kube2sky) handleEndpointAdd(obj interface{}) {
	if e, ok := obj.(*kapi.Endpoints); ok {
		name := buildDNSNameString(ks.domain, e.Namespace, e.Name)
		ks.mutateEtcdOrDie(func() error { return ks.addDNSUsingEndpoints(name, e, false) })
		name = buildDNSNameString(ks.domain, serviceSubdomain, e.Namespace, e.Name)
		ks.mutateEtcdOrDie(func() error { return ks.addDNSUsingEndpoints(name, e, true) })
		name := buildDNSNameString(ks.domain, serviceSubdomain, e.Namespace, e.Name)
		ks.mutateEtcdOrDie(func() error { return ks.addDNSUsingEndpoints(name, e) })
	}
}

func (ks *kube2sky) generateRecordsForPortalService(subdomain string, service *kapi.Service, isNewStyleFormat bool) error {
func (ks *kube2sky) generateRecordsForPortalService(subdomain string, service *kapi.Service) error {
	b, err := json.Marshal(getSkyMsg(service.Spec.ClusterIP, 0))
	if err != nil {
		return err
	}
	recordValue := string(b)
	recordKey := subdomain
	recordLabel := ""
	if isNewStyleFormat {
		recordLabel = getHash(recordValue)
		if err != nil {
			return err
		}
		recordKey = buildDNSNameString(subdomain, recordLabel)
	}
	recordLabel := getHash(recordValue)
	recordKey := buildDNSNameString(subdomain, recordLabel)

	glog.V(2).Infof("Setting DNS record: %v -> %q, with recordKey: %v\n", subdomain, recordValue, recordKey)
	if err := ks.writeSkyRecord(recordKey, recordValue); err != nil {
		return err
	}
	if !isNewStyleFormat {
		return nil
	}
	// Generate SRV Records
	for i := range service.Spec.Ports {
		port := &service.Spec.Ports[i]
@@ -290,15 +275,15 @@ func (ks *kube2sky) generateSRVRecord(subdomain, portSegment, recordName, cName
	return nil
}

func (ks *kube2sky) addDNS(subdomain string, service *kapi.Service, isNewStyleFormat bool) error {
func (ks *kube2sky) addDNS(subdomain string, service *kapi.Service) error {
	if len(service.Spec.Ports) == 0 {
		glog.Fatalf("unexpected service with no ports: %v", service)
	}
	// if ClusterIP is not set, a DNS entry should not be created
	if !kapi.IsServiceIPSet(service) {
		return ks.newHeadlessService(subdomain, service, isNewStyleFormat)
		return ks.newHeadlessService(subdomain, service)
	}
	return ks.generateRecordsForPortalService(subdomain, service, isNewStyleFormat)
	return ks.generateRecordsForPortalService(subdomain, service)
}

// Implements retry logic for arbitrary mutator. Crashes after retrying for
@@ -345,19 +330,14 @@ func createEndpointsLW(kubeClient *kclient.Client) *kcache.ListWatch {

func (ks *kube2sky) newService(obj interface{}) {
	if s, ok := obj.(*kapi.Service); ok {
		//TODO(artfulcoder) stop adding and deleting old-format string for service
		name := buildDNSNameString(ks.domain, s.Namespace, s.Name)
		ks.mutateEtcdOrDie(func() error { return ks.addDNS(name, s, false) })
		name = buildDNSNameString(ks.domain, serviceSubdomain, s.Namespace, s.Name)
		ks.mutateEtcdOrDie(func() error { return ks.addDNS(name, s, true) })
		name := buildDNSNameString(ks.domain, serviceSubdomain, s.Namespace, s.Name)
		ks.mutateEtcdOrDie(func() error { return ks.addDNS(name, s) })
	}
}

func (ks *kube2sky) removeService(obj interface{}) {
	if s, ok := obj.(*kapi.Service); ok {
		name := buildDNSNameString(ks.domain, s.Namespace, s.Name)
		ks.mutateEtcdOrDie(func() error { return ks.removeDNS(name) })
		name = buildDNSNameString(ks.domain, serviceSubdomain, s.Namespace, s.Name)
		name := buildDNSNameString(ks.domain, serviceSubdomain, s.Namespace, s.Name)
		ks.mutateEtcdOrDie(func() error { return ks.removeDNS(name) })
	}
}
@@ -94,11 +94,7 @@ func newKube2Sky(ec etcdClient) *kube2sky {
	}
}

func getEtcdOldStylePath(name, namespace string) string {
	return path.Join(basePath, namespace, name)
}

func getEtcdNewStylePath(name, namespace string) string {
func getEtcdPathForA(name, namespace string) string {
	return path.Join(basePath, serviceSubDomain, namespace, name)
}

@@ -125,18 +121,11 @@ func getHostPortFromString(data string) (*hostPort, error) {
}

func assertDnsServiceEntryInEtcd(t *testing.T, ec *fakeEtcdClient, serviceName, namespace string, expectedHostPort *hostPort) {
	oldStyleKey := getEtcdOldStylePath(serviceName, namespace)
	values := ec.Get(oldStyleKey)
	require.True(t, len(values) > 0, fmt.Sprintf("oldStyleKey '%s' not found.", oldStyleKey))
	actualHostPort, err := getHostPortFromString(values[0])
	require.NoError(t, err)
	assert.Equal(t, expectedHostPort.Host, actualHostPort.Host)

	newStyleKey := getEtcdNewStylePath(serviceName, namespace)
	values = ec.Get(newStyleKey)
	key := getEtcdPathForA(serviceName, namespace)
	values := ec.Get(key)
	//require.True(t, exists)
	require.True(t, len(values) > 0, "newStyleKey entry not found.")
	actualHostPort, err = getHostPortFromString(values[0])
	require.True(t, len(values) > 0, "entry not found.")
	actualHostPort, err := getHostPortFromString(values[0])
	require.NoError(t, err)
	assert.Equal(t, expectedHostPort.Host, actualHostPort.Host)
}
@@ -230,9 +219,8 @@ func TestHeadlessService(t *testing.T) {
	assert.NoError(t, k2s.servicesStore.Add(&service))
	endpoints := newEndpoints(service, newSubsetWithOnePort("", 80, "10.0.0.1", "10.0.0.2"), newSubsetWithOnePort("", 8080, "10.0.0.3", "10.0.0.4"))

	// We expect 4 records with "svc" subdomain and 4 records without
	// "svc" subdomain.
	expectedDNSRecords := 8
	// We expect 4 records.
	expectedDNSRecords := 4
	assert.NoError(t, k2s.endpointsStore.Add(&endpoints))
	k2s.newService(&service)
	assert.Equal(t, expectedDNSRecords, len(ec.writes))
@@ -251,9 +239,8 @@ func TestHeadlessServiceWithNamedPorts(t *testing.T) {
	assert.NoError(t, k2s.servicesStore.Add(&service))
	endpoints := newEndpoints(service, newSubsetWithTwoPorts("http1", 80, "http2", 81, "10.0.0.1", "10.0.0.2"), newSubsetWithOnePort("https", 443, "10.0.0.3", "10.0.0.4"))

	// We expect 14 records. 6 SRV records. 4 POD entries with old style, 4 POD entries with new style
	// "svc" subdomain.
	expectedDNSRecords := 14
	// We expect 10 records. 6 SRV records. 4 POD records.
	expectedDNSRecords := 10
	assert.NoError(t, k2s.endpointsStore.Add(&endpoints))
	k2s.newService(&service)
	assert.Equal(t, expectedDNSRecords, len(ec.writes))
@@ -263,8 +250,8 @@ func TestHeadlessServiceWithNamedPorts(t *testing.T) {

	endpoints.Subsets = endpoints.Subsets[:1]
	k2s.handleEndpointAdd(&endpoints)
	// We expect 8 records. 4 SRV records. 2 POD entries with old style, 2 POD entries with new style
	expectedDNSRecords = 8
	// We expect 6 records. 4 SRV records. 2 POD records.
	expectedDNSRecords = 6
	assert.Equal(t, expectedDNSRecords, len(ec.writes))
	assertSRVEntryInEtcd(t, ec, "http1", "tcp", testService, testNamespace, 80, 2)
	assertSRVEntryInEtcd(t, ec, "http2", "tcp", testService, testNamespace, 81, 2)
@@ -284,14 +271,14 @@ func TestHeadlessServiceEndpointsUpdate(t *testing.T) {
	assert.NoError(t, k2s.servicesStore.Add(&service))
	endpoints := newEndpoints(service, newSubsetWithOnePort("", 80, "10.0.0.1", "10.0.0.2"))

	expectedDNSRecords := 4
	expectedDNSRecords := 2
	assert.NoError(t, k2s.endpointsStore.Add(&endpoints))
	k2s.newService(&service)
	assert.Equal(t, expectedDNSRecords, len(ec.writes))
	endpoints.Subsets = append(endpoints.Subsets,
		newSubsetWithOnePort("", 8080, "10.0.0.3", "10.0.0.4"),
	)
	expectedDNSRecords = 8
	expectedDNSRecords = 4
	k2s.handleEndpointAdd(&endpoints)

	assert.Equal(t, expectedDNSRecords, len(ec.writes))
@@ -315,9 +302,8 @@ func TestHeadlessServiceWithDelayedEndpointsAddition(t *testing.T) {

	// Add an endpoints object for the service.
	endpoints := newEndpoints(service, newSubsetWithOnePort("", 80, "10.0.0.1", "10.0.0.2"), newSubsetWithOnePort("", 8080, "10.0.0.3", "10.0.0.4"))
	// We expect 4 records with "svc" subdomain and 4 records without
	// "svc" subdomain.
	expectedDNSRecords := 8
	// We expect 4 records.
	expectedDNSRecords := 4
	k2s.handleEndpointAdd(&endpoints)
	assert.Equal(t, expectedDNSRecords, len(ec.writes))
}
@@ -347,7 +333,7 @@ func TestUpdateSinglePortService(t *testing.T) {
	k2s := newKube2Sky(ec)
	service := newService(testNamespace, testService, "1.2.3.4", "", 0)
	k2s.newService(&service)
	assert.Len(t, ec.writes, 2)
	assert.Len(t, ec.writes, 1)
	newService := service
	newService.Spec.ClusterIP = "0.0.0.0"
	k2s.updateService(&service, &newService)
@@ -365,9 +351,7 @@ func TestDeleteSinglePortService(t *testing.T) {
	service := newService(testNamespace, testService, "1.2.3.4", "", 80)
	// Add the service
	k2s.newService(&service)
	// two entries should get created, one with the svc subdomain (new-style)
	// , and one without the svc subdomain (old-style)
	assert.Len(t, ec.writes, 2)
	assert.Len(t, ec.writes, 1)
	// Delete the service
	k2s.removeService(&service)
	assert.Empty(t, ec.writes)
@@ -387,7 +371,7 @@ func TestServiceWithNamePort(t *testing.T) {
	expectedValue := getHostPort(&service)
	assertDnsServiceEntryInEtcd(t, ec, testService, testNamespace, expectedValue)
	assertSRVEntryInEtcd(t, ec, "http1", "tcp", testService, testNamespace, 80, 1)
	assert.Len(t, ec.writes, 3)
	assert.Len(t, ec.writes, 2)

	// update service
	newService := service
@@ -396,7 +380,7 @@ func TestServiceWithNamePort(t *testing.T) {
	expectedValue = getHostPort(&newService)
	assertDnsServiceEntryInEtcd(t, ec, testService, testNamespace, expectedValue)
	assertSRVEntryInEtcd(t, ec, "http2", "tcp", testService, testNamespace, 80, 1)
	assert.Len(t, ec.writes, 3)
	assert.Len(t, ec.writes, 2)

	// Delete the service
	k2s.removeService(&service)
@@ -1,22 +1,22 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-dns-v6
  name: kube-dns-v8
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    version: v6
    version: v8
    kubernetes.io/cluster-service: "true"
spec:
  replicas: {{ pillar['dns_replicas'] }}
  selector:
    k8s-app: kube-dns
    version: v6
    version: v8
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        version: v6
        version: v8
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
@@ -28,12 +28,17 @@ spec:
            memory: 50Mi
        command:
        - /usr/local/bin/etcd
        - -data-dir
        - /var/etcd/data
        - -listen-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -advertise-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -initial-cluster-token
        - skydns-etcd
        volumeMounts:
        - name: etcd-storage
          mountPath: /var/etcd/data
      - name: kube2sky
        image: gcr.io/google_containers/kube2sky:1.11
        resources:
@@ -61,4 +66,26 @@ spec:
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
      - name: healthz
        image: gcr.io/google_containers/exechealthz:1.0
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
        args:
        - -cmd=nslookup kubernetes.default.svc.{{ pillar['dns_domain'] }} localhost >/dev/null
        - -port=8080
        ports:
        - containerPort: 8080
          protocol: TCP
      volumes:
      - name: etcd-storage
        emptyDir: {}
      dnsPolicy: Default  # Don't use cluster DNS.
@@ -17,9 +17,6 @@ RUN apt-get -q update && \
    apt-get clean && \
    curl -s https://storage.googleapis.com/signals-agents/logging/google-fluentd-install.sh | sudo bash

# Update gem for fluent-plugin-google-cloud
RUN /usr/sbin/google-fluentd-gem update fluent-plugin-google-cloud

# Copy the Fluentd configuration file for logging Docker container logs.
COPY google-fluentd.conf /etc/google-fluentd/google-fluentd.conf
@@ -15,7 +15,7 @@

.PHONY: kbuild kpush

TAG = 1.8
TAG = 1.9

# Rules for building the test image for deployment to Dockerhub with user kubernetes.
@@ -72,7 +72,7 @@ if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
fi

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
@@ -68,7 +68,7 @@ if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
fi

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
@@ -117,6 +117,13 @@ function get_instance_public_ip {
    --query Reservations[].Instances[].NetworkInterfaces[0].Association.PublicIp
}

function get_instance_private_ip {
  local instance_id=$1
  $AWS_CMD --output text describe-instances \
    --instance-ids ${instance_id} \
    --query Reservations[].Instances[].NetworkInterfaces[0].PrivateIpAddress
}

# Gets a security group id, by name ($1)
function get_security_group_id {
  local name=$1
@@ -80,7 +80,9 @@ function create-kubeconfig() {
  fi

  "${kubectl}" config set-cluster "${CONTEXT}" "${cluster_args[@]}"
  "${kubectl}" config set-credentials "${CONTEXT}" "${user_args[@]}"
  if [[ -n "${user_args[@]:-}" ]]; then
    "${kubectl}" config set-credentials "${CONTEXT}" "${user_args[@]}"
  fi
  "${kubectl}" config set-context "${CONTEXT}" --cluster="${CONTEXT}" --user="${CONTEXT}"
  "${kubectl}" config use-context "${CONTEXT}" --cluster="${CONTEXT}"
@@ -27,9 +27,9 @@ MINION_DISK_TYPE=pd-standard
MINION_DISK_SIZE=${MINION_DISK_SIZE:-100GB}

OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20150611}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20150715}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers}
MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-container-vm-v20150611}
MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-container-vm-v20150715}
MINION_IMAGE_PROJECT=${KUBE_GCE_MINION_PROJECT:-google-containers}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
RKT_VERSION=${KUBE_RKT_VERSION:-0.5.5}
@@ -41,16 +41,17 @@ MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.244.0.0/16}"
MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/monitoring" "https://www.googleapis.com/auth/logging.write")
MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"

# Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
POLL_SLEEP_INTERVAL=3
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16"  # formerly PORTAL_NET
ALLOCATE_NODE_CIDRS=true

# Optional: Cluster monitoring to setup as part of the cluster bring up:
#   none - No cluster monitoring setup
#   influxdb - Heapster, InfluxDB, and Grafana
#   google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
#   none - No cluster monitoring setup
#   influxdb - Heapster, InfluxDB, and Grafana
#   google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
#   googleinfluxdb - Enable influxdb and google (except GCM)
#   standalone - Heapster only. Metrics available via Heapster REST API.
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-googleinfluxdb}"
@@ -69,11 +70,20 @@ if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
fi

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1

# Optional: Create autoscaler for cluster's nodes.
# NOT WORKING YET!
ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
  AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
  TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
fi

# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
@@ -28,9 +28,9 @@ MINION_DISK_SIZE=${MINION_DISK_SIZE:-100GB}
KUBE_APISERVER_REQUEST_TIMEOUT=300

OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION:-debian}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20150611}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20150715}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers}
MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-container-vm-v20150611}
MINION_IMAGE=${KUBE_GCE_MINION_IMAGE:-container-vm-v20150715}
MINION_IMAGE_PROJECT=${KUBE_GCE_MINION_PROJECT:-google-containers}
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
RKT_VERSION=${KUBE_RKT_VERSION:-0.5.5}
@@ -42,7 +42,7 @@ MASTER_TAG="${INSTANCE_PREFIX}-master"
MINION_TAG="${INSTANCE_PREFIX}-minion"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.245.0.0/16}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
MINION_SCOPES=("storage-ro" "compute-rw" "https://www.googleapis.com/auth/logging.write" "https://www.googleapis.com/auth/monitoring")
MINION_SCOPES="${MINION_SCOPES:-compute-rw,monitoring,logging-write,storage-ro}"
# Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
POLL_SLEEP_INTERVAL=3
SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16"  # formerly PORTAL_NET
@@ -67,11 +67,20 @@ if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
fi

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1

# Optional: Create autoscaler for cluster's nodes.
# NOT WORKING YET!
ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
  AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
  TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
fi

ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota

# Optional: if set to true kube-up will automatically check for existing resources and clean them up.
@ -331,24 +331,16 @@ function create-salt-master-auth() {
  fi
}

# TODO(roberthbailey): Remove the insecure kubeconfig configuration files
# once the certs are being plumbed through for GKE.
function create-salt-node-auth() {
  if [[ ! -e /srv/kubernetes/ca.crt ]]; then
    if [[ ! -z "${CA_CERT:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
      mkdir -p /srv/kubernetes
      (umask 077;
        echo "${CA_CERT}" | base64 -d > /srv/kubernetes/ca.crt;
        echo "${KUBELET_CERT}" | base64 -d > /srv/kubernetes/kubelet.crt;
        echo "${KUBELET_KEY}" | base64 -d > /srv/kubernetes/kubelet.key)
    fi
  fi
kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
# This should happen both on cluster initialization and node upgrades.
#
# - Uses CA_CERT, KUBELET_CERT, and KUBELET_KEY to generate a kubeconfig file
#   for the kubelet to securely connect to the apiserver.
function create-salt-kubelet-auth() {
  local -r kubelet_kubeconfig_file="/srv/salt-overlay/salt/kubelet/kubeconfig"
  if [ ! -e "${kubelet_kubeconfig_file}" ]; then
    mkdir -p /srv/salt-overlay/salt/kubelet
    if [[ ! -z "${CA_CERT:-}" ]] && [[ ! -z "${KUBELET_CERT:-}" ]] && [[ ! -z "${KUBELET_KEY:-}" ]]; then
      (umask 077;
      cat > "${kubelet_kubeconfig_file}" <<EOF
      (umask 077;
      cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
@ -368,35 +360,18 @@ contexts:
current-context: service-account-context
EOF
)
    else
      (umask 077;
      cat > "${kubelet_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    token: ${KUBELET_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
contexts:
- context:
    cluster: local
    user: kubelet
  name: service-account-context
current-context: service-account-context
EOF
)
    fi
  fi
}

kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
# This should happen both on cluster initialization and node upgrades.
#
# - Uses the CA_CERT and KUBE_PROXY_TOKEN to generate a kubeconfig file for
#   the kube-proxy to securely connect to the apiserver.
function create-salt-kubeproxy-auth() {
  local -r kube_proxy_kubeconfig_file="/srv/salt-overlay/salt/kube-proxy/kubeconfig"
  if [ ! -e "${kube_proxy_kubeconfig_file}" ]; then
    mkdir -p /srv/salt-overlay/salt/kube-proxy
    if [[ ! -z "${CA_CERT:-}" ]]; then
      (umask 077;
      (umask 077;
      cat > "${kube_proxy_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
@ -416,28 +391,6 @@ contexts:
current-context: service-account-context
EOF
)
    else
      (umask 077;
      cat > "${kube_proxy_kubeconfig_file}" <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    token: ${KUBE_PROXY_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
contexts:
- context:
    cluster: local
    user: kube-proxy
  name: service-account-context
current-context: service-account-context
EOF
)
    fi
  fi
}
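Both branches of these auth functions emit the same kubeconfig shape; the only difference is whether the cluster entry carries certificate data or `insecure-skip-tls-verify: true`. A condensed sketch of the token-based variant, assuming only that $TOKEN holds a bearer token (the file path and names are illustrative, not the real ones):

# Illustrative only: write a minimal token-based kubeconfig with tight permissions.
kubeconfig="/tmp/demo-kubeconfig"   # hypothetical path
(umask 077
cat > "${kubeconfig}" <<EOF
apiVersion: v1
kind: Config
users:
- name: demo-user
  user:
    token: ${TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
contexts:
- context:
    cluster: local
    user: demo-user
  name: demo-context
current-context: demo-context
EOF
)

The umask inside the subshell ensures the file is created mode 0600 without disturbing the caller's umask.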
@ -598,7 +551,8 @@ if [[ -z "${is_push}" ]]; then
  if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
    create-salt-master-auth
  else
    create-salt-node-auth
    create-salt-kubelet-auth
    create-salt-kubeproxy-auth
  fi
  download-release
  configure-salt
2
cluster/gce/coreos/helper.sh
Normal file → Executable file
@ -140,7 +140,7 @@ function create-node-instance-template {
  if [[ -n ${1:-} ]]; then
    suffix="-${1}"
  fi
  create-node-template "${NODE_INSTANCE_PREFIX}-template${suffix}" "${scope_flags[*]}" \
  create-node-template "${NODE_INSTANCE_PREFIX}-template${suffix}" "${scope_flags}" \
    "kube-env=${KUBE_TEMP}/node-kube-env.yaml" \
    "user-data=${KUBE_ROOT}/cluster/gce/coreos/node.yaml"
}
2
cluster/gce/debian/helper.sh
Normal file → Executable file
@ -120,7 +120,7 @@ function create-node-instance-template {
  if [[ -n ${1:-} ]]; then
    suffix="-${1}"
  fi
  create-node-template "${NODE_INSTANCE_PREFIX}-template${suffix}" "${scope_flags[*]}" \
  create-node-template "${NODE_INSTANCE_PREFIX}-template${suffix}" "${scope_flags}" \
    "startup-script=${KUBE_ROOT}/cluster/gce/configure-vm.sh" \
    "kube-env=${KUBE_TEMP}/node-kube-env.yaml"
}
@ -29,7 +29,7 @@ fi

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh"

function usage() {
  echo "!!! EXPERIMENTAL !!!"
@ -178,11 +178,11 @@ function prepare-node-upgrade() {
  detect-minion-names

  # TODO(mbforbes): Refactor setting scope flags.
  local -a scope_flags=()
  if (( "${#MINION_SCOPES[@]}" > 0 )); then
    scope_flags=("--scopes" "$(join_csv ${MINION_SCOPES[@]})")
  local scope_flags=
  if [ -n "${MINION_SCOPES}" ]; then
    scope_flags="--scopes ${MINION_SCOPES}"
  else
    scope_flags=("--no-scopes")
    scope_flags="--no-scopes"
  fi

  # Get required node env vars from existing template.
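This commit switches MINION_SCOPES from a bash array to a plain comma-separated string, so scope_flags becomes a simple string too. A standalone sketch of the new pattern (the variable value is made up for the demo; in the real scripts it comes from config-default.sh):

# Demo value standing in for the real MINION_SCOPES.
MINION_SCOPES="compute-rw,storage-ro"

scope_flags=
if [ -n "${MINION_SCOPES}" ]; then
  scope_flags="--scopes ${MINION_SCOPES}"
else
  scope_flags="--no-scopes"
fi
echo gcloud compute instance-templates create demo-template ${scope_flags}

Note the design choice: ${scope_flags} is deliberately left unquoted at expansion time so the single string word-splits back into separate arguments ("--scopes" and the scope list).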
@ -212,8 +212,15 @@ function do-node-upgrade() {
  echo "== Upgrading nodes to ${KUBE_VERSION}. ==" >&2
  # Do the actual upgrade.
  # NOTE(mbforbes): If you are changing this gcloud command, update
  # test/e2e/restart.go to match this EXACTLY.
  gcloud preview rolling-updates \
  # test/e2e/cluster_upgrade.go to match this EXACTLY.
  # TODO(mbforbes): Remove this hack on July 29, 2015, when the migration to
  # `gcloud alpha compute rolling-updates` is complete.
  local subgroup="preview"
  local exists=$(gcloud ${subgroup} rolling-updates -h &>/dev/null; echo $?) || true
  if [[ "${exists}" != "0" ]]; then
    subgroup="alpha compute"
  fi
  gcloud ${subgroup} rolling-updates \
    --project="${PROJECT}" \
    --zone="${ZONE}" \
    start \
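The migration shim above probes which gcloud command group is available by asking for the subcommand's help text and checking the exit status. The same probe works for any subcommand; a generic sketch (group names mirror the ones in the diff):

# Pick whichever gcloud command group actually exists on this machine.
subgroup="preview"
if ! gcloud ${subgroup} rolling-updates -h &>/dev/null; then
  subgroup="alpha compute"
fi
echo "using: gcloud ${subgroup} rolling-updates"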
@ -544,9 +544,9 @@ function write-node-env {
function create-certs {
  local -r cert_ip="${1}"

  local octects=($(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e 's|/.*||' -e 's/\./ /g'))
  ((octects[3]+=1))
  local -r service_ip=$(echo "${octects[*]}" | sed 's/ /./g')
  local octets=($(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e 's|/.*||' -e 's/\./ /g'))
  ((octets[3]+=1))
  local -r service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
  local -r sans="IP:${cert_ip},IP:${service_ip},DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.${DNS_DOMAIN},DNS:${MASTER_NAME}"

  # Note: This was heavily cribbed from make-ca-cert.sh
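Beyond the octects-to-octets spelling fix, the logic deserves spelling out: it strips the /mask from SERVICE_CLUSTER_IP_RANGE, splits the base address into four octets, and adds one to the last octet to get the first assignable service IP (the address of the built-in kubernetes service). A runnable sketch with a demo range:

SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16"   # demo value

octets=($(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e 's|/.*||' -e 's/\./ /g'))
((octets[3]+=1))
service_ip=$(echo "${octets[*]}" | sed 's/ /./g')
echo "${service_ip}"   # prints 10.0.0.1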
@ -685,11 +685,11 @@ function kube-up {
  echo "Creating minions."

  # TODO(mbforbes): Refactor setting scope flags.
  local -a scope_flags=()
  if (( "${#MINION_SCOPES[@]}" > 0 )); then
    scope_flags=("--scopes" "$(join_csv ${MINION_SCOPES[@]})")
  local scope_flags=
  if [ -n "${MINION_SCOPES}" ]; then
    scope_flags="--scopes ${MINION_SCOPES}"
  else
    scope_flags=("--no-scopes")
    scope_flags="--no-scopes"
  fi

  write-node-env
@ -708,6 +708,18 @@ function kube-up {
  detect-minion-names
  detect-master

  # Create autoscaler for nodes if requested
  if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
    METRICS=""
    METRICS+="--custom-metric-utilization metric=custom.cloudmonitoring.googleapis.com/kubernetes.io/cpu/node_utilization,"
    METRICS+="utilization-target=${TARGET_NODE_UTILIZATION},utilization-target-type=GAUGE "
    METRICS+="--custom-metric-utilization metric=custom.cloudmonitoring.googleapis.com/kubernetes.io/memory/node_utilization,"
    METRICS+="utilization-target=${TARGET_NODE_UTILIZATION},utilization-target-type=GAUGE "
    echo "Creating node autoscaler."
    gcloud preview autoscaler --zone "${ZONE}" create "${NODE_INSTANCE_PREFIX}-autoscaler" --target "${NODE_INSTANCE_PREFIX}-group" \
      --min-num-replicas "${AUTOSCALER_MIN_NODES}" --max-num-replicas "${AUTOSCALER_MAX_NODES}" ${METRICS} || true
  fi

  echo "Waiting for cluster initialization."
  echo
  echo "  This will continually check to see if the API for kubernetes is reachable."
@ -769,6 +781,20 @@ function kube-down {
  echo "Bringing down cluster"
  set +e  # Do not stop on error

  # Delete autoscaler for nodes if present.
  local autoscaler
  autoscaler=( $(gcloud preview autoscaler --zone "${ZONE}" list \
                 | awk 'NR >= 2 { print $1 }' \
                 | grep "${NODE_INSTANCE_PREFIX}-autoscaler") )
  if [[ "${autoscaler:-}" != "" ]]; then
    gcloud preview autoscaler --zone "${ZONE}" delete "${NODE_INSTANCE_PREFIX}-autoscaler"
  fi

  # Get the name of the managed instance group template before we delete the
  # managed instance group. (The name of the managed instance group template may
  # change during a cluster upgrade.)
  local template=$(get-template "${PROJECT}" "${ZONE}" "${NODE_INSTANCE_PREFIX}-group")

  # The gcloud APIs don't return machine parseable error codes/retry information. Therefore the best we can
  # do is parse the output and special case particular responses we are interested in.
  if gcloud preview managed-instance-groups --project "${PROJECT}" --zone "${ZONE}" describe "${NODE_INSTANCE_PREFIX}-group" &>/dev/null; then
@ -792,11 +818,11 @@ function kube-down {
    fi
  fi

  if gcloud compute instance-templates describe --project "${PROJECT}" "${NODE_INSTANCE_PREFIX}-template" &>/dev/null; then
  if gcloud compute instance-templates describe --project "${PROJECT}" "${template}" &>/dev/null; then
    gcloud compute instance-templates delete \
      --project "${PROJECT}" \
      --quiet \
      "${NODE_INSTANCE_PREFIX}-template"
      "${template}"
  fi

  # First delete the master (if it exists).
@ -886,6 +912,22 @@ function kube-down {
  set -e
}

# Gets the instance template for the managed instance group with the provided
# project, zone, and group name. It echoes the template name so that the
# function output can be used.
#
# $1: project
# $2: zone
# $3: managed instance group name
function get-template {
  # url is set to https://www.googleapis.com/compute/v1/projects/$1/global/instanceTemplates/<template>
  local url=$(gcloud preview managed-instance-groups --project="${1}" --zone="${2}" describe "${3}" | grep instanceTemplate)
  # template is set to <template> (the pattern strips off all but the last slash)
  local template="${url##*/}"
  echo "${template}"
}

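get-template communicates its result by echoing it, the standard bash idiom for returning a string from a function; callers capture it with command substitution, exactly as the new kube-down code above does. A minimal self-contained sketch of the same idiom (the URL is a stand-in for real gcloud output):

# Demo of returning a value via echo plus command substitution.
function get-demo-template {
  local url="https://www.googleapis.com/compute/v1/projects/p/global/instanceTemplates/demo-template"
  echo "${url##*/}"   # ${var##*/} strips everything up to the last slash
}

template=$(get-demo-template)
echo "${template}"    # prints demo-template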
# Checks if there are any resources related to the kubernetes cluster present.
#
# Assumed vars:
@ -894,7 +936,6 @@ function kube-down {
#   ZONE
# Vars set:
#   KUBE_RESOURCE_FOUND

function check-resources {
  detect-project

@ -987,11 +1028,11 @@ function prepare-push() {
  write-node-env

  # TODO(mbforbes): Refactor setting scope flags.
  local -a scope_flags=()
  if (( "${#MINION_SCOPES[@]}" > 0 )); then
    scope_flags=("--scopes" "${MINION_SCOPES[@]}")
  local scope_flags=
  if [ -n "${MINION_SCOPES}" ]; then
    scope_flags="--scopes ${MINION_SCOPES}"
  else
    scope_flags=("--no-scopes")
    scope_flags="--no-scopes"
  fi

  # Ugly hack: Since it is not possible to delete instance-template that is currently
@ -30,6 +30,7 @@ GCLOUD="${GCLOUD:-gcloud}"
CMD_GROUP="${CMD_GROUP:-alpha}"
GCLOUD_CONFIG_DIR="${GCLOUD_CONFIG_DIR:-${HOME}/.config/gcloud/kubernetes}"
ENABLE_CLUSTER_DNS=false
MINION_SCOPES="${MINION_SCOPES:-"compute-rw,storage-ro"}"

# This is a hack, but I keep setting this when I run commands manually, and
# then things grossly fail during normal runs because cluster/kubecfg.sh and

@ -29,7 +29,7 @@ source "${KUBE_ROOT}/cluster/gke/${KUBE_CONFIG_FILE:-config-default.sh}"
# Assumed vars:
#   GCLOUD
function prepare-e2e() {
  echo "... in prepare-e2e()" >&2
  echo "... in gke:prepare-e2e()" >&2

  # Ensure GCLOUD is set to some gcloud binary.
  if [[ -z "${GCLOUD:-}" ]]; then
@ -48,28 +48,28 @@ function prepare-e2e() {
# Vars set:
#   PROJECT
function detect-project() {
  echo "... in detect-project()" >&2
  echo "... in gke:detect-project()" >&2
  if [[ -z "${PROJECT:-}" ]]; then
    export PROJECT=$("${GCLOUD}" config list project | tail -n 1 | cut -f 3 -d ' ')
    echo "... Using project: ${PROJECT}" >&2
  fi

  if [[ -z "${PROJECT:-}" ]]; then
    echo "Could not detect Google Cloud Platform project. Set the default project using " >&2
    echo "'gcloud config set project <PROJECT>'" >&2
    exit 1
  fi
  echo "Project: ${PROJECT}" >&2
}

# Execute prior to running tests to build a release if required for env.
function test-build-release() {
  echo "... in test-build-release()" >&2
  echo "... in gke:test-build-release()" >&2
  # We currently use the Kubernetes version that GKE supports (not testing
  # bleeding-edge builds).
}

# Verify needed binaries exist.
function verify-prereqs() {
  echo "... in gke:verify-prereqs()" >&2
  if ! which gcloud >/dev/null; then
    local resp
    if [[ "${KUBE_PROMPT_FOR_UPDATE}" == "y" ]]; then
@ -112,8 +112,9 @@ function verify-prereqs() {
#   ZONE
#   CLUSTER_API_VERSION (optional)
#   NUM_MINIONS
#   MINION_SCOPES
function kube-up() {
  echo "... in kube-up()" >&2
  echo "... in gke:kube-up()" >&2
  detect-project >&2

  # Make the specified network if we need to.
@ -121,7 +122,7 @@ function kube-up() {
    echo "Creating new network: ${NETWORK}" >&2
    "${GCLOUD}" compute networks create "${NETWORK}" --project="${PROJECT}" --range "${NETWORK_RANGE}"
  else
    echo "Using network: ${NETWORK}" >&2
    echo "... Using network: ${NETWORK}" >&2
  fi

  # Allow SSH on all nodes in the network. This doesn't actually check whether
@ -134,7 +135,7 @@ function kube-up() {
      --project="${PROJECT}" \
      --source-ranges="0.0.0.0/0"
  else
    echo "Using firewall-rule: ${FIREWALL_SSH}" >&2
    echo "... Using firewall-rule: ${FIREWALL_SSH}" >&2
  fi

  local create_args=(
@ -142,6 +143,7 @@ function kube-up() {
    "--project=${PROJECT}"
    "--num-nodes=${NUM_MINIONS}"
    "--network=${NETWORK}"
    "--scopes=${MINION_SCOPES}"
  )
  if [[ ! -z "${DOGFOOD_GCLOUD:-}" ]]; then
    create_args+=("--cluster-version=${CLUSTER_API_VERSION:-}")
@ -164,7 +166,7 @@ function kube-up() {
# Vars set:
#   MINION_TAG
function test-setup() {
  echo "... in test-setup()" >&2
  echo "... in gke:test-setup()" >&2
  # Detect the project into $PROJECT if it isn't set
  detect-project >&2
  detect-minions >&2
@ -198,7 +200,7 @@ function test-setup() {
#   KUBE_USER
#   KUBE_PASSWORD
function get-password() {
  echo "... in get-password()" >&2
  echo "... in gke:get-password()" >&2
  detect-project >&2
  KUBE_USER=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \
    --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
@ -217,7 +219,7 @@ function get-password() {
#   KUBE_MASTER
#   KUBE_MASTER_IP
function detect-master() {
  echo "... in detect-master()" >&2
  echo "... in gke:detect-master()" >&2
  detect-project >&2
  KUBE_MASTER="k8s-${CLUSTER_NAME}-master"
  KUBE_MASTER_IP=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \
@ -230,7 +232,7 @@ function detect-master() {
# Vars set:
#   MINION_NAMES
function detect-minions() {
  echo "... in detect-minions()" >&2
  echo "... in gke:detect-minions()" >&2
  detect-minion-names
}

@ -241,10 +243,11 @@ function detect-minions() {
# Vars set:
#   MINION_NAMES
function detect-minion-names {
  echo "... in gke:detect-minion-names()" >&2
  detect-project
  detect-node-instance-group
  MINION_NAMES=($(gcloud preview --project "${PROJECT}" instance-groups \
    --zone "${ZONE}" instances --group "${NODE_INSTANCE_GROUP}" list \
    --zone "${ZONE}" instances --group "${NODE_INSTANCE_GROUP}" list --quiet \
    | cut -d'/' -f11))
  echo "MINION_NAMES=${MINION_NAMES[*]}"
}
@ -259,6 +262,7 @@ function detect-minion-names {
# Vars set:
#   NODE_INSTANCE_GROUP
function detect-node-instance-group {
  echo "... in gke:detect-node-instance-group()" >&2
  NODE_INSTANCE_GROUP=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \
    --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \
    | grep instanceGroupManagers | cut -d '/' -f 11)
@ -270,7 +274,7 @@ function detect-node-instance-group {
#   GCLOUD
#   ZONE
function ssh-to-node() {
  echo "... in ssh-to-node()" >&2
  echo "... in gke:ssh-to-node()" >&2
  detect-project >&2

  local node="$1"
@ -288,13 +292,13 @@ function ssh-to-node() {

# Restart the kube-proxy on a node ($1)
function restart-kube-proxy() {
  echo "... in restart-kube-proxy()" >&2
  echo "... in gke:restart-kube-proxy()" >&2
  ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
}

# Restart the kube-apiserver on master ($1)
function restart-apiserver() {
  echo "... in restart-kube-apiserver()" >&2
  echo "... in gke:restart-apiserver()" >&2
  ssh-to-node "$1" "sudo docker ps | grep /kube-apiserver | cut -d ' ' -f 1 | xargs sudo docker kill"
}

@ -308,7 +312,7 @@ function restart-apiserver() {
#   KUBE_ROOT
#   ZONE
function test-teardown() {
  echo "... in test-teardown()" >&2
  echo "... in gke:test-teardown()" >&2

  detect-project >&2
  detect-minions >&2
@ -333,7 +337,7 @@ function test-teardown() {
#   ZONE
#   CLUSTER_NAME
function kube-down() {
  echo "... in kube-down()" >&2
  echo "... in gke:kube-down()" >&2
  detect-project >&2
  "${GCLOUD}" "${CMD_GROUP}" container clusters delete --project="${PROJECT}" \
    --zone="${ZONE}" "${CLUSTER_NAME}" --quiet

@ -12,7 +12,6 @@
  "/hyperkube",
  "controller-manager",
  "--master=127.0.0.1:8080",
  "--sync_nodes=true",
  "--v=2"
  ]
},

@ -12,7 +12,6 @@
  "/hyperkube",
  "controller-manager",
  "--master=127.0.0.1:8080",
  "--sync_nodes=true",
  "--v=2"
  ]
},

@ -22,7 +22,7 @@ set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh"

echo "Bringing down cluster using provider: $KUBERNETES_PROVIDER"

@ -25,7 +25,7 @@ set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh"

function usage() {
  echo "${0} [-m|-n <node id>] <version>"

@ -26,9 +26,9 @@ set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh"

echo "Starting cluster using provider: $KUBERNETES_PROVIDER" >&2
echo "... Starting cluster using provider: $KUBERNETES_PROVIDER" >&2

echo "... calling verify-prereqs" >&2
verify-prereqs
@ -36,7 +36,7 @@ verify-prereqs
echo "... calling kube-up" >&2
kube-up

echo "... calling validate-cluster" >&2
echo "... calling validate-cluster.sh" >&2
"${KUBE_ROOT}/cluster/validate-cluster.sh"

echo -e "Done, listing cluster services:\n" >&2
@ -16,73 +16,83 @@

# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..

# Must ensure that the following ENV vars are set
function detect-master {
  echo "KUBE_MASTER_IP: $KUBE_MASTER_IP"
  echo "KUBE_MASTER: $KUBE_MASTER"
  echo "KUBE_MASTER_IP: $KUBE_MASTER_IP" 1>&2
  echo "KUBE_MASTER: $KUBE_MASTER" 1>&2
}

# Get minion names if they are not static.
function detect-minion-names {
  echo "MINION_NAMES: ${MINION_NAMES[*]}"
  echo "MINION_NAMES: [${MINION_NAMES[*]}]" 1>&2
}

# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[]
function detect-minions {
  echo "KUBE_MINION_IP_ADDRESSES=[]"
  echo "KUBE_MINION_IP_ADDRESSES: [${KUBE_MINION_IP_ADDRESSES[*]}]" 1>&2
}

# Verify prereqs on host machine
function verify-prereqs {
  echo "TODO"
  echo "TODO: verify-prereqs" 1>&2
}

# Instantiate a kubernetes cluster
function kube-up {
  echo "TODO"
  echo "TODO: kube-up" 1>&2
}

# Delete a kubernetes cluster
function kube-down {
  echo "TODO"
  echo "TODO: kube-down" 1>&2
}

# Update a kubernetes cluster
function kube-push {
  echo "TODO"
  echo "TODO: kube-push" 1>&2
}

# Prepare to update a kubernetes component
function prepare-push {
  echo "TODO"
  echo "TODO: prepare-push" 1>&2
}

# Update a kubernetes master
function push-master {
  echo "TODO"
  echo "TODO: push-master" 1>&2
}

# Update a kubernetes node
function push-node {
  echo "TODO"
  echo "TODO: push-node" 1>&2
}

# Execute prior to running tests to build a release if required for env
function test-build-release {
  echo "TODO"
  echo "TODO: test-build-release" 1>&2
}

# Execute prior to running tests to initialize required structure
function test-setup {
  echo "TODO"
  echo "TODO: test-setup" 1>&2
}

# Execute after running tests to perform any required clean-up
function test-teardown {
  echo "TODO"
  echo "TODO: test-teardown" 1>&2
}

# Set the {KUBE_USER} and {KUBE_PASSWORD} environment values required to interact with provider
function get-password {
  echo "TODO"
  echo "TODO: get-password" 1>&2
}

# Provider util.sh scripts should define functions that override the above default implementations
if [ -n "${KUBERNETES_PROVIDER}" ]; then
  PROVIDER_UTILS="${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
  if [ -f ${PROVIDER_UTILS} ]; then
    source "${PROVIDER_UTILS}"
  fi
fi
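kube-util.sh supplies no-op defaults and then sources cluster/${KUBERNETES_PROVIDER}/util.sh, whose definitions silently replace the defaults; in bash the last definition of a function wins, which makes this a cheap form of virtual methods. A self-contained sketch of the mechanism (both definitions in one file here purely for the demo):

# Default implementation, a stand-in for kube-util.sh:
function kube-up {
  echo "TODO: kube-up" 1>&2
}

# A provider script sourced afterwards simply redefines the function:
function kube-up {
  echo "provider-specific kube-up" 1>&2
}

kube-up   # runs the provider version: last definition wins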
@ -32,10 +32,7 @@ set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/kube-env.sh"
UTILS=${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh
if [ -f ${UTILS} ]; then
  source "${UTILS}"
fi
source "${KUBE_ROOT}/cluster/kube-util.sh"

# Get the absolute path of the directory component of a file, i.e. the
# absolute path of the dirname of $1.

@ -53,7 +53,7 @@ ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.11.0.254"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1

@ -37,7 +37,7 @@ coreos:

    [Service]
    ExecStart=/opt/kubernetes/bin/kube-proxy \
    --master=http://${MASTER_IP}:7080
    --master=http://${MASTER_IP}:8080
    Restart=always
    RestartSec=2

@ -200,7 +200,6 @@ function wait-cluster-readiness {
function kube-up {
  detect-master
  detect-minions
  get-password
  initialize-pool keep_base_image
  initialize-network

@ -328,12 +327,6 @@ function test-teardown {
  kube-down
}

# Set the {KUBE_USER} and {KUBE_PASSWORD} environment values required to interact with provider
function get-password {
  export KUBE_USER=''
  export KUBE_PASSWORD=''
}

# SSH to a node by name or IP ($1) and run a command ($2).
function ssh-to-node {
  local node="$1"

@ -53,7 +53,7 @@ ELASTICSEARCH_LOGGING_REPLICAS=1
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1

@ -9,10 +9,6 @@ bridge-utils:

{% if grains.os_family == 'RedHat' %}

docker-io:
  pkg:
    - installed

{{ environment_file }}:
  file.managed:
    - source: salt://docker/default
@ -22,6 +18,25 @@ docker-io:
    - mode: 644
    - makedirs: true

{% if grains.os == 'Fedora' and grains.osrelease_info[0] >= 22 %}

docker:
  pkg:
    - installed
  service.running:
    - enable: True
    - require:
      - pkg: docker
    - watch:
      - file: {{ environment_file }}
      - pkg: docker

{% else %}

docker-io:
  pkg:
    - installed

docker:
  service.running:
    - enable: True
@ -31,6 +46,8 @@ docker:
      - file: {{ environment_file }}
      - pkg: docker-io

{% endif %}

{% else %}

{% if grains.cloud is defined
@ -43,6 +60,10 @@ docker:
    - repl: '# net.ipv4.ip_forward=0'
{% endif %}

# Work around Salt #18089: https://github.com/saltstack/salt/issues/18089
/etc/sysctl.d/99-salt.conf:
  file.touch

# TODO: This should really be based on network strategy instead of os_family
net.ipv4.ip_forward:
  sysctl.present:

@ -6,7 +6,7 @@ metadata:
spec:
  containers:
  - name: fluentd-cloud-logging
    image: gcr.io/google_containers/fluentd-gcp:1.8
    image: gcr.io/google_containers/fluentd-gcp:1.9
    resources:
      limits:
        cpu: 100m
54
cluster/saltbase/salt/kube-master-addons/kube-master-addons.sh
Normal file → Executable file
@ -15,28 +15,48 @@
# limitations under the License.

# loadedImageFlags is a bit-flag to track which docker images loaded successfully.
let loadedImageFlags=0;
let loadedImageFlags=0

while true; do
  if which docker 1>/dev/null 2>&1; then
    if docker load -i /srv/salt/kube-bins/kube-apiserver.tar 1>/dev/null 2>&1; then
      let loadedImageFlags="$loadedImageFlags|1";
    fi;
    if docker load -i /srv/salt/kube-bins/kube-scheduler.tar 1>/dev/null 2>&1; then
      let loadedImageFlags="$loadedImageFlags|2";
    fi;
    if docker load -i /srv/salt/kube-bins/kube-controller-manager.tar 1>/dev/null 2>&1; then
      let loadedImageFlags="$loadedImageFlags|4";
    fi;
  fi;
  restart_docker=false

  # required docker images got installed. exit while loop.
  if [ $loadedImageFlags == 7 ]; then break; fi;
  if which docker 1>/dev/null 2>&1; then

  # sleep for 5 seconds before attempting to load docker images again.
  sleep 5;
    timeout 30 docker load -i /srv/salt/kube-bins/kube-apiserver.tar 1>/dev/null 2>&1
    rc=$?
    if [[ $rc == 0 ]]; then
      let loadedImageFlags="$loadedImageFlags|1"
    elif [[ $rc == 124 ]]; then
      restart_docker=true
    fi

done;
    timeout 30 docker load -i /srv/salt/kube-bins/kube-scheduler.tar 1>/dev/null 2>&1
    rc=$?
    if [[ $rc == 0 ]]; then
      let loadedImageFlags="$loadedImageFlags|2"
    elif [[ $rc == 124 ]]; then
      restart_docker=true
    fi

    timeout 30 docker load -i /srv/salt/kube-bins/kube-controller-manager.tar 1>/dev/null 2>&1
    rc=$?
    if [[ $rc == 0 ]]; then
      let loadedImageFlags="$loadedImageFlags|4"
    elif [[ $rc == 124 ]]; then
      restart_docker=true
    fi
  fi

  # required docker images got installed. exit while loop.
  if [[ $loadedImageFlags == 7 ]]; then break; fi

  # Sometimes docker load hangs; restarting the docker daemon resolves the issue.
  if [[ "${restart_docker}" == "true" ]]; then service docker restart; fi

  # sleep for 15 seconds before attempting to load docker images again
  sleep 15

done

# Now exit. After kube-push, salt will notice that the service is down and it
# will start it and new docker images will be loaded.
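The rewritten loop wraps each docker load in timeout(1): exit code 0 sets that image's bit in loadedImageFlags, while 124 (the code timeout reports when it kills the command) schedules a docker restart; all three bits set (1|2|4 == 7) ends the loop. A condensed, runnable sketch of the pattern with a fake workload (sleep standing in for docker load):

let flags=0

# Run a command with a deadline; set a bit on success, flag a restart on timeout.
timeout 2 sleep 1          # stand-in for: timeout 30 docker load -i image.tar
rc=$?
if [[ $rc == 0 ]]; then
  let flags="$flags|1"     # bit 1: first image loaded
elif [[ $rc == 124 ]]; then
  restart_docker=true      # 124 means timeout(1) killed the command
fi

echo "flags=$flags"        # prints flags=1 for the successful stand-in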
@ -30,6 +30,7 @@ monit:
    - mode: 644
{% endif %}

{% if grains['roles'][0] == 'kubernetes-master' -%}
/etc/monit/conf.d/kube-addons:
  file:
    - managed
@ -37,6 +38,7 @@ monit:
    - user: root
    - group: root
    - mode: 644
{% endif %}

/etc/monit/monit_watcher.sh:
  file.managed:
1
cluster/ubuntu/.gitignore
vendored
@ -1 +1,2 @@
binaries
skydns*

@ -55,7 +55,7 @@ cp $ETCD/etcd $ETCD/etcdctl binaries/minion

# k8s
echo "Download kubernetes release ..."
K8S_VERSION=${K8S_VERSION:-"0.19.3"}
K8S_VERSION=${K8S_VERSION:-"1.0.1"}

if [ ! -f kubernetes.tar.gz ] ; then
  curl -L https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v${K8S_VERSION}/kubernetes.tar.gz -o kubernetes.tar.gz

@ -20,10 +20,10 @@
# And separated with blank space like <user_1@ip_1> <user_2@ip_2> <user_3@ip_3>
export nodes=${nodes:-"vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"}
# Define the role of each node: a(master) or i(minion) or ai(both master and minion); must be in the same order as the nodes above
roles=${roles:-"ai i i"}
role=${role:-"ai i i"}
# It is practically impossible to set an array as an environment variable
# from a script, so assume the variable is a string and then convert it to an array
export roles=($roles)
export roles=($role)

# Define minion numbers
export NUM_MINIONS=${NUM_MINIONS:-3}
@ -32,8 +32,7 @@ export NUM_MINIONS=${NUM_MINIONS:-3}
export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-192.168.3.0/24}  # formerly PORTAL_NET
# Define the IP range used for the flannel overlay network; it should not conflict with SERVICE_CLUSTER_IP_RANGE above
export FLANNEL_NET=${FLANNEL_NET:-172.16.0.0/16}
echo "FLANNEL_NET"
echo $FLANNEL_NET

export FLANNEL_OPTS=${FLANNEL_OPTS:-"Network": 172.16.0.0/16}

# Admission Controllers to invoke prior to persisting objects in cluster
@ -55,7 +54,7 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-true}"
DOCKER_OPTS=${DOCKER_OPTS:-""}

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
# DNS_SERVER_IP must be an IP in SERVICE_CLUSTER_IP_RANGE
DNS_SERVER_IP=${DNS_SERVER_IP:-"192.168.3.10"}
DNS_DOMAIN=${DNS_DOMAIN:-"cluster.local"}
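The roles handling above works around the fact that exported environment variables can only be strings: the caller passes a space-separated string and the script re-splits it into a bash array. A minimal sketch of the conversion:

# An env var arrives as one string...
role="ai i i"

# ...and unquoted expansion inside (...) word-splits it back into an array.
roles=($role)

echo "${#roles[@]}"    # prints 3
echo "${roles[0]}"     # prints ai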
@ -19,14 +19,11 @@
set -e

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "util.sh"
detect-master
KUBE_SERVER="http:\/\/${KUBE_MASTER_IP}:8080"

source "config-default.sh"
if [ "${ENABLE_CLUSTER_DNS}" == true ]; then
  echo "Deploying DNS on kubernetes"
  sed -e "s/{{ pillar\['dns_replicas'\] }}/${DNS_REPLICAS}/g;s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g;s/kube_server_url/${KUBE_SERVER}/g;" skydns-rc.yaml.template > skydns-rc.yaml
  sed -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" skydns-svc.yaml.template > skydns-svc.yaml
  sed -e "s/{{ pillar\['dns_replicas'\] }}/${DNS_REPLICAS}/g;s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g;" "${KUBE_ROOT}/cluster/addons/dns/skydns-rc.yaml.in" > skydns-rc.yaml
  sed -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" "${KUBE_ROOT}/cluster/addons/dns/skydns-svc.yaml.in" > skydns-svc.yaml

  # use kubectl to create skydns rc and service
  "${KUBE_ROOT}/cluster/kubectl.sh" --namespace=kube-system create -f skydns-rc.yaml

@ -1,61 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-dns-v4
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    version: v4
    kubernetes.io/cluster-service: "true"
spec:
  replicas: {{ pillar['dns_replicas'] }}
  selector:
    k8s-app: kube-dns
    version: v4
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        version: v4
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: etcd
        image: gcr.io/google_containers/etcd:2.0.9
        command:
        - /usr/local/bin/etcd
        - -listen-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -advertise-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -initial-cluster-token
        - skydns-etcd
      - name: kube2sky
        image: gcr.io/google_containers/kube2sky:1.10
        args:
        # command = "/kube2sky"
        - -domain={{ pillar['dns_domain'] }}
        - -kube_master_url=kube_server_url
      - name: skydns
        image: gcr.io/google_containers/skydns:2015-03-11-001
        args:
        # command = "/skydns"
        - -machines=http://localhost:4001
        - -addr=0.0.0.0:53
        - -domain={{ pillar['dns_domain'] }}.
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        livenessProbe:
          exec:
            command:
            - /bin/sh
            - -c
            - nslookup kubernetes.default.svc.{{ pillar['dns_domain'] }} localhost >/dev/null
          initialDelaySeconds: 30
          timeoutSeconds: 5
      dnsPolicy: Default  # Don't use cluster DNS.

@ -1,20 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: {{ pillar['dns_server'] }}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

@ -139,7 +139,7 @@ function verify-cluster {
  echo
  echo "Kubernetes cluster is running. The master is running at:"
  echo
  echo "  http://${MASTER_IP}"
  echo "  http://${MASTER_IP}:8080"
  echo

}
@ -183,16 +183,6 @@ function verify-minion(){
  printf "\n"
}

function genServiceAccountsKey() {
  SERVICE_ACCOUNT_LOOKUP=${SERVICE_ACCOUNT_LOOKUP:-false}
  SERVICE_ACCOUNT_KEY=${SERVICE_ACCOUNT_KEY:-"/tmp/kube-serviceaccount.key"}
  # Generate ServiceAccount key if needed
  if [[ ! -f "${SERVICE_ACCOUNT_KEY}" ]]; then
    mkdir -p "$(dirname ${SERVICE_ACCOUNT_KEY})"
    openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
  fi
}

function create-etcd-opts(){
  cat <<EOF > ~/kube/default/etcd
ETCD_OPTS="-name $1 \
@ -212,15 +202,17 @@ KUBE_APISERVER_OPTS="--address=0.0.0.0 \
--logtostderr=true \
--service-cluster-ip-range=${1} \
--admission_control=${2} \
--service_account_key_file=/tmp/kube-serviceaccount.key \
--service_account_lookup=false "
--client-ca-file=/srv/kubernetes/ca.crt
--tls-cert-file=/srv/kubernetes/server.cert
--tls-private-key-file=/srv/kubernetes/server.key"
EOF
}

function create-kube-controller-manager-opts(){
  cat <<EOF > ~/kube/default/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--master=127.0.0.1:8080 \
--service_account_private_key_file=/tmp/kube-serviceaccount.key \
--root-ca-file=/srv/kubernetes/ca.crt
--service-account-private-key-file=/srv/kubernetes/server.key \
--logtostderr=true"
EOF

@ -371,19 +363,22 @@ function provision-master() {
  echo "Deploying master on machine ${MASTER_IP}"
  echo
  ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"
  scp -r $SSH_OPTS ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/binaries/master/ "${MASTER}:~/kube"
  scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/binaries/master/ "${MASTER}:~/kube"

  # remote login to MASTER and use sudo to configure k8s master
  ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
    genServiceAccountsKey; \
    groupadd -f -r kube-cert; \
    ~/kube/make-ca-cert ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
    setClusterInfo; \
    create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \
    create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}"; \
    create-kube-controller-manager-opts "${MINION_IPS}"; \
    create-kube-scheduler-opts; \
    create-flanneld-opts; \
    sudo -p '[sudo] password to copy files and start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \
      && sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/; \
    sudo -p '[sudo] password to copy files and start master: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ ;\
    sudo groupadd -f -r kube-cert; \
    sudo ~/kube/make-ca-cert.sh ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
    sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/; \
    sudo service etcd start;"
}

@ -412,12 +407,11 @@ function provision-masterandminion() {
  echo "Deploying master and minion on machine ${MASTER_IP}"
  echo
  ssh $SSH_OPTS $MASTER "mkdir -p ~/kube/default"
  scp -r $SSH_OPTS ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/reconfDocker.sh ubuntu/minion/* ubuntu/binaries/master/ ubuntu/binaries/minion "${MASTER}:~/kube"
  scp -r $SSH_OPTS saltbase/salt/generate-cert/make-ca-cert.sh ubuntu/config-default.sh ubuntu/util.sh ubuntu/master/* ubuntu/reconfDocker.sh ubuntu/minion/* ubuntu/binaries/master/ ubuntu/binaries/minion "${MASTER}:~/kube"

  # remote login to the node and use sudo to configure k8s
  ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
    setClusterInfo; \
    genServiceAccountsKey; \
    create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \
    create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}"; \
    create-kube-controller-manager-opts "${MINION_IPS}"; \
@ -425,8 +419,10 @@ function provision-masterandminion() {
    create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}";
    create-kube-proxy-opts "${MASTER_IP}";\
    create-flanneld-opts; \
    sudo -p '[sudo] password to copy files and start node: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ \
      && sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/ && sudo cp ~/kube/minion/* /opt/bin/; \
    sudo -p '[sudo] password to copy files and start node: ' cp ~/kube/default/* /etc/default/ && sudo cp ~/kube/init_conf/* /etc/init/ && sudo cp ~/kube/init_scripts/* /etc/init.d/ ; \
    sudo groupadd -f -r kube-cert; \
    sudo ~/kube/make-ca-cert.sh ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
    sudo mkdir -p /opt/bin/ && sudo cp ~/kube/master/* /opt/bin/ && sudo cp ~/kube/minion/* /opt/bin/; \
    sudo service etcd start; \
    sudo -b ~/kube/reconfDocker.sh"
}

@ -21,14 +21,14 @@ NUM_MINIONS=${NUM_MINIONS-"1"}
export NUM_MINIONS

# The IP of the master
export MASTER_IP="10.245.1.2"
export KUBE_MASTER_IP="10.245.1.2"
export MASTER_IP=${MASTER_IP-"10.245.1.2"}
export KUBE_MASTER_IP=${MASTER_IP}

export INSTANCE_PREFIX="kubernetes"
export MASTER_NAME="${INSTANCE_PREFIX}-master"

# Map out the IPs, names and container subnets of each minion
export MINION_IP_BASE="10.245.1."
export MINION_IP_BASE=${MINION_IP_BASE-"10.245.1."}
MINION_CONTAINER_SUBNET_BASE="10.246"
MASTER_CONTAINER_NETMASK="255.255.255.0"
MASTER_CONTAINER_ADDR="${MINION_CONTAINER_SUBNET_BASE}.0.1"
@ -74,7 +74,7 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
EXTRA_DOCKER_OPTS="-b=cbr0 --insecure-registry 10.0.0.0/8"

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.247.0.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1

@ -27,12 +27,13 @@ rm -f /etc/sysconfig/network-scripts/ifcfg-enp0s3

# Disable network interface being managed by Network Manager (needed for Fedora 21+)
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
sed -i 's/^NM_CONTROLLED=no/#NM_CONTROLLED=no/' ${NETWORK_CONF_PATH}ifcfg-eth1
grep -q ^NM_CONTROLLED= ${NETWORK_CONF_PATH}ifcfg-eth1 || echo 'NM_CONTROLLED=no' >> ${NETWORK_CONF_PATH}ifcfg-eth1
sed -i 's/^#NM_CONTROLLED=.*/NM_CONTROLLED=no/' ${NETWORK_CONF_PATH}ifcfg-eth1
systemctl restart network

function release_not_found() {
  echo "It looks as if you don't have a compiled version of Kubernetes. If you" >&2
  echo "are running from a clone of the git repo, please run ./build/release.sh." >&2
  echo "are running from a clone of the git repo, please run 'make quick-release'." >&2
  echo "Note that this requires having Docker installed. If you are running " >&2
  echo "from a release tarball, something is wrong. Look at " >&2
  echo "http://kubernetes.io/ for information on how to contact the development team for help." >&2
@ -105,6 +106,7 @@ EOF
mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
service_cluster_ip_range: '$(echo "$SERVICE_CLUSTER_IP_RANGE" | sed -e "s/'/''/g")'
cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'

@ -78,7 +78,8 @@ rm -f /etc/sysconfig/network-scripts/ifcfg-enp0s3

# Disable network interface being managed by Network Manager (needed for Fedora 21+)
NETWORK_CONF_PATH=/etc/sysconfig/network-scripts/
sed -i 's/^NM_CONTROLLED=no/#NM_CONTROLLED=no/' ${NETWORK_CONF_PATH}ifcfg-eth1
grep -q ^NM_CONTROLLED= ${NETWORK_CONF_PATH}ifcfg-eth1 || echo 'NM_CONTROLLED=no' >> ${NETWORK_CONF_PATH}ifcfg-eth1
sed -i 's/^#NM_CONTROLLED=.*/NM_CONTROLLED=no/' ${NETWORK_CONF_PATH}ifcfg-eth1
systemctl restart network

# Setup hosts file to support ping by hostname to master
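The replacement ensure-then-set sequence is idempotent: the grep || echo guarantees the key exists, and the sed then forces its value, so re-running the provisioner converges instead of flip-flopping. A hedged generalization of the two-step pattern for any KEY=value file (file path is illustrative; the optional-comment regex assumes GNU sed):

conf=/tmp/demo-ifcfg       # illustrative file
key=NM_CONTROLLED
# 1) append the key if it is missing entirely
grep -q "^${key}=" "${conf}" || echo "${key}=no" >> "${conf}"
# 2) normalize whatever value (or commented-out form) is present
sed -i "s/^#\?${key}=.*/${key}=no/" "${conf}"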
@ -22,7 +22,7 @@ set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
source "${KUBE_ROOT}/cluster/kube-util.sh"

MINIONS_FILE=/tmp/minions-$$
trap 'rm -rf "${MINIONS_FILE}"' EXIT

@ -48,7 +48,7 @@ ELASTICSEARCH_LOGGING_REPLICAS=1
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.244.240.240"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1

@ -23,7 +23,6 @@ import (

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1"
	_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta3"
	pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"

	"github.com/golang/glog"
@ -32,7 +31,7 @@ import (

var (
	functionDest = flag.StringP("funcDest", "f", "-", "Output for conversion functions; '-' means stdout")
	version = flag.StringP("version", "v", "v1beta3", "Version for conversion.")
	version = flag.StringP("version", "v", "v1", "Version for conversion.")
)

func main() {

@ -24,7 +24,6 @@ import (

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1"
	_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta3"
	pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"

	"github.com/golang/glog"
@ -33,7 +32,7 @@ import (

var (
	functionDest = flag.StringP("func-dest", "f", "-", "Output for deep copy functions; '-' means stdout")
	version = flag.StringP("version", "v", "v1beta3", "Version for deep copies.")
	version = flag.StringP("version", "v", "v1", "Version for deep copies.")
	overwrites = flag.StringP("overwrites", "o", "", "Comma-separated overwrites for package names")
)

@ -152,8 +152,6 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
		glog.Fatalf("no public address for %s", host)
	}

	// Enable v1beta3 in master only if we are starting the components for that api version.
	enableV1Beta3 := apiVersion == "v1beta3"
	// Create a master and install handlers into mux.
	m := master.New(&master.Config{
		EtcdHelper: helper,
@ -162,7 +160,6 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
		EnableLogsSupport: false,
		EnableProfiling: true,
		APIPrefix: "/api",
		EnableV1Beta3: enableV1Beta3,
		Authorizer: apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl: admit.NewAlwaysAdmit(),
		ReadWritePort: portNumber,
@ -332,7 +329,7 @@ containers:
		desc: "static-pod-from-spec",
		fileContents: `{
			"kind": "Pod",
			"apiVersion": "v1beta3",
			"apiVersion": "v1",
			"metadata": {
				"name": "static-pod-from-spec"
			},
@ -611,23 +608,6 @@ func runPatchTest(c *client.Client) {
		RemoveLabelBody []byte
		RemoveAllLabelsBody []byte
	}{
		"v1beta3": {
			api.JSONPatchType: {
				[]byte(`[{"op":"add","path":"/metadata/labels","value":{"foo":"bar","baz":"qux"}}]`),
				[]byte(`[{"op":"remove","path":"/metadata/labels/foo"}]`),
				[]byte(`[{"op":"remove","path":"/metadata/labels"}]`),
			},
			api.MergePatchType: {
				[]byte(`{"metadata":{"labels":{"foo":"bar","baz":"qux"}}}`),
				[]byte(`{"metadata":{"labels":{"foo":null}}}`),
				[]byte(`{"metadata":{"labels":null}}`),
			},
			api.StrategicMergePatchType: {
				[]byte(`{"metadata":{"labels":{"foo":"bar","baz":"qux"}}}`),
				[]byte(`{"metadata":{"labels":{"foo":null}}}`),
				[]byte(`{"metadata":{"labels":{"$patch":"replace"}}}`),
			},
		},
		"v1": {
			api.JSONPatchType: {
				[]byte(`[{"op":"add","path":"/metadata/labels","value":{"foo":"bar","baz":"qux"}}]`),
@ -1027,7 +1007,7 @@ func ServeCachedManifestFile(contents string) (servingAddress string) {
const (
	testPodSpecFile = `{
		"kind": "Pod",
		"apiVersion": "v1beta3",
		"apiVersion": "v1",
		"metadata": {
			"name": "container-vm-guestbook-pod-spec"
		},

@ -1,24 +0,0 @@
{
  "kind": "ReplicationController",
  "apiVersion": "v1beta3",
  "metadata": {
    "name": "nginx-controller",
    "labels": {"name": "nginx"}
  },
  "spec": {
    "replicas": 2,
    "selector": {"name": "nginx"},
    "template": {
      "metadata": {
        "labels": {"name": "nginx"}
      },
      "spec": {
        "containers": [{
          "name": "nginx",
          "image": "nginx",
          "ports": [{"containerPort": 80}]
        }]
      }
    }
  }
}

@ -86,7 +86,6 @@ type APIServer struct {
	EtcdServerList util.StringList
	EtcdConfigFile string
	EtcdPathPrefix string
	OldEtcdPathPrefix string
	CorsAllowedOriginList util.StringList
	AllowPrivileged bool
	ServiceClusterIPRange util.IPNet // TODO: make this a list
@ -187,7 +186,6 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) {
	fs.Var(&s.EtcdServerList, "etcd-servers", "List of etcd servers to watch (http://ip:port), comma separated. Mutually exclusive with -etcd-config")
	fs.StringVar(&s.EtcdConfigFile, "etcd-config", s.EtcdConfigFile, "The config file for the etcd client. Mutually exclusive with -etcd-servers.")
	fs.StringVar(&s.EtcdPathPrefix, "etcd-prefix", s.EtcdPathPrefix, "The prefix for all resource paths in etcd.")
	fs.StringVar(&s.OldEtcdPathPrefix, "old-etcd-prefix", s.OldEtcdPathPrefix, "The previous prefix for all resource paths in etcd, if any.")
	fs.Var(&s.CorsAllowedOriginList, "cors-allowed-origins", "List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. If this list is empty CORS will not be enabled.")
	fs.BoolVar(&s.AllowPrivileged, "allow-privileged", s.AllowPrivileged, "If true, allow privileged containers.")
	fs.Var(&s.ServiceClusterIPRange, "service-cluster-ip-range", "A CIDR notation IP range from which to assign service cluster IPs. This must not overlap with any IP ranges assigned to nodes for pods.")
@ -217,7 +215,7 @@ func (s *APIServer) verifyClusterIPFlags() {
}

func newEtcd(etcdConfigFile string, etcdServerList util.StringList, storageVersion string, pathPrefix string) (helper tools.EtcdHelper, err error) {
	var client tools.EtcdGetSet
	var client tools.EtcdClient
	if etcdConfigFile != "" {
		client, err = etcd.NewClientFromFile(etcdConfigFile)
		if err != nil {
@ -260,7 +258,10 @@ func (s *APIServer) Run(_ []string) error {
		HostNetworkSources: []string{},
	})

	cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	kubeletClient, err := client.NewKubeletClient(&s.KubeletConfig)
	if err != nil {
@ -282,9 +283,6 @@ func (s *APIServer) Run(_ []string) error {
	}
	_ = disableLegacyAPIs // hush the compiler while we don't have legacy APIs to disable.

	// v1beta3 is disabled by default. Users can enable it using "api/v1beta3=true"
	enableV1beta3 := s.getRuntimeConfigValue("api/v1beta3", false)

	// "api/v1={true|false} allows users to enable/disable v1 API.
	// This takes preference over api/all and api/legacy, if specified.
	disableV1 := disableAllAPIs
@ -305,14 +303,6 @@ func (s *APIServer) Run(_ []string) error {
		glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
	}

	// TODO Is this the right place for migration to happen? Must *both* old and
	// new etcd prefix params be supplied for this to be valid?
	if s.OldEtcdPathPrefix != "" {
		if err = helper.MigrateKeys(s.OldEtcdPathPrefix); err != nil {
			glog.Fatalf("Migration of old etcd keys failed: %v", err)
		}
	}

	n := net.IPNet(s.ServiceClusterIPRange)

	// Default to the private server key for service account token signing
@ -384,7 +374,6 @@ func (s *APIServer) Run(_ []string) error {
	SupportsBasicAuth: len(s.BasicAuthFile) > 0,
	Authorizer: authorizer,
	AdmissionControl: admissionController,
	EnableV1Beta3: enableV1beta3,
	DisableV1: disableV1,
	MasterServiceNamespace: s.MasterServiceNamespace,
	ClusterName: s.ClusterName,

@ -186,7 +186,10 @@ func (s *CMServer) Run(_ []string) error {
	controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient, replicationControllerPkg.BurstReplicas)
	go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)

	cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}

	nodeController := nodecontroller.NewNodeController(cloud, kubeClient, s.RegisterRetryCount,
		s.PodEvictionTimeout, nodecontroller.NewPodEvictor(util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst)),

@ -25,7 +25,6 @@ import (
	"strconv"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
	clientcmdapi "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api"
@ -130,8 +129,7 @@ func (s *ProxyServer) Run(_ []string) error {
	}

	config.NewSourceAPI(
		client.Services(api.NamespaceAll),
		client.Endpoints(api.NamespaceAll),
		client,
		30*time.Second,
		serviceConfig.Channel("api"),
		endpointsConfig.Channel("api"),

@ -69,11 +69,11 @@ func ProbeVolumePlugins() []volume.VolumePlugin {
}

// ProbeNetworkPlugins collects all compiled-in plugins
func ProbeNetworkPlugins() []network.NetworkPlugin {
func ProbeNetworkPlugins(pluginDir string) []network.NetworkPlugin {
	allPlugins := []network.NetworkPlugin{}

	// for each existing plugin, add to the list
	allPlugins = append(allPlugins, exec.ProbeNetworkPlugins()...)
	allPlugins = append(allPlugins, exec.ProbeNetworkPlugins(pluginDir)...)

	return allPlugins
}
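ProbeNetworkPlugins now takes the plugin directory as a parameter instead of assuming a compiled-in location, wired up to the new --network-plugin-dir kubelet flag added below. A hedged invocation sketch (the plugin name is hypothetical; the directory shown is just the default from this diff, and both flags are marked alpha in their help text):

# Exec-style network plugins are discovered under the plugin directory.
kubelet \
  --network-plugin="my-plugin" \
  --network-plugin-dir="/usr/libexec/kubernetes/kubelet-plugins/net/exec/"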
@ -103,6 +103,7 @@ type KubeletServer struct {
|
||||
ImageGCLowThresholdPercent int
|
||||
LowDiskSpaceThresholdMB int
|
||||
NetworkPluginName string
|
||||
NetworkPluginDir string
|
||||
CloudProvider string
|
||||
CloudConfigFile string
|
||||
TLSCertFile string
|
||||
@ -171,6 +172,7 @@ func NewKubeletServer() *KubeletServer {
|
||||
ImageGCLowThresholdPercent: 80,
|
||||
LowDiskSpaceThresholdMB: 256,
|
||||
NetworkPluginName: "",
|
||||
NetworkPluginDir: "/usr/libexec/kubernetes/kubelet-plugins/net/exec/",
|
||||
HostNetworkSources: kubelet.FileSource,
|
||||
CertDirectory: "/var/run/kubernetes",
|
||||
NodeStatusUpdateFrequency: 10 * time.Second,
|
||||
@ -233,6 +235,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
    fs.IntVar(&s.ImageGCLowThresholdPercent, "image-gc-low-threshold", s.ImageGCLowThresholdPercent, "The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. Default: 80%%")
    fs.IntVar(&s.LowDiskSpaceThresholdMB, "low-diskspace-threshold-mb", s.LowDiskSpaceThresholdMB, "The absolute free disk space, in MB, to maintain. When disk space falls below this threshold, new pods would be rejected. Default: 256")
    fs.StringVar(&s.NetworkPluginName, "network-plugin", s.NetworkPluginName, "<Warning: Alpha feature> The name of the network plugin to be invoked for various events in kubelet/pod lifecycle")
    fs.StringVar(&s.NetworkPluginDir, "network-plugin-dir", s.NetworkPluginDir, "<Warning: Alpha feature> The full path of the directory in which to search for network plugins")
    fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.")
    fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
    fs.StringVar(&s.ResourceContainer, "resource-container", s.ResourceContainer, "Absolute name of the resource-only container to create and run the Kubelet in (Default: /kubelet).")
@ -240,7 +243,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
    fs.StringVar(&s.ContainerRuntime, "container_runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")
    fs.StringVar(&s.SystemContainer, "system-container", s.SystemContainer, "Optional resource-only container in which to place all non-kernel processes that are not already in a container. Empty for no container. Rolling back the flag requires a reboot. (Default: \"\").")
    fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")
    fs.IntVar(&s.MaxPods, "max-pods", 100, "Number of Pods that can run on this Kubelet.")
    fs.IntVar(&s.MaxPods, "max-pods", 40, "Number of Pods that can run on this Kubelet.")
    fs.StringVar(&s.DockerExecHandlerName, "docker-exec-handler", s.DockerExecHandlerName, "Handler to use when executing a command in a container. Valid values are 'native' and 'nsenter'. Defaults to 'native'.")
    fs.StringVar(&s.PodCIDR, "pod-cidr", "", "The CIDR to use for pod IP addresses, only used in standalone mode. In cluster mode, this is obtained from the master.")
    // Flags intended for testing, not recommended for use in production environments.
@ -286,7 +289,10 @@ func (s *KubeletServer) Run(_ []string) error {
        DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
        RootFreeDiskMB:   s.LowDiskSpaceThresholdMB,
    }
    cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
    cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
    if err != nil {
        return err
    }
    glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)

    hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
@ -347,7 +353,7 @@ func (s *KubeletServer) Run(_ []string) error {
        KubeClient:             apiclient,
        MasterServiceNamespace: s.MasterServiceNamespace,
        VolumePlugins:          ProbeVolumePlugins(),
        NetworkPlugins:         ProbeNetworkPlugins(),
        NetworkPlugins:         ProbeNetworkPlugins(s.NetworkPluginDir),
        NetworkPluginName:      s.NetworkPluginName,
        StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
        TLSOptions:             tlsOptions,
59
cmd/mungedocs/analytics.go
Normal file
@ -0,0 +1,59 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "bytes"
    "fmt"
    "os"
    "regexp"
)

var (
    beginMungeExp = regexp.QuoteMeta(beginMungeTag("GENERATED_ANALYTICS"))
    endMungeExp   = regexp.QuoteMeta(endMungeTag("GENERATED_ANALYTICS"))
    analyticsExp  = regexp.QuoteMeta("[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/") +
        "[^?]*" +
        regexp.QuoteMeta("?pixel)]()")

    // Matches the analytics blurb, with or without the munge headers.
    analyticsRE = regexp.MustCompile(`[\n]*` + analyticsExp + `[\n]?` +
        `|` + `[\n]*` + beginMungeExp + `[^<]*` + endMungeExp)
)

// This adds the analytics link to every .md file.
func checkAnalytics(fileName string, fileBytes []byte) (output []byte, err error) {
    fileName = makeRepoRelative(fileName)
    desired := fmt.Sprintf(`


`+beginMungeTag("GENERATED_ANALYTICS")+`
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/%s?pixel)]()
`+endMungeTag("GENERATED_ANALYTICS")+`
`, fileName)
    if !analyticsRE.MatchString(desired) {
        fmt.Printf("%q does not match %q", analyticsRE.String(), desired)
        os.Exit(1)
    }
    //output = replaceNonPreformattedRegexp(fileBytes, analyticsRE, func(in []byte) []byte {
    output = analyticsRE.ReplaceAllFunc(fileBytes, func(in []byte) []byte {
        return []byte{}
    })
    output = bytes.TrimRight(output, "\n")
    output = append(output, []byte(desired)...)
    return output, nil
}
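analytics.go and every munge below lean on two small tag helpers from cmd/mungedocs/util.go, but this commit view is truncated before their definitions. A minimal sketch of what they are assumed to look like, inferred from the literal "<!-- BEGIN MUNGE: GENERATED_TOC -->" markers that appear in toc_test.go further down:

// Sketch only: the real definitions live in cmd/mungedocs/util.go, past the
// point where this view cuts off. The tag format is taken from the markers
// visible in the tests below; treat these bodies as assumptions.
func beginMungeTag(desc string) string {
    return fmt.Sprintf("<!-- BEGIN MUNGE: %s -->", desc)
}

func endMungeTag(desc string) string {
    return fmt.Sprintf("<!-- END MUNGE: %s -->", desc)
}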
89
cmd/mungedocs/analytics_test.go
Normal file
@ -0,0 +1,89 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestAnalytics(t *testing.T) {
    var cases = []struct {
        in  string
        out string
    }{
        {
            "aoeu",
            "aoeu" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n"},
        {
            "aoeu" + "\n" + "\n" + "\n" +
                "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()",
            "aoeu" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n"},
        {
            "aoeu" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n",
            "aoeu" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n"},
        {
            "aoeu" + "\n" + "\n" +
                "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n",
            "aoeu" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n"},
        {
            "prefix" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") +
                "\n" + "suffix",
            "prefix" + "\n" + "suffix" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n"},
        {
            "aoeu" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n",
            "aoeu" + "\n" + "\n" + "\n" +
                beginMungeTag("GENERATED_ANALYTICS") + "\n" +
                "[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/path/to/file-name.md?pixel)]()" + "\n" +
                endMungeTag("GENERATED_ANALYTICS") + "\n"},
    }
    for _, c := range cases {
        out, err := checkAnalytics("path/to/file-name.md", []byte(c.in))
        assert.NoError(t, err)
        if string(out) != c.out {
            t.Errorf("Expected \n\n%v\n\n but got \n\n%v\n\n", c.out, string(out))
        }
    }
}
115
cmd/mungedocs/example_syncer.go
Normal file
@ -0,0 +1,115 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "path"
    "regexp"
    "strings"
)

const exampleMungeTag = "EXAMPLE"

// syncExamples updates all examples in a markdown file.
//
// Finds the magic macro block tags, finds the link to the example
// specified in the tags, and replaces anything between those with
// the content of the example, thereby syncing it.
//
// For example,
// <!-- BEGIN MUNGE: EXAMPLE ../../examples/guestbook/frontend-controller.yaml -->
//
// ```yaml
// foo:
// bar:
// ```
//
// [Download example](../../examples/guestbook/frontend-controller.yaml)
// <!-- END MUNGE: EXAMPLE -->
func syncExamples(filePath string, markdown []byte) ([]byte, error) {
    // find the example syncer begin tag
    header := beginMungeTag(fmt.Sprintf("%s %s", exampleMungeTag, `(([^ ])*.(yaml|json))`))
    exampleLinkRE := regexp.MustCompile(header)
    lines := splitLines(markdown)
    updatedMarkdown, err := updateExampleMacroBlock(filePath, lines, exampleLinkRE, endMungeTag(exampleMungeTag))
    if err != nil {
        return updatedMarkdown, err
    }
    return updatedMarkdown, nil
}

// exampleContent retrieves the content of the file at linkPath
func exampleContent(filePath, linkPath, fileType string) (content string, err error) {
    realRoot := path.Join(*rootDir, *repoRoot) + "/"
    path := path.Join(realRoot, path.Dir(filePath), linkPath)
    dat, err := ioutil.ReadFile(path)
    if err != nil {
        return content, err
    }
    content = fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, string(dat), linkPath)
    return
}

// updateExampleMacroBlock syncs the yaml/json example between the begin tag and end tag
func updateExampleMacroBlock(filePath string, lines []string, beginMarkExp *regexp.Regexp, endMark string) ([]byte, error) {
    var buffer bytes.Buffer
    betweenBeginAndEnd := false
    for _, line := range lines {
        trimmedLine := strings.Trim(line, " \n")
        if beginMarkExp.Match([]byte(trimmedLine)) {
            if betweenBeginAndEnd {
                return nil, fmt.Errorf("found second begin mark while updating macro blocks")
            }
            betweenBeginAndEnd = true
            buffer.WriteString(line)
            buffer.WriteString("\n")
            match := beginMarkExp.FindStringSubmatch(line)
            if len(match) < 4 {
                return nil, fmt.Errorf("failed to parse the link in example header")
            }
            // match[0] is the entire expression; [1] is the link text and [3] is the file type (yaml or json).
            linkText := match[1]
            fileType := match[3]
            example, err := exampleContent(filePath, linkText, fileType)
            if err != nil {
                return nil, err
            }
            buffer.WriteString(example)
        } else if trimmedLine == endMark {
            if !betweenBeginAndEnd {
                return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks")
            }
            // Extra newline avoids github markdown bug where comment ends up on same line as last bullet.
            buffer.WriteString("\n")
            buffer.WriteString(line)
            buffer.WriteString("\n")
            betweenBeginAndEnd = false
        } else {
            if !betweenBeginAndEnd {
                buffer.WriteString(line)
                buffer.WriteString("\n")
            }
        }
    }
    if betweenBeginAndEnd {
        return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
    }
    return buffer.Bytes(), nil
}
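To make the flow above concrete, here is a hypothetical driver (not part of the commit) that runs the example syncer over one document in memory. The function name and the document path are made up; it assumes the package-level *rootDir and *repoRoot flags have been parsed and that testdata/pod.yaml (added later in this commit) exists:

// Hypothetical usage sketch for syncExamples; everything here except the
// syncExamples call itself is invented for illustration.
func runExampleSyncerSketch() error {
    doc := []byte("<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n" +
        "<!-- END MUNGE: EXAMPLE -->\n")
    updated, err := syncExamples("cmd/mungedocs/some-doc.md", doc)
    if err != nil {
        return err
    }
    // updated now carries the fenced YAML plus a "[Download example]" link
    // between the two munge tags.
    fmt.Printf("%s", updated)
    return nil
}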
58
cmd/mungedocs/example_syncer_test.go
Normal file
@ -0,0 +1,58 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func Test_syncExamples(t *testing.T) {
    var podExample = `apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80
`
    var cases = []struct {
        in  string
        out string
    }{
        {"", ""},
        {
            "<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE -->\n",
            "<!-- BEGIN MUNGE: EXAMPLE testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE -->\n",
        },
        {
            "<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n<!-- END MUNGE: EXAMPLE -->\n",
            "<!-- BEGIN MUNGE: EXAMPLE ../mungedocs/testdata/pod.yaml -->\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n<!-- END MUNGE: EXAMPLE -->\n",
        },
    }
    for _, c := range cases {
        actual, err := syncExamples("mungedocs/filename.md", []byte(c.in))
        assert.NoError(t, err)
        if c.out != string(actual) {
            t.Errorf("Expected example \n'%v' but got \n'%v'", c.out, string(actual))
        }
    }
}
71
cmd/mungedocs/headers.go
Normal file
@ -0,0 +1,71 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "fmt"
    "regexp"
    "strings"
)

var headerRegex = regexp.MustCompile(`^(#+)\s*(.*)$`)
var whitespaceRegex = regexp.MustCompile(`^\s*$`)

func fixHeaderLines(fileBytes []byte) []byte {
    lines := splitLines(fileBytes)
    out := []string{}
    for i := range lines {
        matches := headerRegex.FindStringSubmatch(lines[i])
        if matches == nil {
            out = append(out, lines[i])
            continue
        }
        if i > 0 && !whitespaceRegex.Match([]byte(out[len(out)-1])) {
            out = append(out, "")
        }
        out = append(out, fmt.Sprintf("%s %s", matches[1], matches[2]))
        if i+1 < len(lines) && !whitespaceRegex.Match([]byte(lines[i+1])) {
            out = append(out, "")
        }
    }
    final := strings.Join(out, "\n")
    // Preserve the end of the file.
    if len(fileBytes) > 0 && fileBytes[len(fileBytes)-1] == '\n' {
        final += "\n"
    }
    return []byte(final)
}

// Header lines need whitespace around them and after the #s.
func checkHeaderLines(filePath string, fileBytes []byte) ([]byte, error) {
    fbs := splitByPreformatted(fileBytes)
    fbs = append([]fileBlock{{false, []byte{}}}, fbs...)
    fbs = append(fbs, fileBlock{false, []byte{}})

    for i := range fbs {
        block := &fbs[i]
        if block.preformatted {
            continue
        }
        block.data = fixHeaderLines(block.data)
    }
    output := []byte{}
    for _, block := range fbs {
        output = append(output, block.data...)
    }
    return output, nil
}
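A quick illustration of the rule the comment above states (hedged example, not from the commit; it mirrors a case in headers_test.go below and assumes it sits in the same package):

// Illustrative only: a cramped header gains a space after the #s and blank
// lines on both sides.
func demoHeaderFix() {
    in := []byte("foo\n##fix\nbar")
    out, _ := checkHeaderLines("doc.md", in)
    fmt.Printf("%q\n", out) // expected: "foo\n\n## fix\n\nbar"
}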
71
cmd/mungedocs/headers_test.go
Normal file
@ -0,0 +1,71 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestHeaderLines(t *testing.T) {
    var cases = []struct {
        in  string
        out string
    }{
        {"", ""},
        {
            "# ok",
            "# ok",
        },
        {
            "## ok",
            "## ok",
        },
        {
            "##### ok",
            "##### ok",
        },
        {
            "##fix",
            "## fix",
        },
        {
            "foo\n\n##fix\n\nbar",
            "foo\n\n## fix\n\nbar",
        },
        {
            "foo\n##fix\nbar",
            "foo\n\n## fix\n\nbar",
        },
        {
            "foo\n```\n##fix\n```\nbar",
            "foo\n```\n##fix\n```\nbar",
        },
        {
            "foo\n#fix1\n##fix2\nbar",
            "foo\n\n# fix1\n\n## fix2\n\nbar",
        },
    }
    for i, c := range cases {
        actual, err := checkHeaderLines("filename.md", []byte(c.in))
        assert.NoError(t, err)
        if string(actual) != c.out {
            t.Errorf("case[%d]: expected %q got %q", i, c.out, string(actual))
        }
    }
}
116
cmd/mungedocs/kubectl_dash_f.go
Normal file
@ -0,0 +1,116 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "fmt"
    "os"
    "path"
    "strings"
)

// Looks for lines that have kubectl commands with -f flags and files that
// don't exist.
func checkKubectlFileTargets(file string, markdown []byte) ([]byte, error) {
    inside := false
    lines := splitLines(markdown)
    errors := []string{}
    for i := range lines {
        if strings.HasPrefix(lines[i], "```") {
            inside = !inside
        }
        if inside {
            if err := lookForKubectl(lines, i); err != nil {
                errors = append(errors, err.Error())
            }
        }
    }
    err := error(nil)
    if len(errors) != 0 {
        err = fmt.Errorf("%s", strings.Join(errors, "\n"))
    }
    return markdown, err
}

func lookForKubectl(lines []string, lineNum int) error {
    fields := strings.Fields(lines[lineNum])
    for i := range fields {
        if fields[i] == "kubectl" {
            return gotKubectl(lineNum, fields, i)
        }
    }
    return nil
}

func gotKubectl(line int, fields []string, fieldNum int) error {
    for i := fieldNum + 1; i < len(fields); i++ {
        switch fields[i] {
        case "create", "update", "replace", "delete":
            return gotCommand(line, fields, i)
        }
    }
    return nil
}

func gotCommand(line int, fields []string, fieldNum int) error {
    for i := fieldNum + 1; i < len(fields); i++ {
        if strings.HasPrefix(fields[i], "-f") {
            return gotDashF(line, fields, i)
        }
    }
    return nil
}

func gotDashF(line int, fields []string, fieldNum int) error {
    target := ""
    if fields[fieldNum] == "-f" {
        if fieldNum+1 == len(fields) {
            return fmt.Errorf("ran out of fields after '-f'")
        }
        target = fields[fieldNum+1]
    } else {
        target = fields[fieldNum][2:]
    }
    // Turn dirs into file-like names.
    target = strings.TrimRight(target, "/")

    // Now exclude special-cases

    if target == "-" || target == "FILENAME" {
        // stdin and "FILENAME" are OK
        return nil
    }
    if strings.HasPrefix(target, "http://") || strings.HasPrefix(target, "https://") {
        // URLs are ok
        return nil
    }
    if strings.HasPrefix(target, "./") {
        // Same-dir files are usually created in the same example
        return nil
    }
    if strings.HasPrefix(target, "/") {
        // Absolute paths tend to be /tmp/* and created in the same example.
        return nil
    }

    // If we got here we expect the file to exist.
    _, err := os.Stat(path.Join(*rootDir, *repoRoot, target))
    if os.IsNotExist(err) {
        return fmt.Errorf("%d: target file %q does not exist", line, target)
    }
    return err
}
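A hypothetical call (not in the commit) showing what the checker reports for a fenced shell block whose -f target is missing; the document content and the expectation comment are invented, and it assumes *rootDir and *repoRoot resolve to the repository:

// Illustrative only: a missing -f target becomes an error naming the
// offending (zero-based) line.
func demoKubectlCheck() {
    doc := []byte("```\nkubectl create -f does-not-exist.yaml\n```\n")
    if _, err := checkKubectlFileTargets("doc.md", doc); err != nil {
        fmt.Println(err) // e.g. `1: target file "does-not-exist.yaml" does not exist`
    }
}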
139
cmd/mungedocs/kubectl_dash_f_test.go
Normal file
@ -0,0 +1,139 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import "testing"

func TestKubectlDashF(t *testing.T) {
    var cases = []struct {
        in string
        ok bool
    }{
        // No match
        {"", true},
        {
            "Foo\nBar\n",
            true,
        },
        {
            "Foo\nkubectl blah blech\nBar",
            true,
        },
        {
            "Foo\n```shell\nkubectl blah blech\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah create blech\n```\nBar",
            true,
        },
        // Special cases
        {
            "Foo\n```\nkubectl -blah create -f -\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah create -f-\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah create -f FILENAME\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah create -fFILENAME\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah create -f http://google.com/foobar\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah create -fhttp://google.com/foobar\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah create -f ./foobar\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah create -f./foobar\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah create -f /foobar\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah create -f/foobar\n```\nBar",
            true,
        },
        // Real checks
        {
            "Foo\n```\nkubectl -blah create -f mungedocs.go\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah create -fmungedocs.go\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah update -f mungedocs.go\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah update -fmungedocs.go\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah replace -f mungedocs.go\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah replace -fmungedocs.go\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah delete -f mungedocs.go\n```\nBar",
            true,
        },
        {
            "Foo\n```\nkubectl -blah delete -fmungedocs.go\n```\nBar",
            true,
        },
        // Failures
        {
            "Foo\n```\nkubectl -blah delete -f does_not_exist\n```\nBar",
            false,
        },
        {
            "Foo\n```\nkubectl -blah delete -fdoes_not_exist\n```\nBar",
            false,
        },
    }
    for i, c := range cases {
        *rootDir = ""
        *repoRoot = ""
        _, err := checkKubectlFileTargets("filename.md", []byte(c.in))
        if err != nil && c.ok {
            t.Errorf("case[%d]: expected success, got %v", i, err)
        }
        if err == nil && !c.ok {
            t.Errorf("case[%d]: unexpected success", i)
        }
    }
}
204
cmd/mungedocs/links.go
Normal file
@ -0,0 +1,204 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "fmt"
    "net/url"
    "os"
    "path"
    "regexp"
    "strings"
)

var (
    // Finds markdown links of the form [foo](bar "alt-text").
    linkRE = regexp.MustCompile(`\[([^]]*)\]\(([^)]*)\)`)
    // Splits the link target into link target and alt-text.
    altTextRE = regexp.MustCompile(`(.*)( ".*")`)
)

// checkLinks assumes fileBytes has links in markdown syntax, and verifies that
// any relative links actually point to files that exist.
func checkLinks(filePath string, fileBytes []byte) ([]byte, error) {
    dir := path.Dir(filePath)
    errors := []string{}

    output := replaceNonPreformattedRegexp(fileBytes, linkRE, func(in []byte) (out []byte) {
        match := linkRE.FindSubmatch(in)
        // match[0] is the entire expression; [1] is the visible text and [2] is the link text.
        visibleText := string(match[1])
        linkText := string(match[2])
        altText := ""
        if parts := altTextRE.FindStringSubmatch(linkText); parts != nil {
            linkText = parts[1]
            altText = parts[2]
        }

        // clean up some random garbage I found in our docs.
        linkText = strings.Trim(linkText, " ")
        linkText = strings.Trim(linkText, "\n")
        linkText = strings.Trim(linkText, " ")

        u, err := url.Parse(linkText)
        if err != nil {
            errors = append(
                errors,
                fmt.Sprintf("link %q is unparsable: %v", linkText, err),
            )
            return in
        }

        if u.Host != "" && u.Host != "github.com" {
            // We only care about relative links and links within github.
            return in
        }

        suggestedVisibleText := visibleText
        if u.Path != "" && !strings.HasPrefix(linkText, "TODO:") {
            newPath, targetExists := checkPath(filePath, path.Clean(u.Path))
            if !targetExists {
                errors = append(
                    errors,
                    fmt.Sprintf("%q: target not found", linkText),
                )
            }
            u.Path = newPath
            if strings.HasPrefix(u.Path, "/") {
                u.Host = "github.com"
                u.Scheme = "https"
            } else {
                // Remove host and scheme from relative paths
                u.Host = ""
                u.Scheme = ""
            }
            // Make the visible text show the absolute path if it's
            // not nested in or beneath the current directory.
            if strings.HasPrefix(u.Path, "..") {
                suggestedVisibleText = makeRepoRelative(path.Join(dir, u.Path))
            } else {
                suggestedVisibleText = u.Path
            }
            if unescaped, err := url.QueryUnescape(u.String()); err == nil {
                // Remove %28 type stuff, be nice to humans.
                // And don't fight with the toc generator.
                linkText = unescaped
            } else {
                linkText = u.String()
            }
        }
        // If the current visible text is trying to be a file name, use
        // the correct file name.
        if strings.HasSuffix(visibleText, ".md") && !strings.ContainsAny(visibleText, ` '"`+"`") {
            visibleText = suggestedVisibleText
        }

        return []byte(fmt.Sprintf("[%s](%s)", visibleText, linkText+altText))
    })
    err := error(nil)
    if len(errors) != 0 {
        err = fmt.Errorf("%s", strings.Join(errors, "\n"))
    }
    return output, err
}

func makeRepoRelative(filePath string) string {
    realRoot := path.Join(*rootDir, *repoRoot) + "/"
    return strings.TrimPrefix(filePath, realRoot)
}

// We have to append together before path.Clean will be able to tell that stuff
// like ../docs isn't needed.
func cleanPath(dirPath, linkPath string) string {
    clean := path.Clean(path.Join(dirPath, linkPath))
    if strings.HasPrefix(clean, dirPath+"/") {
        out := strings.TrimPrefix(clean, dirPath+"/")
        if out != linkPath {
            fmt.Printf("%s -> %s\n", linkPath, out)
        }
        return out
    }
    return linkPath
}

func checkPath(filePath, linkPath string) (newPath string, ok bool) {
    dir := path.Dir(filePath)
    absFilePrefixes := []string{
        "/GoogleCloudPlatform/kubernetes/blob/master/",
        "/GoogleCloudPlatform/kubernetes/tree/master/",
    }
    for _, prefix := range absFilePrefixes {
        if strings.HasPrefix(linkPath, prefix) {
            linkPath = strings.TrimPrefix(linkPath, prefix)
            // Now linkPath is relative to the root of the repo. The below
            // loop that adds ../ at the beginning of the path should find
            // the right path.
            break
        }
    }
    if strings.HasPrefix(linkPath, "/") {
        // These links might go to e.g. the github issues page, or a
        // file at a particular revision, or another github project
        // entirely.
        return linkPath, true
    }
    linkPath = cleanPath(dir, linkPath)

    // Fast exit if the link is already correct.
    if info, err := os.Stat(path.Join(dir, linkPath)); err == nil {
        if info.IsDir() {
            return linkPath + "/", true
        }
        return linkPath, true
    }

    for strings.HasPrefix(linkPath, "../") {
        linkPath = strings.TrimPrefix(linkPath, "../")
    }

    // Fix - vs _ automatically
    nameMungers := []func(string) string{
        func(s string) string { return s },
        func(s string) string { return strings.Replace(s, "-", "_", -1) },
        func(s string) string { return strings.Replace(s, "_", "-", -1) },
    }
    // Fix being moved into/out of admin (replace "admin" with directory
    // you're doing mass movements to/from).
    pathMungers := []func(string) string{
        func(s string) string { return s },
        func(s string) string { return path.Join("admin", s) },
        func(s string) string { return strings.TrimPrefix(s, "admin/") },
    }

    for _, namer := range nameMungers {
        for _, pather := range pathMungers {
            newPath = pather(namer(linkPath))
            for i := 0; i < 7; i++ {
                // The file must exist.
                target := path.Join(dir, newPath)
                if info, err := os.Stat(target); err == nil {
                    if info.IsDir() {
                        return newPath + "/", true
                    }
                    return cleanPath(dir, newPath), true
                }
                newPath = path.Join("..", newPath)
            }
        }
    }
    return linkPath, false
}
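checkLinks relies on replaceNonPreformattedRegexp, which util.go defines past the point where this view is truncated. A sketch of its assumed behavior, inferred from the call site above and from the commented-out line in analytics.go: apply the replacement function only to the non-preformatted blocks and pass ``` blocks through untouched. The body below is an assumption, not the commit's actual code:

// Sketch only: assumed shape of replaceNonPreformattedRegexp, built on
// splitByPreformatted and fileBlock, which do appear elsewhere in this diff.
func replaceNonPreformattedRegexpSketch(fileBytes []byte, re *regexp.Regexp, fn func([]byte) []byte) []byte {
    fbs := splitByPreformatted(fileBytes)
    output := []byte(nil)
    for _, block := range fbs {
        if block.preformatted {
            output = append(output, block.data...) // leave code blocks alone
        } else {
            output = append(output, re.ReplaceAllFunc(block.data, fn)...)
        }
    }
    return output
}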
@ -22,6 +22,7 @@ import (
    "fmt"
    "io/ioutil"
    "os"
    "path"
    "path/filepath"
    "strings"

@ -29,60 +30,140 @@ import (
)

var (
    verify = flag.Bool("verify", false, "Exit with status 1 if files would have needed changes but do not change.")
    rootDir = flag.String("root-dir", "", "Root directory containing documents to be processed.")
    verify   = flag.Bool("verify", false, "Exit with status 1 if files would have needed changes but do not change.")
    rootDir  = flag.String("root-dir", "", "Root directory containing documents to be processed.")
    repoRoot = flag.String("repo-root", "..", `Appended to --root-dir to get the repository root.
It's done this way so that generally you just have to set --root-dir.
Examples:
 * --root-dir=docs/ --repo-root=.. means the repository root is ./
 * --root-dir=/usr/local/long/path/repo/docs/ --repo-root=.. means the repository root is /usr/local/long/path/repo/
 * --root-dir=/usr/local/long/path/repo/docs/admin --repo-root=../.. means the repository root is /usr/local/long/path/repo/`)
    skipMunges = flag.String("skip-munges", "", "Comma-separated list of munges to *not* run. Available munges are: "+availableMungeList)

    ErrChangesNeeded = errors.New("mungedocs: changes required")

    // All of the munge operations to perform.
    // TODO: allow selection from command line. (e.g., just check links in the examples directory.)
    allMunges = []munge{
        {"table-of-contents", updateTOC},
        {"unversioned-warning", updateUnversionedWarning},
        {"check-links", checkLinks},
        {"blank-lines-surround-preformatted", checkPreformatted},
        {"header-lines", checkHeaderLines},
        {"analytics", checkAnalytics},
        {"kubectl-dash-f", checkKubectlFileTargets},
        {"sync-examples", syncExamples},
    }
    availableMungeList = func() string {
        names := []string{}
        for _, m := range allMunges {
            names = append(names, m.name)
        }
        return strings.Join(names, ",")
    }()
)

func visitAndVerify(path string, i os.FileInfo, e error) error {
    return visitAndChangeOrVerify(path, i, e, false)
// a munge processes a document, returning an updated document xor an error.
// The fn is NOT allowed to mutate 'before', if changes are needed it must copy
// data into a new byte array and return that.
type munge struct {
    name string
    fn   func(filePath string, before []byte) (after []byte, err error)
}

func visitAndChange(path string, i os.FileInfo, e error) error {
    return visitAndChangeOrVerify(path, i, e, true)
type fileProcessor struct {
    // Which munge functions should we call?
    munges []munge

    // Are we allowed to make changes?
    verifyOnly bool
}

// Either change a file or verify that it needs no changes (according to modify argument)
func visitAndChangeOrVerify(path string, i os.FileInfo, e error, modify bool) error {
func (f fileProcessor) visit(path string) error {
    if !strings.HasSuffix(path, ".md") {
        return nil
    }
    file, err := os.Open(path)
    if err != nil {
        return err
    }
    defer file.Close()

    before, err := ioutil.ReadAll(file)
    fileBytes, err := ioutil.ReadFile(path)
    if err != nil {
        return err
    }

    after, err := updateTOC(before)
    if err != nil {
        return err
    }
    if modify {
        // Write out new file with any changes.
        if !bytes.Equal(after, before) {
            file.Close()
            ioutil.WriteFile(path, after, 0644)
    modificationsMade := false
    errFound := false
    filePrinted := false
    for _, munge := range f.munges {
        after, err := munge.fn(path, fileBytes)
        if err != nil || !bytes.Equal(after, fileBytes) {
            if !filePrinted {
                fmt.Printf("%s\n----\n", path)
                filePrinted = true
            }
            fmt.Printf("%s:\n", munge.name)
            if err != nil {
                fmt.Println(err)
                errFound = true
            } else {
                fmt.Println("contents were modified")
                modificationsMade = true
            }
            fmt.Println("")
        }
    } else {
        // Just verify that there are no changes.
        if !bytes.Equal(after, before) {
        fileBytes = after
    }

    // Write out new file with any changes.
    if modificationsMade {
        if f.verifyOnly {
            // We're not allowed to make changes.
            return ErrChangesNeeded
        }
        ioutil.WriteFile(path, fileBytes, 0644)
    }
    if errFound {
        return ErrChangesNeeded
    }

    // TODO(erictune): more types of passes, such as:
    // Linkify terms
    // Verify links point to files.

    return nil
}

func newWalkFunc(fp *fileProcessor, changesNeeded *bool) filepath.WalkFunc {
    return func(path string, info os.FileInfo, err error) error {
        if err := fp.visit(path); err != nil {
            *changesNeeded = true
            if err != ErrChangesNeeded {
                return err
            }
        }
        return nil
    }
}

func wantedMunges() (filtered []munge) {
    skipList := strings.Split(*skipMunges, ",")
    skipped := map[string]bool{}
    for _, m := range skipList {
        if len(m) > 0 {
            skipped[m] = true
        }
    }
    for _, m := range allMunges {
        if !skipped[m.name] {
            filtered = append(filtered, m)
        } else {
            // Remove from the map so we can verify that everything
            // requested was in fact valid.
            delete(skipped, m.name)
        }
    }
    if len(skipped) != 0 {
        fmt.Fprintf(os.Stderr, "ERROR: requested to skip %v, but these are not valid munges. (valid: %v)\n", skipped, availableMungeList)
        os.Exit(1)
    }
    return filtered
}

func main() {
    flag.Parse()

@ -91,31 +172,39 @@ func main() {
        os.Exit(1)
    }

    // For each markdown file under source docs root, process the doc.
    // If any error occurs, will exit with failure.
    // If verify is true, then status is 0 for no changes needed, 1 for changes needed
    // and >1 for an error during processing.
    // If verify is false, then status is 0 if changes successfully made or no changes needed,
    // 1 if changes were needed but require human intervention, and >1 for an unexpected
    // error during processing.
    var err error
    if *verify {
        err = filepath.Walk(*rootDir, visitAndVerify)
    } else {
        err = filepath.Walk(*rootDir, visitAndChange)
    }
    if err != nil {
        if err == ErrChangesNeeded {
            if *verify {
                fmt.Fprintf(os.Stderr,
                    "Some changes needed but not made due to --verify=true\n")
            } else {
                fmt.Fprintf(os.Stderr,
                    "Some changes needed but human intervention is required\n")
            }
            os.Exit(1)
        }
        fmt.Fprintf(os.Stderr, "filepath.Walk() returned %v\n", err)
    // Split the root dir of "foo/docs" into "foo" and "docs". We
    // chdir into "foo" and walk "docs" so the walk is always at a
    // relative path.
    stem, leaf := path.Split(strings.TrimRight(*rootDir, "/"))
    if err := os.Chdir(stem); err != nil {
        fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
        os.Exit(2)
    }

    fp := fileProcessor{
        munges:     wantedMunges(),
        verifyOnly: *verify,
    }

    // For each markdown file under source docs root, process the doc.
    // - If any error occurs: exit with failure (exit >1).
    // - If verify is true: exit 0 if no changes needed, exit 1 if changes
    //   needed.
    // - If verify is false: exit 0 if changes successfully made or no
    //   changes needed, exit 1 if manual changes are needed.
    var changesNeeded bool

    err := filepath.Walk(leaf, newWalkFunc(&fp, &changesNeeded))
    if err != nil {
        fmt.Fprintf(os.Stderr, "ERROR: %v\n", err)
        os.Exit(2)
    }
    if changesNeeded {
        if *verify {
            fmt.Fprintf(os.Stderr, "FAIL: changes needed but not made due to --verify\n")
        } else {
            fmt.Fprintf(os.Stderr, "FAIL: some manual changes are still required.\n")
        }
        os.Exit(1)
    }
}
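The munge/fileProcessor contract introduced above is deliberately small: a munge is a pure function over the file bytes plus one entry in allMunges. A hypothetical extra munge (not part of this commit; the name and registration line are invented) would look like this:

// Hypothetical example of the munge contract: trim trailing spaces from every
// line. Per the type's comment, it copies rather than mutating 'before'.
func trimTrailingSpace(filePath string, before []byte) ([]byte, error) {
    lines := splitLines(before)
    out := []string{}
    for _, l := range lines {
        out = append(out, strings.TrimRight(l, " "))
    }
    return []byte(strings.Join(out, "\n") + "\n"), nil
}

// It would be registered as {"trim-trailing-space", trimTrailingSpace} in
// allMunges, and could then be disabled with --skip-munges=trim-trailing-space.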
55
cmd/mungedocs/preformatted.go
Normal file
@ -0,0 +1,55 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import "bytes"

// Blocks of ``` need to have blank lines on both sides or they don't look
// right in HTML.
func checkPreformatted(filePath string, fileBytes []byte) ([]byte, error) {
    f := splitByPreformatted(fileBytes)
    f = append(fileBlocks{{false, []byte{}}}, f...)
    f = append(f, fileBlock{false, []byte{}})

    output := []byte(nil)
    for i := 1; i < len(f)-1; i++ {
        prev := &f[i-1]
        block := &f[i]
        next := &f[i+1]
        if !block.preformatted {
            continue
        }
        neededSuffix := []byte("\n\n")
        for !bytes.HasSuffix(prev.data, neededSuffix) {
            prev.data = append(prev.data, '\n')
        }
        for !bytes.HasSuffix(block.data, neededSuffix) {
            block.data = append(block.data, '\n')
            if bytes.HasPrefix(next.data, []byte("\n")) {
                // don't change the number of newlines unless needed.
                next.data = next.data[1:]
                if len(next.data) == 0 {
                    f = append(f[:i+1], f[i+2:]...)
                }
            }
        }
    }
    for _, block := range f {
        output = append(output, block.data...)
    }
    return output, nil
}
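To see what the blank-line rule does to a cramped fenced block, here is an illustrative call (not from the commit). It assumes splitByPreformatted, defined in the truncated part of util.go, keeps the fences inside the preformatted block:

// Illustrative only: blank lines are inserted around the ``` block.
func demoPreformatted() {
    in := []byte("intro\n```\ncode\n```\noutro\n")
    out, _ := checkPreformatted("doc.md", in)
    fmt.Printf("%q\n", out) // expected: "intro\n\n```\ncode\n```\n\noutro\n"
}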
10
cmd/mungedocs/testdata/pod.yaml
vendored
Normal file
@ -0,0 +1,10 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx
    ports:
    - containerPort: 80
@ -20,72 +20,32 @@ import (
    "bufio"
    "bytes"
    "fmt"
    "regexp"
    "strings"
)

const tocMungeTag = "GENERATED_TOC"

// inserts/updates a table of contents in markdown file.
//
// First, builds a ToC.
// Then, finds <!-- BEGIN GENERATED TOC --> and <!-- END GENERATED TOC -->, and replaces anything between those with
// Then, finds the magic macro block tags and replaces anything between those with
// the ToC, thereby updating any previously inserted ToC.
//
// TODO(erictune): put this in own package with tests
func updateTOC(markdown []byte) ([]byte, error) {
func updateTOC(filePath string, markdown []byte) ([]byte, error) {
    toc, err := buildTOC(markdown)
    if err != nil {
        return nil, err
    }
    updatedMarkdown, err := updateMacroBlock(markdown, "<!-- BEGIN GENERATED TOC -->", "<!-- END GENERATED TOC -->", string(toc))
    lines := splitLines(markdown)
    updatedMarkdown, err := updateMacroBlock(lines, beginMungeTag(tocMungeTag), endMungeTag(tocMungeTag), string(toc))
    if err != nil {
        return nil, err
    }
    return updatedMarkdown, nil
}

// Replaces the text between matching "beginMark" and "endMark" within "document" with "insertThis".
//
// Delimiters should occupy their own line.
// Returns a copy of the document with the modifications.
func updateMacroBlock(document []byte, beginMark, endMark, insertThis string) ([]byte, error) {
    var buffer bytes.Buffer
    lines := strings.Split(string(document), "\n")
    // Skip trailing empty string from Split-ing
    if len(lines) > 0 && lines[len(lines)-1] == "" {
        lines = lines[:len(lines)-1]
    }
    betweenBeginAndEnd := false
    for _, line := range lines {
        trimmedLine := strings.Trim(line, " \n")
        if trimmedLine == beginMark {
            if betweenBeginAndEnd {
                return nil, fmt.Errorf("found second begin mark while updating macro blocks")
            }
            betweenBeginAndEnd = true
            buffer.WriteString(line)
            buffer.WriteString("\n")
        } else if trimmedLine == endMark {
            if !betweenBeginAndEnd {
                return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks")
            }
            buffer.WriteString(insertThis)
            // Extra newline avoids github markdown bug where comment ends up on same line as last bullet.
            buffer.WriteString("\n")
            buffer.WriteString(line)
            buffer.WriteString("\n")
            betweenBeginAndEnd = false
        } else {
            if !betweenBeginAndEnd {
                buffer.WriteString(line)
                buffer.WriteString("\n")
            }
        }
    }
    if betweenBeginAndEnd {
        return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
    }
    return buffer.Bytes(), nil
}

// builds table of contents for markdown file
//
// First scans for all section headers (lines that begin with "#" but not within code quotes)
@ -94,18 +54,35 @@ func updateMacroBlock(document []byte, beginMark, endMark, insertThis string) ([
// builds the ToC.
func buildTOC(markdown []byte) ([]byte, error) {
    var buffer bytes.Buffer
    buffer.WriteString("\n")
    scanner := bufio.NewScanner(bytes.NewReader(markdown))
    inBlockQuotes := false
    for scanner.Scan() {
        line := scanner.Text()
        match, err := regexp.Match("^```", []byte(line))
        if err != nil {
            return nil, err
        }
        if match {
            inBlockQuotes = !inBlockQuotes
            continue
        }
        if inBlockQuotes {
            continue
        }
        noSharps := strings.TrimLeft(line, "#")
        numSharps := len(line) - len(noSharps)
        heading := strings.Trim(noSharps, " \n")
        if numSharps > 0 {
            indent := strings.Repeat("  ", numSharps-1)
            bookmark := strings.Replace(strings.ToLower(heading), " ", "-", -1)
            // remove symbols (except for -) in bookmarks
            r := regexp.MustCompile("[^A-Za-z0-9-]")
            bookmark = r.ReplaceAllString(bookmark, "")
            tocLine := fmt.Sprintf("%s- [%s](#%s)\n", indent, heading, bookmark)
            buffer.WriteString(tocLine)
        }

    }
    if err := scanner.Err(); err != nil {
        return []byte{}, err
@ -22,54 +22,25 @@ import (
    "github.com/stretchr/testify/assert"
)

func Test_updateMacroBlock(t *testing.T) {
    var cases = []struct {
        in  string
        out string
    }{
        {"", ""},
        {"Lorem ipsum\ndolor sit amet\n",
            "Lorem ipsum\ndolor sit amet\n"},
        {"Lorem ipsum \n BEGIN\ndolor\nEND\nsit amet\n",
            "Lorem ipsum \n BEGIN\nfoo\n\nEND\nsit amet\n"},
    }
    for _, c := range cases {
        actual, err := updateMacroBlock([]byte(c.in), "BEGIN", "END", "foo\n")
        assert.NoError(t, err)
        if c.out != string(actual) {
            t.Errorf("Expected '%v' but got '%v'", c.out, string(actual))
        }
    }
}

func Test_updateMacroBlock_errors(t *testing.T) {
    var cases = []struct {
        in string
    }{
        {"BEGIN\n"},
        {"blah\nBEGIN\nblah"},
        {"END\n"},
        {"blah\nEND\nblah\n"},
        {"END\nBEGIN"},
        {"BEGIN\nEND\nEND"},
        {"BEGIN\nBEGIN\nEND"},
        {"BEGIN\nBEGIN\nEND\nEND"},
    }
    for _, c := range cases {
        _, err := updateMacroBlock([]byte(c.in), "BEGIN", "END", "foo")
        assert.Error(t, err)
    }
}

func Test_buildTOC(t *testing.T) {
    var cases = []struct {
        in  string
        out string
    }{
        {"", ""},
        {"Lorem ipsum\ndolor sit amet\n", ""},
        {"# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n",
            "- [Title](#title)\n  - [Section Heading](#section-heading)\n"},
        {"", "\n"},
        {"Lorem ipsum\ndolor sit amet\n", "\n"},
        {
            "# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n",
            "\n- [Title](#title)\n  - [Section Heading](#section-heading)\n",
        },
        {
            "# Title\nLorem ipsum \n## Section Heading\ndolor sit amet\n```bash\n#!/bin/sh\n```",
            "\n- [Title](#title)\n  - [Section Heading](#section-heading)\n",
        },
        {
            "# Title\nLorem ipsum \n## Section Heading\n### Ok, why doesn't this work? ...add 4 *more* `symbols`!\ndolor sit amet\n",
            "\n- [Title](#title)\n  - [Section Heading](#section-heading)\n    - [Ok, why doesn't this work? ...add 4 *more* `symbols`!](#ok-why-doesnt-this-work-add-4-more-symbols)\n",
        },
    }
    for _, c := range cases {
        actual, err := buildTOC([]byte(c.in))
@ -86,13 +57,17 @@ func Test_updateTOC(t *testing.T) {
        out string
    }{
        {"", ""},
        {"Lorem ipsum\ndolor sit amet\n",
            "Lorem ipsum\ndolor sit amet\n"},
        {"# Title\nLorem ipsum \n**table of contents**\n<!-- BEGIN GENERATED TOC -->\nold cruft\n<!-- END GENERATED TOC -->\n## Section Heading\ndolor sit amet\n",
            "# Title\nLorem ipsum \n**table of contents**\n<!-- BEGIN GENERATED TOC -->\n- [Title](#title)\n  - [Section Heading](#section-heading)\n\n<!-- END GENERATED TOC -->\n## Section Heading\ndolor sit amet\n"},
        {
            "Lorem ipsum\ndolor sit amet\n",
            "Lorem ipsum\ndolor sit amet\n",
        },
        {
            "# Title\nLorem ipsum \n**table of contents**\n<!-- BEGIN MUNGE: GENERATED_TOC -->\nold cruft\n<!-- END MUNGE: GENERATED_TOC -->\n## Section Heading\ndolor sit amet\n",
            "# Title\nLorem ipsum \n**table of contents**\n<!-- BEGIN MUNGE: GENERATED_TOC -->\n\n- [Title](#title)\n  - [Section Heading](#section-heading)\n\n<!-- END MUNGE: GENERATED_TOC -->\n## Section Heading\ndolor sit amet\n",
        },
    }
    for _, c := range cases {
        actual, err := updateTOC([]byte(c.in))
        actual, err := updateTOC("filename.md", []byte(c.in))
        assert.NoError(t, err)
        if c.out != string(actual) {
            t.Errorf("Expected TOC '%v' but got '%v'", c.out, string(actual))
72
cmd/mungedocs/unversioned_warning.go
Normal file
@ -0,0 +1,72 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import "fmt"

const unversionedWarningTag = "UNVERSIONED_WARNING"

var beginUnversionedWarning = beginMungeTag(unversionedWarningTag)
var endUnversionedWarning = endMungeTag(unversionedWarningTag)

const unversionedWarningFmt = `
<!-- BEGIN STRIP_FOR_RELEASE -->

<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">

<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>

If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.

<strong>
The latest 1.0.x release of this document can be found
[here](http://releases.k8s.io/release-1.0/%s).

Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
</strong>
--

<!-- END STRIP_FOR_RELEASE -->
`

func makeUnversionedWarning(fileName string) string {
    return fmt.Sprintf(unversionedWarningFmt, fileName)
}

// inserts/updates a warning for unversioned docs
func updateUnversionedWarning(file string, markdown []byte) ([]byte, error) {
    lines := splitLines(markdown)
    if hasLine(lines, "<!-- TAG IS_VERSIONED -->") {
        // No warnings on release branches
        return markdown, nil
    }
    if !hasMacroBlock(lines, beginUnversionedWarning, endUnversionedWarning) {
        lines = append([]string{beginUnversionedWarning, endUnversionedWarning}, lines...)
    }
    return updateMacroBlock(lines, beginUnversionedWarning, endUnversionedWarning, makeUnversionedWarning(file))
}
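updateUnversionedWarning calls hasLine and hasMacroBlock, two util.go helpers that sit past the truncation point of this view. A sketch of their assumed behavior, inferred from the call site above and consistent with the trimming done in updateMacroBlock:

// Sketch only: assumed shapes of hasLine and hasMacroBlock; the real
// implementations in util.go are not shown in this diff.
func hasLineSketch(lines []string, needle string) bool {
    for _, line := range lines {
        if strings.Trim(line, " \n") == needle {
            return true
        }
    }
    return false
}

func hasMacroBlockSketch(lines []string, begin, end string) bool {
    return hasLineSketch(lines, begin) && hasLineSketch(lines, end)
}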
64
cmd/mungedocs/unversioned_warning_test.go
Normal file
@ -0,0 +1,64 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestUnversionedWarning(t *testing.T) {
    warningBlock := beginUnversionedWarning + "\n" + makeUnversionedWarning("filename.md") + "\n" + endUnversionedWarning + "\n"
    var cases = []struct {
        in  string
        out string
    }{
        {"", warningBlock},
        {
            "Foo\nBar\n",
            warningBlock + "Foo\nBar\n",
        },
        {
            "Foo\n<!-- TAG IS_VERSIONED -->\nBar",
            "Foo\n<!-- TAG IS_VERSIONED -->\nBar",
        },
        {
            beginUnversionedWarning + "\n" + endUnversionedWarning + "\n",
            warningBlock,
        },
        {
            beginUnversionedWarning + "\n" + "something\n" + endUnversionedWarning + "\n",
            warningBlock,
        },
        {
            "Foo\n" + beginUnversionedWarning + "\n" + endUnversionedWarning + "\nBar\n",
            "Foo\n" + warningBlock + "Bar\n",
        },
        {
            "Foo\n" + warningBlock + "Bar\n",
            "Foo\n" + warningBlock + "Bar\n",
        },
    }
    for i, c := range cases {
        actual, err := updateUnversionedWarning("filename.md", []byte(c.in))
        assert.NoError(t, err)
        if string(actual) != c.out {
            t.Errorf("case[%d]: expected %q got %q", i, c.out, string(actual))
        }
    }
}
cmd/mungedocs/util.go (new file, 183 lines)
@@ -0,0 +1,183 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "bytes"
    "fmt"
    "regexp"
    "strings"
)

// Splits a document up into a slice of lines.
func splitLines(document []byte) []string {
    lines := strings.Split(string(document), "\n")
    // Skip trailing empty string from Split-ing
    if len(lines) > 0 && lines[len(lines)-1] == "" {
        lines = lines[:len(lines)-1]
    }
    return lines
}
// Replaces the text between matching "beginMark" and "endMark" within the
// document represented by "lines" with "insertThis".
//
// Delimiters should each occupy their own line.
// Returns a copy of the document with the modifications applied.
func updateMacroBlock(lines []string, beginMark, endMark, insertThis string) ([]byte, error) {
    var buffer bytes.Buffer
    betweenBeginAndEnd := false
    for _, line := range lines {
        trimmedLine := strings.Trim(line, " \n")
        if trimmedLine == beginMark {
            if betweenBeginAndEnd {
                return nil, fmt.Errorf("found second begin mark while updating macro blocks")
            }
            betweenBeginAndEnd = true
            buffer.WriteString(line)
            buffer.WriteString("\n")
        } else if trimmedLine == endMark {
            if !betweenBeginAndEnd {
                return nil, fmt.Errorf("found end mark without begin mark while updating macro blocks")
            }
            buffer.WriteString(insertThis)
            // Extra newline avoids github markdown bug where comment ends up on same line as last bullet.
            buffer.WriteString("\n")
            buffer.WriteString(line)
            buffer.WriteString("\n")
            betweenBeginAndEnd = false
        } else {
            if !betweenBeginAndEnd {
                buffer.WriteString(line)
                buffer.WriteString("\n")
            }
        }
    }
    if betweenBeginAndEnd {
        return nil, fmt.Errorf("never found closing end mark while updating macro blocks")
    }
    return buffer.Bytes(), nil
}
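// Editorial usage sketch (not part of the original file): rebuilding a munge
// block in place with the tag helpers defined further down. The "TOC" tag and
// replacement text are illustrative only.
//
//    lines := splitLines([]byte(doc))
//    out, err := updateMacroBlock(lines, beginMungeTag("TOC"), endMungeTag("TOC"), "- [Overview](overview.md)")
//    // On success, out is the document with the text between the marks
//    // replaced and the marks themselves preserved.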
// Tests that a document, represented as a slice of lines, has a line. Ignores
// leading and trailing space.
func hasLine(lines []string, needle string) bool {
    for _, line := range lines {
        trimmedLine := strings.Trim(line, " \n")
        if trimmedLine == needle {
            return true
        }
    }
    return false
}

// Tests that a document, represented as a slice of lines, has a macro block.
func hasMacroBlock(lines []string, begin string, end string) bool {
    foundBegin := false
    for _, line := range lines {
        trimmedLine := strings.Trim(line, " \n")
        switch {
        case !foundBegin && trimmedLine == begin:
            foundBegin = true
        case foundBegin && trimmedLine == end:
            return true
        }
    }
    return false
}

// Returns the canonical begin-tag for a given description. This does not
// include the trailing newline.
func beginMungeTag(desc string) string {
    return fmt.Sprintf("<!-- BEGIN MUNGE: %s -->", desc)
}

// Returns the canonical end-tag for a given description. This does not
// include the trailing newline.
func endMungeTag(desc string) string {
    return fmt.Sprintf("<!-- END MUNGE: %s -->", desc)
}
// Calls 'replace' on all sections of the document that are not inside
// ``` / ``` blocks, so that you don't get false positives inside those blocks.
func replaceNonPreformatted(input []byte, replace func([]byte) []byte) []byte {
    f := splitByPreformatted(input)
    output := []byte(nil)
    for _, block := range f {
        if block.preformatted {
            output = append(output, block.data...)
        } else {
            output = append(output, replace(block.data)...)
        }
    }
    return output
}

type fileBlock struct {
    preformatted bool
    data         []byte
}

type fileBlocks []fileBlock

var (
    // Finds all preformatted block start/stops.
    preformatRE    = regexp.MustCompile("^\\s*```")
    notPreformatRE = regexp.MustCompile("^\\s*```.*```")
)

func splitByPreformatted(input []byte) fileBlocks {
    f := fileBlocks{}

    cur := []byte(nil)
    preformatted := false
    // SplitAfter keeps the newline, so you don't have to worry about
    // omitting it on the last line or anything. Also, the documentation
    // claims it's unicode safe.
    for _, line := range bytes.SplitAfter(input, []byte("\n")) {
        if !preformatted {
            if preformatRE.Match(line) && !notPreformatRE.Match(line) {
                if len(cur) > 0 {
                    f = append(f, fileBlock{false, cur})
                }
                cur = []byte{}
                preformatted = true
            }
            cur = append(cur, line...)
        } else {
            cur = append(cur, line...)
            if preformatRE.Match(line) {
                if len(cur) > 0 {
                    f = append(f, fileBlock{true, cur})
                }
                cur = []byte{}
                preformatted = false
            }
        }
    }
    if len(cur) > 0 {
        f = append(f, fileBlock{preformatted, cur})
    }
    return f
}

// As above, but further uses exp to parse the non-preformatted sections.
func replaceNonPreformattedRegexp(input []byte, exp *regexp.Regexp, replace func([]byte) []byte) []byte {
    return replaceNonPreformatted(input, func(in []byte) []byte {
        return exp.ReplaceAllFunc(in, replace)
    })
}
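To illustrate how the preformatted-block helpers compose, here is a hypothetical snippet, not part of this commit: the regexp and replacement are invented for the example, and the function assumes it lives alongside util.go's imports.

// capitalizeOutsideCode rewrites "kubernetes" to "Kubernetes" everywhere
// except inside ``` fenced blocks, which are left untouched.
func capitalizeOutsideCode(input []byte) []byte {
    re := regexp.MustCompile(`\bkubernetes\b`)
    return replaceNonPreformattedRegexp(input, re, func(match []byte) []byte {
        return []byte("Kubernetes")
    })
}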
cmd/mungedocs/util_test.go (new file, 179 lines)
@@ -0,0 +1,179 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "reflect"
    "testing"

    "github.com/stretchr/testify/assert"
)

func Test_updateMacroBlock(t *testing.T) {
    var cases = []struct {
        in  string
        out string
    }{
        {"", ""},
        {"Lorem ipsum\ndolor sit amet\n",
            "Lorem ipsum\ndolor sit amet\n"},
        {"Lorem ipsum \n BEGIN\ndolor\nEND\nsit amet\n",
            "Lorem ipsum \n BEGIN\nfoo\n\nEND\nsit amet\n"},
    }
    for _, c := range cases {
        actual, err := updateMacroBlock(splitLines([]byte(c.in)), "BEGIN", "END", "foo\n")
        assert.NoError(t, err)
        if c.out != string(actual) {
            t.Errorf("Expected '%v' but got '%v'", c.out, string(actual))
        }
    }
}

func Test_updateMacroBlock_errors(t *testing.T) {
    var cases = []struct {
        in string
    }{
        {"BEGIN\n"},
        {"blah\nBEGIN\nblah"},
        {"END\n"},
        {"blah\nEND\nblah\n"},
        {"END\nBEGIN"},
        {"BEGIN\nEND\nEND"},
        {"BEGIN\nBEGIN\nEND"},
        {"BEGIN\nBEGIN\nEND\nEND"},
    }
    for _, c := range cases {
        _, err := updateMacroBlock(splitLines([]byte(c.in)), "BEGIN", "END", "foo")
        assert.Error(t, err)
    }
}

func TestHasLine(t *testing.T) {
    cases := []struct {
        lines    []string
        needle   string
        expected bool
    }{
        {[]string{"abc", "def", "ghi"}, "abc", true},
        {[]string{" abc", "def", "ghi"}, "abc", true},
        {[]string{"abc ", "def", "ghi"}, "abc", true},
        {[]string{"\n abc", "def", "ghi"}, "abc", true},
        {[]string{"abc \n", "def", "ghi"}, "abc", true},
        {[]string{"abc", "def", "ghi"}, "def", true},
        {[]string{"abc", "def", "ghi"}, "ghi", true},
        {[]string{"abc", "def", "ghi"}, "xyz", false},
    }

    for i, c := range cases {
        if hasLine(c.lines, c.needle) != c.expected {
            t.Errorf("case[%d]: %q, expected %t, got %t", i, c.needle, c.expected, !c.expected)
        }
    }
}

func TestHasMacroBlock(t *testing.T) {
    cases := []struct {
        lines    []string
        begin    string
        end      string
        expected bool
    }{
        {[]string{"<<<", ">>>"}, "<<<", ">>>", true},
        {[]string{"<<<", "abc", ">>>"}, "<<<", ">>>", true},
        {[]string{"<<<", "<<<", "abc", ">>>"}, "<<<", ">>>", true},
        {[]string{"<<<", "abc", ">>>", ">>>"}, "<<<", ">>>", true},
        {[]string{"<<<", ">>>", "<<<", ">>>"}, "<<<", ">>>", true},
        {[]string{"<<<"}, "<<<", ">>>", false},
        {[]string{">>>"}, "<<<", ">>>", false},
        {[]string{"<<<", "abc"}, "<<<", ">>>", false},
        {[]string{"abc", ">>>"}, "<<<", ">>>", false},
    }

    for i, c := range cases {
        if hasMacroBlock(c.lines, c.begin, c.end) != c.expected {
            t.Errorf("case[%d]: %q,%q, expected %t, got %t", i, c.begin, c.end, c.expected, !c.expected)
        }
    }
}

func TestReplaceNonPreformatted(t *testing.T) {
    cases := []struct {
        in  string
        out string
    }{
        {"aoeu", ""},
        {"aoeu\n```\naoeu\n```\naoeu", "```\naoeu\n```\n"},
        {"ao\neu\n```\naoeu\n\n\n", "```\naoeu\n\n\n"},
        {"aoeu ```aoeu``` aoeu", ""},
    }

    for i, c := range cases {
        out := string(replaceNonPreformatted([]byte(c.in), func([]byte) []byte { return nil }))
        if out != c.out {
            t.Errorf("%v: got %q, wanted %q", i, out, c.out)
        }
    }
}

func TestReplaceNonPreformattedNoChange(t *testing.T) {
    cases := []struct {
        in string
    }{
        {"aoeu"},
        {"aoeu\n```\naoeu\n```\naoeu"},
        {"aoeu\n\n```\n\naoeu\n\n```\n\naoeu"},
        {"ao\neu\n```\naoeu\n\n\n"},
        {"aoeu ```aoeu``` aoeu"},
        {"aoeu\n```\naoeu\n```"},
        {"aoeu\n```\naoeu\n```\n"},
        {"aoeu\n```\naoeu\n```\n\n"},
    }

    for i, c := range cases {
        out := string(replaceNonPreformatted([]byte(c.in), func(in []byte) []byte { return in }))
        if out != c.in {
            t.Errorf("%v: got %q, wanted %q", i, out, c.in)
        }
    }
}

func TestReplaceNonPreformattedCallOrder(t *testing.T) {
    cases := []struct {
        in     string
        expect []string
    }{
        {"aoeu", []string{"aoeu"}},
        {"aoeu\n```\naoeu\n```\naoeu", []string{"aoeu\n", "aoeu"}},
        {"aoeu\n\n```\n\naoeu\n\n```\n\naoeu", []string{"aoeu\n\n", "\naoeu"}},
        {"ao\neu\n```\naoeu\n\n\n", []string{"ao\neu\n"}},
        {"aoeu ```aoeu``` aoeu", []string{"aoeu ```aoeu``` aoeu"}},
        {"aoeu\n```\naoeu\n```", []string{"aoeu\n"}},
        {"aoeu\n```\naoeu\n```\n", []string{"aoeu\n"}},
        {"aoeu\n```\naoeu\n```\n\n", []string{"aoeu\n", "\n"}},
    }

    for i, c := range cases {
        got := []string{}
        replaceNonPreformatted([]byte(c.in), func(in []byte) []byte {
            got = append(got, string(in))
            return in
        })
        if e, a := c.expect, got; !reflect.DeepEqual(e, a) {
            t.Errorf("%v: got %q, wanted %q", i, a, e)
        }
    }
}
@@ -1,54 +1,59 @@
# Kubernetes Ansible

This playbook helps you to set up a Kubernetes cluster on machines where you
can't or don't want to use the salt scripts and cluster up/down tools. They
can be real hardware, VMs, things in a public cloud, etc.
This playbook and set of roles set up a Kubernetes cluster onto machines. They
can be real hardware, VMs, things in a public cloud, etc. Anything that you can connect to via SSH.

## Before starting

* Record the IP address/hostname of the machine you want to be your master (only a single master is supported)
* Record the IP address/hostname of the machine you want to be your etcd server (often the same as the master; only one)
* Record the IP addresses/hostnames of the machines you want to be your nodes. (the master can also be a node)
* Make sure the machine you run Ansible from has Ansible 1.9 and python-netaddr installed.

### Configure the inventory file
## Setup

Stick the system information gathered above into the 'inventory' file.
### Configure inventory

### Configure your cluster
Add the system information gathered above into the 'inventory' file, or create a new inventory file for the cluster.
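For example, a minimal inventory might look like the following (a hypothetical sketch: the hostnames are placeholders, and the group names are assumed to mirror the etcd/masters/nodes tags used by setup.sh below):

[masters]
kube-master.example.com

[etcd]
kube-master.example.com

[nodes]
kube-node-01.example.com
kube-node-02.example.com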
You will want to look through all of the options in `group_vars/all.yml` and
set the variables to reflect your needs. The options should be described there
### Configure Cluster options

Look through all of the options in `group_vars/all.yml` and
set the variables to reflect your needs. The options are described there
in full detail.

### Set up the actual kubernetes cluster
## Running the playbook

Now run the setup:
After going through the setup, run the setup script provided:

`$ ./setup.sh`

In general this will work on very recent Fedora, rawhide or F21. Future work to
support RHEL7, CentOS, and possibly other distros should be forthcoming.

### You can just set up certain parts instead of doing it all

Only etcd:

`$ ./setup.sh --tags=etcd`

Only the kubernetes master:

`$ ./setup.sh --tags=masters`

Only the kubernetes nodes:

`$ ./setup.sh --tags=nodes`

### You may overwrite the inventory file by doing
You may override the inventory file by doing:

`INVENTORY=myinventory ./setup.sh`

Only flannel:

`$ ./setup.sh --tags=flannel`
In general this will work on very recent Fedora, rawhide or F21. Future work to
support RHEL7, CentOS, and possibly other distros should be forthcoming.

### Targeted runs

You can just set up certain parts instead of doing it all.

#### etcd

`$ ./setup.sh --tags=etcd`

#### Kubernetes master

`$ ./setup.sh --tags=masters`

#### kubernetes nodes

`$ ./setup.sh --tags=nodes`

### flannel

`$ ./setup.sh --tags=flannel`

[]()
@@ -1,3 +1,11 @@
# This value determines how kubernetes binaries, config files, and service
# files are loaded onto the target machines. The following are the only
# valid options:
#
# localBuild - requires make release to have been run to build local binaries
# packageManager - will install packages from your distribution using yum/dnf/apt
source_type: localBuild

# Will be used as the internal DNS domain name if DNS is enabled. Services
# will be discoverable under <service-name>.<namespace>.svc.<domainname>, e.g.
# myservice.default.svc.cluster.local
@@ -50,6 +58,9 @@ cluster_logging: true
# Turn to false to disable cluster monitoring with heapster and influxdb
cluster_monitoring: true

# Turn to false to disable the kube-ui addon for this cluster
kube-ui: false

# Turn this variable to 'false' to disable the whole DNS configuration.
dns_setup: true
# How many replicas in the Replication Controller
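As an illustration, a tuned group_vars/all.yml using only the variables shown above might read as follows (hypothetical values, not recommendations):

source_type: packageManager
cluster_logging: true
cluster_monitoring: false
kube-ui: false
dns_setup: true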
@ -0,0 +1,5 @@
|
||||
[virt7-docker-common-candidate]
|
||||
name=virt7-docker-common-candidate
|
||||
baseurl=http://cbs.centos.org/repos/virt7-docker-common-candidate/x86_64/os/
|
||||
enabled=0
|
||||
gpgcheck=0
|
@@ -1,3 +1,6 @@
---
- name: CentOS | Install Testing centos7 repo for new tool versions
- name: CentOS | Install Testing centos7 repo
  copy: src=virt7-testing.repo dest=/etc/yum.repos.d/virt7-testing.repo

- name: CentOS | Install docker-common-candidate centos7 repo
  copy: src=virt7-docker-common-candidate.repo dest=/etc/yum.repos.d/virt7-docker-common-candidate.repo
@@ -27,6 +27,14 @@
    has_rpm: true
  when: s.stat.exists

- name: Init the has_firewalld fact
  set_fact:
    has_firewalld: false

- name: Init the has_iptables fact
  set_fact:
    has_iptables: false

# collect information about what packages are installed
- include: rpm.yml
  when: has_rpm
@@ -5,10 +5,6 @@
  changed_when: false
  failed_when: false

- name: Init the has_firewalld fact
  set_fact:
    has_firewalld: false

- name: Set the has_firewalld fact
  set_fact:
    has_firewalld: true
@@ -20,10 +16,6 @@
  changed_when: false
  failed_when: false

- name: Init the has_iptables fact
  set_fact:
    has_iptables: false

- name: Set the has_iptables fact
  set_fact:
    has_iptables: true
contrib/ansible/roles/docker/tasks/debian-install.yml (new file, 10 lines)
@@ -0,0 +1,10 @@
---
- name: DEBIAN | Make sure this is stretch or sid, jessie does not have docker
  fail: msg="Docker.io only available in sid and stretch, https://wiki.debian.org/Docker"
  when: ansible_lsb.codename != "stretch" and ansible_lsb.codename != "sid"

- name: DEBIAN | Install Docker
  action: "{{ ansible_pkg_mgr }}"
  args:
    name: docker.io
    state: latest
@@ -4,3 +4,4 @@
  args:
    name: docker
    state: latest
  when: not is_atomic
@@ -1,32 +1,51 @@
---
- include: debian-install.yml
  when: ansible_distribution == "Debian"

- include: generic-install.yml
  when: not is_atomic
  when: ansible_distribution != "Debian"

- name: Set docker config file directory
  set_fact:
    docker_config_dir: "/etc/sysconfig"

- name: Override docker config file directory for Debian
  set_fact:
    docker_config_dir: "/etc/default"
  when: ansible_distribution == "Debian"

- name: Verify docker config files exist
  file: path={{ docker_config_dir }}/{{ item }} state=touch
  changed_when: false
  with_items:
    - docker
    - docker-network

- name: Turn down docker logging
  lineinfile: dest=/etc/sysconfig/docker regexp=^OPTIONS= line=OPTIONS="--selinux-enabled --log-level=warn"
  lineinfile: dest={{ docker_config_dir }}/docker regexp=^OPTIONS= line=OPTIONS="--selinux-enabled --log-level=warn"
  notify:
    - restart docker

- name: Install http_proxy into docker-network
  lineinfile: dest=/etc/sysconfig/docker-network regexp=^HTTP_PROXY= line=HTTP_PROXY="{{ http_proxy }}"
  lineinfile: dest={{ docker_config_dir }}/docker-network regexp=^HTTP_PROXY= line=HTTP_PROXY="{{ http_proxy }}"
  when: http_proxy is defined
  notify:
    - restart docker

- name: Install https_proxy into docker-network
  lineinfile: dest=/etc/sysconfig/docker-network regexp=^HTTPS_PROXY= line=HTTPS_PROXY="{{ https_proxy }}"
  lineinfile: dest={{ docker_config_dir }}/docker-network regexp=^HTTPS_PROXY= line=HTTPS_PROXY="{{ https_proxy }}"
  when: https_proxy is defined
  notify:
    - restart docker

- name: Install no-proxy into docker-network
  lineinfile: dest=/etc/sysconfig/docker-network regexp=^NO_PROXY= line=NO_PROXY="{{ no_proxy }}"
  lineinfile: dest={{ docker_config_dir }}/docker-network regexp=^NO_PROXY= line=NO_PROXY="{{ no_proxy }}"
  when: no_proxy is defined
  notify:
    - restart docker

- name: Add any insecure registries to docker config
  lineinfile: dest=/etc/sysconfig/docker regexp=^INSECURE_REGISTRY= line=INSECURE_REGISTRY='{% for reg in insecure_registrys %}--insecure-registry="{{ reg }}" {% endfor %}'
  lineinfile: dest={{ docker_config_dir }}/docker regexp=^INSECURE_REGISTRY= line=INSECURE_REGISTRY='{% for reg in insecure_registrys %}--insecure-registry="{{ reg }}" {% endfor %}'
  when: insecure_registrys is defined and insecure_registrys > 0
  notify:
    - restart docker
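With the proxy variables defined, the tasks above would leave {{ docker_config_dir }}/docker-network looking roughly like this (hypothetical values for illustration only):

HTTP_PROXY="http://proxy.example.com:3128"
HTTPS_PROXY="http://proxy.example.com:3128"
NO_PROXY="localhost,127.0.0.1,.example.com"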
contrib/ansible/roles/etcd/files/etcd.service (new file, 15 lines)
@@ -0,0 +1,15 @@
[Unit]
Description=Etcd Server
After=network.target

[Service]
Type=simple
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
ExecStart=/usr/bin/etcd
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target