Merge branch 'master' of github.com:asridharan/kubernetes into fix_docker_doc

This commit is contained in:
Avinash Sridharan
2015-07-30 14:30:08 -07:00
150 changed files with 4439 additions and 1440 deletions

Godeps/Godeps.json generated
View File

@@ -425,6 +425,10 @@
"ImportPath": "github.com/mitchellh/mapstructure", "ImportPath": "github.com/mitchellh/mapstructure",
"Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf" "Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf"
}, },
{
"ImportPath": "github.com/mxk/go-flowrate/flowrate",
"Rev": "cca7078d478f8520f85629ad7c68962d31ed7682"
},
{ {
"ImportPath": "github.com/onsi/ginkgo", "ImportPath": "github.com/onsi/ginkgo",
"Comment": "v1.2.0-6-gd981d36", "Comment": "v1.2.0-6-gd981d36",

View File

@@ -0,0 +1,267 @@
//
// Written by Maxim Khitrov (November 2012)
//
// Package flowrate provides the tools for monitoring and limiting the flow rate
// of an arbitrary data stream.
package flowrate
import (
"math"
"sync"
"time"
)
// Monitor monitors and limits the transfer rate of a data stream.
type Monitor struct {
mu sync.Mutex // Mutex guarding access to all internal fields
active bool // Flag indicating an active transfer
start time.Duration // Transfer start time (clock() value)
bytes int64 // Total number of bytes transferred
samples int64 // Total number of samples taken
rSample float64 // Most recent transfer rate sample (bytes per second)
rEMA float64 // Exponential moving average of rSample
rPeak float64 // Peak transfer rate (max of all rSamples)
rWindow float64 // rEMA window (seconds)
sBytes int64 // Number of bytes transferred since sLast
sLast time.Duration // Most recent sample time (stop time when inactive)
sRate time.Duration // Sampling rate
tBytes int64 // Number of bytes expected in the current transfer
tLast time.Duration // Time of the most recent transfer of at least 1 byte
}
// New creates a new flow control monitor. Instantaneous transfer rate is
// measured and updated for each sampleRate interval. windowSize determines the
// weight of each sample in the exponential moving average (EMA) calculation.
// The exact formulas are:
//
// sampleTime = currentTime - prevSampleTime
// sampleRate = byteCount / sampleTime
// weight = 1 - exp(-sampleTime/windowSize)
// newRate = weight*sampleRate + (1-weight)*oldRate
//
// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s,
// respectively.
func New(sampleRate, windowSize time.Duration) *Monitor {
if sampleRate = clockRound(sampleRate); sampleRate <= 0 {
sampleRate = 5 * clockRate
}
if windowSize <= 0 {
windowSize = 1 * time.Second
}
now := clock()
return &Monitor{
active: true,
start: now,
rWindow: windowSize.Seconds(),
sLast: now,
sRate: sampleRate,
tLast: now,
}
}
// Update records the transfer of n bytes and returns n. It should be called
// after each Read/Write operation, even if n is 0.
func (m *Monitor) Update(n int) int {
m.mu.Lock()
m.update(n)
m.mu.Unlock()
return n
}
// IO is a convenience method intended to wrap io.Reader and io.Writer method
// execution. It calls m.Update(n) and then returns (n, err) unmodified.
func (m *Monitor) IO(n int, err error) (int, error) {
return m.Update(n), err
}
// Done marks the transfer as finished and prevents any further updates or
// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and
// Limit methods become NOOPs. It returns the total number of bytes transferred.
func (m *Monitor) Done() int64 {
m.mu.Lock()
if now := m.update(0); m.sBytes > 0 {
m.reset(now)
}
m.active = false
m.tLast = 0
n := m.bytes
m.mu.Unlock()
return n
}
// timeRemLimit is the maximum Status.TimeRem value.
const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second
// Status represents the current Monitor status. All transfer rates are in bytes
// per second rounded to the nearest byte.
type Status struct {
Active bool // Flag indicating an active transfer
Start time.Time // Transfer start time
Duration time.Duration // Time period covered by the statistics
Idle time.Duration // Time since the last transfer of at least 1 byte
Bytes int64 // Total number of bytes transferred
Samples int64 // Total number of samples taken
InstRate int64 // Instantaneous transfer rate
CurRate int64 // Current transfer rate (EMA of InstRate)
AvgRate int64 // Average transfer rate (Bytes / Duration)
PeakRate int64 // Maximum instantaneous transfer rate
BytesRem int64 // Number of bytes remaining in the transfer
TimeRem time.Duration // Estimated time to completion
Progress Percent // Overall transfer progress
}
// Status returns current transfer status information. The returned value
// becomes static after a call to Done.
func (m *Monitor) Status() Status {
m.mu.Lock()
now := m.update(0)
s := Status{
Active: m.active,
Start: clockToTime(m.start),
Duration: m.sLast - m.start,
Idle: now - m.tLast,
Bytes: m.bytes,
Samples: m.samples,
PeakRate: round(m.rPeak),
BytesRem: m.tBytes - m.bytes,
Progress: percentOf(float64(m.bytes), float64(m.tBytes)),
}
if s.BytesRem < 0 {
s.BytesRem = 0
}
if s.Duration > 0 {
rAvg := float64(s.Bytes) / s.Duration.Seconds()
s.AvgRate = round(rAvg)
if s.Active {
s.InstRate = round(m.rSample)
s.CurRate = round(m.rEMA)
if s.BytesRem > 0 {
if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 {
ns := float64(s.BytesRem) / tRate * 1e9
if ns > float64(timeRemLimit) {
ns = float64(timeRemLimit)
}
s.TimeRem = clockRound(time.Duration(ns))
}
}
}
}
m.mu.Unlock()
return s
}
// Limit restricts the instantaneous (per-sample) data flow to rate bytes per
// second. It returns the maximum number of bytes (0 <= n <= want) that may be
// transferred immediately without exceeding the limit. If block == true, the
// call blocks until n > 0. want is returned unmodified if want < 1, rate < 1,
// or the transfer is inactive (after a call to Done).
//
// At least one byte is always allowed to be transferred in any given sampling
// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate
// is 10 bytes per second.
//
// For usage examples, see the implementation of Reader and Writer in io.go.
func (m *Monitor) Limit(want int, rate int64, block bool) (n int) {
if want < 1 || rate < 1 {
return want
}
m.mu.Lock()
// Determine the maximum number of bytes that can be sent in one sample
limit := round(float64(rate) * m.sRate.Seconds())
if limit <= 0 {
limit = 1
}
// If block == true, wait until m.sBytes < limit
if now := m.update(0); block {
for m.sBytes >= limit && m.active {
now = m.waitNextSample(now)
}
}
// Make limit <= want (unlimited if the transfer is no longer active)
if limit -= m.sBytes; limit > int64(want) || !m.active {
limit = int64(want)
}
m.mu.Unlock()
if limit < 0 {
limit = 0
}
return int(limit)
}
// SetTransferSize specifies the total size of the data transfer, which allows
// the Monitor to calculate the overall progress and time to completion.
func (m *Monitor) SetTransferSize(bytes int64) {
if bytes < 0 {
bytes = 0
}
m.mu.Lock()
m.tBytes = bytes
m.mu.Unlock()
}
// update accumulates the transferred byte count for the current sample until
// clock() - m.sLast >= m.sRate. The monitor status is updated once the current
// sample is done.
func (m *Monitor) update(n int) (now time.Duration) {
if !m.active {
return
}
if now = clock(); n > 0 {
m.tLast = now
}
m.sBytes += int64(n)
if sTime := now - m.sLast; sTime >= m.sRate {
t := sTime.Seconds()
if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak {
m.rPeak = m.rSample
}
// Exponential moving average using a method similar to *nix load
// average calculation. Longer sampling periods carry greater weight.
if m.samples > 0 {
w := math.Exp(-t / m.rWindow)
m.rEMA = m.rSample + w*(m.rEMA-m.rSample)
} else {
m.rEMA = m.rSample
}
m.reset(now)
}
return
}
// reset clears the current sample state in preparation for the next sample.
func (m *Monitor) reset(sampleTime time.Duration) {
m.bytes += m.sBytes
m.samples++
m.sBytes = 0
m.sLast = sampleTime
}
// waitNextSample sleeps for the remainder of the current sample. The lock is
// released and reacquired during the actual sleep period, so it's possible for
// the transfer to be inactive when this method returns.
func (m *Monitor) waitNextSample(now time.Duration) time.Duration {
const minWait = 5 * time.Millisecond
current := m.sLast
// sleep until the last sample time changes (ideally, just one iteration)
for m.sLast == current && m.active {
d := current + m.sRate - now
m.mu.Unlock()
if d < minWait {
d = minWait
}
time.Sleep(d)
m.mu.Lock()
now = m.update(0)
}
return now
}
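
A minimal usage sketch for the Monitor above (not part of the diff; chunk sizes and the sleep interval are illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/mxk/go-flowrate/flowrate"
)

func main() {
	m := flowrate.New(0, 0)    // defaults: 100ms sample rate, 1s EMA window
	m.SetTransferSize(1 << 20) // expect 1 MiB in total
	for i := 0; i < 10; i++ {
		m.Update(64 << 10) // record a 64 KiB chunk
		time.Sleep(50 * time.Millisecond)
	}
	s := m.Status()
	fmt.Printf("bytes=%d avg=%d B/s peak=%d B/s progress=%s\n",
		s.Bytes, s.AvgRate, s.PeakRate, s.Progress)
	m.Done()
}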

View File

@@ -0,0 +1,133 @@
//
// Written by Maxim Khitrov (November 2012)
//
package flowrate
import (
"errors"
"io"
)
// ErrLimit is returned by the Writer when a non-blocking write is short due to
// the transfer rate limit.
var ErrLimit = errors.New("flowrate: flow rate limit exceeded")
// Limiter is implemented by the Reader and Writer to provide a consistent
// interface for monitoring and controlling data transfer.
type Limiter interface {
Done() int64
Status() Status
SetTransferSize(bytes int64)
SetLimit(new int64) (old int64)
SetBlocking(new bool) (old bool)
}
// Reader implements io.ReadCloser with a restriction on the rate of data
// transfer.
type Reader struct {
io.Reader // Data source
*Monitor // Flow control monitor
limit int64 // Rate limit in bytes per second (unlimited when <= 0)
block bool // What to do when no new bytes can be read due to the limit
}
// NewReader restricts all Read operations on r to limit bytes per second.
func NewReader(r io.Reader, limit int64) *Reader {
return &Reader{r, New(0, 0), limit, true}
}
// Read reads up to len(p) bytes into p without exceeding the current transfer
// rate limit. It returns (0, nil) immediately if r is non-blocking and no new
// bytes can be read at this time.
func (r *Reader) Read(p []byte) (n int, err error) {
p = p[:r.Limit(len(p), r.limit, r.block)]
if len(p) > 0 {
n, err = r.IO(r.Reader.Read(p))
}
return
}
// SetLimit changes the transfer rate limit to new bytes per second and returns
// the previous setting.
func (r *Reader) SetLimit(new int64) (old int64) {
old, r.limit = r.limit, new
return
}
// SetBlocking changes the blocking behavior and returns the previous setting. A
// Read call on a non-blocking reader returns immediately if no additional bytes
// may be read at this time due to the rate limit.
func (r *Reader) SetBlocking(new bool) (old bool) {
old, r.block = r.block, new
return
}
// Close closes the underlying reader if it implements the io.Closer interface.
func (r *Reader) Close() error {
defer r.Done()
if c, ok := r.Reader.(io.Closer); ok {
return c.Close()
}
return nil
}
// Writer implements io.WriteCloser with a restriction on the rate of data
// transfer.
type Writer struct {
io.Writer // Data destination
*Monitor // Flow control monitor
limit int64 // Rate limit in bytes per second (unlimited when <= 0)
block bool // What to do when no new bytes can be written due to the limit
}
// NewWriter restricts all Write operations on w to limit bytes per second. The
// transfer rate and the default blocking behavior (true) can be changed
// directly on the returned *Writer.
func NewWriter(w io.Writer, limit int64) *Writer {
return &Writer{w, New(0, 0), limit, true}
}
// Write writes len(p) bytes from p to the underlying data stream without
// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is
// non-blocking and no additional bytes can be written at this time.
func (w *Writer) Write(p []byte) (n int, err error) {
var c int
for len(p) > 0 && err == nil {
s := p[:w.Limit(len(p), w.limit, w.block)]
if len(s) > 0 {
c, err = w.IO(w.Writer.Write(s))
} else {
return n, ErrLimit
}
p = p[c:]
n += c
}
return
}
// SetLimit changes the transfer rate limit to new bytes per second and returns
// the previous setting.
func (w *Writer) SetLimit(new int64) (old int64) {
old, w.limit = w.limit, new
return
}
// SetBlocking changes the blocking behavior and returns the previous setting. A
// Write call on a non-blocking writer returns as soon as no additional bytes
// may be written at this time due to the rate limit.
func (w *Writer) SetBlocking(new bool) (old bool) {
old, w.block = w.block, new
return
}
// Close closes the underlying writer if it implements the io.Closer interface.
func (w *Writer) Close() error {
defer w.Done()
if c, ok := w.Writer.(io.Closer); ok {
return c.Close()
}
return nil
}
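
A short sketch of the rate-limited Reader in use (illustrative values; the Writer is symmetric). In the default blocking mode, io.Copy simply paces itself against the limit instead of returning early:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/mxk/go-flowrate/flowrate"
)

func main() {
	src := strings.NewReader(strings.Repeat("x", 4096))
	r := flowrate.NewReader(src, 1024) // cap reads at 1 KiB/s, blocking by default

	var dst bytes.Buffer
	n, err := io.Copy(&dst, r) // takes roughly 4s for 4 KiB at this limit
	fmt.Println(n, err, r.Status().AvgRate)
}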

View File

@@ -0,0 +1,146 @@
//
// Written by Maxim Khitrov (November 2012)
//
package flowrate
import (
"bytes"
"reflect"
"testing"
"time"
)
const (
_50ms = 50 * time.Millisecond
_100ms = 100 * time.Millisecond
_200ms = 200 * time.Millisecond
_300ms = 300 * time.Millisecond
_400ms = 400 * time.Millisecond
_500ms = 500 * time.Millisecond
)
func nextStatus(m *Monitor) Status {
samples := m.samples
for i := 0; i < 30; i++ {
if s := m.Status(); s.Samples != samples {
return s
}
time.Sleep(5 * time.Millisecond)
}
return m.Status()
}
func TestReader(t *testing.T) {
in := make([]byte, 100)
for i := range in {
in[i] = byte(i)
}
b := make([]byte, 100)
r := NewReader(bytes.NewReader(in), 100)
start := time.Now()
// Make sure r implements Limiter
_ = Limiter(r)
// 1st read of 10 bytes is performed immediately
if n, err := r.Read(b); n != 10 || err != nil {
t.Fatalf("r.Read(b) expected 10 (<nil>); got %v (%v)", n, err)
} else if rt := time.Since(start); rt > _50ms {
t.Fatalf("r.Read(b) took too long (%v)", rt)
}
// No new Reads allowed in the current sample
r.SetBlocking(false)
if n, err := r.Read(b); n != 0 || err != nil {
t.Fatalf("r.Read(b) expected 0 (<nil>); got %v (%v)", n, err)
} else if rt := time.Since(start); rt > _50ms {
t.Fatalf("r.Read(b) took too long (%v)", rt)
}
status := [6]Status{0: r.Status()} // No samples in the first status
// 2nd read of 10 bytes blocks until the next sample
r.SetBlocking(true)
if n, err := r.Read(b[10:]); n != 10 || err != nil {
t.Fatalf("r.Read(b[10:]) expected 10 (<nil>); got %v (%v)", n, err)
} else if rt := time.Since(start); rt < _100ms {
t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt)
}
status[1] = r.Status() // 1st sample
status[2] = nextStatus(r.Monitor) // 2nd sample
status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample
if n := r.Done(); n != 20 {
t.Fatalf("r.Done() expected 20; got %v", n)
}
status[4] = r.Status()
status[5] = nextStatus(r.Monitor) // Timeout
start = status[0].Start
// Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress
want := []Status{
Status{true, start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
Status{true, start, _100ms, 0, 10, 1, 100, 100, 100, 100, 0, 0, 0},
Status{true, start, _200ms, _100ms, 20, 2, 100, 100, 100, 100, 0, 0, 0},
Status{true, start, _300ms, _200ms, 20, 3, 0, 90, 67, 100, 0, 0, 0},
Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0},
Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0},
}
for i, s := range status {
if !reflect.DeepEqual(&s, &want[i]) {
t.Errorf("r.Status(%v) expected %v; got %v", i, want[i], s)
}
}
if !bytes.Equal(b[:20], in[:20]) {
t.Errorf("r.Read() input doesn't match output")
}
}
func TestWriter(t *testing.T) {
b := make([]byte, 100)
for i := range b {
b[i] = byte(i)
}
w := NewWriter(&bytes.Buffer{}, 200)
start := time.Now()
// Make sure w implements Limiter
_ = Limiter(w)
// Non-blocking 20-byte write for the first sample returns ErrLimit
w.SetBlocking(false)
if n, err := w.Write(b); n != 20 || err != ErrLimit {
t.Fatalf("w.Write(b) expected 20 (ErrLimit); got %v (%v)", n, err)
} else if rt := time.Since(start); rt > _50ms {
t.Fatalf("w.Write(b) took too long (%v)", rt)
}
// Blocking 80-byte write
w.SetBlocking(true)
if n, err := w.Write(b[20:]); n != 80 || err != nil {
t.Fatalf("w.Write(b[20:]) expected 80 (<nil>); got %v (%v)", n, err)
} else if rt := time.Since(start); rt < _400ms {
t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt)
}
w.SetTransferSize(100)
status := []Status{w.Status(), nextStatus(w.Monitor)}
start = status[0].Start
// Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress
want := []Status{
Status{true, start, _400ms, 0, 80, 4, 200, 200, 200, 200, 20, _100ms, 80000},
Status{true, start, _500ms, _100ms, 100, 5, 200, 200, 200, 200, 0, 0, 100000},
}
for i, s := range status {
if !reflect.DeepEqual(&s, &want[i]) {
t.Errorf("w.Status(%v) expected %v; got %v", i, want[i], s)
}
}
if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) {
t.Errorf("w.Write() input doesn't match output")
}
}

View File

@@ -0,0 +1,67 @@
//
// Written by Maxim Khitrov (November 2012)
//
package flowrate
import (
"math"
"strconv"
"time"
)
// clockRate is the resolution and precision of clock().
const clockRate = 20 * time.Millisecond
// czero is the process start time rounded down to the nearest clockRate
// increment.
var czero = time.Duration(time.Now().UnixNano()) / clockRate * clockRate
// clock returns a low resolution timestamp relative to the process start time.
func clock() time.Duration {
return time.Duration(time.Now().UnixNano())/clockRate*clockRate - czero
}
// clockToTime converts a clock() timestamp to an absolute time.Time value.
func clockToTime(c time.Duration) time.Time {
return time.Unix(0, int64(czero+c))
}
// clockRound returns d rounded to the nearest clockRate increment.
func clockRound(d time.Duration) time.Duration {
return (d + clockRate>>1) / clockRate * clockRate
}
// round returns x rounded to the nearest int64 (non-negative values only).
func round(x float64) int64 {
if _, frac := math.Modf(x); frac >= 0.5 {
return int64(math.Ceil(x))
}
return int64(math.Floor(x))
}
// Percent represents a percentage in increments of 1/1000th of a percent.
type Percent uint32
// percentOf calculates what percent of the total is x.
func percentOf(x, total float64) Percent {
if x < 0 || total <= 0 {
return 0
} else if p := round(x / total * 1e5); p <= math.MaxUint32 {
return Percent(p)
}
return Percent(math.MaxUint32)
}
func (p Percent) Float() float64 {
return float64(p) * 1e-3
}
func (p Percent) String() string {
var buf [12]byte
b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10)
n := len(b)
b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10)
b[n] = '.'
return string(append(b, '%'))
}
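
A quick in-package illustration of the fixed-point Percent arithmetic above (a sketch, not part of the diff):

package flowrate

import "fmt"

func ExamplePercent() {
	p := percentOf(256, 1024) // 25%, stored as 25000 (1/1000th-percent units)
	fmt.Println(p, p.Float())
	// Output: 25.000% 25
}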

View File

@@ -5706,6 +5706,54 @@
"summary": "connect GET requests to exec of Pod", "summary": "connect GET requests to exec of Pod",
"nickname": "connectGetNamespacedPodExec", "nickname": "connectGetNamespacedPodExec",
"parameters": [ "parameters": [
{
"type": "boolean",
"paramType": "query",
"name": "stdin",
"description": "redirect the standard input stream of the pod for this call; defaults to false",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "stdout",
"description": "redirect the standard output stream of the pod for this call; defaults to true",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "stderr",
"description": "redirect the standard error stream of the pod for this call; defaults to true",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "tty",
"description": "allocate a terminal for this exec call; defaults to false",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "query",
"name": "container",
"description": "the container in which to execute the command. Defaults to only container if there is only one container in the pod.",
"required": false,
"allowMultiple": false
},
{
"type": "",
"paramType": "query",
"name": "command",
"description": "the command to execute; argv array; not executed within a shell",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -5736,6 +5784,54 @@
"summary": "connect POST requests to exec of Pod",
"nickname": "connectPostNamespacedPodExec",
"parameters": [
{
"type": "boolean",
"paramType": "query",
"name": "stdin",
"description": "redirect the standard input stream of the pod for this call; defaults to false",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "stdout",
"description": "redirect the standard output stream of the pod for this call; defaults to true",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "stderr",
"description": "redirect the standard error stream of the pod for this call; defaults to true",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "tty",
"description": "allocate a terminal for this exec call; defaults to false",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "query",
"name": "container",
"description": "the container in which to execute the command. Defaults to only container if there is only one container in the pod.",
"required": false,
"allowMultiple": false
},
{
"type": "",
"paramType": "query",
"name": "command",
"description": "the command to execute; argv array; not executed within a shell",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -5780,6 +5876,30 @@
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "query",
"name": "container",
"description": "the container for which to stream logs; defaults to only container if there is one container in the pod",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "follow",
"description": "follow the log stream of the pod; defaults to false",
"required": false,
"allowMultiple": false
},
{
"type": "boolean",
"paramType": "query",
"name": "previous",
"description": "return previous terminated container logs; defaults to false",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -5889,6 +6009,14 @@
"summary": "connect GET requests to proxy of Pod",
"nickname": "connectGetNamespacedPodProxy",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "path",
"description": "URL path to use in proxy request to pod",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -5919,6 +6047,14 @@
"summary": "connect POST requests to proxy of Pod",
"nickname": "connectPostNamespacedPodProxy",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "path",
"description": "URL path to use in proxy request to pod",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -5949,6 +6085,14 @@
"summary": "connect PUT requests to proxy of Pod",
"nickname": "connectPutNamespacedPodProxy",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "path",
"description": "URL path to use in proxy request to pod",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -5979,6 +6123,14 @@
"summary": "connect DELETE requests to proxy of Pod",
"nickname": "connectDeleteNamespacedPodProxy",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "path",
"description": "URL path to use in proxy request to pod",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -6009,6 +6161,14 @@
"summary": "connect HEAD requests to proxy of Pod",
"nickname": "connectHeadNamespacedPodProxy",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "path",
"description": "URL path to use in proxy request to pod",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -6039,6 +6199,14 @@
"summary": "connect OPTIONS requests to proxy of Pod",
"nickname": "connectOptionsNamespacedPodProxy",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "path",
"description": "URL path to use in proxy request to pod",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -6075,6 +6243,14 @@
"summary": "connect GET requests to proxy of Pod",
"nickname": "connectGetNamespacedPodProxy",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "path",
"description": "URL path to use in proxy request to pod",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -6113,6 +6289,14 @@
"summary": "connect POST requests to proxy of Pod",
"nickname": "connectPostNamespacedPodProxy",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "path",
"description": "URL path to use in proxy request to pod",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -6151,6 +6335,14 @@
"summary": "connect PUT requests to proxy of Pod",
"nickname": "connectPutNamespacedPodProxy",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "path",
"description": "URL path to use in proxy request to pod",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -6189,6 +6381,14 @@
"summary": "connect DELETE requests to proxy of Pod",
"nickname": "connectDeleteNamespacedPodProxy",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "path",
"description": "URL path to use in proxy request to pod",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -6227,6 +6427,14 @@
"summary": "connect HEAD requests to proxy of Pod",
"nickname": "connectHeadNamespacedPodProxy",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "path",
"description": "URL path to use in proxy request to pod",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",
@@ -6265,6 +6473,14 @@
"summary": "connect OPTIONS requests to proxy of Pod",
"nickname": "connectOptionsNamespacedPodProxy",
"parameters": [
{
"type": "string",
"paramType": "query",
"name": "path",
"description": "URL path to use in proxy request to pod",
"required": false,
"allowMultiple": false
},
{
"type": "string",
"paramType": "path",

View File

@@ -12,7 +12,7 @@ the system will bring them back to the original state, in particular:
On the cluster, the add-ons are kept in ```/etc/kubernetes/addons``` on the master node, in yaml files
(json is not supported at the moment). A system daemon periodically checks if
-the contents of this directory is consistent with the add-one objects on the API
+the contents of this directory is consistent with the add-on objects on the API
server. If any difference is spotted, the system updates the API objects
accordingly. (Limitation: for now, the system compares only the names of objects
in the directory and on the API server. So changes in parameters may not be

View File

@@ -1,26 +1,26 @@
apiVersion: v1
kind: ReplicationController
metadata:
-  name: monitoring-heapster-v5
+  name: monitoring-heapster-v6
  namespace: kube-system
  labels:
    k8s-app: heapster
-    version: v5
+    version: v6
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: heapster
-    version: v5
+    version: v6
  template:
    metadata:
      labels:
        k8s-app: heapster
-        version: v5
+        version: v6
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
-      - image: gcr.io/google_containers/heapster:v0.16.0
+      - image: gcr.io/google_containers/heapster:v0.17.0
        name: heapster
        resources:
          limits:
@@ -33,11 +33,3 @@ spec:
        - --sink=gcl
        - --poll_duration=2m
        - --stats_resolution=1m
-        volumeMounts:
-        - name: ssl-certs
-          mountPath: /etc/ssl/certs
-          readOnly: true
-      volumes:
-      - name: ssl-certs
-        hostPath:
-          path: "/etc/ssl/certs"

View File

@@ -1,26 +1,26 @@
apiVersion: v1
kind: ReplicationController
metadata:
-  name: monitoring-heapster-v5
+  name: monitoring-heapster-v6
  namespace: kube-system
  labels:
    k8s-app: heapster
-    version: v5
+    version: v6
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: heapster
-    version: v5
+    version: v6
  template:
    metadata:
      labels:
        k8s-app: heapster
-        version: v5
+        version: v6
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
-      - image: gcr.io/google_containers/heapster:v0.16.0
+      - image: gcr.io/google_containers/heapster:v0.17.0
        name: heapster
        resources:
          limits:
@@ -33,11 +33,3 @@ spec:
        - --sink=influxdb:http://monitoring-influxdb:8086
        - --poll_duration=2m
        - --stats_resolution=1m
-        volumeMounts:
-        - name: ssl-certs
-          mountPath: /etc/ssl/certs
-          readOnly: true
-      volumes:
-      - name: ssl-certs
-        hostPath:
-          path: "/etc/ssl/certs"

View File

@@ -1,26 +1,26 @@
apiVersion: v1
kind: ReplicationController
metadata:
-  name: monitoring-heapster-v5
+  name: monitoring-heapster-v6
  namespace: kube-system
  labels:
    k8s-app: heapster
-    version: v5
+    version: v6
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: heapster
-    version: v5
+    version: v6
  template:
    metadata:
      labels:
        k8s-app: heapster
-        version: v5
+        version: v6
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
-      - image: gcr.io/google_containers/heapster:v0.16.0
+      - image: gcr.io/google_containers/heapster:v0.17.0
        name: heapster
        resources:
          limits:

View File

@@ -1,26 +1,26 @@
apiVersion: v1
kind: ReplicationController
metadata:
-  name: monitoring-heapster-v5
+  name: monitoring-heapster-v6
  namespace: kube-system
  labels:
    k8s-app: heapster
-    version: v5
+    version: v6
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: heapster
-    version: v5
+    version: v6
  template:
    metadata:
      labels:
        k8s-app: heapster
-        version: v5
+        version: v6
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
-      - image: gcr.io/google_containers/heapster:v0.16.0
+      - image: gcr.io/google_containers/heapster:v0.17.0
        name: heapster
        resources:
          limits:

View File

@@ -1,6 +1,6 @@
# build the hyperkube image.
-VERSION=v0.18.2
+VERSION=v1.0.1
all:
	cp ../../saltbase/salt/helpers/safe_format_and_mount .

View File

@@ -39,6 +39,8 @@ export FLANNEL_OPTS=${FLANNEL_OPTS:-"Network": 172.16.0.0/16}
# Admission Controllers to invoke prior to persisting objects in cluster
export ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,ResourceQuota,SecurityContextDeny
+SERVICE_NODE_PORT_RANGE=${SERVICE_NODE_PORT_RANGE:-"30000-32767"}
# Optional: Enable node logging.
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=${LOGGING_DESTINATION:-elasticsearch}

View File

@@ -202,6 +202,7 @@ KUBE_APISERVER_OPTS="--insecure-bind-address=0.0.0.0 \
--logtostderr=true \
--service-cluster-ip-range=${1} \
--admission-control=${2} \
+--service-node-port-range=${3} \
--client-ca-file=/srv/kubernetes/ca.crt \
--tls-cert-file=/srv/kubernetes/server.cert \
--tls-private-key-file=/srv/kubernetes/server.key"
@@ -371,7 +372,7 @@ function provision-master() {
~/kube/make-ca-cert ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \
setClusterInfo; \
create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \
-create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}"; \
+create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}" "${SERVICE_NODE_PORT_RANGE}"; \
create-kube-controller-manager-opts "${MINION_IPS}"; \
create-kube-scheduler-opts; \
create-flanneld-opts; \
@@ -413,7 +414,7 @@ function provision-masterandminion() {
ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \
setClusterInfo; \
create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \
-create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}"; \
+create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}" "${SERVICE_NODE_PORT_RANGE}"; \
create-kube-controller-manager-opts "${MINION_IPS}"; \
create-kube-scheduler-opts; \
create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}";
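
With the new positional argument threaded through, the generated apiserver options gain the port-range flag; with the default above, the relevant fragment of KUBE_APISERVER_OPTS becomes:

--service-node-port-range=30000-32767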

View File

@@ -17,13 +17,16 @@ limitations under the License.
package main
import (
+"fmt"
"io"
"os"
+"path"
"runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1"
pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
flag "github.com/spf13/pflag"
@@ -50,7 +53,9 @@ func main() {
funcOut = file
}
-generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw())
+generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw(), path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", *version))
+apiShort := generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api")
+generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource")
// TODO(wojtek-t): Change the overwrites to a flag.
generator.OverwritePackage(*version, "")
for _, knownType := range api.Scheme.KnownTypes(*version) {
@@ -58,10 +63,14 @@
glog.Errorf("error while generating conversion functions for %v: %v", knownType, err)
}
}
+generator.RepackImports(util.NewStringSet())
+if err := generator.WriteImports(funcOut); err != nil {
+glog.Fatalf("error while writing imports: %v", err)
+}
if err := generator.WriteConversionFunctions(funcOut); err != nil {
glog.Fatalf("Error while writing conversion functions: %v", err)
}
-if err := generator.RegisterConversionFunctions(funcOut); err != nil {
+if err := generator.RegisterConversionFunctions(funcOut, fmt.Sprintf("%s.Scheme", apiShort)); err != nil {
glog.Fatalf("Error while writing conversion functions: %v", err)
}
}

View File

@@ -19,12 +19,14 @@ package main
import (
"io"
"os"
+"path"
"runtime"
"strings"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1"
pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
flag "github.com/spf13/pflag"
@@ -53,10 +55,14 @@
}
knownVersion := *version
+registerTo := "api.Scheme"
if knownVersion == "api" {
knownVersion = api.Scheme.Raw().InternalVersion
+registerTo = "Scheme"
}
-generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw())
+pkgPath := path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", knownVersion)
+generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), pkgPath, util.NewStringSet("github.com/GoogleCloudPlatform/kubernetes"))
+generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api")
for _, overwrite := range strings.Split(*overwrites, ",") {
vals := strings.Split(overwrite, "=")
@@ -67,13 +73,14 @@
glog.Errorf("error while generating deep copy functions for %v: %v", knownType, err)
}
}
-if err := generator.WriteImports(funcOut, *version); err != nil {
+generator.RepackImports()
+if err := generator.WriteImports(funcOut); err != nil {
glog.Fatalf("error while writing imports: %v", err)
}
if err := generator.WriteDeepCopyFunctions(funcOut); err != nil {
glog.Fatalf("error while writing deep copy functions: %v", err)
}
-if err := generator.RegisterDeepCopyFunctions(funcOut, *version); err != nil {
+if err := generator.RegisterDeepCopyFunctions(funcOut, registerTo); err != nil {
glog.Fatalf("error while registering deep copy functions: %v", err)
}
}

View File

@@ -101,6 +101,7 @@ type APIServer struct {
LongRunningRequestRE string
SSHUser string
SSHKeyfile string
+MaxConnectionBytesPerSec int64
}
// NewAPIServer creates a new APIServer object with default parameters
@@ -205,6 +206,7 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.LongRunningRequestRE, "long-running-request-regexp", defaultLongRunningRequestRE, "A regular expression matching long running requests which should be excluded from maximum inflight request handling.")
fs.StringVar(&s.SSHUser, "ssh-user", "", "If non-empty, use secure SSH proxy to the nodes, using this user name")
fs.StringVar(&s.SSHKeyfile, "ssh-keyfile", "", "If non-empty, use secure SSH proxy to the nodes, using this user keyfile")
+fs.Int64Var(&s.MaxConnectionBytesPerSec, "max-connection-bytes-per-sec", 0, "If non-zero, throttle each user connection to this number of bytes/sec. Currently only applies to long-running requests")
}
// TODO: Longer term we should read this from some config store, rather than a flag.
@@ -255,7 +257,8 @@ func (s *APIServer) Run(_ []string) error {
capabilities.Initialize(capabilities.Capabilities{
AllowPrivileged: s.AllowPrivileged,
// TODO(vmarmol): Implement support for HostNetworkSources.
HostNetworkSources: []string{},
+PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec,
})
cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
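
An illustrative invocation of the new flag, throttling each long-running connection to roughly 1 MiB/s (the value is made up):

kube-apiserver --max-connection-bytes-per-sec=1048576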

View File

@@ -69,6 +69,7 @@ type KubeletServer struct {
FileCheckFrequency time.Duration
HTTPCheckFrequency time.Duration
ManifestURL string
+ManifestURLHeader string
EnableServer bool
Address util.IP
Port uint
@@ -193,6 +194,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.DurationVar(&s.FileCheckFrequency, "file-check-frequency", s.FileCheckFrequency, "Duration between checking config files for new data")
fs.DurationVar(&s.HTTPCheckFrequency, "http-check-frequency", s.HTTPCheckFrequency, "Duration between checking http for new data")
fs.StringVar(&s.ManifestURL, "manifest-url", s.ManifestURL, "URL for accessing the container manifest")
+fs.StringVar(&s.ManifestURLHeader, "manifest-url-header", s.ManifestURLHeader, "HTTP header to use when accessing the manifest URL, with the key separated from the value with a ':', as in 'key:value'")
fs.BoolVar(&s.EnableServer, "enable-server", s.EnableServer, "Enable the Kubelet's server")
fs.Var(&s.Address, "address", "The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces)")
fs.UintVar(&s.Port, "port", s.Port, "The port for the Kubelet to serve on. Note that \"kubectl logs\" will not work if you set this flag.") // see #9325
@@ -295,6 +297,15 @@ func (s *KubeletServer) Run(_ []string) error {
}
glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
+manifestURLHeader := make(http.Header)
+if s.ManifestURLHeader != "" {
+pieces := strings.Split(s.ManifestURLHeader, ":")
+if len(pieces) != 2 {
+return fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader)
+}
+manifestURLHeader.Set(pieces[0], pieces[1])
+}
hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
if err != nil {
return err
@@ -330,6 +341,7 @@ func (s *KubeletServer) Run(_ []string) error {
RootDirectory: s.RootDirectory,
ConfigFile: s.Config,
ManifestURL: s.ManifestURL,
+ManifestURLHeader: manifestURLHeader,
FileCheckFrequency: s.FileCheckFrequency,
HTTPCheckFrequency: s.HTTPCheckFrequency,
PodInfraContainerImage: s.PodInfraContainerImage,
@@ -604,7 +616,7 @@ func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error {
} else {
glog.Warning("No api server defined - no events will be sent to API server.")
}
-capabilities.Setup(kcfg.AllowPrivileged, kcfg.HostNetworkSources)
+capabilities.Setup(kcfg.AllowPrivileged, kcfg.HostNetworkSources, 0)
credentialprovider.SetPreferredDockercfgPath(kcfg.RootDirectory)
@@ -660,8 +672,8 @@ func makePodSourceConfig(kc *KubeletConfig) *config.PodConfig {
// define url config source
if kc.ManifestURL != "" {
-glog.Infof("Adding manifest url: %v", kc.ManifestURL)
-config.NewSourceURL(kc.ManifestURL, kc.NodeName, kc.HTTPCheckFrequency, cfg.Channel(kubelet.HTTPSource))
+glog.Infof("Adding manifest url %q with HTTP header %v", kc.ManifestURL, kc.ManifestURLHeader)
+config.NewSourceURL(kc.ManifestURL, kc.ManifestURLHeader, kc.NodeName, kc.HTTPCheckFrequency, cfg.Channel(kubelet.HTTPSource))
}
if kc.KubeClient != nil {
glog.Infof("Watching apiserver")
@@ -683,6 +695,7 @@ type KubeletConfig struct {
RootDirectory string
ConfigFile string
ManifestURL string
+ManifestURLHeader http.Header
FileCheckFrequency time.Duration
HTTPCheckFrequency time.Duration
Hostname string
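
An illustrative use of the new flag (URL and token are made up). Note that the parsing above splits on ':' and rejects anything but a single key-value pair, so the header value itself cannot contain a colon:

kubelet --manifest-url=http://config.example.com/manifest.yaml --manifest-url-header=X-Auth-Token:secret123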

View File

@@ -0,0 +1,16 @@
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/google-containers/mounttest:0.3
USER 1001

View File

@@ -0,0 +1,9 @@
all: push
TAG = 0.1
image:
sudo docker build -t gcr.io/google_containers/mounttest-user:$(TAG) .
push: image
gcloud docker push gcr.io/google_containers/mounttest-user:$(TAG)

View File

@@ -1,6 +1,6 @@
all: push
-TAG = 0.2
+TAG = 0.3
mt: mt.go
	CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-w' ./mt.go

View File

@@ -25,17 +25,23 @@ import (
)
var (
fsTypePath = ""
fileModePath = ""
-readFileContentPath = ""
-readWriteNewFilePath = ""
+filePermPath = ""
+readFileContentPath = ""
+newFilePath0644 = ""
+newFilePath0666 = ""
+newFilePath0777 = ""
)
func init() {
flag.StringVar(&fsTypePath, "fs_type", "", "Path to print the fs type for")
-flag.StringVar(&fileModePath, "file_mode", "", "Path to print the filemode of")
+flag.StringVar(&fileModePath, "file_mode", "", "Path to print the mode bits of")
+flag.StringVar(&filePermPath, "file_perm", "", "Path to print the perms of")
flag.StringVar(&readFileContentPath, "file_content", "", "Path to read the file content from")
-flag.StringVar(&readWriteNewFilePath, "rw_new_file", "", "Path to write to and read from")
+flag.StringVar(&newFilePath0644, "new_file_0644", "", "Path to write to and read from with perm 0644")
+flag.StringVar(&newFilePath0666, "new_file_0666", "", "Path to write to and read from with perm 0666")
+flag.StringVar(&newFilePath0777, "new_file_0777", "", "Path to write to and read from with perm 0777")
}
// This program performs some tests on the filesystem as dictated by the
@@ -48,6 +54,9 @@ func main() {
errs = []error{}
)
+// Clear the umask so we can set any mode bits we want.
+syscall.Umask(0000)
// NOTE: the ordering of execution of the various command line
// flags is intentional and allows a single command to:
//
@@ -62,7 +71,17 @@
errs = append(errs, err)
}
-err = readWriteNewFile(readWriteNewFilePath)
+err = readWriteNewFile(newFilePath0644, 0644)
+if err != nil {
+errs = append(errs, err)
+}
+err = readWriteNewFile(newFilePath0666, 0666)
+if err != nil {
+errs = append(errs, err)
+}
+err = readWriteNewFile(newFilePath0777, 0777)
if err != nil {
errs = append(errs, err)
}
@@ -72,6 +91,11 @@
errs = append(errs, err)
}
+err = filePerm(filePermPath)
+if err != nil {
+errs = append(errs, err)
+}
err = readFileContent(readFileContentPath)
if err != nil {
errs = append(errs, err)
}
@@ -94,7 +118,7 @@ func fsType(path string) error {
buf := syscall.Statfs_t{}
if err := syscall.Statfs(path, &buf); err != nil {
-fmt.Printf("error from statfs(%q): %v", path, err)
+fmt.Printf("error from statfs(%q): %v\n", path, err)
return err
}
@@ -122,6 +146,21 @@ func fileMode(path string) error {
return nil
}
+func filePerm(path string) error {
+if path == "" {
+return nil
+}
+fileinfo, err := os.Lstat(path)
+if err != nil {
+fmt.Printf("error from Lstat(%q): %v\n", path, err)
+return err
+}
+fmt.Printf("perms of file %q: %v\n", path, fileinfo.Mode().Perm())
+return nil
+}
func readFileContent(path string) error {
if path == "" {
return nil
@@ -138,13 +177,13 @@ func readFileContent(path string) error {
return nil
}
-func readWriteNewFile(path string) error {
+func readWriteNewFile(path string, perm os.FileMode) error {
if path == "" {
return nil
}
content := "mount-tester new file\n"
-err := ioutil.WriteFile(path, []byte(content), 0644)
+err := ioutil.WriteFile(path, []byte(content), perm)
if err != nil {
fmt.Printf("error writing new file %q: %v\n", path, err)
return err
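
An illustrative invocation exercising the new flags (paths made up). With the umask cleared above, the file written with perm 0666 should come back from Lstat with exactly those bits:

/mt --fs_type=/mnt/test --new_file_0666=/mnt/test/f --file_perm=/mnt/test/f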

View File

@@ -5,7 +5,7 @@
#
# The address on the local server to listen to.
-KUBE_API_ADDRESS="--address=127.0.0.1"
+KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"
# The port on the local server to listen on.
# KUBE_API_PORT="--port=8080"

View File

@@ -1,6 +1,7 @@
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+After=network.target
[Service]
EnvironmentFile=-/etc/kubernetes/config

View File

@@ -0,0 +1,283 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package github
import (
"fmt"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
)
func MakeClient(token string) *github.Client {
if len(token) > 0 {
ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
tc := oauth2.NewClient(oauth2.NoContext, ts)
return github.NewClient(tc)
}
return github.NewClient(nil)
}
func hasLabel(labels []github.Label, name string) bool {
for i := range labels {
label := &labels[i]
if label.Name != nil && *label.Name == name {
return true
}
}
return false
}
func hasLabels(labels []github.Label, names []string) bool {
for i := range names {
if !hasLabel(labels, names[i]) {
return false
}
}
return true
}
func fetchAllPRs(client *github.Client, user, project string) ([]github.PullRequest, error) {
page := 1
var result []github.PullRequest
for {
glog.V(4).Infof("Fetching page %d", page)
listOpts := &github.PullRequestListOptions{
Sort: "desc",
ListOptions: github.ListOptions{PerPage: 100, Page: page},
}
prs, response, err := client.PullRequests.List(user, project, listOpts)
if err != nil {
return nil, err
}
result = append(result, prs...)
if response.LastPage == 0 || response.LastPage == page {
break
}
page++
}
return result, nil
}
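// PRFunction is invoked once for each candidate pull request.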
type PRFunction func(*github.Client, *github.PullRequest, *github.Issue) error
type FilterConfig struct {
MinPRNumber int
UserWhitelist []string
WhitelistOverride string
RequiredStatusContexts []string
}
// For each PR in the project that matches:
// * pr.Number > minPRNumber
// * is mergeable
// * has labels "cla: yes", "lgtm"
// * combinedStatus == 'success' (i.e. all status hooks have reported success in github)
// Run the specified function
func ForEachCandidatePRDo(client *github.Client, user, project string, fn PRFunction, once bool, config *FilterConfig) error {
// Get all PRs
prs, err := fetchAllPRs(client, user, project)
if err != nil {
return err
}
userSet := util.StringSet{}
userSet.Insert(config.UserWhitelist...)
for ix := range prs {
if prs[ix].User == nil || prs[ix].User.Login == nil {
glog.V(2).Infof("Skipping PR %d with no user info %v.", *prs[ix].Number, *prs[ix].User)
continue
}
if *prs[ix].Number < config.MinPRNumber {
glog.V(6).Infof("Dropping %d < %d", *prs[ix].Number, config.MinPRNumber)
continue
}
pr, _, err := client.PullRequests.Get(user, project, *prs[ix].Number)
if err != nil {
glog.Errorf("Error getting pull request: %v", err)
continue
}
glog.V(2).Infof("----==== %d ====----", *pr.Number)
// Labels are actually stored in the Issues API, not the Pull Request API
issue, _, err := client.Issues.Get(user, project, *pr.Number)
if err != nil {
glog.Errorf("Failed to get issue for PR: %v", err)
continue
}
glog.V(8).Infof("%v", issue.Labels)
if !hasLabels(issue.Labels, []string{"lgtm", "cla: yes"}) {
continue
}
if !hasLabel(issue.Labels, config.WhitelistOverride) && !userSet.Has(*prs[ix].User.Login) {
glog.V(4).Infof("Dropping %d since %s isn't in whitelist and %s isn't present", *prs[ix].Number, *prs[ix].User.Login, config.WhitelistOverride)
continue
}
// This is annoying: github appears to only temporarily cache mergeability; if it
// is nil, wait for an async refresh and retry.
if pr.Mergeable == nil {
glog.Infof("Waiting for mergeability on %s %d", *pr.Title, *pr.Number)
// TODO: determine what a good empirical setting for this is.
time.Sleep(10 * time.Second)
pr, _, err = client.PullRequests.Get(user, project, *prs[ix].Number)
if err != nil {
glog.Errorf("Error refetching pull request: %v", err)
continue
}
}
if pr.Mergeable == nil {
glog.Errorf("No mergeability information for %s %d, Skipping.", *pr.Title, *pr.Number)
continue
}
if !*pr.Mergeable {
continue
}
// Validate the status information for this PR
ok, err := ValidateStatus(client, user, project, *pr.Number, config.RequiredStatusContexts, false)
if err != nil {
glog.Errorf("Error validating PR status: %v", err)
continue
}
if !ok {
continue
}
if err := fn(client, pr, issue); err != nil {
glog.Errorf("Failed to run user function: %v", err)
continue
}
if once {
break
}
}
return nil
}
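// getCommitStatus fetches the combined status for every commit in the PR.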
func getCommitStatus(client *github.Client, user, project string, prNumber int) ([]*github.CombinedStatus, error) {
commits, _, err := client.PullRequests.ListCommits(user, project, prNumber, &github.ListOptions{})
if err != nil {
return nil, err
}
commitStatus := make([]*github.CombinedStatus, len(commits))
for ix := range commits {
commit := &commits[ix]
statusList, _, err := client.Repositories.GetCombinedStatus(user, project, *commit.SHA, &github.ListOptions{})
if err != nil {
return nil, err
}
commitStatus[ix] = statusList
}
return commitStatus, nil
}
// Gets the current status of a PR by introspecting the status of the commits in the PR.
// The rules are:
// * If any member of the 'requiredContexts' list is missing, it is 'incomplete'
// * If any commit is 'pending', the PR is 'pending'
// * If any commit is 'error', the PR is in 'error'
// * If any commit is 'failure', the PR is 'failure'
// * Otherwise the PR is 'success'
func GetStatus(client *github.Client, user, project string, prNumber int, requiredContexts []string) (string, error) {
statusList, err := getCommitStatus(client, user, project, prNumber)
if err != nil {
return "", err
}
return computeStatus(statusList, requiredContexts), nil
}
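// computeStatus reduces the per-commit combined statuses to a single state
// string, following the rules documented on GetStatus above.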
func computeStatus(statusList []*github.CombinedStatus, requiredContexts []string) string {
states := util.StringSet{}
providers := util.StringSet{}
for ix := range statusList {
status := statusList[ix]
glog.V(8).Infof("Checking commit: %s", *status.SHA)
glog.V(8).Infof("Checking commit: %v", status)
states.Insert(*status.State)
for _, subStatus := range status.Statuses {
glog.V(8).Infof("Found status from: %v", subStatus)
providers.Insert(*subStatus.Context)
}
}
for _, provider := range requiredContexts {
if !providers.Has(provider) {
glog.V(8).Infof("Failed to find %s in %v", provider, providers)
return "incomplete"
}
}
switch {
case states.Has("pending"):
return "pending"
case states.Has("error"):
return "error"
case states.Has("failure"):
return "failure"
default:
return "success"
}
}
// Make sure that the combined status for all commits in a PR is 'success'
// if 'waitOnPending' is true, this function will wait until the PR is no longer pending (all checks have run)
func ValidateStatus(client *github.Client, user, project string, prNumber int, requiredContexts []string, waitOnPending bool) (bool, error) {
pending := true
for pending {
status, err := GetStatus(client, user, project, prNumber, requiredContexts)
if err != nil {
return false, err
}
switch status {
case "error", "failure":
return false, nil
case "pending":
if !waitOnPending {
return false, nil
}
pending = true
glog.V(4).Info("PR is pending, waiting for 30 seconds")
time.Sleep(30 * time.Second)
case "success":
return true, nil
case "incomplete":
return false, nil
default:
return false, fmt.Errorf("unknown status: %s", status)
}
}
return true, nil
}
// Wait for a PR to move into Pending. This is useful because the request to test a PR again
// is asynchronous with the PR actually moving into a pending state
// TODO: add a timeout
func WaitForPending(client *github.Client, user, project string, prNumber int) error {
for {
status, err := GetStatus(client, user, project, prNumber, []string{})
if err != nil {
return err
}
if status == "pending" {
return nil
}
glog.V(4).Info("PR is not pending, waiting for 30 seconds")
time.Sleep(30 * time.Second)
}
return nil
}

View File

@@ -0,0 +1,390 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package github
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strconv"
"testing"
"github.com/google/go-github/github"
)
func stringPtr(val string) *string { return &val }
func TestHasLabel(t *testing.T) {
tests := []struct {
labels []github.Label
label string
hasLabel bool
}{
{
labels: []github.Label{
{Name: stringPtr("foo")},
},
label: "foo",
hasLabel: true,
},
{
labels: []github.Label{
{Name: stringPtr("bar")},
},
label: "foo",
hasLabel: false,
},
{
labels: []github.Label{
{Name: stringPtr("bar")},
{Name: stringPtr("foo")},
},
label: "foo",
hasLabel: true,
},
{
labels: []github.Label{
{Name: stringPtr("bar")},
{Name: stringPtr("baz")},
},
label: "foo",
hasLabel: false,
},
}
for _, test := range tests {
if test.hasLabel != hasLabel(test.labels, test.label) {
t.Errorf("Unexpected output: %v", test)
}
}
}
func TestHasLabels(t *testing.T) {
tests := []struct {
labels []github.Label
seekLabels []string
hasLabel bool
}{
{
labels: []github.Label{
{Name: stringPtr("foo")},
},
seekLabels: []string{"foo"},
hasLabel: true,
},
{
labels: []github.Label{
{Name: stringPtr("bar")},
},
seekLabels: []string{"foo"},
hasLabel: false,
},
{
labels: []github.Label{
{Name: stringPtr("bar")},
{Name: stringPtr("foo")},
},
seekLabels: []string{"foo"},
hasLabel: true,
},
{
labels: []github.Label{
{Name: stringPtr("bar")},
{Name: stringPtr("baz")},
},
seekLabels: []string{"foo"},
hasLabel: false,
},
{
labels: []github.Label{
{Name: stringPtr("foo")},
},
seekLabels: []string{"foo", "bar"},
hasLabel: false,
},
}
for _, test := range tests {
if test.hasLabel != hasLabels(test.labels, test.seekLabels) {
t.Errorf("Unexpected output: %v", test)
}
}
}
func initTest() (*github.Client, *httptest.Server, *http.ServeMux) {
// test server
mux := http.NewServeMux()
server := httptest.NewServer(mux)
// github client configured to use test server
client := github.NewClient(nil)
url, _ := url.Parse(server.URL)
client.BaseURL = url
client.UploadURL = url
return client, server, mux
}
func TestFetchAllPRs(t *testing.T) {
tests := []struct {
PullRequests [][]github.PullRequest
Pages []int
}{
{
PullRequests: [][]github.PullRequest{
{
{},
},
},
Pages: []int{0},
},
{
PullRequests: [][]github.PullRequest{
{
{},
},
{
{},
},
{
{},
},
{
{},
},
},
Pages: []int{4, 4, 4, 0},
},
{
PullRequests: [][]github.PullRequest{
{
{},
},
{
{},
},
{
{},
{},
{},
},
},
Pages: []int{3, 3, 3, 0},
},
}
for _, test := range tests {
client, server, mux := initTest()
count := 0
prCount := 0
mux.HandleFunc("/repos/foo/bar/pulls", func(w http.ResponseWriter, r *http.Request) {
if r.Method != "GET" {
t.Errorf("Unexpected method: %s", r.Method)
}
if r.URL.Query().Get("page") != strconv.Itoa(count+1) {
t.Errorf("Unexpected page: %s", r.URL.Query().Get("page"))
}
if r.URL.Query().Get("sort") != "desc" {
t.Errorf("Unexpected sort: %s", r.URL.Query().Get("sort"))
}
if r.URL.Query().Get("per_page") != "100" {
t.Errorf("Unexpected per_page: %s", r.URL.Query().Get("per_page"))
}
w.Header().Add("Link",
fmt.Sprintf("<https://api.github.com/?page=%d>; rel=\"last\"", test.Pages[count]))
w.WriteHeader(http.StatusOK)
data, err := json.Marshal(test.PullRequests[count])
prCount += len(test.PullRequests[count])
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
w.Write(data)
count++
})
prs, err := fetchAllPRs(client, "foo", "bar")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(prs) != prCount {
t.Errorf("unexpected output %d vs %d", len(prs), prCount)
}
if count != len(test.PullRequests) {
t.Errorf("unexpected number of fetches: %d", count)
}
server.Close()
}
}
func TestComputeStatus(t *testing.T) {
tests := []struct {
statusList []*github.CombinedStatus
requiredContexts []string
expected string
}{
{
statusList: []*github.CombinedStatus{
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
},
expected: "success",
},
{
statusList: []*github.CombinedStatus{
{State: stringPtr("error"), SHA: stringPtr("abcdef")},
{State: stringPtr("pending"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
},
expected: "pending",
},
{
statusList: []*github.CombinedStatus{
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
{State: stringPtr("pending"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
},
expected: "pending",
},
{
statusList: []*github.CombinedStatus{
{State: stringPtr("failure"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
},
expected: "failure",
},
{
statusList: []*github.CombinedStatus{
{State: stringPtr("failure"), SHA: stringPtr("abcdef")},
{State: stringPtr("error"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
},
expected: "error",
},
{
statusList: []*github.CombinedStatus{
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
},
requiredContexts: []string{"context"},
expected: "incomplete",
},
{
statusList: []*github.CombinedStatus{
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
{State: stringPtr("pending"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
},
requiredContexts: []string{"context"},
expected: "incomplete",
},
{
statusList: []*github.CombinedStatus{
{State: stringPtr("failure"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
},
requiredContexts: []string{"context"},
expected: "incomplete",
},
{
statusList: []*github.CombinedStatus{
{State: stringPtr("failure"), SHA: stringPtr("abcdef")},
{State: stringPtr("error"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
},
requiredContexts: []string{"context"},
expected: "incomplete",
},
{
statusList: []*github.CombinedStatus{
{
State: stringPtr("success"),
SHA: stringPtr("abcdef"),
Statuses: []github.RepoStatus{
{Context: stringPtr("context")},
},
},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
},
requiredContexts: []string{"context"},
expected: "success",
},
{
statusList: []*github.CombinedStatus{
{
State: stringPtr("pending"),
SHA: stringPtr("abcdef"),
Statuses: []github.RepoStatus{
{Context: stringPtr("context")},
},
},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
},
requiredContexts: []string{"context"},
expected: "pending",
},
{
statusList: []*github.CombinedStatus{
{
State: stringPtr("error"),
SHA: stringPtr("abcdef"),
Statuses: []github.RepoStatus{
{Context: stringPtr("context")},
},
},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
},
requiredContexts: []string{"context"},
expected: "error",
},
{
statusList: []*github.CombinedStatus{
{
State: stringPtr("failure"),
SHA: stringPtr("abcdef"),
Statuses: []github.RepoStatus{
{Context: stringPtr("context")},
},
},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
{State: stringPtr("success"), SHA: stringPtr("abcdef")},
},
requiredContexts: []string{"context"},
expected: "failure",
},
}
for _, test := range tests {
// ease of use, reduce boilerplate in test cases
if test.requiredContexts == nil {
test.requiredContexts = []string{}
}
status := computeStatus(test.statusList, test.requiredContexts)
if test.expected != status {
t.Errorf("expected: %s, saw %s", test.expected, status)
}
}
}

View File

@@ -0,0 +1,91 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package jenkins
import (
"encoding/json"
"io/ioutil"
"net/http"
"github.com/golang/glog"
)
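// JenkinsClient queries a Jenkins master through its JSON API.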
type JenkinsClient struct {
Host string
}
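// Queue, Build and Job mirror the subset of the Jenkins JSON API used here.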
type Queue struct {
Builds []Build `json:"builds"`
LastCompletedBuild Build `json:"lastCompletedBuild"`
LastStableBuild Build `json:"lastStableBuild"`
}
type Build struct {
Number int `json:"number"`
URL string `json:"url"`
}
type Job struct {
Result string `json:"result"`
ID string `json:"id"`
Timestamp int `json:"timestamp"`
}
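// request issues a GET against path on the Jenkins host and returns the raw body.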
func (j *JenkinsClient) request(path string) ([]byte, error) {
url := j.Host + path
glog.V(3).Infof("Hitting: %s", url)
res, err := http.Get(url)
if err != nil {
return nil, err
}
defer res.Body.Close()
return ioutil.ReadAll(res.Body)
}
func (j *JenkinsClient) GetJob(name string) (*Queue, error) {
data, err := j.request("/job/" + name + "/api/json")
if err != nil {
return nil, err
}
glog.V(8).Infof("Got data: %s", string(data))
q := &Queue{}
if err := json.Unmarshal(data, q); err != nil {
return nil, err
}
return q, nil
}
func (j *JenkinsClient) GetLastCompletedBuild(name string) (*Job, error) {
data, err := j.request("/job/" + name + "/lastCompletedBuild/api/json")
if err != nil {
return nil, err
}
glog.V(8).Infof("Got data: %s", string(data))
job := &Job{}
if err := json.Unmarshal(data, job); err != nil {
return nil, err
}
return job, nil
}
func (j *JenkinsClient) IsBuildStable(name string) (bool, error) {
q, err := j.GetLastCompletedBuild(name)
if err != nil {
return false, err
}
return q.Result == "SUCCESS", nil
}

View File

@@ -0,0 +1,162 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
// A simple binary for merging PRs that match certain criteria.
// Usage:
// submit-queue -token=<github-access-token> -user-whitelist=<file> -jenkins-host=http://some.host [-min-pr-number=<number>] [-dry-run] [-once]
//
// Details:
/*
Usage of ./submit-queue:
-alsologtostderr=false: log to standard error as well as files
-dry-run=false: If true, don't actually merge anything
-jenkins-jobs="kubernetes-e2e-gce,kubernetes-e2e-gke-ci,kubernetes-build": Comma separated list of jobs in Jenkins to use for stability testing
-log_backtrace_at=:0: when logging hits line file:N, emit a stack trace
-log_dir="": If non-empty, write log files in this directory
-logtostderr=false: log to standard error instead of files
-min-pr-number=0: The minimum PR to start with [default: 0]
-once=false: If true, only merge one PR, don't run forever
-stderrthreshold=0: logs at or above this threshold go to stderr
-token="": The OAuth Token to use for requests.
-user-whitelist="": Path to a whitelist file that contains users to auto-merge. Required.
-v=0: log level for V logs
-vmodule=: comma-separated list of pattern=N settings for file-filtered logging
*/
import (
"bufio"
"errors"
"flag"
"os"
"strings"
"github.com/GoogleCloudPlatform/kubernetes/contrib/submit-queue/github"
"github.com/GoogleCloudPlatform/kubernetes/contrib/submit-queue/jenkins"
"github.com/golang/glog"
github_api "github.com/google/go-github/github"
)
var (
token = flag.String("token", "", "The OAuth Token to use for requests.")
minPRNumber = flag.Int("min-pr-number", 0, "The minimum PR to start with [default: 0]")
dryrun = flag.Bool("dry-run", false, "If true, don't actually merge anything")
oneOff = flag.Bool("once", false, "If true, only merge one PR, don't run forever")
jobs = flag.String("jenkins-jobs", "kubernetes-e2e-gce,kubernetes-e2e-gke-ci,kubernetes-build", "Comma separated list of jobs in Jenkins to use for stability testing")
jenkinsHost = flag.String("jenkins-host", "", "The URL for the jenkins job to watch")
userWhitelist = flag.String("user-whitelist", "", "Path to a whitelist file that contains users to auto-merge. Required.")
requiredContexts = flag.String("required-contexts", "cla/google,Shippable,continuous-integration/travis-ci/pr,Jenkins GCE e2e", "Comma separated list of status contexts required for a PR to be considered ok to merge")
whitelistOverride = flag.String("whitelist-override-label", "ok-to-merge", "Github label, if present on a PR it will be merged even if the author isn't in the whitelist")
)
const (
org = "GoogleCloudPlatform"
project = "kubernetes"
)
// This is called on a potentially mergeable PR
func runE2ETests(client *github_api.Client, pr *github_api.PullRequest, issue *github_api.Issue) error {
// Test if the build is stable in Jenkins
jenkinsClient := &jenkins.JenkinsClient{Host: *jenkinsHost}
builds := strings.Split(*jobs, ",")
for _, build := range builds {
stable, err := jenkinsClient.IsBuildStable(build)
glog.V(2).Infof("Checking build stability for %s", build)
if err != nil {
return err
}
if !stable {
glog.Errorf("Build %s isn't stable, skipping!", build)
return errors.New("Unstable build")
}
}
glog.V(2).Infof("Build is stable.")
// Ask for a fresh build
glog.V(4).Infof("Asking PR builder to build %d", *pr.Number)
body := "@k8s-bot test this [testing build queue, sorry for the noise]"
if _, _, err := client.Issues.CreateComment(org, project, *pr.Number, &github_api.IssueComment{Body: &body}); err != nil {
return err
}
// Wait for the build to start
err := github.WaitForPending(client, org, project, *pr.Number)
if err != nil {
return err
}
// Wait for the status to go back to 'success'
ok, err := github.ValidateStatus(client, org, project, *pr.Number, []string{}, true)
if err != nil {
return err
}
if !ok {
glog.Infof("Status after build is not 'success', skipping PR %d", *pr.Number)
return nil
}
if !*dryrun {
glog.Infof("Merging PR: %d", *pr.Number)
mergeBody := "Automatic merge from SubmitQueue"
if _, _, err := client.Issues.CreateComment(org, project, *pr.Number, &github_api.IssueComment{Body: &mergeBody}); err != nil {
glog.Warningf("Failed to create merge comment: %v", err)
return err
}
_, _, err := client.PullRequests.Merge(org, project, *pr.Number, "Auto commit by PR queue bot")
return err
}
glog.Infof("Skipping actual merge because --dry-run is set")
return nil
}
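// loadWhitelist reads a newline-separated list of GitHub users from file.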
func loadWhitelist(file string) ([]string, error) {
fp, err := os.Open(file)
if err != nil {
return nil, err
}
defer fp.Close()
scanner := bufio.NewScanner(fp)
result := []string{}
for scanner.Scan() {
result = append(result, scanner.Text())
}
return result, scanner.Err()
}
func main() {
flag.Parse()
if len(*userWhitelist) == 0 {
glog.Fatalf("--user-whitelist is required.")
}
if len(*jenkinsHost) == 0 {
glog.Fatalf("--jenkins-host is required.")
}
client := github.MakeClient(*token)
users, err := loadWhitelist(*userWhitelist)
if err != nil {
glog.Fatalf("error loading user whitelist: %v", err)
}
requiredContexts := strings.Split(*requiredContexts, ",")
config := &github.FilterConfig{
MinPRNumber: *minPRNumber,
UserWhitelist: users,
RequiredStatusContexts: requiredContexts,
WhitelistOverride: *whitelistOverride,
}
// ForEachCandidatePRDo honors `once` internally; run at least one pass, and
// keep looping only when -once is not set.
for {
if err := github.ForEachCandidatePRDo(client, org, project, runE2ETests, *oneOff, config); err != nil {
glog.Fatalf("Error getting candidate PRs: %v", err)
}
if *oneOff {
break
}
}
}

View File

@@ -0,0 +1,41 @@
brendandburns
thockin
mikedanese
a-robinson
saad-ali
lavalamp
smarterclayton
justinsb
satnam6502
derekwaynecarr
dchen1107
zmerlynn
erictune
eparis
caesarxuchao
wojtek-t
jlowdermilk
yifan-gu
nikhiljindal
markturansky
pmorie
yujuhong
roberthbailey
vishh
deads2k
bprashanth
cjcullen
liggitt
bgrant0607
fgrzadkowski
jayunit100
mbforbes
ArtfulCoder
piosz
davidopp
ixdy
marekbiskup
gmarek
ghodss
krousey
quinton-hoole

View File

@@ -134,7 +134,7 @@ Here are all the solutions mentioned above in table form.
IaaS Provider | Config. Mgmt | OS | Networking | Docs | Conforms | Support Level
-------------------- | ------------ | ------ | ---------- | --------------------------------------------- | ---------| ----------------------------
GKE | | | GCE | [docs](https://cloud.google.com/container-engine) | | Commercial
-Vagrant | Saltstack | Fedora | OVS | [docs](vagrant.md) | | Project
+Vagrant | Saltstack | Fedora | OVS | [docs](vagrant.md) | [✓][2] | Project
GCE | Saltstack | Debian | GCE | [docs](gce.md) | [✓][1] | Project
Azure | CoreOS | CoreOS | Weave | [docs](coreos/azure/README.md) | | Community ([@errordeveloper](https://github.com/errordeveloper), [@squillace](https://github.com/squillace), [@chanezon](https://github.com/chanezon), [@crossorigin](https://github.com/crossorigin))
Docker Single Node | custom | N/A | local | [docs](docker.md) | | Project (@brendandburns)
@@ -164,7 +164,7 @@ Local | | | _none_ | [docs](locally.md)
libvirt/KVM | CoreOS | CoreOS | libvirt/KVM | [docs](libvirt-coreos.md) | | Community (@lhuard1A)
oVirt | | | | [docs](ovirt.md) | | Community (@simon3z)
Rackspace | CoreOS | CoreOS | flannel | [docs](rackspace.md) | | Community (@doublerr)
-any | any | any | any | [docs](scratch.md) | | Community (@doublerr)
+any | any | any | any | [docs](scratch.md) | | Community (@erictune)

*Note*: The above table is ordered by version test/used in notes followed by support level.
@@ -189,6 +189,8 @@ Definition of columns:

<!-- reference style links below here -->
<!-- GCE conformance test result -->
[1]: https://gist.github.com/erictune/4cabc010906afbcc5061
+<!-- Vagrant conformance test result -->
+[2]: https://gist.github.com/derekwaynecarr/505e56036cdf010bf6b6

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->

View File

@@ -39,7 +39,7 @@ interested in just starting to explore Kubernetes, we recommend that you start t
_Note_:
There is a [bug](https://github.com/docker/docker/issues/14106) in Docker 1.7.0 that prevents this from working correctly.
-Please install Docker 1.6.2 or wait for Docker 1.7.1.
+Please install Docker 1.6.2 or Docker 1.7.1.

**Table of Contents**
@@ -83,7 +83,7 @@ The first step in the process is to initialize the master node.
Clone the Kubernetes repo, and run [master.sh](docker-multinode/master.sh) on the master machine with root:
```sh
-export K8S_VERSION=<your_k8s_version>
+export K8S_VERSION=<your_k8s_version (e.g. 1.0.1)>
cd kubernetes/cluster/docker-multinode
./master.sh
```
@@ -99,7 +99,8 @@ Once your master is up and running you can add one or more workers on different
Clone the Kubernetes repo, and run [worker.sh](docker-multinode/worker.sh) on the worker machine with root:
```sh
-export K8S_VERSION=<your_k8s_version> MASTER_IP=<your_master_ip>
+export K8S_VERSION=<your_k8s_version (e.g. 1.0.1)>
+export MASTER_IP=<your_master_ip (e.g. 1.2.3.4)>
cd kubernetes/cluster/docker-multinode
./worker.sh
```

View File

@@ -78,13 +78,13 @@ output of /proc/cmdline
### Step One: Run etcd

```sh
-docker run --net=host -d gcr.io/google_containers/etcd:2.0.9 /usr/local/bin/etcd --addr=127.0.0.1:4001 --bind-addr=0.0.0.0:4001 --data-dir=/var/etcd/data
+docker run --net=host -d gcr.io/google_containers/etcd:2.0.12 /usr/local/bin/etcd --addr=127.0.0.1:4001 --bind-addr=0.0.0.0:4001 --data-dir=/var/etcd/data
```

### Step Two: Run the master

```sh
-docker run --net=host -d -v /var/run/docker.sock:/var/run/docker.sock gcr.io/google_containers/hyperkube:v0.21.2 /hyperkube kubelet --api_servers=http://localhost:8080 --v=2 --address=0.0.0.0 --enable_server --hostname_override=127.0.0.1 --config=/etc/kubernetes/manifests
+docker run --net=host -d -v /var/run/docker.sock:/var/run/docker.sock gcr.io/google_containers/hyperkube:v1.0.1 /hyperkube kubelet --api_servers=http://localhost:8080 --v=2 --address=0.0.0.0 --enable_server --hostname_override=127.0.0.1 --config=/etc/kubernetes/manifests
```

This actually runs the kubelet, which in turn runs a [pod](../user-guide/pods.md) that contains the other master components.
@@ -94,15 +94,15 @@ This actually runs the kubelet, which in turn runs a [pod](../user-guide/pods.md
*Note, this could be combined with master above, but it requires --privileged for iptables manipulation*

```sh
-docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v0.21.2 /hyperkube proxy --master=http://127.0.0.1:8080 --v=2
+docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v1.0.1 /hyperkube proxy --master=http://127.0.0.1:8080 --v=2
```

### Test it out

At this point you should have a running Kubernetes cluster. You can test this by downloading the kubectl
binary
-([OS X](https://storage.googleapis.com/kubernetes-release/release/v0.18.2/bin/darwin/amd64/kubectl))
-([linux](https://storage.googleapis.com/kubernetes-release/release/v0.18.2/bin/linux/amd64/kubectl))
+([OS X](https://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/darwin/amd64/kubectl))
+([linux](https://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/linux/amd64/kubectl))

*Note:*
On OS/X you will need to set up port forwarding via ssh:
@@ -129,7 +129,7 @@ If you are running different Kubernetes clusters, you may need to specify `-s ht
### Run an application

```sh
-kubectl -s http://localhost:8080 run-container nginx --image=nginx --port=80
+kubectl -s http://localhost:8080 run nginx --image=nginx --port=80
```

now run `docker ps` you should see nginx running. You may need to wait a few minutes for the image to get pulled.
@@ -144,7 +144,7 @@ This should print:
```console
NAME      LABELS      SELECTOR    IP          PORT(S)
-nginx     <none>      run=nginx   <ip-addr>   80/TCP
+nginx     run=nginx   run=nginx   <ip-addr>   80/TCP
```

If ip-addr is blank run the following command to obtain it. Know issue #10836

View File

@@ -3,7 +3,7 @@
.SH NAME
.PP
-kubectl stop \- Gracefully shut down a resource by name or filename.
+kubectl stop \- Deprecated: Gracefully shut down a resource by name or filename.
.SH SYNOPSIS
@@ -13,7 +13,11 @@ kubectl stop \- Gracefully shut down a resource by name or filename.
.SH DESCRIPTION
.PP
-Gracefully shut down a resource by name or filename.
+Deprecated: Gracefully shut down a resource by name or filename.
+.PP
+stop command is deprecated, all its functionalities are covered by delete command.
+See 'kubectl delete \-\-help' for more details.
.PP
Attempts to shut down and delete a resource that supports graceful termination.

View File

@@ -97,10 +97,10 @@ kubectl
* [kubectl rolling-update](kubectl_rolling-update.md) - Perform a rolling update of the given ReplicationController.
* [kubectl run](kubectl_run.md) - Run a particular image on the cluster.
* [kubectl scale](kubectl_scale.md) - Set a new size for a Replication Controller.
-* [kubectl stop](kubectl_stop.md) - Gracefully shut down a resource by name or filename.
+* [kubectl stop](kubectl_stop.md) - Deprecated: Gracefully shut down a resource by name or filename.
* [kubectl version](kubectl_version.md) - Print the client and server version information.

-###### Auto generated by spf13/cobra at 2015-07-14 00:11:42.96000791 +0000 UTC
+###### Auto generated by spf13/cobra at 2015-07-29 09:18:59.541696918 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->

View File

@@ -33,12 +33,15 @@ Documentation for other releases can be found at
## kubectl stop

-Gracefully shut down a resource by name or filename.
+Deprecated: Gracefully shut down a resource by name or filename.

### Synopsis

-Gracefully shut down a resource by name or filename.
+Deprecated: Gracefully shut down a resource by name or filename.
+
+stop command is deprecated, all its functionalities are covered by delete command.
+See 'kubectl delete --help' for more details.

Attempts to shut down and delete a resource that supports graceful termination.
If the resource is scalable it will be scaled to 0 before deletion.
@@ -109,7 +112,7 @@ $ kubectl stop -f path/to/resources

* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

-###### Auto generated by spf13/cobra at 2015-07-14 00:11:42.957441942 +0000 UTC
+###### Auto generated by spf13/cobra at 2015-07-29 09:18:59.539597953 +0000 UTC

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->

View File

@@ -317,6 +317,7 @@ func TestExampleObjectSchemas(t *testing.T) {
"spark-master-service": &api.Service{}, "spark-master-service": &api.Service{},
"spark-master": &api.Pod{}, "spark-master": &api.Pod{},
"spark-worker-controller": &api.ReplicationController{}, "spark-worker-controller": &api.ReplicationController{},
"spark-driver": &api.Pod{},
}, },
"../examples/storm": { "../examples/storm": {
"storm-nimbus-service": &api.Service{}, "storm-nimbus-service": &api.Service{},

View File

@@ -144,45 +144,36 @@ $ kubectl logs spark-master
15/06/26 14:15:55 INFO Master: Registering worker 10.244.0.19:60970 with 1 cores, 2.6 GB RAM
```
-## Step Three: Do something with the cluster
+## Step Three: Start your Spark driver to launch jobs on your Spark cluster

-Get the address and port of the Master service.
+The Spark driver is used to launch jobs into Spark cluster. You can read more about it in
+[Spark architecture](http://spark.apache.org/docs/latest/cluster-overview.html).

-```sh
-$ kubectl get service spark-master
-NAME           LABELS             SELECTOR           IP(S)          PORT(S)
-spark-master   name=spark-master  name=spark-master  10.0.204.187   7077/TCP
-```
+```shell
+$ kubectl create -f examples/spark/spark-driver.json
+```

-SSH to one of your cluster nodes. On GCE/GKE you can either use [Developers Console](https://console.developers.google.com)
-(more details [here](https://cloud.google.com/compute/docs/ssh-in-browser))
-or run `gcloud compute ssh <name>` where the name can be taken from `kubectl get nodes`
-(more details [here](https://cloud.google.com/compute/docs/gcloud-compute/#connecting)).
+The Spark driver needs the Master service to be running.

-```
-$ kubectl get nodes
-NAME                     LABELS                                          STATUS
-kubernetes-minion-5jvu   kubernetes.io/hostname=kubernetes-minion-5jvu   Ready
-kubernetes-minion-6fbi   kubernetes.io/hostname=kubernetes-minion-6fbi   Ready
-kubernetes-minion-8y2v   kubernetes.io/hostname=kubernetes-minion-8y2v   Ready
-kubernetes-minion-h0tr   kubernetes.io/hostname=kubernetes-minion-h0tr   Ready
-$ gcloud compute ssh kubernetes-minion-5jvu --zone=us-central1-b
-Linux kubernetes-minion-5jvu 3.16.0-0.bpo.4-amd64 #1 SMP Debian 3.16.7-ckt9-3~deb8u1~bpo70+1 (2015-04-27) x86_64
-=== GCE Kubernetes node setup complete ===
-me@kubernetes-minion-5jvu:~$
-```
+### Check to see if the driver is running
+
+```shell
+$ kubectl get pods
+NAME           READY     REASON    RESTARTS   AGE
+[...]
+spark-master   1/1       Running   0          14m
+spark-driver   1/1       Running   0          10m
+```

-Once logged in run spark-base image. Inside of the image there is a script
-that sets up the environment based on the provided IP and port of the Master.
+## Step Four: Do something with the cluster
+
+Use the kubectl exec to connect to Spark driver

```
-cluster-node $ sudo docker run -it gcr.io/google_containers/spark-base
-root@f12a6fec45ce:/# . /setup_client.sh 10.0.204.187 7077
-root@f12a6fec45ce:/# pyspark
+$ kubectl exec spark-driver -it bash
+root@spark-driver:/#
+root@spark-driver:/# pyspark
Python 2.7.9 (default, Mar 1 2015, 12:57:24)
[GCC 4.9.2] on linux2
Type "help", "copyright", "credits" or "license" for more information.
15/06/26 14:25:28 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
@@ -201,9 +192,9 @@ SparkContext available as sc, HiveContext available as sqlContext.
## Result

-You now have services, replication controllers, and pods for the Spark master and Spark workers.
+You now have services, replication controllers, and pods for the Spark master , Spark driver and Spark workers.
You can take this example to the next step and start using the Apache Spark cluster
you just created, see [Spark documentation](https://spark.apache.org/documentation.html)
for more information.

## tl;dr
@@ -216,6 +207,8 @@ Make sure the Master Pod is running (use: ```kubectl get pods```).
```kubectl create -f spark-worker-controller.json```
+
+```kubectl create -f spark-driver.json```

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/spark/README.md?pixel)]()

View File

@@ -0,0 +1,4 @@
FROM gcr.io/google_containers/spark-base
ADD start.sh /start.sh
ADD log4j.properties /opt/spark/conf/log4j.properties
CMD ["/start.sh"]

View File

@@ -0,0 +1,37 @@
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
<!-- BEGIN STRIP_FOR_RELEASE -->
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
width="25" height="25">
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.
<strong>
The latest 1.0.x release of this document can be found
[here](http://releases.k8s.io/release-1.0/examples/spark/images/driver/README.md).
Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
</strong>
--
<!-- END STRIP_FOR_RELEASE -->
<!-- END MUNGE: UNVERSIONED_WARNING -->
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/spark/images/driver/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

View File

@@ -0,0 +1,23 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "$SPARK_MASTER_SERVICE_HOST spark-master" >> /etc/hosts
echo "SPARK_LOCAL_HOSTNAME=$(hostname -i)" >> /opt/spark/conf/spark-env.sh
echo "MASTER=spark://spark-master:$SPARK_MASTER_SERVICE_PORT" >> /opt/spark/conf/spark-env.sh
while true; do
sleep 100
done

View File

@@ -0,0 +1,23 @@
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "spark-driver",
"labels": {
"name": "spark-driver"
}
},
"spec": {
"containers": [
{
"name": "spark-driver",
"image": "gurvin/spark-driver",
"resources": {
"limits": {
"cpu": "100m"
}
}
}
]
}
}

View File

@@ -1,481 +0,0 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Verifies that services and virtual IPs work.
# TODO(wojtek-t): Remove this test once the following go tests are stable:
# - "should work after restarting kube-proxy"
# - "should work after restarting apiserver"
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
: ${KUBE_VERSION_ROOT:=${KUBE_ROOT}}
: ${KUBECTL:="${KUBE_VERSION_ROOT}/cluster/kubectl.sh"}
: ${KUBE_CONFIG_FILE:="config-test.sh"}
export KUBECTL KUBE_CONFIG_FILE
TEST_NAMESPACE="services-test-${RANDOM}"
KUBECTL="${KUBECTL} --namespace=${TEST_NAMESPACE}"
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_VERSION_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
prepare-e2e
function error() {
echo "$@" >&2
exit 1
}
function sort_args() {
[ $# == 0 ] && return
a=($(printf "%s\n" "$@" | sort -n))
echo "${a[*]}"
}
# Join args $2... with $1 between them.
# Example: join ", " x y z => x, y, z
function join() {
local sep item
sep=$1
shift
echo -n "${1:-}"
shift
for item; do
echo -n "${sep}${item}"
done
echo
}
svcs_to_clean=()
function do_teardown() {
${KUBECTL} delete namespace "${TEST_NAMESPACE}"
}
function make_namespace() {
echo "Making namespace '${TEST_NAMESPACE}'"
${KUBECTL} create -f - << __EOF__
{
"kind": "Namespace",
"apiVersion": "v1",
"metadata": {
"name": "${TEST_NAMESPACE}"
}
}
__EOF__
}
wait_for_apiserver() {
echo "Waiting for apiserver to be up"
local i
for i in $(seq 1 12); do
results=$(ssh-to-node "${master}" "
wget -q -T 1 -O - http://localhost:8080/healthz || true
")
if [[ "${results}" == "ok" ]]; then
return
fi
sleep 5 # wait for apiserver to restart
done
error "restarting apiserver timed out"
}
# Args:
# $1: service name
# $2: service port
# $3: service replica count
function start_service() {
echo "Starting service '${TEST_NAMESPACE}/$1' on port $2 with $3 replicas"
svcs_to_clean+=("$1")
${KUBECTL} create -f - << __EOF__
{
"kind": "ReplicationController",
"apiVersion": "v1",
"metadata": {
"name": "$1",
"labels": {
"name": "$1"
}
},
"spec": {
"replicas": $3,
"selector": {
"name": "$1"
},
"template": {
"metadata": {
"labels": {
"name": "$1"
}
},
"spec": {
"containers": [
{
"name": "$1",
"image": "gcr.io/google_containers/serve_hostname:1.1",
"ports": [
{
"containerPort": 9376,
"protocol": "TCP"
}
]
}
]
}
}
}
}
__EOF__
${KUBECTL} create -f - << __EOF__
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "$1",
"labels": {
"name": "$1"
}
},
"spec": {
"ports": [
{
"protocol": "TCP",
"port": $2,
"targetPort": 9376
}
],
"selector": {
"name": "$1"
}
}
}
__EOF__
}
# Args:
# $1: service name
function stop_service() {
echo "Stopping service '$1'"
${KUBECTL} stop rc "$1" || true
${KUBECTL} delete services "$1" || true
}
# Args:
# $1: service name
# $2: expected pod count
function query_pods() {
# This fails very occasionally, so retry a bit.
local pods_unsorted=()
local i
for i in $(seq 1 10); do
pods_unsorted=($(${KUBECTL} get pods -o template \
'--template={{range.items}}{{.metadata.name}} {{end}}' \
'--api-version=v1' \
-l name="$1"))
found="${#pods_unsorted[*]}"
if [[ "${found}" == "$2" ]]; then
break
fi
sleep 3
done
if [[ "${found}" != "$2" ]]; then
error "Failed to query pods for $1: expected $2, found ${found}"
fi
# The "return" is a sorted list of pod IDs.
sort_args "${pods_unsorted[@]}"
}
# Args:
# $1: service name
# $2: pod count
function wait_for_pods() {
echo "Querying pods in $1"
local pods_sorted=$(query_pods "$1" "$2")
printf '\t%s\n' ${pods_sorted}
# Container turn up on a clean cluster can take a while for the docker image
# pulls. Wait a generous amount of time.
# TODO: Sometimes pods change underneath us, which makes the GET fail (404).
# Maybe this test can be loosened and still be useful?
pods_needed=$2
local i
for i in $(seq 1 30); do
echo "Waiting for ${pods_needed} pods to become 'running'"
pods_needed="$2"
for id in ${pods_sorted}; do
status=$(${KUBECTL} get pods "${id}" -o template --template='{{.status.phase}}' --api-version=v1)
if [[ "${status}" == "Running" ]]; then
pods_needed=$((pods_needed-1))
fi
done
if [[ "${pods_needed}" == 0 ]]; then
break
fi
sleep 3
done
if [[ "${pods_needed}" -gt 0 ]]; then
error "Pods for $1 did not come up in time"
fi
}
# Args:
# $1: service name
# $2: service IP
# $3: service port
# $4: pod count
# $5: pod IDs (sorted)
function wait_for_service_up() {
local i
local found_pods
echo "waiting for $1 at $2:$3"
# TODO: Reduce this interval once we have a sense for the latency distribution.
for i in $(seq 1 10); do
results=($(ssh-to-node "${test_node}" "
set -e;
for i in $(seq -s' ' 1 $(($4*3))); do
wget -q -T 1 -O - http://$2:$3 || true;
echo;
done | sort -n | uniq
"))
found_pods=$(sort_args "${results[@]:+${results[@]}}")
if [[ "${found_pods}" == "$5" ]]; then
return
fi
echo "expected '$5', got '${found_pods}': will try again"
sleep 5 # wait for endpoints to propagate
done
error "$1: failed to verify portal from host"
}
# Args:
# $1: service name
# $2: service IP
# $3: service port
function wait_for_service_down() {
local i
for i in $(seq 1 15); do
$(ssh-to-node "${test_node}" "
curl -s --connect-timeout 2 "http://$2:$3" >/dev/null 2>&1 && exit 1 || exit 0;
") && break
echo "Waiting for $1 to go down"
sleep 2
done
}
# Args:
# $1: service name
# $2: service IP
# $3: service port
# $4: pod count
# $5: pod IDs (sorted)
function verify_from_container() {
local i
local found_pods
echo "waiting for $1 at $2:$3"
# TODO: Reduce this interval once we have a sense for the latency distribution.
for i in $(seq 1 10); do
results=($(ssh-to-node "${test_node}" "
set -e;
sudo docker pull gcr.io/google_containers/busybox >/dev/null;
sudo docker run gcr.io/google_containers/busybox sh -c '
for i in $(seq -s' ' 1 $(($4*3))); do
wget -q -T 1 -O - http://$2:$3 || true;
echo;
done
'" | sort -n | uniq))
found_pods=$(sort_args "${results[@]:+${results[@]}}")
if [[ "${found_pods}" == "$5" ]]; then
return
fi
echo "expected '$5', got '${found_pods}': will try again"
sleep 5 # wait for endpoints to propagate
done
error "$1: failed to verify portal from host"
}
trap do_teardown EXIT
# Get node IP addresses and pick one as our test point.
detect-minions
test_node="${MINION_NAMES[0]}"
master="${MASTER_NAME}"
# Make our namespace
make_namespace
# Launch some pods and services.
svc1_name="service1"
svc1_port=80
svc1_count=3
start_service "${svc1_name}" "${svc1_port}" "${svc1_count}"
svc2_name="service2"
svc2_port=80
svc2_count=3
start_service "${svc2_name}" "${svc2_port}" "${svc2_count}"
# Wait for the pods to become "running".
wait_for_pods "${svc1_name}" "${svc1_count}"
wait_for_pods "${svc2_name}" "${svc2_count}"
# Get the sorted lists of pods.
svc1_pods=$(query_pods "${svc1_name}" "${svc1_count}")
svc2_pods=$(query_pods "${svc2_name}" "${svc2_count}")
# Get the VIP IPs.
svc1_ip=$(${KUBECTL} get services -o template '--template={{.spec.clusterIP}}' "${svc1_name}" --api-version=v1)
test -n "${svc1_ip}" || error "Service1 IP is blank"
svc2_ip=$(${KUBECTL} get services -o template '--template={{.spec.clusterIP}}' "${svc2_name}" --api-version=v1)
test -n "${svc2_ip}" || error "Service2 IP is blank"
if [[ "${svc1_ip}" == "${svc2_ip}" ]]; then
error "VIPs conflict: ${svc1_ip}"
fi
#
# Test 1: Prove that the service VIP is alive.
#
echo "Test 1: Prove that the service VIP is alive."
echo "Verifying the VIP from the host"
wait_for_service_up "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
"${svc1_count}" "${svc1_pods}"
wait_for_service_up "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
"${svc2_count}" "${svc2_pods}"
echo "Verifying the VIP from a container"
verify_from_container "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
"${svc1_count}" "${svc1_pods}"
verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
"${svc2_count}" "${svc2_pods}"
#
# Test 2: Bounce the proxy and make sure the VIP comes back.
#
echo "Test 2: Bounce the proxy and make sure the VIP comes back."
echo "Restarting kube-proxy"
restart-kube-proxy "${test_node}"
echo "Verifying the VIP from the host"
wait_for_service_up "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
"${svc1_count}" "${svc1_pods}"
wait_for_service_up "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
"${svc2_count}" "${svc2_pods}"
echo "Verifying the VIP from a container"
verify_from_container "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
"${svc1_count}" "${svc1_pods}"
verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
"${svc2_count}" "${svc2_pods}"
#
# Test 3: Stop one service and make sure it is gone.
#
echo "Test 3: Stop one service and make sure it is gone."
stop_service "${svc1_name}"
wait_for_service_down "${svc1_name}" "${svc1_ip}" "${svc1_port}"
#
# Test 4: Bring up another service.
# TODO: Actually add a test to force re-use.
#
echo "Test 4: Bring up another service."
svc3_name="service3"
svc3_port=80
svc3_count=3
start_service "${svc3_name}" "${svc3_port}" "${svc3_count}"
# Wait for the pods to become "running".
wait_for_pods "${svc3_name}" "${svc3_count}"
# Get the sorted lists of pods.
svc3_pods=$(query_pods "${svc3_name}" "${svc3_count}")
# Get the VIP.
svc3_ip=$(${KUBECTL} get services -o template '--template={{.spec.clusterIP}}' "${svc3_name}" --api-version=v1)
test -n "${svc3_ip}" || error "Service3 IP is blank"
echo "Verifying the VIPs from the host"
wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
"${svc3_count}" "${svc3_pods}"
echo "Verifying the VIPs from a container"
verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
"${svc3_count}" "${svc3_pods}"
#
# Test 5: Remove the iptables rules, make sure they come back.
#
echo "Test 5: Remove the iptables rules, make sure they come back."
echo "Manually removing iptables rules"
# Remove both the new and old style chains, in case we're testing on an old kubelet
ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PORTALS-HOST || true"
ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true"
echo "Verifying the VIPs from the host"
wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
"${svc3_count}" "${svc3_pods}"
echo "Verifying the VIPs from a container"
verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
"${svc3_count}" "${svc3_pods}"
#
# Test 6: Restart the master, make sure VIPs come back.
#
echo "Test 6: Restart the master, make sure VIPs come back."
echo "Restarting the master"
restart-apiserver "${master}"
wait_for_apiserver
echo "Verifying the VIPs from the host"
wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
"${svc3_count}" "${svc3_pods}"
echo "Verifying the VIPs from a container"
verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
"${svc3_count}" "${svc3_pods}"
#
# Test 7: Bring up another service, make sure it does not re-use IPs.
#
echo "Test 7: Bring up another service, make sure it does not re-use IPs."
svc4_name="service4"
svc4_port=80
svc4_count=3
start_service "${svc4_name}" "${svc4_port}" "${svc4_count}"
# Wait for the pods to become "running".
wait_for_pods "${svc4_name}" "${svc4_count}"
# Get the sorted lists of pods.
svc4_pods=$(query_pods "${svc4_name}" "${svc4_count}")
# Get the VIP.
svc4_ip=$(${KUBECTL} get services -o template '--template={{.spec.clusterIP}}' "${svc4_name}" --api-version=v1)
test -n "${svc4_ip}" || error "Service4 IP is blank"
if [[ "${svc4_ip}" == "${svc2_ip}" || "${svc4_ip}" == "${svc3_ip}" ]]; then
error "VIPs conflict: ${svc4_ip}"
fi
echo "Verifying the VIPs from the host"
wait_for_service_up "${svc4_name}" "${svc4_ip}" "${svc4_port}" \
"${svc4_count}" "${svc4_pods}"
echo "Verifying the VIPs from a container"
verify_from_container "${svc4_name}" "${svc4_ip}" "${svc4_port}" \
"${svc4_count}" "${svc4_pods}"
exit 0

View File

@@ -87,7 +87,6 @@ readonly KUBE_TEST_PORTABLE=(
contrib/for-tests/network-tester/rc.json
contrib/for-tests/network-tester/service.json
hack/e2e.go
-hack/e2e-suite
hack/e2e-internal
hack/ginkgo-e2e.sh
hack/lib
@@ -104,10 +103,16 @@ readonly KUBE_CLIENT_PLATFORMS=(
windows/amd64
)
-# Gigabytes desired for parallel platform builds. 8 is fairly
+# Gigabytes desired for parallel platform builds. 11 is fairly
# arbitrary, but is a reasonable splitting point for 2015
# laptops-versus-not.
-readonly KUBE_PARALLEL_BUILD_MEMORY=8
+#
+# If you are using boot2docker, the following seems to work (note
+# that 12000 rounds to 11G):
+#   boot2docker down
+#   VBoxManage modifyvm boot2docker-vm --memory 12000
+#   boot2docker up
+readonly KUBE_PARALLEL_BUILD_MEMORY=11
readonly KUBE_ALL_TARGETS=(
"${KUBE_SERVER_TARGETS[@]}"

View File

@@ -33,14 +33,6 @@ function generate_version() {
cat >> $TMPFILE <<EOF
package ${version}
-import (
-"reflect"
-"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
-"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
-"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
-)
// AUTO-GENERATED FUNCTIONS START HERE
EOF

View File

@@ -18,14 +18,14 @@ package api
 
 // AUTO-GENERATED FUNCTIONS START HERE
 import (
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
-	"speter.net/go/exp/math/dec/inf"
-	"time"
+	resource "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
+	conversion "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
+	fields "github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
+	labels "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
+	runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+	util "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+	inf "speter.net/go/exp/math/dec/inf"
+	time "time"
 )
 
 func deepCopy_api_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error {
@@ -587,7 +587,7 @@ func deepCopy_api_LimitRange(in LimitRange, out *LimitRange, c *conversion.Clone
 func deepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error {
 	out.Type = in.Type
 	if in.Max != nil {
-		out.Max = make(map[ResourceName]resource.Quantity)
+		out.Max = make(ResourceList)
 		for key, val := range in.Max {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -599,7 +599,7 @@ func deepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv
 		out.Max = nil
 	}
 	if in.Min != nil {
-		out.Min = make(map[ResourceName]resource.Quantity)
+		out.Min = make(ResourceList)
 		for key, val := range in.Min {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -611,7 +611,7 @@ func deepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv
 		out.Min = nil
 	}
 	if in.Default != nil {
-		out.Default = make(map[ResourceName]resource.Quantity)
+		out.Default = make(ResourceList)
 		for key, val := range in.Default {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -857,7 +857,7 @@ func deepCopy_api_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) err
 func deepCopy_api_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error {
 	if in.Capacity != nil {
-		out.Capacity = make(map[ResourceName]resource.Quantity)
+		out.Capacity = make(ResourceList)
 		for key, val := range in.Capacity {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1041,7 +1041,7 @@ func deepCopy_api_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, ou
 		out.AccessModes = nil
 	}
 	if in.Capacity != nil {
-		out.Capacity = make(map[ResourceName]resource.Quantity)
+		out.Capacity = make(ResourceList)
 		for key, val := range in.Capacity {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1143,7 +1143,7 @@ func deepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *Persist
 func deepCopy_api_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error {
 	if in.Capacity != nil {
-		out.Capacity = make(map[ResourceName]resource.Quantity)
+		out.Capacity = make(ResourceList)
 		for key, val := range in.Capacity {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1571,7 +1571,7 @@ func deepCopy_api_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList
 func deepCopy_api_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error {
 	if in.Hard != nil {
-		out.Hard = make(map[ResourceName]resource.Quantity)
+		out.Hard = make(ResourceList)
 		for key, val := range in.Hard {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1587,7 +1587,7 @@ func deepCopy_api_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec
 func deepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error {
 	if in.Hard != nil {
-		out.Hard = make(map[ResourceName]resource.Quantity)
+		out.Hard = make(ResourceList)
 		for key, val := range in.Hard {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1599,7 +1599,7 @@ func deepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuota
 		out.Hard = nil
 	}
 	if in.Used != nil {
-		out.Used = make(map[ResourceName]resource.Quantity)
+		out.Used = make(ResourceList)
 		for key, val := range in.Used {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1615,7 +1615,7 @@ func deepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuota
 func deepCopy_api_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error {
 	if in.Limits != nil {
-		out.Limits = make(map[ResourceName]resource.Quantity)
+		out.Limits = make(ResourceList)
 		for key, val := range in.Limits {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1627,7 +1627,7 @@ func deepCopy_api_ResourceRequirements(in ResourceRequirements, out *ResourceReq
 		out.Limits = nil
 	}
 	if in.Requests != nil {
-		out.Requests = make(map[ResourceName]resource.Quantity)
+		out.Requests = make(ResourceList)
 		for key, val := range in.Requests {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
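
The make(map[ResourceName]resource.Quantity) -> make(ResourceList) substitutions above are behavior-preserving: in pkg/api, ResourceList is (approximately) declared as a named map type over that same underlying type, so both forms allocate the identical map while the named form reads better and survives changes to the underlying type. A minimal self-contained sketch with stand-in types:

// Sketch: a named map type is interchangeable with its underlying type.
package main

import "fmt"

type ResourceName string
type Quantity struct{ s string } // stand-in for resource.Quantity

type ResourceList map[ResourceName]Quantity

func main() {
	rl := make(ResourceList) // same allocation as make(map[ResourceName]Quantity)
	rl["cpu"] = Quantity{"100m"}
	fmt.Println(len(rl))
}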

View File

@@ -16,15 +16,14 @@ limitations under the License.
 
 package v1
 
+// AUTO-GENERATED FUNCTIONS START HERE
 import (
-	"reflect"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
+	api "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+	resource "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
+	conversion "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
+	reflect "reflect"
 )
 
-// AUTO-GENERATED FUNCTIONS START HERE
 func convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
 	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
 		defaulting.(func(*api.AWSElasticBlockStoreVolumeSource))(in)
@@ -692,7 +691,7 @@ func convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out
 	}
 	out.Type = LimitType(in.Type)
 	if in.Max != nil {
-		out.Max = make(map[ResourceName]resource.Quantity)
+		out.Max = make(ResourceList)
 		for key, val := range in.Max {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -704,7 +703,7 @@ func convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out
 		out.Max = nil
 	}
 	if in.Min != nil {
-		out.Min = make(map[ResourceName]resource.Quantity)
+		out.Min = make(ResourceList)
 		for key, val := range in.Min {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -716,7 +715,7 @@ func convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out
 		out.Min = nil
 	}
 	if in.Default != nil {
-		out.Default = make(map[ResourceName]resource.Quantity)
+		out.Default = make(ResourceList)
 		for key, val := range in.Default {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1006,7 +1005,7 @@ func convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus
 		defaulting.(func(*api.NodeStatus))(in)
 	}
 	if in.Capacity != nil {
-		out.Capacity = make(map[ResourceName]resource.Quantity)
+		out.Capacity = make(ResourceList)
 		for key, val := range in.Capacity {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1216,7 +1215,7 @@ func convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(i
 		out.AccessModes = nil
 	}
 	if in.Capacity != nil {
-		out.Capacity = make(map[ResourceName]resource.Quantity)
+		out.Capacity = make(ResourceList)
 		for key, val := range in.Capacity {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1330,7 +1329,7 @@ func convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.Persist
 		defaulting.(func(*api.PersistentVolumeSpec))(in)
 	}
 	if in.Capacity != nil {
-		out.Capacity = make(map[ResourceName]resource.Quantity)
+		out.Capacity = make(ResourceList)
 		for key, val := range in.Capacity {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1735,7 +1734,7 @@ func convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuota
 		defaulting.(func(*api.ResourceQuotaSpec))(in)
 	}
 	if in.Hard != nil {
-		out.Hard = make(map[ResourceName]resource.Quantity)
+		out.Hard = make(ResourceList)
 		for key, val := range in.Hard {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1754,7 +1753,7 @@ func convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQ
 		defaulting.(func(*api.ResourceQuotaStatus))(in)
 	}
 	if in.Hard != nil {
-		out.Hard = make(map[ResourceName]resource.Quantity)
+		out.Hard = make(ResourceList)
 		for key, val := range in.Hard {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1766,7 +1765,7 @@ func convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQ
 		out.Hard = nil
 	}
 	if in.Used != nil {
-		out.Used = make(map[ResourceName]resource.Quantity)
+		out.Used = make(ResourceList)
 		for key, val := range in.Used {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1785,7 +1784,7 @@ func convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.Resourc
 		defaulting.(func(*api.ResourceRequirements))(in)
 	}
 	if in.Limits != nil {
-		out.Limits = make(map[ResourceName]resource.Quantity)
+		out.Limits = make(ResourceList)
 		for key, val := range in.Limits {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -1797,7 +1796,7 @@ func convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.Resourc
 		out.Limits = nil
 	}
 	if in.Requests != nil {
-		out.Requests = make(map[ResourceName]resource.Quantity)
+		out.Requests = make(ResourceList)
 		for key, val := range in.Requests {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -2942,7 +2941,7 @@ func convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *ap
 	}
 	out.Type = api.LimitType(in.Type)
 	if in.Max != nil {
-		out.Max = make(map[api.ResourceName]resource.Quantity)
+		out.Max = make(api.ResourceList)
 		for key, val := range in.Max {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -2954,7 +2953,7 @@ func convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *ap
 		out.Max = nil
 	}
 	if in.Min != nil {
-		out.Min = make(map[api.ResourceName]resource.Quantity)
+		out.Min = make(api.ResourceList)
 		for key, val := range in.Min {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -2966,7 +2965,7 @@ func convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *ap
 		out.Min = nil
 	}
 	if in.Default != nil {
-		out.Default = make(map[api.ResourceName]resource.Quantity)
+		out.Default = make(api.ResourceList)
 		for key, val := range in.Default {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -3256,7 +3255,7 @@ func convert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus
 		defaulting.(func(*NodeStatus))(in)
 	}
 	if in.Capacity != nil {
-		out.Capacity = make(map[api.ResourceName]resource.Quantity)
+		out.Capacity = make(api.ResourceList)
 		for key, val := range in.Capacity {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -3466,7 +3465,7 @@ func convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(i
 		out.AccessModes = nil
 	}
 	if in.Capacity != nil {
-		out.Capacity = make(map[api.ResourceName]resource.Quantity)
+		out.Capacity = make(api.ResourceList)
 		for key, val := range in.Capacity {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -3580,7 +3579,7 @@ func convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentV
 		defaulting.(func(*PersistentVolumeSpec))(in)
 	}
 	if in.Capacity != nil {
-		out.Capacity = make(map[api.ResourceName]resource.Quantity)
+		out.Capacity = make(api.ResourceList)
 		for key, val := range in.Capacity {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -3985,7 +3984,7 @@ func convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec
 		defaulting.(func(*ResourceQuotaSpec))(in)
 	}
 	if in.Hard != nil {
-		out.Hard = make(map[api.ResourceName]resource.Quantity)
+		out.Hard = make(api.ResourceList)
 		for key, val := range in.Hard {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -4004,7 +4003,7 @@ func convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuota
 		defaulting.(func(*ResourceQuotaStatus))(in)
 	}
 	if in.Hard != nil {
-		out.Hard = make(map[api.ResourceName]resource.Quantity)
+		out.Hard = make(api.ResourceList)
 		for key, val := range in.Hard {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -4016,7 +4015,7 @@ func convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuota
 		out.Hard = nil
 	}
 	if in.Used != nil {
-		out.Used = make(map[api.ResourceName]resource.Quantity)
+		out.Used = make(api.ResourceList)
 		for key, val := range in.Used {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -4035,7 +4034,7 @@ func convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceReq
 		defaulting.(func(*ResourceRequirements))(in)
 	}
 	if in.Limits != nil {
-		out.Limits = make(map[api.ResourceName]resource.Quantity)
+		out.Limits = make(api.ResourceList)
 		for key, val := range in.Limits {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {
@@ -4047,7 +4046,7 @@ func convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceReq
 		out.Limits = nil
 	}
 	if in.Requests != nil {
-		out.Requests = make(map[api.ResourceName]resource.Quantity)
+		out.Requests = make(api.ResourceList)
 		for key, val := range in.Requests {
 			newVal := resource.Quantity{}
 			if err := s.Convert(&val, &newVal, 0); err != nil {

View File

@@ -18,13 +18,13 @@ package v1
 
 // AUTO-GENERATED FUNCTIONS START HERE
 import (
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
-	"speter.net/go/exp/math/dec/inf"
-	"time"
+	api "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+	resource "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
+	conversion "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
+	runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+	util "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+	inf "speter.net/go/exp/math/dec/inf"
+	time "time"
 )
 
 func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c *conversion.Cloner) error {
@@ -600,7 +600,7 @@ func deepCopy_v1_LimitRange(in LimitRange, out *LimitRange, c *conversion.Cloner
 func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error {
 	out.Type = in.Type
 	if in.Max != nil {
-		out.Max = make(map[ResourceName]resource.Quantity)
+		out.Max = make(ResourceList)
 		for key, val := range in.Max {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -612,7 +612,7 @@ func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conve
 		out.Max = nil
 	}
 	if in.Min != nil {
-		out.Min = make(map[ResourceName]resource.Quantity)
+		out.Min = make(ResourceList)
 		for key, val := range in.Min {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -624,7 +624,7 @@ func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conve
 		out.Min = nil
 	}
 	if in.Default != nil {
-		out.Default = make(map[ResourceName]resource.Quantity)
+		out.Default = make(ResourceList)
 		for key, val := range in.Default {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -860,7 +860,7 @@ func deepCopy_v1_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) erro
 func deepCopy_v1_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error {
 	if in.Capacity != nil {
-		out.Capacity = make(map[ResourceName]resource.Quantity)
+		out.Capacity = make(ResourceList)
 		for key, val := range in.Capacity {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1044,7 +1044,7 @@ func deepCopy_v1_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, out
 		out.AccessModes = nil
 	}
 	if in.Capacity != nil {
-		out.Capacity = make(map[ResourceName]resource.Quantity)
+		out.Capacity = make(ResourceList)
 		for key, val := range in.Capacity {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1146,7 +1146,7 @@ func deepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *Persiste
 func deepCopy_v1_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error {
 	if in.Capacity != nil {
-		out.Capacity = make(map[ResourceName]resource.Quantity)
+		out.Capacity = make(ResourceList)
 		for key, val := range in.Capacity {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1580,7 +1580,7 @@ func deepCopy_v1_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList,
 func deepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error {
 	if in.Hard != nil {
-		out.Hard = make(map[ResourceName]resource.Quantity)
+		out.Hard = make(ResourceList)
 		for key, val := range in.Hard {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1596,7 +1596,7 @@ func deepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec,
 func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error {
 	if in.Hard != nil {
-		out.Hard = make(map[ResourceName]resource.Quantity)
+		out.Hard = make(ResourceList)
 		for key, val := range in.Hard {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1608,7 +1608,7 @@ func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaS
 		out.Hard = nil
 	}
 	if in.Used != nil {
-		out.Used = make(map[ResourceName]resource.Quantity)
+		out.Used = make(ResourceList)
 		for key, val := range in.Used {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1624,7 +1624,7 @@ func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaS
 func deepCopy_v1_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error {
 	if in.Limits != nil {
-		out.Limits = make(map[ResourceName]resource.Quantity)
+		out.Limits = make(ResourceList)
 		for key, val := range in.Limits {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
@@ -1636,7 +1636,7 @@ func deepCopy_v1_ResourceRequirements(in ResourceRequirements, out *ResourceRequ
 		out.Limits = nil
 	}
 	if in.Requests != nil {
-		out.Requests = make(map[ResourceName]resource.Quantity)
+		out.Requests = make(ResourceList)
 		for key, val := range in.Requests {
 			newVal := new(resource.Quantity)
 			if err := deepCopy_resource_Quantity(val, newVal, c); err != nil {
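
The loop bodies left untouched above show the deep-copy discipline these generated functions follow: a fresh value is allocated per map entry, so the destination map shares no Quantity with the source. A stripped-down, self-contained sketch of the same pattern (Quantity here is a stand-in type, not the real resource.Quantity):

// Sketch of the per-entry copy pattern used by the generated deep-copy code.
package main

import "fmt"

type Quantity struct{ s string }

func copyList(in map[string]Quantity) map[string]Quantity {
	out := make(map[string]Quantity, len(in))
	for key, val := range in {
		newVal := new(Quantity) // fresh allocation per entry
		*newVal = val           // a deep type would copy field by field here
		out[key] = *newVal
	}
	return out
}

func main() {
	src := map[string]Quantity{"cpu": {"100m"}}
	dst := copyList(src)
	fmt.Println(dst["cpu"].s)
}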

View File

@@ -204,10 +204,11 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
 	}
 	versionedStatus := indirectArbitraryPointer(versionedStatusPtr)
 	var (
 		getOptions          runtime.Object
+		versionedGetOptions runtime.Object
 		getOptionsKind      string
 		getSubpath          bool
 		getSubpathKey       string
 	)
 	if isGetterWithOptions {
 		getOptions, getSubpath, getSubpathKey = getterWithOptions.NewGetOptions()
@@ -215,14 +216,19 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
 		if err != nil {
 			return err
 		}
+		versionedGetOptions, err = a.group.Creater.New(serverVersion, getOptionsKind)
+		if err != nil {
+			return err
+		}
 		isGetter = true
 	}
 	var (
 		connectOptions          runtime.Object
+		versionedConnectOptions runtime.Object
 		connectOptionsKind      string
 		connectSubpath          bool
 		connectSubpathKey       string
 	)
 	if isConnecter {
 		connectOptions, connectSubpath, connectSubpathKey = connecter.NewConnectOptions()
@@ -231,6 +237,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
 			if err != nil {
 				return err
 			}
+			versionedConnectOptions, err = a.group.Creater.New(serverVersion, connectOptionsKind)
 		}
 	}
@@ -390,7 +397,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
 			Returns(http.StatusOK, "OK", versionedObject).
 			Writes(versionedObject)
 		if isGetterWithOptions {
-			if err := addObjectParams(ws, route, getOptions); err != nil {
+			if err := addObjectParams(ws, route, versionedGetOptions); err != nil {
 				return err
 			}
 		}
@@ -561,8 +568,8 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
 				Produces("*/*").
 				Consumes("*/*").
 				Writes("string")
-			if connectOptions != nil {
-				if err := addObjectParams(ws, route, connectOptions); err != nil {
+			if versionedConnectOptions != nil {
+				if err := addObjectParams(ws, route, versionedConnectOptions); err != nil {
 					return err
 				}
 			}
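
Documenting routes from the versioned options object matters because the json and description struct tags on the external type are what reflection sees; the test changes in the next file check exactly that. The sketch below is an assumed shape, not the actual addObjectParams implementation, showing how per-field parameter docs can be derived from tags:

// Sketch (assumed mechanism): derive query-parameter docs from the
// json/description tags of a versioned options struct via reflection.
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type SimpleGetOptions struct {
	Param1 string `json:"param1" description:"description for param1"`
	Param2 string `json:"param2" description:"description for param2"`
}

func objectParams(obj interface{}) {
	t := reflect.TypeOf(obj)
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		name := strings.Split(f.Tag.Get("json"), ",")[0]
		fmt.Printf("param %q: %s\n", name, f.Tag.Get("description"))
	}
}

func main() { objectParams(SimpleGetOptions{}) }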

View File

@@ -255,8 +255,8 @@ func (*SimpleRoot) IsAnAPIObject() {}
 type SimpleGetOptions struct {
 	api.TypeMeta `json:",inline"`
-	Param1 string `json:"param1"`
-	Param2 string `json:"param2"`
+	Param1 string `json:"param1" description:"description for param1"`
+	Param2 string `json:"param2" description:"description for param2"`
 	Path   string `json:"atAPath"`
 }
@@ -1078,6 +1078,47 @@ func TestGetBinary(t *testing.T) {
 	}
 }
 
+func validateSimpleGetOptionsParams(t *testing.T, route *restful.Route) {
+	// Validate name and description
+	expectedParams := map[string]string{
+		"param1":  "description for param1",
+		"param2":  "description for param2",
+		"atAPath": "",
+	}
+	for _, p := range route.ParameterDocs {
+		data := p.Data()
+		if desc, exists := expectedParams[data.Name]; exists {
+			if desc != data.Description {
+				t.Errorf("unexpected description for parameter %s: %s\n", data.Name, data.Description)
+			}
+			delete(expectedParams, data.Name)
+		}
+	}
+	if len(expectedParams) > 0 {
+		t.Errorf("did not find all expected parameters: %#v", expectedParams)
+	}
+}
+
+func TestGetWithOptionsRouteParams(t *testing.T) {
+	storage := map[string]rest.Storage{}
+	simpleStorage := GetWithOptionsRESTStorage{
+		SimpleRESTStorage: &SimpleRESTStorage{},
+	}
+	storage["simple"] = &simpleStorage
+	handler := handle(storage)
+	ws := handler.(*defaultAPIServer).container.RegisteredWebServices()
+	if len(ws) == 0 {
+		t.Fatal("no web services registered")
+	}
+	routes := ws[0].Routes()
+	for i := range routes {
+		if routes[i].Method == "GET" && routes[i].Operation == "readNamespacedSimple" {
+			validateSimpleGetOptionsParams(t, &routes[i])
+			break
+		}
+	}
+}
+
 func TestGetWithOptions(t *testing.T) {
 	storage := map[string]rest.Storage{}
 	simpleStorage := GetWithOptionsRESTStorage{
@@ -1292,6 +1333,33 @@ func TestConnect(t *testing.T) {
 	}
 }
 
+func TestConnectWithOptionsRouteParams(t *testing.T) {
+	connectStorage := &ConnecterRESTStorage{
+		connectHandler:      &SimpleConnectHandler{},
+		emptyConnectOptions: &SimpleGetOptions{},
+	}
+	storage := map[string]rest.Storage{
+		"simple":         &SimpleRESTStorage{},
+		"simple/connect": connectStorage,
+	}
+	handler := handle(storage)
+	ws := handler.(*defaultAPIServer).container.RegisteredWebServices()
+	if len(ws) == 0 {
+		t.Fatal("no web services registered")
+	}
+	routes := ws[0].Routes()
+	for i := range routes {
+		switch routes[i].Operation {
+		case "connectGetNamespacedSimpleConnect":
+		case "connectPostNamespacedSimpleConnect":
+		case "connectPutNamespacedSimpleConnect":
+		case "connectDeleteNamespacedSimpleConnect":
+			validateSimpleGetOptionsParams(t, &routes[i])
+		}
+	}
+}
+
 func TestConnectWithOptions(t *testing.T) {
 	responseText := "Hello World"
 	itemID := "theID"

View File

@@ -27,6 +27,9 @@ type Capabilities struct {
 	// List of pod sources for which using host network is allowed.
 	HostNetworkSources []string
+
+	// PerConnectionBandwidthLimitBytesPerSec limits the throughput of each connection (currently only used for proxy, exec, attach)
+	PerConnectionBandwidthLimitBytesPerSec int64
 }
 
 // TODO: Clean these up into a singleton
@@ -43,10 +46,11 @@ func Initialize(c Capabilities) {
 }
 
 // Setup the capability set.  It wraps Initialize for improving usibility.
-func Setup(allowPrivileged bool, hostNetworkSources []string) {
+func Setup(allowPrivileged bool, hostNetworkSources []string, perConnectionBytesPerSec int64) {
 	Initialize(Capabilities{
 		AllowPrivileged:    allowPrivileged,
 		HostNetworkSources: hostNetworkSources,
+		PerConnectionBandwidthLimitBytesPerSec: perConnectionBytesPerSec,
 	})
 }
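
Callers of Setup now have to supply the per-connection byte rate. A sketch of how a binary might wire it from a flag; only the Setup signature comes from this diff, while the flag name, the "file" source and the values are illustrative:

// Sketch: wiring the new parameter from a command-line flag.
package main

import (
	"flag"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/capabilities"
)

var perConnectionBytesPerSec = flag.Int64(
	"max-connection-bytes-per-sec", 0,
	"If non-zero, throttle each proxy/exec/attach stream to this many bytes/sec")

func main() {
	flag.Parse()
	capabilities.Setup(true /* allowPrivileged */, []string{"file"}, *perConnectionBytesPerSec)
}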

View File

@@ -224,7 +224,7 @@ func (r *Reflector) listAndWatch(stopCh <-chan struct{}) {
 		}
 		if err := r.watchHandler(w, &resourceVersion, resyncCh, stopCh); err != nil {
 			if err != errorResyncRequested && err != errorStopRequested {
-				util.HandleError(fmt.Errorf("%s: watch of %v ended with: %v", r.name, r.expectedType, err))
+				glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
 			}
 			return
 		}

View File

@@ -43,10 +43,10 @@ func MinifyConfig(config *Config) error {
 		return fmt.Errorf("cannot locate context %v", config.CurrentContext)
 	}
 
-	newContexts := map[string]Context{}
+	newContexts := map[string]*Context{}
 	newContexts[config.CurrentContext] = currContext
 
-	newClusters := map[string]Cluster{}
+	newClusters := map[string]*Cluster{}
 	if len(currContext.Cluster) > 0 {
 		if _, exists := config.Clusters[currContext.Cluster]; !exists {
 			return fmt.Errorf("cannot locate cluster %v", currContext.Cluster)
@@ -55,7 +55,7 @@ func MinifyConfig(config *Config) error {
 		newClusters[currContext.Cluster] = config.Clusters[currContext.Cluster]
 	}
 
-	newAuthInfos := map[string]AuthInfo{}
+	newAuthInfos := map[string]*AuthInfo{}
 	if len(currContext.AuthInfo) > 0 {
 		if _, exists := config.AuthInfos[currContext.AuthInfo]; !exists {
 			return fmt.Errorf("cannot locate user %v", currContext.AuthInfo)

View File

@@ -38,13 +38,13 @@ func newMergedConfig(certFile, certContent, keyFile, keyContent, caFile, caConte
 	}
 
 	return Config{
-		AuthInfos: map[string]AuthInfo{
+		AuthInfos: map[string]*AuthInfo{
 			"red-user":  {Token: "red-token", ClientCertificateData: []byte(certContent), ClientKeyData: []byte(keyContent)},
 			"blue-user": {Token: "blue-token", ClientCertificate: certFile, ClientKey: keyFile}},
-		Clusters: map[string]Cluster{
+		Clusters: map[string]*Cluster{
 			"cow-cluster":     {Server: "http://cow.org:8080", CertificateAuthorityData: []byte(caContent)},
 			"chicken-cluster": {Server: "http://chicken.org:8080", CertificateAuthority: caFile}},
-		Contexts: map[string]Context{
+		Contexts: map[string]*Context{
 			"federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster"},
 			"shaker-context":  {AuthInfo: "blue-user", Cluster: "chicken-cluster"}},
 		CurrentContext: "federal-context",

View File

@@ -33,21 +33,21 @@ type Config struct {
 	// Preferences holds general information to be use for cli interactions
 	Preferences Preferences `json:"preferences"`
 	// Clusters is a map of referencable names to cluster configs
-	Clusters map[string]Cluster `json:"clusters"`
+	Clusters map[string]*Cluster `json:"clusters"`
 	// AuthInfos is a map of referencable names to user configs
-	AuthInfos map[string]AuthInfo `json:"users"`
+	AuthInfos map[string]*AuthInfo `json:"users"`
 	// Contexts is a map of referencable names to context configs
-	Contexts map[string]Context `json:"contexts"`
+	Contexts map[string]*Context `json:"contexts"`
 	// CurrentContext is the name of the context that you would like to use by default
 	CurrentContext string `json:"current-context"`
 	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
-	Extensions map[string]runtime.EmbeddedObject `json:"extensions,omitempty"`
+	Extensions map[string]*runtime.EmbeddedObject `json:"extensions,omitempty"`
 }
 
 type Preferences struct {
 	Colors bool `json:"colors,omitempty"`
 	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
-	Extensions map[string]runtime.EmbeddedObject `json:"extensions,omitempty"`
+	Extensions map[string]*runtime.EmbeddedObject `json:"extensions,omitempty"`
 }
 
 // Cluster contains information about how to communicate with a kubernetes cluster
@@ -65,7 +65,7 @@ type Cluster struct {
 	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
 	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
 	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
-	Extensions map[string]runtime.EmbeddedObject `json:"extensions,omitempty"`
+	Extensions map[string]*runtime.EmbeddedObject `json:"extensions,omitempty"`
 }
 
 // AuthInfo contains information that describes identity information.  This is use to tell the kubernetes cluster who you are.
@@ -87,7 +87,7 @@ type AuthInfo struct {
 	// Password is the password for basic authentication to the kubernetes cluster.
 	Password string `json:"password,omitempty"`
 	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
-	Extensions map[string]runtime.EmbeddedObject `json:"extensions,omitempty"`
+	Extensions map[string]*runtime.EmbeddedObject `json:"extensions,omitempty"`
 }
 
 // Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with)
@@ -101,36 +101,36 @@ type Context struct {
 	// Namespace is the default namespace to use on unspecified requests
 	Namespace string `json:"namespace,omitempty"`
 	// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
-	Extensions map[string]runtime.EmbeddedObject `json:"extensions,omitempty"`
+	Extensions map[string]*runtime.EmbeddedObject `json:"extensions,omitempty"`
 }
 
 // NewConfig is a convenience function that returns a new Config object with non-nil maps
 func NewConfig() *Config {
 	return &Config{
 		Preferences: *NewPreferences(),
-		Clusters:    make(map[string]Cluster),
-		AuthInfos:   make(map[string]AuthInfo),
-		Contexts:    make(map[string]Context),
-		Extensions:  make(map[string]runtime.EmbeddedObject),
+		Clusters:    make(map[string]*Cluster),
+		AuthInfos:   make(map[string]*AuthInfo),
+		Contexts:    make(map[string]*Context),
+		Extensions:  make(map[string]*runtime.EmbeddedObject),
 	}
 }
 
 // NewConfig is a convenience function that returns a new Config object with non-nil maps
 func NewContext() *Context {
-	return &Context{Extensions: make(map[string]runtime.EmbeddedObject)}
+	return &Context{Extensions: make(map[string]*runtime.EmbeddedObject)}
 }
 
 // NewConfig is a convenience function that returns a new Config object with non-nil maps
 func NewCluster() *Cluster {
-	return &Cluster{Extensions: make(map[string]runtime.EmbeddedObject)}
+	return &Cluster{Extensions: make(map[string]*runtime.EmbeddedObject)}
}
 
 // NewConfig is a convenience function that returns a new Config object with non-nil maps
 func NewAuthInfo() *AuthInfo {
-	return &AuthInfo{Extensions: make(map[string]runtime.EmbeddedObject)}
+	return &AuthInfo{Extensions: make(map[string]*runtime.EmbeddedObject)}
 }
 
 // NewConfig is a convenience function that returns a new Config object with non-nil maps
 func NewPreferences() *Preferences {
-	return &Preferences{Extensions: make(map[string]runtime.EmbeddedObject)}
+	return &Preferences{Extensions: make(map[string]*runtime.EmbeddedObject)}
 }
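
The move from map[string]Cluster to map[string]*Cluster is what makes in-place mutation of a stanza possible: with struct values in a map, assigning to a field of an element does not compile in Go. A self-contained sketch:

// Sketch of why the pointer-valued maps matter. With map[string]Cluster,
// fields of an element cannot be assigned directly:
//
//	config.Clusters["alfa"].Server = "..." // compile error: cannot assign
//
// With map[string]*Cluster the in-place update is legal, which is what
// code that rewrites paths inside each stanza relies on.
package main

import "fmt"

type Cluster struct{ Server string }

func main() {
	clusters := map[string]*Cluster{"alfa": {Server: "https://alfa.org:8080"}}
	clusters["alfa"].Server = "https://alfa.org:6443" // legal with pointer values
	fmt.Println(clusters["alfa"].Server)
}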

View File

@@ -42,35 +42,35 @@ func ExampleEmptyConfig() {
 func ExampleOfOptionsConfig() {
 	defaultConfig := NewConfig()
 	defaultConfig.Preferences.Colors = true
-	defaultConfig.Clusters["alfa"] = Cluster{
+	defaultConfig.Clusters["alfa"] = &Cluster{
 		Server:                "https://alfa.org:8080",
 		APIVersion:            "v1beta2",
 		InsecureSkipTLSVerify: true,
 		CertificateAuthority:  "path/to/my/cert-ca-filename",
 	}
-	defaultConfig.Clusters["bravo"] = Cluster{
+	defaultConfig.Clusters["bravo"] = &Cluster{
 		Server:                "https://bravo.org:8080",
 		APIVersion:            "v1beta1",
 		InsecureSkipTLSVerify: false,
 	}
-	defaultConfig.AuthInfos["white-mage-via-cert"] = AuthInfo{
+	defaultConfig.AuthInfos["white-mage-via-cert"] = &AuthInfo{
 		ClientCertificate: "path/to/my/client-cert-filename",
 		ClientKey:         "path/to/my/client-key-filename",
 	}
-	defaultConfig.AuthInfos["red-mage-via-token"] = AuthInfo{
+	defaultConfig.AuthInfos["red-mage-via-token"] = &AuthInfo{
 		Token: "my-secret-token",
 	}
-	defaultConfig.Contexts["bravo-as-black-mage"] = Context{
+	defaultConfig.Contexts["bravo-as-black-mage"] = &Context{
 		Cluster:   "bravo",
 		AuthInfo:  "black-mage-via-file",
 		Namespace: "yankee",
 	}
-	defaultConfig.Contexts["alfa-as-black-mage"] = Context{
+	defaultConfig.Contexts["alfa-as-black-mage"] = &Context{
 		Cluster:   "alfa",
 		AuthInfo:  "black-mage-via-file",
 		Namespace: "zulu",
 	}
-	defaultConfig.Contexts["alfa-as-white-mage"] = Context{
+	defaultConfig.Contexts["alfa-as-white-mage"] = &Context{
 		Cluster:  "alfa",
 		AuthInfo: "white-mage-via-cert",
 	}

View File

@@ -57,19 +57,19 @@ func init() {
 				return err
 			}
 
-			out.Clusters = make(map[string]api.Cluster)
+			out.Clusters = make(map[string]*api.Cluster)
 			if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil {
 				return err
 			}
-			out.AuthInfos = make(map[string]api.AuthInfo)
+			out.AuthInfos = make(map[string]*api.AuthInfo)
 			if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil {
 				return err
 			}
-			out.Contexts = make(map[string]api.Context)
+			out.Contexts = make(map[string]*api.Context)
 			if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil {
 				return err
 			}
-			out.Extensions = make(map[string]runtime.EmbeddedObject)
+			out.Extensions = make(map[string]*runtime.EmbeddedObject)
 			if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil {
 				return err
 			}
@@ -99,18 +99,18 @@ func init() {
 			}
 			return nil
 		},
-		func(in *[]NamedCluster, out *map[string]api.Cluster, s conversion.Scope) error {
+		func(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error {
 			for _, curr := range *in {
 				newCluster := api.NewCluster()
 				if err := s.Convert(&curr.Cluster, newCluster, 0); err != nil {
 					return err
 				}
-				(*out)[curr.Name] = *newCluster
+				(*out)[curr.Name] = newCluster
 			}
 			return nil
 		},
-		func(in *map[string]api.Cluster, out *[]NamedCluster, s conversion.Scope) error {
+		func(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error {
 			allKeys := make([]string, 0, len(*in))
 			for key := range *in {
 				allKeys = append(allKeys, key)
@@ -120,7 +120,7 @@ func init() {
 			for _, key := range allKeys {
 				newCluster := (*in)[key]
 				oldCluster := &Cluster{}
-				if err := s.Convert(&newCluster, oldCluster, 0); err != nil {
+				if err := s.Convert(newCluster, oldCluster, 0); err != nil {
 					return err
 				}
@@ -130,18 +130,18 @@ func init() {
 			return nil
 		},
-		func(in *[]NamedAuthInfo, out *map[string]api.AuthInfo, s conversion.Scope) error {
+		func(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error {
 			for _, curr := range *in {
 				newAuthInfo := api.NewAuthInfo()
 				if err := s.Convert(&curr.AuthInfo, newAuthInfo, 0); err != nil {
 					return err
 				}
-				(*out)[curr.Name] = *newAuthInfo
+				(*out)[curr.Name] = newAuthInfo
 			}
 			return nil
 		},
-		func(in *map[string]api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error {
+		func(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error {
 			allKeys := make([]string, 0, len(*in))
 			for key := range *in {
 				allKeys = append(allKeys, key)
@@ -151,7 +151,7 @@ func init() {
 			for _, key := range allKeys {
 				newAuthInfo := (*in)[key]
 				oldAuthInfo := &AuthInfo{}
-				if err := s.Convert(&newAuthInfo, oldAuthInfo, 0); err != nil {
+				if err := s.Convert(newAuthInfo, oldAuthInfo, 0); err != nil {
 					return err
 				}
@@ -161,18 +161,18 @@ func init() {
 			return nil
 		},
-		func(in *[]NamedContext, out *map[string]api.Context, s conversion.Scope) error {
+		func(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error {
 			for _, curr := range *in {
 				newContext := api.NewContext()
 				if err := s.Convert(&curr.Context, newContext, 0); err != nil {
 					return err
 				}
-				(*out)[curr.Name] = *newContext
+				(*out)[curr.Name] = newContext
 			}
 			return nil
 		},
-		func(in *map[string]api.Context, out *[]NamedContext, s conversion.Scope) error {
+		func(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error {
 			allKeys := make([]string, 0, len(*in))
 			for key := range *in {
 				allKeys = append(allKeys, key)
@@ -182,7 +182,7 @@ func init() {
 			for _, key := range allKeys {
 				newContext := (*in)[key]
 				oldContext := &Context{}
-				if err := s.Convert(&newContext, oldContext, 0); err != nil {
+				if err := s.Convert(newContext, oldContext, 0); err != nil {
 					return err
 				}
@@ -192,18 +192,18 @@ func init() {
 			return nil
 		},
-		func(in *[]NamedExtension, out *map[string]runtime.EmbeddedObject, s conversion.Scope) error {
+		func(in *[]NamedExtension, out *map[string]*runtime.EmbeddedObject, s conversion.Scope) error {
 			for _, curr := range *in {
 				newExtension := &runtime.EmbeddedObject{}
 				if err := s.Convert(&curr.Extension, newExtension, 0); err != nil {
 					return err
 				}
-				(*out)[curr.Name] = *newExtension
+				(*out)[curr.Name] = newExtension
 			}
 			return nil
 		},
-		func(in *map[string]runtime.EmbeddedObject, out *[]NamedExtension, s conversion.Scope) error {
+		func(in *map[string]*runtime.EmbeddedObject, out *[]NamedExtension, s conversion.Scope) error {
 			allKeys := make([]string, 0, len(*in))
 			for key := range *in {
 				allKeys = append(allKeys, key)
@@ -213,7 +213,7 @@ func init() {
 			for _, key := range allKeys {
 				newExtension := (*in)[key]
 				oldExtension := &runtime.RawExtension{}
-				if err := s.Convert(&newExtension, oldExtension, 0); err != nil {
+				if err := s.Convert(newExtension, oldExtension, 0); err != nil {
 					return err
 				}
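
The allKeys slices gathered above exist so the map-to-slice conversions can emit entries in a stable order; Go randomizes map iteration, and the elided lines in each hunk presumably sort the keys between collection and use. The pattern in isolation:

// Sketch of the sorted-keys pattern for deterministic serialization
// despite Go's randomized map iteration order.
package main

import (
	"fmt"
	"sort"
)

func main() {
	in := map[string]string{"zulu": "z", "alfa": "a"}
	allKeys := make([]string, 0, len(in))
	for key := range in {
		allKeys = append(allKeys, key)
	}
	sort.Strings(allKeys) // the step that pins the order
	for _, key := range allKeys {
		fmt.Println(key, in[key])
	}
}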

View File

@@ -32,14 +32,14 @@ func createValidTestConfig() *clientcmdapi.Config {
 	)
 
 	config := clientcmdapi.NewConfig()
-	config.Clusters["clean"] = clientcmdapi.Cluster{
+	config.Clusters["clean"] = &clientcmdapi.Cluster{
 		Server:     server,
 		APIVersion: latest.Version,
 	}
-	config.AuthInfos["clean"] = clientcmdapi.AuthInfo{
+	config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
 		Token: token,
 	}
-	config.Contexts["clean"] = clientcmdapi.Context{
+	config.Contexts["clean"] = &clientcmdapi.Context{
 		Cluster:  "clean",
 		AuthInfo: "clean",
 	}
@@ -87,16 +87,16 @@ func TestCertificateData(t *testing.T) {
 	keyData := []byte("key-data")
 
 	config := clientcmdapi.NewConfig()
-	config.Clusters["clean"] = clientcmdapi.Cluster{
+	config.Clusters["clean"] = &clientcmdapi.Cluster{
 		Server:                   "https://localhost:8443",
 		APIVersion:               latest.Version,
 		CertificateAuthorityData: caData,
 	}
-	config.AuthInfos["clean"] = clientcmdapi.AuthInfo{
+	config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
 		ClientCertificateData: certData,
 		ClientKeyData:         keyData,
 	}
-	config.Contexts["clean"] = clientcmdapi.Context{
+	config.Contexts["clean"] = &clientcmdapi.Context{
 		Cluster:  "clean",
 		AuthInfo: "clean",
 	}
@@ -120,15 +120,15 @@ func TestBasicAuthData(t *testing.T) {
 	password := "mypass"
 
 	config := clientcmdapi.NewConfig()
-	config.Clusters["clean"] = clientcmdapi.Cluster{
+	config.Clusters["clean"] = &clientcmdapi.Cluster{
 		Server:     "https://localhost:8443",
 		APIVersion: latest.Version,
 	}
-	config.AuthInfos["clean"] = clientcmdapi.AuthInfo{
+	config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
 		Username: username,
 		Password: password,
 	}
-	config.Contexts["clean"] = clientcmdapi.Context{
+	config.Contexts["clean"] = &clientcmdapi.Context{
 		Cluster:  "clean",
 		AuthInfo: "clean",
 	}

View File

@@ -23,6 +23,7 @@ import (
"os" "os"
"path" "path"
"path/filepath" "path/filepath"
"strings"
"github.com/ghodss/yaml" "github.com/ghodss/yaml"
"github.com/imdario/mergo" "github.com/imdario/mergo"
@@ -120,11 +121,6 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
if err := mergeConfigWithFile(mapConfig, file); err != nil { if err := mergeConfigWithFile(mapConfig, file); err != nil {
errlist = append(errlist, err) errlist = append(errlist, err)
} }
if rules.ResolvePaths() {
if err := ResolveLocalPaths(file, mapConfig); err != nil {
errlist = append(errlist, err)
}
}
} }
// merge all of the struct values in the reverse order so that priority is given correctly // merge all of the struct values in the reverse order so that priority is given correctly
@@ -133,9 +129,6 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
for i := len(kubeConfigFiles) - 1; i >= 0; i-- { for i := len(kubeConfigFiles) - 1; i >= 0; i-- {
file := kubeConfigFiles[i] file := kubeConfigFiles[i]
mergeConfigWithFile(nonMapConfig, file) mergeConfigWithFile(nonMapConfig, file)
if rules.ResolvePaths() {
ResolveLocalPaths(file, nonMapConfig)
}
} }
// since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and
@@ -144,6 +137,12 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) {
mergo.Merge(config, mapConfig) mergo.Merge(config, mapConfig)
mergo.Merge(config, nonMapConfig) mergo.Merge(config, nonMapConfig)
if rules.ResolvePaths() {
if err := ResolveLocalPaths(config); err != nil {
errlist = append(errlist, err)
}
}
return config, errors.NewAggregate(errlist) return config, errors.NewAggregate(errlist)
} }
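
The reordering above moves path resolution out of the per-file loop and runs it once after the merge. The merge priority itself comes from mergo, which (as used here) only fills fields of the destination that are still zero; a rough standalone sketch under that assumption, with a simplified conf type rather than the real clientcmdapi.Config:

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type conf struct {
	CurrentContext string
	Clusters       map[string]string
}

func main() {
	dst := conf{}
	mapCfg := conf{Clusters: map[string]string{"cow-cluster": "http://cow.org:8080"}}
	nonMapCfg := conf{CurrentContext: "federal-context"}

	// mergo.Merge only fills in fields of dst that are still zero, so the
	// config merged first keeps priority for any field both sources set.
	mergo.Merge(&dst, mapCfg)
	mergo.Merge(&dst, nonMapCfg)

	fmt.Printf("%+v\n", dst) // both the map entries and CurrentContext survive
}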
@@ -213,49 +212,6 @@ func mergeConfigWithFile(startingConfig *clientcmdapi.Config, filename string) e
return nil return nil
} }
// ResolveLocalPaths resolves all relative paths in the config object with respect to the parent directory of the filename
// this cannot be done directly inside of LoadFromFile because doing so would make it impossible to load a file without
// modification of its contents.
func ResolveLocalPaths(filename string, config *clientcmdapi.Config) error {
if len(filename) == 0 {
return nil
}
configDir, err := filepath.Abs(filepath.Dir(filename))
if err != nil {
return fmt.Errorf("Could not determine the absolute path of config file %s: %v", filename, err)
}
resolvedClusters := make(map[string]clientcmdapi.Cluster)
for key, cluster := range config.Clusters {
cluster.CertificateAuthority = resolveLocalPath(configDir, cluster.CertificateAuthority)
resolvedClusters[key] = cluster
}
config.Clusters = resolvedClusters
resolvedAuthInfos := make(map[string]clientcmdapi.AuthInfo)
for key, authInfo := range config.AuthInfos {
authInfo.ClientCertificate = resolveLocalPath(configDir, authInfo.ClientCertificate)
authInfo.ClientKey = resolveLocalPath(configDir, authInfo.ClientKey)
resolvedAuthInfos[key] = authInfo
}
config.AuthInfos = resolvedAuthInfos
return nil
}
// resolveLocalPath makes the path absolute with respect to the startingDir
func resolveLocalPath(startingDir, path string) string {
if len(path) == 0 {
return path
}
if filepath.IsAbs(path) {
return path
}
return filepath.Join(startingDir, path)
}
// LoadFromFile takes a filename and deserializes the contents into Config object // LoadFromFile takes a filename and deserializes the contents into Config object
func LoadFromFile(filename string) (*clientcmdapi.Config, error) { func LoadFromFile(filename string) (*clientcmdapi.Config, error) {
kubeconfigBytes, err := ioutil.ReadFile(filename) kubeconfigBytes, err := ioutil.ReadFile(filename)
@@ -335,3 +291,159 @@ func Write(config clientcmdapi.Config) ([]byte, error) {
func (rules ClientConfigLoadingRules) ResolvePaths() bool { func (rules ClientConfigLoadingRules) ResolvePaths() bool {
return !rules.DoNotResolvePaths return !rules.DoNotResolvePaths
} }
// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin
// this cannot be done directly inside of LoadFromFile because doing so would make it impossible to load a file without
// modification of its contents.
func ResolveLocalPaths(config *clientcmdapi.Config) error {
for _, cluster := range config.Clusters {
if len(cluster.LocationOfOrigin) == 0 {
continue
}
base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
if err != nil {
return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err)
}
if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil {
return err
}
}
for _, authInfo := range config.AuthInfos {
if len(authInfo.LocationOfOrigin) == 0 {
continue
}
base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
if err != nil {
return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err)
}
if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil {
return err
}
}
return nil
}
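
Each stanza now carries its own base directory: the directory of the file it was loaded from, made absolute. A tiny standalone illustration of that base computation (the path is made up):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	locationOfOrigin := ".kube/config" // hypothetical relative origin
	base, err := filepath.Abs(filepath.Dir(locationOfOrigin))
	if err != nil {
		panic(err)
	}
	fmt.Println(base) // e.g. /home/user/.kube, depending on the working directory
}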
// RelativizeClusterLocalPaths first absolutizes the paths by calling ResolveLocalPaths. This assumes that any NEW path is already
// absolute, but any existing path will be resolved relative to LocationOfOrigin
func RelativizeClusterLocalPaths(cluster *clientcmdapi.Cluster) error {
if len(cluster.LocationOfOrigin) == 0 {
return fmt.Errorf("no location of origin for %s", cluster.Server)
}
base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
if err != nil {
return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err)
}
if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil {
return err
}
if err := RelativizePathWithNoBacksteps(GetClusterFileReferences(cluster), base); err != nil {
return err
}
return nil
}
// RelativizeAuthInfoLocalPaths first absolutizes the paths by calling ResolveLocalPaths. This assumes that any NEW path is already
// absolute, but any existing path will be resolved relative to LocationOfOrigin
func RelativizeAuthInfoLocalPaths(authInfo *clientcmdapi.AuthInfo) error {
if len(authInfo.LocationOfOrigin) == 0 {
return fmt.Errorf("no location of origin for %v", authInfo)
}
base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
if err != nil {
return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err)
}
if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil {
return err
}
if err := RelativizePathWithNoBacksteps(GetAuthInfoFileReferences(authInfo), base); err != nil {
return err
}
return nil
}
func RelativizeConfigPaths(config *clientcmdapi.Config, base string) error {
return RelativizePathWithNoBacksteps(GetConfigFileReferences(config), base)
}
func ResolveConfigPaths(config *clientcmdapi.Config, base string) error {
return ResolvePaths(GetConfigFileReferences(config), base)
}
func GetConfigFileReferences(config *clientcmdapi.Config) []*string {
refs := []*string{}
for _, cluster := range config.Clusters {
refs = append(refs, GetClusterFileReferences(cluster)...)
}
for _, authInfo := range config.AuthInfos {
refs = append(refs, GetAuthInfoFileReferences(authInfo)...)
}
return refs
}
func GetClusterFileReferences(cluster *clientcmdapi.Cluster) []*string {
return []*string{&cluster.CertificateAuthority}
}
func GetAuthInfoFileReferences(authInfo *clientcmdapi.AuthInfo) []*string {
return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey}
}
// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory
func ResolvePaths(refs []*string, base string) error {
for _, ref := range refs {
// Don't resolve empty paths
if len(*ref) > 0 {
// Don't resolve absolute paths
if !filepath.IsAbs(*ref) {
*ref = filepath.Join(base, *ref)
}
}
}
return nil
}
// RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory as long as they do not require backsteps.
// Any path requiring a backstep is left as-is, as long as it is absolute. Any non-absolute path that can't be relativized produces an error
func RelativizePathWithNoBacksteps(refs []*string, base string) error {
for _, ref := range refs {
// Don't relativize empty paths
if len(*ref) > 0 {
rel, err := MakeRelative(*ref, base)
if err != nil {
return err
}
// if we have a backstep, don't mess with the path
if strings.HasPrefix(rel, "../") {
if filepath.IsAbs(*ref) {
continue
}
return fmt.Errorf("%v requires backsteps and is not absolute", *ref)
}
*ref = rel
}
}
return nil
}
func MakeRelative(path, base string) (string, error) {
if len(path) > 0 {
rel, err := filepath.Rel(base, path)
if err != nil {
return path, err
}
return rel, nil
}
return path, nil
}
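
Taken together, the helpers above implement two symmetric rules: relative references resolve against the origin directory while absolute ones are left alone, and relativizing refuses to introduce "../" backsteps. A standalone sketch of those rules using only the standard library, not the helpers themselves (the paths are made up):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	base := "/home/user/.kube" // hypothetical origin directory

	// Resolving: join relative refs onto the base, leave absolute refs alone.
	for _, ref := range []string{"ca.crt", "/etc/pki/ca.crt"} {
		if len(ref) > 0 && !filepath.IsAbs(ref) {
			ref = filepath.Join(base, ref)
		}
		fmt.Println("resolved:", ref)
	}

	// Relativizing: a result that would start with "../" is a backstep, so
	// the absolute form is kept instead of the relative one.
	for _, abs := range []string{"/home/user/.kube/certs/ca.crt", "/etc/pki/ca.crt"} {
		rel, err := filepath.Rel(base, abs)
		if err != nil || strings.HasPrefix(rel, "../") {
			fmt.Println("kept absolute:", abs)
			continue
		}
		fmt.Println("relativized:", rel)
	}
}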


@@ -34,43 +34,43 @@ import (
var ( var (
testConfigAlfa = clientcmdapi.Config{ testConfigAlfa = clientcmdapi.Config{
AuthInfos: map[string]clientcmdapi.AuthInfo{ AuthInfos: map[string]*clientcmdapi.AuthInfo{
"red-user": {Token: "red-token"}}, "red-user": {Token: "red-token"}},
Clusters: map[string]clientcmdapi.Cluster{ Clusters: map[string]*clientcmdapi.Cluster{
"cow-cluster": {Server: "http://cow.org:8080"}}, "cow-cluster": {Server: "http://cow.org:8080"}},
Contexts: map[string]clientcmdapi.Context{ Contexts: map[string]*clientcmdapi.Context{
"federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster", Namespace: "hammer-ns"}}, "federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster", Namespace: "hammer-ns"}},
} }
testConfigBravo = clientcmdapi.Config{ testConfigBravo = clientcmdapi.Config{
AuthInfos: map[string]clientcmdapi.AuthInfo{ AuthInfos: map[string]*clientcmdapi.AuthInfo{
"black-user": {Token: "black-token"}}, "black-user": {Token: "black-token"}},
Clusters: map[string]clientcmdapi.Cluster{ Clusters: map[string]*clientcmdapi.Cluster{
"pig-cluster": {Server: "http://pig.org:8080"}}, "pig-cluster": {Server: "http://pig.org:8080"}},
Contexts: map[string]clientcmdapi.Context{ Contexts: map[string]*clientcmdapi.Context{
"queen-anne-context": {AuthInfo: "black-user", Cluster: "pig-cluster", Namespace: "saw-ns"}}, "queen-anne-context": {AuthInfo: "black-user", Cluster: "pig-cluster", Namespace: "saw-ns"}},
} }
testConfigCharlie = clientcmdapi.Config{ testConfigCharlie = clientcmdapi.Config{
AuthInfos: map[string]clientcmdapi.AuthInfo{ AuthInfos: map[string]*clientcmdapi.AuthInfo{
"green-user": {Token: "green-token"}}, "green-user": {Token: "green-token"}},
Clusters: map[string]clientcmdapi.Cluster{ Clusters: map[string]*clientcmdapi.Cluster{
"horse-cluster": {Server: "http://horse.org:8080"}}, "horse-cluster": {Server: "http://horse.org:8080"}},
Contexts: map[string]clientcmdapi.Context{ Contexts: map[string]*clientcmdapi.Context{
"shaker-context": {AuthInfo: "green-user", Cluster: "horse-cluster", Namespace: "chisel-ns"}}, "shaker-context": {AuthInfo: "green-user", Cluster: "horse-cluster", Namespace: "chisel-ns"}},
} }
testConfigDelta = clientcmdapi.Config{ testConfigDelta = clientcmdapi.Config{
AuthInfos: map[string]clientcmdapi.AuthInfo{ AuthInfos: map[string]*clientcmdapi.AuthInfo{
"blue-user": {Token: "blue-token"}}, "blue-user": {Token: "blue-token"}},
Clusters: map[string]clientcmdapi.Cluster{ Clusters: map[string]*clientcmdapi.Cluster{
"chicken-cluster": {Server: "http://chicken.org:8080"}}, "chicken-cluster": {Server: "http://chicken.org:8080"}},
Contexts: map[string]clientcmdapi.Context{ Contexts: map[string]*clientcmdapi.Context{
"gothic-context": {AuthInfo: "blue-user", Cluster: "chicken-cluster", Namespace: "plane-ns"}}, "gothic-context": {AuthInfo: "blue-user", Cluster: "chicken-cluster", Namespace: "plane-ns"}},
} }
testConfigConflictAlfa = clientcmdapi.Config{ testConfigConflictAlfa = clientcmdapi.Config{
AuthInfos: map[string]clientcmdapi.AuthInfo{ AuthInfos: map[string]*clientcmdapi.AuthInfo{
"red-user": {Token: "a-different-red-token"}, "red-user": {Token: "a-different-red-token"},
"yellow-user": {Token: "yellow-token"}}, "yellow-user": {Token: "yellow-token"}},
Clusters: map[string]clientcmdapi.Cluster{ Clusters: map[string]*clientcmdapi.Cluster{
"cow-cluster": {Server: "http://a-different-cow.org:8080", InsecureSkipTLSVerify: true}, "cow-cluster": {Server: "http://a-different-cow.org:8080", InsecureSkipTLSVerify: true},
"donkey-cluster": {Server: "http://donkey.org:8080", InsecureSkipTLSVerify: true}}, "donkey-cluster": {Server: "http://donkey.org:8080", InsecureSkipTLSVerify: true}},
CurrentContext: "federal-context", CurrentContext: "federal-context",
@@ -176,21 +176,21 @@ func TestConflictingCurrentContext(t *testing.T) {
func TestResolveRelativePaths(t *testing.T) { func TestResolveRelativePaths(t *testing.T) {
pathResolutionConfig1 := clientcmdapi.Config{ pathResolutionConfig1 := clientcmdapi.Config{
AuthInfos: map[string]clientcmdapi.AuthInfo{ AuthInfos: map[string]*clientcmdapi.AuthInfo{
"relative-user-1": {ClientCertificate: "relative/client/cert", ClientKey: "../relative/client/key"}, "relative-user-1": {ClientCertificate: "relative/client/cert", ClientKey: "../relative/client/key"},
"absolute-user-1": {ClientCertificate: "/absolute/client/cert", ClientKey: "/absolute/client/key"}, "absolute-user-1": {ClientCertificate: "/absolute/client/cert", ClientKey: "/absolute/client/key"},
}, },
Clusters: map[string]clientcmdapi.Cluster{ Clusters: map[string]*clientcmdapi.Cluster{
"relative-server-1": {CertificateAuthority: "../relative/ca"}, "relative-server-1": {CertificateAuthority: "../relative/ca"},
"absolute-server-1": {CertificateAuthority: "/absolute/ca"}, "absolute-server-1": {CertificateAuthority: "/absolute/ca"},
}, },
} }
pathResolutionConfig2 := clientcmdapi.Config{ pathResolutionConfig2 := clientcmdapi.Config{
AuthInfos: map[string]clientcmdapi.AuthInfo{ AuthInfos: map[string]*clientcmdapi.AuthInfo{
"relative-user-2": {ClientCertificate: "relative/client/cert2", ClientKey: "../relative/client/key2"}, "relative-user-2": {ClientCertificate: "relative/client/cert2", ClientKey: "../relative/client/key2"},
"absolute-user-2": {ClientCertificate: "/absolute/client/cert2", ClientKey: "/absolute/client/key2"}, "absolute-user-2": {ClientCertificate: "/absolute/client/cert2", ClientKey: "/absolute/client/key2"},
}, },
Clusters: map[string]clientcmdapi.Cluster{ Clusters: map[string]*clientcmdapi.Cluster{
"relative-server-2": {CertificateAuthority: "../relative/ca2"}, "relative-server-2": {CertificateAuthority: "../relative/ca2"},
"absolute-server-2": {CertificateAuthority: "/absolute/ca2"}, "absolute-server-2": {CertificateAuthority: "/absolute/ca2"},
}, },


@@ -95,15 +95,15 @@ func Validate(config clientcmdapi.Config) error {
} }
for contextName, context := range config.Contexts { for contextName, context := range config.Contexts {
validationErrors = append(validationErrors, validateContext(contextName, context, config)...) validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
} }
for authInfoName, authInfo := range config.AuthInfos { for authInfoName, authInfo := range config.AuthInfos {
validationErrors = append(validationErrors, validateAuthInfo(authInfoName, authInfo)...) validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...)
} }
for clusterName, clusterInfo := range config.Clusters { for clusterName, clusterInfo := range config.Clusters {
validationErrors = append(validationErrors, validateClusterInfo(clusterName, clusterInfo)...) validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...)
} }
return newErrConfigurationInvalid(validationErrors) return newErrConfigurationInvalid(validationErrors)
@@ -131,9 +131,9 @@ func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error {
} }
if exists { if exists {
validationErrors = append(validationErrors, validateContext(contextName, context, config)...) validationErrors = append(validationErrors, validateContext(contextName, *context, config)...)
validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, config.AuthInfos[context.AuthInfo])...) validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...)
validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, config.Clusters[context.Cluster])...) validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...)
} }
return newErrConfigurationInvalid(validationErrors) return newErrConfigurationInvalid(validationErrors)
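
The validators themselves keep value parameters; only the call sites change, dereferencing each pointer pulled out of the map. A trimmed sketch of the pattern, with a toy AuthInfo and validator rather than the real ones:

package main

import "fmt"

type AuthInfo struct{ Token string }

// Toy stand-in for validateAuthInfo: still takes a value, not a pointer.
func validateAuthInfo(name string, a AuthInfo) []error {
	if a.Token == "" {
		return []error{fmt.Errorf("no credentials found for %s", name)}
	}
	return nil
}

func main() {
	authInfos := map[string]*AuthInfo{"clean": {Token: "here"}}
	for name, a := range authInfos {
		// Dereference at the call site, as the hunk above now does.
		fmt.Println(validateAuthInfo(name, *a))
	}
}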


@@ -28,25 +28,25 @@ import (
func TestConfirmUsableBadInfoButOkConfig(t *testing.T) { func TestConfirmUsableBadInfoButOkConfig(t *testing.T) {
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.Clusters["missing ca"] = clientcmdapi.Cluster{ config.Clusters["missing ca"] = &clientcmdapi.Cluster{
Server: "anything", Server: "anything",
CertificateAuthority: "missing", CertificateAuthority: "missing",
} }
config.AuthInfos["error"] = clientcmdapi.AuthInfo{ config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
Username: "anything", Username: "anything",
Token: "here", Token: "here",
} }
config.Contexts["dirty"] = clientcmdapi.Context{ config.Contexts["dirty"] = &clientcmdapi.Context{
Cluster: "missing ca", Cluster: "missing ca",
AuthInfo: "error", AuthInfo: "error",
} }
config.Clusters["clean"] = clientcmdapi.Cluster{ config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "anything", Server: "anything",
} }
config.AuthInfos["clean"] = clientcmdapi.AuthInfo{ config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
Token: "here", Token: "here",
} }
config.Contexts["clean"] = clientcmdapi.Context{ config.Contexts["clean"] = &clientcmdapi.Context{
Cluster: "clean", Cluster: "clean",
AuthInfo: "clean", AuthInfo: "clean",
} }
@@ -64,15 +64,15 @@ func TestConfirmUsableBadInfoButOkConfig(t *testing.T) {
} }
func TestConfirmUsableBadInfoConfig(t *testing.T) { func TestConfirmUsableBadInfoConfig(t *testing.T) {
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.Clusters["missing ca"] = clientcmdapi.Cluster{ config.Clusters["missing ca"] = &clientcmdapi.Cluster{
Server: "anything", Server: "anything",
CertificateAuthority: "missing", CertificateAuthority: "missing",
} }
config.AuthInfos["error"] = clientcmdapi.AuthInfo{ config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
Username: "anything", Username: "anything",
Token: "here", Token: "here",
} }
config.Contexts["first"] = clientcmdapi.Context{ config.Contexts["first"] = &clientcmdapi.Context{
Cluster: "missing ca", Cluster: "missing ca",
AuthInfo: "error", AuthInfo: "error",
} }
@@ -150,7 +150,7 @@ func TestIsConfigurationInvalid(t *testing.T) {
func TestValidateMissingReferencesConfig(t *testing.T) { func TestValidateMissingReferencesConfig(t *testing.T) {
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.CurrentContext = "anything" config.CurrentContext = "anything"
config.Contexts["anything"] = clientcmdapi.Context{Cluster: "missing", AuthInfo: "missing"} config.Contexts["anything"] = &clientcmdapi.Context{Cluster: "missing", AuthInfo: "missing"}
test := configValidationTest{ test := configValidationTest{
config: config, config: config,
expectedErrorSubstring: []string{"user \"missing\" was not found for context \"anything\"", "cluster \"missing\" was not found for context \"anything\""}, expectedErrorSubstring: []string{"user \"missing\" was not found for context \"anything\"", "cluster \"missing\" was not found for context \"anything\""},
@@ -162,7 +162,7 @@ func TestValidateMissingReferencesConfig(t *testing.T) {
func TestValidateEmptyContext(t *testing.T) { func TestValidateEmptyContext(t *testing.T) {
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.CurrentContext = "anything" config.CurrentContext = "anything"
config.Contexts["anything"] = clientcmdapi.Context{} config.Contexts["anything"] = &clientcmdapi.Context{}
test := configValidationTest{ test := configValidationTest{
config: config, config: config,
expectedErrorSubstring: []string{"user was not specified for context \"anything\"", "cluster was not specified for context \"anything\""}, expectedErrorSubstring: []string{"user was not specified for context \"anything\"", "cluster was not specified for context \"anything\""},
@@ -174,7 +174,7 @@ func TestValidateEmptyContext(t *testing.T) {
func TestValidateEmptyClusterInfo(t *testing.T) { func TestValidateEmptyClusterInfo(t *testing.T) {
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.Clusters["empty"] = clientcmdapi.Cluster{} config.Clusters["empty"] = &clientcmdapi.Cluster{}
test := configValidationTest{ test := configValidationTest{
config: config, config: config,
expectedErrorSubstring: []string{"no server found for"}, expectedErrorSubstring: []string{"no server found for"},
@@ -185,7 +185,7 @@ func TestValidateEmptyClusterInfo(t *testing.T) {
} }
func TestValidateMissingCAFileClusterInfo(t *testing.T) { func TestValidateMissingCAFileClusterInfo(t *testing.T) {
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.Clusters["missing ca"] = clientcmdapi.Cluster{ config.Clusters["missing ca"] = &clientcmdapi.Cluster{
Server: "anything", Server: "anything",
CertificateAuthority: "missing", CertificateAuthority: "missing",
} }
@@ -199,7 +199,7 @@ func TestValidateMissingCAFileClusterInfo(t *testing.T) {
} }
func TestValidateCleanClusterInfo(t *testing.T) { func TestValidateCleanClusterInfo(t *testing.T) {
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.Clusters["clean"] = clientcmdapi.Cluster{ config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "anything", Server: "anything",
} }
test := configValidationTest{ test := configValidationTest{
@@ -214,7 +214,7 @@ func TestValidateCleanWithCAClusterInfo(t *testing.T) {
defer os.Remove(tempFile.Name()) defer os.Remove(tempFile.Name())
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.Clusters["clean"] = clientcmdapi.Cluster{ config.Clusters["clean"] = &clientcmdapi.Cluster{
Server: "anything", Server: "anything",
CertificateAuthority: tempFile.Name(), CertificateAuthority: tempFile.Name(),
} }
@@ -228,7 +228,7 @@ func TestValidateCleanWithCAClusterInfo(t *testing.T) {
func TestValidateEmptyAuthInfo(t *testing.T) { func TestValidateEmptyAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.AuthInfos["error"] = clientcmdapi.AuthInfo{} config.AuthInfos["error"] = &clientcmdapi.AuthInfo{}
test := configValidationTest{ test := configValidationTest{
config: config, config: config,
} }
@@ -238,7 +238,7 @@ func TestValidateEmptyAuthInfo(t *testing.T) {
} }
func TestValidateCertFilesNotFoundAuthInfo(t *testing.T) { func TestValidateCertFilesNotFoundAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.AuthInfos["error"] = clientcmdapi.AuthInfo{ config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
ClientCertificate: "missing", ClientCertificate: "missing",
ClientKey: "missing", ClientKey: "missing",
} }
@@ -255,7 +255,7 @@ func TestValidateCertDataOverridesFiles(t *testing.T) {
defer os.Remove(tempFile.Name()) defer os.Remove(tempFile.Name())
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.AuthInfos["clean"] = clientcmdapi.AuthInfo{ config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
ClientCertificate: tempFile.Name(), ClientCertificate: tempFile.Name(),
ClientCertificateData: []byte("certdata"), ClientCertificateData: []byte("certdata"),
ClientKey: tempFile.Name(), ClientKey: tempFile.Name(),
@@ -274,7 +274,7 @@ func TestValidateCleanCertFilesAuthInfo(t *testing.T) {
defer os.Remove(tempFile.Name()) defer os.Remove(tempFile.Name())
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.AuthInfos["clean"] = clientcmdapi.AuthInfo{ config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
ClientCertificate: tempFile.Name(), ClientCertificate: tempFile.Name(),
ClientKey: tempFile.Name(), ClientKey: tempFile.Name(),
} }
@@ -287,7 +287,7 @@ func TestValidateCleanCertFilesAuthInfo(t *testing.T) {
} }
func TestValidateCleanTokenAuthInfo(t *testing.T) { func TestValidateCleanTokenAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.AuthInfos["clean"] = clientcmdapi.AuthInfo{ config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{
Token: "any-value", Token: "any-value",
} }
test := configValidationTest{ test := configValidationTest{
@@ -300,7 +300,7 @@ func TestValidateCleanTokenAuthInfo(t *testing.T) {
func TestValidateMultipleMethodsAuthInfo(t *testing.T) { func TestValidateMultipleMethodsAuthInfo(t *testing.T) {
config := clientcmdapi.NewConfig() config := clientcmdapi.NewConfig()
config.AuthInfos["error"] = clientcmdapi.AuthInfo{ config.AuthInfos["error"] = &clientcmdapi.AuthInfo{
Token: "token", Token: "token",
Username: "username", Username: "username",
} }
@@ -319,7 +319,7 @@ type configValidationTest struct {
} }
func (c configValidationTest) testContext(contextName string, t *testing.T) { func (c configValidationTest) testContext(contextName string, t *testing.T) {
errs := validateContext(contextName, c.config.Contexts[contextName], *c.config) errs := validateContext(contextName, *c.config.Contexts[contextName], *c.config)
if len(c.expectedErrorSubstring) != 0 { if len(c.expectedErrorSubstring) != 0 {
if len(errs) == 0 { if len(errs) == 0 {
@@ -379,7 +379,7 @@ func (c configValidationTest) testConfig(t *testing.T) {
} }
} }
func (c configValidationTest) testCluster(clusterName string, t *testing.T) { func (c configValidationTest) testCluster(clusterName string, t *testing.T) {
errs := validateClusterInfo(clusterName, c.config.Clusters[clusterName]) errs := validateClusterInfo(clusterName, *c.config.Clusters[clusterName])
if len(c.expectedErrorSubstring) != 0 { if len(c.expectedErrorSubstring) != 0 {
if len(errs) == 0 { if len(errs) == 0 {
@@ -399,7 +399,7 @@ func (c configValidationTest) testCluster(clusterName string, t *testing.T) {
} }
func (c configValidationTest) testAuthInfo(authInfoName string, t *testing.T) { func (c configValidationTest) testAuthInfo(authInfoName string, t *testing.T) {
errs := validateAuthInfo(authInfoName, c.config.AuthInfos[authInfoName]) errs := validateAuthInfo(authInfoName, *c.config.AuthInfos[authInfoName])
if len(c.expectedErrorSubstring) != 0 { if len(c.expectedErrorSubstring) != 0 {
if len(errs) == 0 { if len(errs) == 0 {


@@ -51,8 +51,8 @@ func (c *FakeEndpoints) Delete(name string) error {
} }
func (c *FakeEndpoints) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakeEndpoints) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-endpoints", Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: "watch-endpoints", Value: resourceVersion}, nil)
return c.Fake.Watch, c.Fake.Err return c.Fake.Watch, c.Fake.Err()
} }
func (c *FakeEndpoints) Update(endpoints *api.Endpoints) (*api.Endpoints, error) { func (c *FakeEndpoints) Update(endpoints *api.Endpoints) (*api.Endpoints, error) {


@@ -56,8 +56,8 @@ func (c *FakeEvents) Get(id string) (*api.Event, error) {
// Watch starts watching for events matching the given selectors. // Watch starts watching for events matching the given selectors.
func (c *FakeEvents) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakeEvents) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-events", Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: "watch-events", Value: resourceVersion}, nil)
return c.Fake.Watch, c.Fake.Err return c.Fake.Watch, c.Fake.Err()
} }
// Search returns a list of events matching the specified object. // Search returns a list of events matching the specified object.
@@ -72,6 +72,6 @@ func (c *FakeEvents) Delete(name string) error {
} }
func (c *FakeEvents) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { func (c *FakeEvents) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "get-field-selector"}) c.Fake.Invokes(FakeAction{Action: "get-field-selector"}, nil)
return fields.Everything() return fields.Everything()
} }


@@ -56,6 +56,6 @@ func (c *FakeLimitRanges) Update(limitRange *api.LimitRange) (*api.LimitRange, e
} }
func (c *FakeLimitRanges) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakeLimitRanges) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-limitRange", Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: "watch-limitRange", Value: resourceVersion}, nil)
return c.Fake.Watch, nil return c.Fake.Watch, nil
} }


@@ -45,8 +45,8 @@ func (c *FakeNamespaces) Delete(name string) error {
} }
func (c *FakeNamespaces) Create(namespace *api.Namespace) (*api.Namespace, error) { func (c *FakeNamespaces) Create(namespace *api.Namespace) (*api.Namespace, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "create-namespace"}) c.Fake.Invokes(FakeAction{Action: "create-namespace"}, nil)
return &api.Namespace{}, c.Fake.Err return &api.Namespace{}, c.Fake.Err()
} }
func (c *FakeNamespaces) Update(namespace *api.Namespace) (*api.Namespace, error) { func (c *FakeNamespaces) Update(namespace *api.Namespace) (*api.Namespace, error) {
@@ -55,7 +55,7 @@ func (c *FakeNamespaces) Update(namespace *api.Namespace) (*api.Namespace, error
} }
func (c *FakeNamespaces) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakeNamespaces) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-namespaces", Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: "watch-namespaces", Value: resourceVersion}, nil)
return c.Fake.Watch, nil return c.Fake.Watch, nil
} }


@@ -60,6 +60,6 @@ func (c *FakeNodes) UpdateStatus(minion *api.Node) (*api.Node, error) {
} }
func (c *FakeNodes) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakeNodes) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-nodes", Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: "watch-nodes", Value: resourceVersion}, nil)
return c.Fake.Watch, c.Fake.Err return c.Fake.Watch, c.Fake.Err()
} }


@@ -59,6 +59,6 @@ func (c *FakePersistentVolumeClaims) UpdateStatus(claim *api.PersistentVolumeCla
} }
func (c *FakePersistentVolumeClaims) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakePersistentVolumeClaims) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-persistentVolumeClaims", Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: "watch-persistentVolumeClaims", Value: resourceVersion}, nil)
return c.Fake.Watch, c.Fake.Err return c.Fake.Watch, c.Fake.Err()
} }


@@ -59,6 +59,6 @@ func (c *FakePersistentVolumes) UpdateStatus(pv *api.PersistentVolume) (*api.Per
} }
func (c *FakePersistentVolumes) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakePersistentVolumes) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-persistentVolumes", Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: "watch-persistentVolumes", Value: resourceVersion}, nil)
return c.Fake.Watch, c.Fake.Err return c.Fake.Watch, c.Fake.Err()
} }


@@ -56,6 +56,6 @@ func (c *FakePodTemplates) Update(pod *api.PodTemplate) (*api.PodTemplate, error
} }
func (c *FakePodTemplates) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakePodTemplates) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-podTemplates", Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: "watch-podTemplates", Value: resourceVersion}, nil)
return c.Fake.Watch, c.Fake.Err return c.Fake.Watch, c.Fake.Err()
} }


@@ -56,12 +56,12 @@ func (c *FakePods) Update(pod *api.Pod) (*api.Pod, error) {
} }
func (c *FakePods) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakePods) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-pods", Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: "watch-pods", Value: resourceVersion}, nil)
return c.Fake.Watch, c.Fake.Err return c.Fake.Watch, c.Fake.Err()
} }
func (c *FakePods) Bind(bind *api.Binding) error { func (c *FakePods) Bind(bind *api.Binding) error {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "bind-pod", Value: bind.Name}) c.Fake.Invokes(FakeAction{Action: "bind-pod", Value: bind.Name}, nil)
return nil return nil
} }


@@ -65,6 +65,6 @@ func (c *FakeReplicationControllers) Delete(name string) error {
} }
func (c *FakeReplicationControllers) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakeReplicationControllers) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: WatchControllerAction, Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: WatchControllerAction, Value: resourceVersion}, nil)
return c.Fake.Watch, nil return c.Fake.Watch, nil
} }


@@ -61,6 +61,6 @@ func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *api.ResourceQuota) (*ap
} }
func (c *FakeResourceQuotas) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakeResourceQuotas) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-resourceQuota", Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: "watch-resourceQuota", Value: resourceVersion}, nil)
return c.Fake.Watch, nil return c.Fake.Watch, nil
} }


@@ -56,6 +56,6 @@ func (c *FakeSecrets) Delete(name string) error {
} }
func (c *FakeSecrets) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakeSecrets) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-secrets", Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: "watch-secrets", Value: resourceVersion}, nil)
return c.Fake.Watch, c.Fake.Err return c.Fake.Watch, c.Fake.Err()
} }


@@ -56,6 +56,6 @@ func (c *FakeServiceAccounts) Delete(name string) error {
} }
func (c *FakeServiceAccounts) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakeServiceAccounts) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-serviceAccounts", Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: "watch-serviceAccounts", Value: resourceVersion}, nil)
return c.Fake.Watch, c.Fake.Err return c.Fake.Watch, c.Fake.Err()
} }


@@ -56,6 +56,6 @@ func (c *FakeServices) Delete(name string) error {
} }
func (c *FakeServices) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { func (c *FakeServices) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-services", Value: resourceVersion}) c.Fake.Invokes(FakeAction{Action: "watch-services", Value: resourceVersion}, nil)
return c.Fake.Watch, c.Fake.Err return c.Fake.Watch, c.Fake.Err()
} }


@@ -17,6 +17,8 @@ limitations under the License.
package testclient package testclient
import ( import (
"sync"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/registered" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/registered"
@@ -48,10 +50,11 @@ type ReactionFunc func(FakeAction) (runtime.Object, error)
// Fake implements client.Interface. Meant to be embedded into a struct to get a default // Fake implements client.Interface. Meant to be embedded into a struct to get a default
// implementation. This makes faking out just the method you want to test easier. // implementation. This makes faking out just the method you want to test easier.
type Fake struct { type Fake struct {
Actions []FakeAction sync.RWMutex
Watch watch.Interface actions []FakeAction
Err error err error
Watch watch.Interface
// ReactFn is an optional function that will be invoked with the provided action // ReactFn is an optional function that will be invoked with the provided action
// and return a response. It can implement scenario specific behavior. The type // and return a response. It can implement scenario specific behavior. The type
// of object returned must match the expected type from the caller (even if nil). // of object returned must match the expected type from the caller (even if nil).
@@ -61,11 +64,47 @@ type Fake struct {
// Invokes records the provided FakeAction and then invokes the ReactFn (if provided). // Invokes records the provided FakeAction and then invokes the ReactFn (if provided).
// obj is expected to be of the same type a normal call would return. // obj is expected to be of the same type a normal call would return.
func (c *Fake) Invokes(action FakeAction, obj runtime.Object) (runtime.Object, error) { func (c *Fake) Invokes(action FakeAction, obj runtime.Object) (runtime.Object, error) {
c.Actions = append(c.Actions, action) c.Lock()
defer c.Unlock()
c.actions = append(c.actions, action)
if c.ReactFn != nil { if c.ReactFn != nil {
return c.ReactFn(action) return c.ReactFn(action)
} }
return obj, c.Err return obj, c.err
}
// ClearActions clears the history of actions called on the fake client
func (c *Fake) ClearActions() {
c.Lock()
defer c.Unlock()
c.actions = make([]FakeAction, 0)
}
// Actions returns a chronologically ordered slice of fake actions called on the fake client
func (c *Fake) Actions() []FakeAction {
c.RLock()
defer c.RUnlock()
fa := make([]FakeAction, len(c.actions))
copy(fa, c.actions)
return fa
}
// SetErr sets the error to return for client calls
func (c *Fake) SetErr(err error) {
c.Lock()
defer c.Unlock()
c.err = err
}
// Err returns any client error, or nil
func (c *Fake) Err() error {
c.RLock()
defer c.RUnlock()
return c.err
} }
func (c *Fake) LimitRanges(namespace string) client.LimitRangeInterface { func (c *Fake) LimitRanges(namespace string) client.LimitRangeInterface {
@@ -125,13 +164,13 @@ func (c *Fake) Namespaces() client.NamespaceInterface {
} }
func (c *Fake) ServerVersion() (*version.Info, error) { func (c *Fake) ServerVersion() (*version.Info, error) {
c.Actions = append(c.Actions, FakeAction{Action: "get-version", Value: nil}) c.Invokes(FakeAction{Action: "get-version", Value: nil}, nil)
versionInfo := version.Get() versionInfo := version.Get()
return &versionInfo, nil return &versionInfo, nil
} }
func (c *Fake) ServerAPIVersions() (*api.APIVersions, error) { func (c *Fake) ServerAPIVersions() (*api.APIVersions, error) {
c.Actions = append(c.Actions, FakeAction{Action: "get-apiversions", Value: nil}) c.Invokes(FakeAction{Action: "get-apiversions", Value: nil}, nil)
return &api.APIVersions{Versions: registered.RegisteredVersions}, nil return &api.APIVersions{Versions: registered.RegisteredVersions}, nil
} }
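
The Fake rework above follows one pattern throughout: the fields go private behind an RWMutex, every recording call funnels through Invokes, and Actions() hands back a defensive copy. A self-contained sketch of why that matters, with trimmed types rather than the real testclient package:

package main

import (
	"fmt"
	"sync"
)

type FakeAction struct{ Action string }

type Fake struct {
	sync.RWMutex
	actions []FakeAction
}

// Invokes records an action under the write lock.
func (c *Fake) Invokes(a FakeAction) {
	c.Lock()
	defer c.Unlock()
	c.actions = append(c.actions, a)
}

// Actions returns a copy so callers cannot race on the underlying slice.
func (c *Fake) Actions() []FakeAction {
	c.RLock()
	defer c.RUnlock()
	out := make([]FakeAction, len(c.actions))
	copy(out, c.actions)
	return out
}

func main() {
	f := &Fake{}
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			f.Invokes(FakeAction{Action: "watch-pods"}) // safe from many goroutines
		}()
	}
	wg.Wait()
	fmt.Println(len(f.Actions()), "actions recorded") // 10
}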


@@ -1694,10 +1694,9 @@ func (s *AWSCloud) ensureSecurityGroup(name string, description string, vpcID st
createResponse, err := s.ec2.CreateSecurityGroup(createRequest) createResponse, err := s.ec2.CreateSecurityGroup(createRequest)
if err != nil { if err != nil {
ignore := false ignore := false
switch err.(type) { switch err := err.(type) {
case awserr.Error: case awserr.Error:
awsError := err.(awserr.Error) if err.Code() == "InvalidGroup.Duplicate" && attempt < MaxReadThenCreateRetries {
if awsError.Code() == "InvalidGroup.Duplicate" && attempt < MaxReadThenCreateRetries {
glog.V(2).Infof("Got InvalidGroup.Duplicate while creating security group (race?); will retry") glog.V(2).Infof("Got InvalidGroup.Duplicate while creating security group (race?); will retry")
ignore = true ignore = true
} }
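
The cleanup above swaps a separate type assertion for a type switch that rebinds err. A minimal standalone sketch of the idiom; the awsError interface here is a stand-in for awserr.Error, not the real SDK type:

package main

import (
	"errors"
	"fmt"
)

// Stand-in for awserr.Error.
type awsError interface {
	error
	Code() string
}

type dupErr struct{}

func (dupErr) Error() string { return "security group already exists" }
func (dupErr) Code() string  { return "InvalidGroup.Duplicate" }

func isDuplicate(err error) bool {
	// Inside each case, err already has the concrete type, so the second
	// assertion (awsError := err.(awserr.Error)) from the old code is gone.
	switch err := err.(type) {
	case awsError:
		return err.Code() == "InvalidGroup.Duplicate"
	default:
		return false
	}
}

func main() {
	fmt.Println(isDuplicate(dupErr{}))           // true: safe to retry the read
	fmt.Println(isDuplicate(errors.New("boom"))) // false
}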


@@ -247,7 +247,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
// - both saved and current statuses have Ready Conditions, different LastProbeTimes and different Ready Condition State - // - both saved and current statuses have Ready Conditions, different LastProbeTimes and different Ready Condition State -
// Ready Condition changed it state since we last seen it, so we update both probeTimestamp and readyTransitionTimestamp. // Ready Condition changed it state since we last seen it, so we update both probeTimestamp and readyTransitionTimestamp.
// TODO: things to consider: // TODO: things to consider:
// - if 'LastProbeTime' have gone back in time its probably and error, currently we ignore it, // - if 'LastProbeTime' have gone back in time its probably an error, currently we ignore it,
// - currently only correct Ready State transition outside of Node Controller is marking it ready by Kubelet, we don't check // - currently only correct Ready State transition outside of Node Controller is marking it ready by Kubelet, we don't check
// if that's the case, but it does not seem necessary. // if that's the case, but it does not seem necessary.
savedCondition := nc.getCondition(&savedNodeStatus.status, api.NodeReady) savedCondition := nc.getCondition(&savedNodeStatus.status, api.NodeReady)
@@ -374,18 +374,20 @@ func (nc *NodeController) monitorNodeStatus() error {
continue continue
} }
decisionTimestamp := nc.now()
if readyCondition != nil { if readyCondition != nil {
// Check eviction timeout. // Check eviction timeout against decisionTimestamp
if lastReadyCondition.Status == api.ConditionFalse && if lastReadyCondition.Status == api.ConditionFalse &&
nc.now().After(nc.nodeStatusMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) { decisionTimestamp.After(nc.nodeStatusMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) {
if nc.podEvictor.AddNodeToEvict(node.Name) { if nc.podEvictor.AddNodeToEvict(node.Name) {
glog.Infof("Adding pods to evict: %v is later than %v + %v", nc.now(), nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout) glog.Infof("Adding pods to evict: %v is later than %v + %v", decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout)
} }
} }
if lastReadyCondition.Status == api.ConditionUnknown && if lastReadyCondition.Status == api.ConditionUnknown &&
nc.now().After(nc.nodeStatusMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout-gracePeriod)) { decisionTimestamp.After(nc.nodeStatusMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout-gracePeriod)) {
if nc.podEvictor.AddNodeToEvict(node.Name) { if nc.podEvictor.AddNodeToEvict(node.Name) {
glog.Infof("Adding pods to evict2: %v is later than %v + %v", nc.now(), nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout-gracePeriod) glog.Infof("Adding pods to evict2: %v is later than %v + %v", decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout-gracePeriod)
} }
} }
if lastReadyCondition.Status == api.ConditionTrue { if lastReadyCondition.Status == api.ConditionTrue {
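
One plausible reading of the hoisted decisionTimestamp: both eviction checks in a single pass now compare against the same instant, rather than calling nc.now() separately for each and risking disagreement. A small sketch of the check under that reading, with made-up timeouts:

package main

import (
	"fmt"
	"time"
)

func main() {
	podEvictionTimeout := 5 * time.Minute
	readyTransitionTimestamp := time.Now().Add(-6 * time.Minute)

	// One timestamp per pass, reused by every comparison in that pass.
	decisionTimestamp := time.Now()
	if decisionTimestamp.After(readyTransitionTimestamp.Add(podEvictionTimeout)) {
		fmt.Printf("Adding pods to evict: %v is later than %v + %v\n",
			decisionTimestamp, readyTransitionTimestamp, podEvictionTimeout)
	}
}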


@@ -342,7 +342,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
podEvictor.TryEvict(func(nodeName string) { nodeController.deletePods(nodeName) }) podEvictor.TryEvict(func(nodeName string) { nodeController.deletePods(nodeName) })
podEvicted := false podEvicted := false
for _, action := range item.fakeNodeHandler.Actions { for _, action := range item.fakeNodeHandler.Actions() {
if action.Action == "delete-pod" { if action.Action == "delete-pod" {
podEvicted = true podEvicted = true
} }


@@ -93,20 +93,21 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
client := &testclient.Fake{} client := &testclient.Fake{}
controller := New(cloud, client, "test-cluster") controller := New(cloud, client, "test-cluster")
controller.init() controller.init()
cloud.Calls = nil // ignore any cloud calls made in init() cloud.Calls = nil // ignore any cloud calls made in init()
client.Actions = nil // ignore any client calls made in init() client.ClearActions() // ignore any client calls made in init()
err, _ := controller.createLoadBalancerIfNeeded(types.NamespacedName{"foo", "bar"}, item.service, nil) err, _ := controller.createLoadBalancerIfNeeded(types.NamespacedName{"foo", "bar"}, item.service, nil)
if !item.expectErr && err != nil { if !item.expectErr && err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} else if item.expectErr && err == nil { } else if item.expectErr && err == nil {
t.Errorf("expected error creating %v, got nil", item.service) t.Errorf("expected error creating %v, got nil", item.service)
} }
actions := client.Actions()
if !item.expectCreateAttempt { if !item.expectCreateAttempt {
if len(cloud.Calls) > 0 { if len(cloud.Calls) > 0 {
t.Errorf("unexpected cloud provider calls: %v", cloud.Calls) t.Errorf("unexpected cloud provider calls: %v", cloud.Calls)
} }
if len(client.Actions) > 0 { if len(actions) > 0 {
t.Errorf("unexpected client actions: %v", client.Actions) t.Errorf("unexpected client actions: %v", actions)
} }
} else { } else {
if len(cloud.Balancers) != 1 { if len(cloud.Balancers) != 1 {
@@ -117,13 +118,13 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
t.Errorf("created load balancer has incorrect parameters: %v", cloud.Balancers[0]) t.Errorf("created load balancer has incorrect parameters: %v", cloud.Balancers[0])
} }
actionFound := false actionFound := false
for _, action := range client.Actions { for _, action := range actions {
if action.Action == "update-service" { if action.Action == "update-service" {
actionFound = true actionFound = true
} }
} }
if !actionFound { if !actionFound {
t.Errorf("expected updated service to be sent to client, got these actions instead: %v", client.Actions) t.Errorf("expected updated service to be sent to client, got these actions instead: %v", actions)
} }
} }
} }


@@ -191,9 +191,9 @@ func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) {
rm.queue.ShutDown() rm.queue.ShutDown()
} }
// getPodControllers returns the controller managing the given pod. // getPodController returns the controller managing the given pod.
// TODO: Surface that we are ignoring multiple controllers for a single pod. // TODO: Surface that we are ignoring multiple controllers for a single pod.
func (rm *ReplicationManager) getPodControllers(pod *api.Pod) *api.ReplicationController { func (rm *ReplicationManager) getPodController(pod *api.Pod) *api.ReplicationController {
controllers, err := rm.rcStore.GetPodControllers(pod) controllers, err := rm.rcStore.GetPodControllers(pod)
if err != nil { if err != nil {
glog.V(4).Infof("No controllers found for pod %v, replication manager will avoid syncing", pod.Name) glog.V(4).Infof("No controllers found for pod %v, replication manager will avoid syncing", pod.Name)
@@ -211,7 +211,7 @@ func (rm *ReplicationManager) getPodControllers(pod *api.Pod) *api.ReplicationCo
// When a pod is created, enqueue the controller that manages it and update its expectations. // When a pod is created, enqueue the controller that manages it and update its expectations.
func (rm *ReplicationManager) addPod(obj interface{}) { func (rm *ReplicationManager) addPod(obj interface{}) {
pod := obj.(*api.Pod) pod := obj.(*api.Pod)
if rc := rm.getPodControllers(pod); rc != nil { if rc := rm.getPodController(pod); rc != nil {
rcKey, err := controller.KeyFunc(rc) rcKey, err := controller.KeyFunc(rc)
if err != nil { if err != nil {
glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err) glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err)
@@ -232,7 +232,7 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) {
} }
// TODO: Write a unittest for this case // TODO: Write a unittest for this case
curPod := cur.(*api.Pod) curPod := cur.(*api.Pod)
if rc := rm.getPodControllers(curPod); rc != nil { if rc := rm.getPodController(curPod); rc != nil {
rm.enqueueController(rc) rm.enqueueController(rc)
} }
oldPod := old.(*api.Pod) oldPod := old.(*api.Pod)
@@ -240,7 +240,7 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) {
if !reflect.DeepEqual(curPod.Labels, oldPod.Labels) { if !reflect.DeepEqual(curPod.Labels, oldPod.Labels) {
// If the old and new rc are the same, the first one that syncs // If the old and new rc are the same, the first one that syncs
// will set expectations preventing any damage from the second. // will set expectations preventing any damage from the second.
if oldRC := rm.getPodControllers(oldPod); oldRC != nil { if oldRC := rm.getPodController(oldPod); oldRC != nil {
rm.enqueueController(oldRC) rm.enqueueController(oldRC)
} }
} }
@@ -267,7 +267,7 @@ func (rm *ReplicationManager) deletePod(obj interface{}) {
return return
} }
} }
if rc := rm.getPodControllers(pod); rc != nil { if rc := rm.getPodController(pod); rc != nil {
rcKey, err := controller.KeyFunc(rc) rcKey, err := controller.KeyFunc(rc)
if err != nil { if err != nil {
glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err) glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err)


@@ -484,7 +484,7 @@ func TestPodControllerLookup(t *testing.T) {
for _, r := range c.inRCs { for _, r := range c.inRCs {
manager.rcStore.Add(r) manager.rcStore.Add(r)
} }
if rc := manager.getPodControllers(c.pod); rc != nil { if rc := manager.getPodController(c.pod); rc != nil {
if c.outRCName != rc.Name { if c.outRCName != rc.Name {
t.Errorf("Got controller %+v expected %+v", rc.Name, c.outRCName) t.Errorf("Got controller %+v expected %+v", rc.Name, c.outRCName)
} }
@@ -693,7 +693,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
numReplicas := 10 numReplicas := 10
updateReplicaCount(fakeRCClient, *rc, numReplicas) updateReplicaCount(fakeRCClient, *rc, numReplicas)
updates, gets := 0, 0 updates, gets := 0, 0
for _, a := range fakeClient.Actions { for _, a := range fakeClient.Actions() {
switch a.Action { switch a.Action {
case testclient.GetControllerAction: case testclient.GetControllerAction:
gets++ gets++


@@ -187,8 +187,9 @@ func (o *PathOptions) GetExplicitFile() string {
// uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow. // uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow.
// Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values // Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values
// (no nil strings), we're forced to have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference, // (no nil strings), we're forced to have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference,
// that means that this code will only write into a single file. // that means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any
func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config) error { // modified element.
func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error {
startingConfig, err := configAccess.GetStartingConfig() startingConfig, err := configAccess.GetStartingConfig()
if err != nil { if err != nil {
return err return err
@@ -223,7 +224,14 @@ func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config) erro
} }
configToWrite := getConfigFromFileOrDie(destinationFile) configToWrite := getConfigFromFileOrDie(destinationFile)
configToWrite.Clusters[key] = cluster t := *cluster
configToWrite.Clusters[key] = &t
configToWrite.Clusters[key].LocationOfOrigin = destinationFile
if relativizePaths {
if err := clientcmd.RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil {
return err
}
}
if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil {
return err return err
@@ -257,7 +265,14 @@ func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config) erro
} }
configToWrite := getConfigFromFileOrDie(destinationFile) configToWrite := getConfigFromFileOrDie(destinationFile)
configToWrite.AuthInfos[key] = authInfo t := *authInfo
configToWrite.AuthInfos[key] = &t
configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile
if relativizePaths {
if err := clientcmd.RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil {
return err
}
}
if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil {
return err return err
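
Note the copy-before-store in both hunks: t := *cluster takes a fresh copy, so stamping LocationOfOrigin on the stored entry cannot reach back and mutate the caller's struct. The same move in isolation, with a toy Cluster type and a made-up path:

package main

import "fmt"

type Cluster struct {
	Server           string
	LocationOfOrigin string
}

func main() {
	src := &Cluster{Server: "http://cow.org:8080"}
	dst := map[string]*Cluster{}

	// Copy before storing: dst owns its own Cluster.
	t := *src
	dst["cow-cluster"] = &t
	dst["cow-cluster"].LocationOfOrigin = "/home/user/.kube/config"

	fmt.Println(src.LocationOfOrigin == "") // true: the caller's struct is untouched
}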


@@ -21,6 +21,7 @@ import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
"path"
"reflect" "reflect"
"strings" "strings"
"testing" "testing"
@@ -34,11 +35,11 @@ import (
func newRedFederalCowHammerConfig() clientcmdapi.Config { func newRedFederalCowHammerConfig() clientcmdapi.Config {
return clientcmdapi.Config{ return clientcmdapi.Config{
AuthInfos: map[string]clientcmdapi.AuthInfo{ AuthInfos: map[string]*clientcmdapi.AuthInfo{
"red-user": {Token: "red-token"}}, "red-user": {Token: "red-token"}},
Clusters: map[string]clientcmdapi.Cluster{ Clusters: map[string]*clientcmdapi.Cluster{
"cow-cluster": {Server: "http://cow.org:8080"}}, "cow-cluster": {Server: "http://cow.org:8080"}},
Contexts: map[string]clientcmdapi.Context{ Contexts: map[string]*clientcmdapi.Context{
"federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster"}}, "federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster"}},
CurrentContext: "federal-context", CurrentContext: "federal-context",
} }
@@ -79,10 +80,9 @@ func TestSetCurrentContext(t *testing.T) {
startingConfig := newRedFederalCowHammerConfig() startingConfig := newRedFederalCowHammerConfig()
newContextName := "the-new-context" newContextName := "the-new-context"
newContext := clientcmdapi.NewContext()
startingConfig.Contexts[newContextName] = *newContext startingConfig.Contexts[newContextName] = clientcmdapi.NewContext()
expectedConfig.Contexts[newContextName] = *newContext expectedConfig.Contexts[newContextName] = clientcmdapi.NewContext()
expectedConfig.CurrentContext = newContextName expectedConfig.CurrentContext = newContextName
@@ -108,10 +108,7 @@ func TestSetNonExistantContext(t *testing.T) {
func TestSetIntoExistingStruct(t *testing.T) { func TestSetIntoExistingStruct(t *testing.T) {
expectedConfig := newRedFederalCowHammerConfig() expectedConfig := newRedFederalCowHammerConfig()
a := expectedConfig.AuthInfos["red-user"] expectedConfig.AuthInfos["red-user"].Password = "new-path-value"
authInfo := &a
authInfo.Password = "new-path-value"
expectedConfig.AuthInfos["red-user"] = *authInfo
test := configCommandTest{ test := configCommandTest{
args: []string{"set", "users.red-user.password", "new-path-value"}, args: []string{"set", "users.red-user.password", "new-path-value"},
startingConfig: newRedFederalCowHammerConfig(), startingConfig: newRedFederalCowHammerConfig(),
@@ -123,10 +120,7 @@ func TestSetIntoExistingStruct(t *testing.T) {
 func TestSetWithPathPrefixIntoExistingStruct(t *testing.T) {
 	expectedConfig := newRedFederalCowHammerConfig()
-	cc := expectedConfig.Clusters["cow-clusters"]
-	cinfo := &cc
-	cinfo.Server = "http://cow.org:8080/foo/baz"
-	expectedConfig.Clusters["cow-cluster"] = *cinfo
+	expectedConfig.Clusters["cow-cluster"].Server = "http://cow.org:8080/foo/baz"
 	test := configCommandTest{
 		args:           []string{"set", "clusters.cow-cluster.server", "http://cow.org:8080/foo/baz"},
 		startingConfig: newRedFederalCowHammerConfig(),
@@ -164,7 +158,7 @@ func TestUnsetStruct(t *testing.T) {
 func TestUnsetField(t *testing.T) {
 	expectedConfig := newRedFederalCowHammerConfig()
-	expectedConfig.AuthInfos["red-user"] = *clientcmdapi.NewAuthInfo()
+	expectedConfig.AuthInfos["red-user"] = clientcmdapi.NewAuthInfo()
 	test := configCommandTest{
 		args:           []string{"unset", "users.red-user.token"},
 		startingConfig: newRedFederalCowHammerConfig(),
@@ -178,7 +172,7 @@ func TestSetIntoNewStruct(t *testing.T) {
 	expectedConfig := newRedFederalCowHammerConfig()
 	cluster := clientcmdapi.NewCluster()
 	cluster.Server = "new-server-value"
-	expectedConfig.Clusters["big-cluster"] = *cluster
+	expectedConfig.Clusters["big-cluster"] = cluster
 	test := configCommandTest{
 		args:           []string{"set", "clusters.big-cluster.server", "new-server-value"},
 		startingConfig: newRedFederalCowHammerConfig(),
@@ -192,7 +186,7 @@ func TestSetBoolean(t *testing.T) {
 	expectedConfig := newRedFederalCowHammerConfig()
 	cluster := clientcmdapi.NewCluster()
 	cluster.InsecureSkipTLSVerify = true
-	expectedConfig.Clusters["big-cluster"] = *cluster
+	expectedConfig.Clusters["big-cluster"] = cluster
 	test := configCommandTest{
 		args:           []string{"set", "clusters.big-cluster.insecure-skip-tls-verify", "true"},
 		startingConfig: newRedFederalCowHammerConfig(),
@@ -206,7 +200,7 @@ func TestSetIntoNewConfig(t *testing.T) {
 	expectedConfig := *clientcmdapi.NewConfig()
 	context := clientcmdapi.NewContext()
 	context.AuthInfo = "fake-user"
-	expectedConfig.Contexts["new-context"] = *context
+	expectedConfig.Contexts["new-context"] = context
 	test := configCommandTest{
 		args:           []string{"set", "contexts.new-context.user", "fake-user"},
 		startingConfig: *clientcmdapi.NewConfig(),
@@ -218,7 +212,7 @@ func TestSetIntoNewConfig(t *testing.T) {
 func TestNewEmptyAuth(t *testing.T) {
 	expectedConfig := *clientcmdapi.NewConfig()
-	expectedConfig.AuthInfos["the-user-name"] = *clientcmdapi.NewAuthInfo()
+	expectedConfig.AuthInfos["the-user-name"] = clientcmdapi.NewAuthInfo()
 	test := configCommandTest{
 		args:           []string{"set-credentials", "the-user-name"},
 		startingConfig: *clientcmdapi.NewConfig(),
@@ -232,7 +226,7 @@ func TestAdditionalAuth(t *testing.T) {
 	expectedConfig := newRedFederalCowHammerConfig()
 	authInfo := clientcmdapi.NewAuthInfo()
 	authInfo.Token = "token"
-	expectedConfig.AuthInfos["another-user"] = *authInfo
+	expectedConfig.AuthInfos["another-user"] = authInfo
 	test := configCommandTest{
 		args:           []string{"set-credentials", "another-user", "--" + clientcmd.FlagBearerToken + "=token"},
 		startingConfig: newRedFederalCowHammerConfig(),
@@ -250,7 +244,7 @@ func TestEmbedClientCert(t *testing.T) {
 	expectedConfig := newRedFederalCowHammerConfig()
 	authInfo := clientcmdapi.NewAuthInfo()
 	authInfo.ClientCertificateData = fakeData
-	expectedConfig.AuthInfos["another-user"] = *authInfo
+	expectedConfig.AuthInfos["another-user"] = authInfo
 	test := configCommandTest{
 		args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=" + fakeCertFile.Name(), "--" + clientcmd.FlagEmbedCerts + "=true"},
@@ -269,7 +263,7 @@ func TestEmbedClientKey(t *testing.T) {
 	expectedConfig := newRedFederalCowHammerConfig()
 	authInfo := clientcmdapi.NewAuthInfo()
 	authInfo.ClientKeyData = fakeData
-	expectedConfig.AuthInfos["another-user"] = *authInfo
+	expectedConfig.AuthInfos["another-user"] = authInfo
 	test := configCommandTest{
 		args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagKeyFile + "=" + fakeKeyFile.Name(), "--" + clientcmd.FlagEmbedCerts + "=true"},
@@ -293,13 +287,15 @@ func TestEmbedNoKeyOrCertDisallowed(t *testing.T) {
 }
 func TestEmptyTokenAndCertAllowed(t *testing.T) {
+	fakeCertFile, _ := ioutil.TempFile("", "cert-file")
 	expectedConfig := newRedFederalCowHammerConfig()
 	authInfo := clientcmdapi.NewAuthInfo()
-	authInfo.ClientCertificate = "cert-file"
-	expectedConfig.AuthInfos["another-user"] = *authInfo
+	authInfo.ClientCertificate = path.Base(fakeCertFile.Name())
+	expectedConfig.AuthInfos["another-user"] = authInfo
 	test := configCommandTest{
-		args:           []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=cert-file", "--" + clientcmd.FlagBearerToken + "="},
+		args:           []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=" + fakeCertFile.Name(), "--" + clientcmd.FlagBearerToken + "="},
 		startingConfig: newRedFederalCowHammerConfig(),
 		expectedConfig: expectedConfig,
 	}
@@ -311,10 +307,10 @@ func TestTokenAndCertAllowed(t *testing.T) {
 	expectedConfig := newRedFederalCowHammerConfig()
 	authInfo := clientcmdapi.NewAuthInfo()
 	authInfo.Token = "token"
-	authInfo.ClientCertificate = "cert-file"
-	expectedConfig.AuthInfos["another-user"] = *authInfo
+	authInfo.ClientCertificate = "/cert-file"
+	expectedConfig.AuthInfos["another-user"] = authInfo
 	test := configCommandTest{
-		args:           []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=cert-file", "--" + clientcmd.FlagBearerToken + "=token"},
+		args:           []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=/cert-file", "--" + clientcmd.FlagBearerToken + "=token"},
 		startingConfig: newRedFederalCowHammerConfig(),
 		expectedConfig: expectedConfig,
 	}
@@ -343,10 +339,10 @@ func TestBasicClearsToken(t *testing.T) {
 	authInfoWithBasic.Password = "mypass"
 	startingConfig := newRedFederalCowHammerConfig()
-	startingConfig.AuthInfos["another-user"] = *authInfoWithToken
+	startingConfig.AuthInfos["another-user"] = authInfoWithToken
 	expectedConfig := newRedFederalCowHammerConfig()
-	expectedConfig.AuthInfos["another-user"] = *authInfoWithBasic
+	expectedConfig.AuthInfos["another-user"] = authInfoWithBasic
 	test := configCommandTest{
 		args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagUsername + "=myuser", "--" + clientcmd.FlagPassword + "=mypass"},
@@ -366,10 +362,10 @@ func TestTokenClearsBasic(t *testing.T) {
 	authInfoWithToken.Token = "token"
 	startingConfig := newRedFederalCowHammerConfig()
-	startingConfig.AuthInfos["another-user"] = *authInfoWithBasic
+	startingConfig.AuthInfos["another-user"] = authInfoWithBasic
 	expectedConfig := newRedFederalCowHammerConfig()
-	expectedConfig.AuthInfos["another-user"] = *authInfoWithToken
+	expectedConfig.AuthInfos["another-user"] = authInfoWithToken
 	test := configCommandTest{
 		args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagBearerToken + "=token"},
@@ -395,10 +391,10 @@ func TestTokenLeavesCert(t *testing.T) {
 	authInfoWithTokenAndCerts.ClientKeyData = []byte("keydata")
 	startingConfig := newRedFederalCowHammerConfig()
-	startingConfig.AuthInfos["another-user"] = *authInfoWithCerts
+	startingConfig.AuthInfos["another-user"] = authInfoWithCerts
 	expectedConfig := newRedFederalCowHammerConfig()
-	expectedConfig.AuthInfos["another-user"] = *authInfoWithTokenAndCerts
+	expectedConfig.AuthInfos["another-user"] = authInfoWithTokenAndCerts
 	test := configCommandTest{
 		args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagBearerToken + "=token"},
@@ -415,17 +411,17 @@ func TestCertLeavesToken(t *testing.T) {
 	authInfoWithTokenAndCerts := clientcmdapi.NewAuthInfo()
 	authInfoWithTokenAndCerts.Token = "token"
-	authInfoWithTokenAndCerts.ClientCertificate = "cert"
-	authInfoWithTokenAndCerts.ClientKey = "key"
+	authInfoWithTokenAndCerts.ClientCertificate = "/cert"
+	authInfoWithTokenAndCerts.ClientKey = "/key"
 	startingConfig := newRedFederalCowHammerConfig()
-	startingConfig.AuthInfos["another-user"] = *authInfoWithToken
+	startingConfig.AuthInfos["another-user"] = authInfoWithToken
 	expectedConfig := newRedFederalCowHammerConfig()
-	expectedConfig.AuthInfos["another-user"] = *authInfoWithTokenAndCerts
+	expectedConfig.AuthInfos["another-user"] = authInfoWithTokenAndCerts
 	test := configCommandTest{
-		args:           []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=cert", "--" + clientcmd.FlagKeyFile + "=key"},
+		args:           []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=/cert", "--" + clientcmd.FlagKeyFile + "=/key"},
 		startingConfig: startingConfig,
 		expectedConfig: expectedConfig,
 	}
@@ -434,20 +430,22 @@ func TestCertLeavesToken(t *testing.T) {
 }
 func TestCAClearsInsecure(t *testing.T) {
+	fakeCAFile, _ := ioutil.TempFile("", "ca-file")
 	clusterInfoWithInsecure := clientcmdapi.NewCluster()
 	clusterInfoWithInsecure.InsecureSkipTLSVerify = true
 	clusterInfoWithCA := clientcmdapi.NewCluster()
-	clusterInfoWithCA.CertificateAuthority = "cafile"
+	clusterInfoWithCA.CertificateAuthority = path.Base(fakeCAFile.Name())
 	startingConfig := newRedFederalCowHammerConfig()
-	startingConfig.Clusters["another-cluster"] = *clusterInfoWithInsecure
+	startingConfig.Clusters["another-cluster"] = clusterInfoWithInsecure
 	expectedConfig := newRedFederalCowHammerConfig()
-	expectedConfig.Clusters["another-cluster"] = *clusterInfoWithCA
+	expectedConfig.Clusters["another-cluster"] = clusterInfoWithCA
 	test := configCommandTest{
-		args:           []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=cafile"},
+		args:           []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=" + fakeCAFile.Name()},
 		startingConfig: startingConfig,
 		expectedConfig: expectedConfig,
 	}
@@ -460,16 +458,16 @@ func TestCAClearsCAData(t *testing.T) {
 	clusterInfoWithCAData.CertificateAuthorityData = []byte("cadata")
 	clusterInfoWithCA := clientcmdapi.NewCluster()
-	clusterInfoWithCA.CertificateAuthority = "cafile"
+	clusterInfoWithCA.CertificateAuthority = "/cafile"
 	startingConfig := newRedFederalCowHammerConfig()
-	startingConfig.Clusters["another-cluster"] = *clusterInfoWithCAData
+	startingConfig.Clusters["another-cluster"] = clusterInfoWithCAData
 	expectedConfig := newRedFederalCowHammerConfig()
-	expectedConfig.Clusters["another-cluster"] = *clusterInfoWithCA
+	expectedConfig.Clusters["another-cluster"] = clusterInfoWithCA
 	test := configCommandTest{
-		args:           []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=cafile", "--" + clientcmd.FlagInsecure + "=false"},
+		args:           []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=/cafile", "--" + clientcmd.FlagInsecure + "=false"},
 		startingConfig: startingConfig,
 		expectedConfig: expectedConfig,
 	}
@@ -486,10 +484,10 @@ func TestInsecureClearsCA(t *testing.T) {
 	clusterInfoWithCA.CertificateAuthorityData = []byte("cadata")
 	startingConfig := newRedFederalCowHammerConfig()
-	startingConfig.Clusters["another-cluster"] = *clusterInfoWithCA
+	startingConfig.Clusters["another-cluster"] = clusterInfoWithCA
 	expectedConfig := newRedFederalCowHammerConfig()
-	expectedConfig.Clusters["another-cluster"] = *clusterInfoWithInsecure
+	expectedConfig.Clusters["another-cluster"] = clusterInfoWithInsecure
 	test := configCommandTest{
 		args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagInsecure + "=true"},
@@ -513,10 +511,10 @@ func TestCADataClearsCA(t *testing.T) {
 	clusterInfoWithCA.CertificateAuthority = "cafile"
 	startingConfig := newRedFederalCowHammerConfig()
-	startingConfig.Clusters["another-cluster"] = *clusterInfoWithCA
+	startingConfig.Clusters["another-cluster"] = clusterInfoWithCA
 	expectedConfig := newRedFederalCowHammerConfig()
-	expectedConfig.Clusters["another-cluster"] = *clusterInfoWithCAData
+	expectedConfig.Clusters["another-cluster"] = clusterInfoWithCAData
 	test := configCommandTest{
 		args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=" + fakeCAFile.Name(), "--" + clientcmd.FlagEmbedCerts + "=true"},
@@ -553,10 +551,10 @@ func TestCAAndInsecureDisallowed(t *testing.T) {
 func TestMergeExistingAuth(t *testing.T) {
 	expectedConfig := newRedFederalCowHammerConfig()
 	authInfo := expectedConfig.AuthInfos["red-user"]
-	authInfo.ClientKey = "key"
+	authInfo.ClientKey = "/key"
 	expectedConfig.AuthInfos["red-user"] = authInfo
 	test := configCommandTest{
-		args:           []string{"set-credentials", "red-user", "--" + clientcmd.FlagKeyFile + "=key"},
+		args:           []string{"set-credentials", "red-user", "--" + clientcmd.FlagKeyFile + "=/key"},
 		startingConfig: newRedFederalCowHammerConfig(),
 		expectedConfig: expectedConfig,
 	}
@@ -566,7 +564,7 @@ func TestMergeExistingAuth(t *testing.T) {
 func TestNewEmptyCluster(t *testing.T) {
 	expectedConfig := *clientcmdapi.NewConfig()
-	expectedConfig.Clusters["new-cluster"] = *clientcmdapi.NewCluster()
+	expectedConfig.Clusters["new-cluster"] = clientcmdapi.NewCluster()
 	test := configCommandTest{
 		args:           []string{"set-cluster", "new-cluster"},
 		startingConfig: *clientcmdapi.NewConfig(),
@@ -578,14 +576,14 @@ func TestNewEmptyCluster(t *testing.T) {
 func TestAdditionalCluster(t *testing.T) {
 	expectedConfig := newRedFederalCowHammerConfig()
-	cluster := *clientcmdapi.NewCluster()
+	cluster := clientcmdapi.NewCluster()
 	cluster.APIVersion = testapi.Version()
-	cluster.CertificateAuthority = "ca-location"
+	cluster.CertificateAuthority = "/ca-location"
 	cluster.InsecureSkipTLSVerify = false
 	cluster.Server = "serverlocation"
 	expectedConfig.Clusters["different-cluster"] = cluster
 	test := configCommandTest{
-		args:           []string{"set-cluster", "different-cluster", "--" + clientcmd.FlagAPIServer + "=serverlocation", "--" + clientcmd.FlagInsecure + "=false", "--" + clientcmd.FlagCAFile + "=ca-location", "--" + clientcmd.FlagAPIVersion + "=" + testapi.Version()},
+		args:           []string{"set-cluster", "different-cluster", "--" + clientcmd.FlagAPIServer + "=serverlocation", "--" + clientcmd.FlagInsecure + "=false", "--" + clientcmd.FlagCAFile + "=/ca-location", "--" + clientcmd.FlagAPIVersion + "=" + testapi.Version()},
 		startingConfig: newRedFederalCowHammerConfig(),
 		expectedConfig: expectedConfig,
 	}
@@ -595,7 +593,7 @@ func TestAdditionalCluster(t *testing.T) {
 func TestOverwriteExistingCluster(t *testing.T) {
 	expectedConfig := newRedFederalCowHammerConfig()
-	cluster := *clientcmdapi.NewCluster()
+	cluster := clientcmdapi.NewCluster()
 	cluster.Server = "serverlocation"
 	expectedConfig.Clusters["cow-cluster"] = cluster
@@ -610,7 +608,7 @@ func TestOverwriteExistingCluster(t *testing.T) {
 func TestNewEmptyContext(t *testing.T) {
 	expectedConfig := *clientcmdapi.NewConfig()
-	expectedConfig.Contexts["new-context"] = *clientcmdapi.NewContext()
+	expectedConfig.Contexts["new-context"] = clientcmdapi.NewContext()
 	test := configCommandTest{
 		args:           []string{"set-context", "new-context"},
 		startingConfig: *clientcmdapi.NewConfig(),
@@ -622,7 +620,7 @@ func TestNewEmptyContext(t *testing.T) {
 func TestAdditionalContext(t *testing.T) {
 	expectedConfig := newRedFederalCowHammerConfig()
-	context := *clientcmdapi.NewContext()
+	context := clientcmdapi.NewContext()
 	context.Cluster = "some-cluster"
 	context.AuthInfo = "some-user"
 	context.Namespace = "different-namespace"
@@ -683,10 +681,13 @@ func TestToBool(t *testing.T) {
 }

-func testConfigCommand(args []string, startingConfig clientcmdapi.Config) (string, clientcmdapi.Config) {
+func testConfigCommand(args []string, startingConfig clientcmdapi.Config, t *testing.T) (string, clientcmdapi.Config) {
 	fakeKubeFile, _ := ioutil.TempFile("", "")
 	defer os.Remove(fakeKubeFile.Name())
-	clientcmd.WriteToFile(startingConfig, fakeKubeFile.Name())
+	err := clientcmd.WriteToFile(startingConfig, fakeKubeFile.Name())
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}

 	argsToUse := make([]string, 0, 2+len(args))
 	argsToUse = append(argsToUse, "--kubeconfig="+fakeKubeFile.Name())
@@ -712,7 +713,7 @@ type configCommandTest struct {
 }

 func (test configCommandTest) run(t *testing.T) string {
-	out, actualConfig := testConfigCommand(test.args, test.startingConfig)
+	out, actualConfig := testConfigCommand(test.args, test.startingConfig, t)
 	testSetNilMapsToEmpties(reflect.ValueOf(&test.expectedConfig))
 	testSetNilMapsToEmpties(reflect.ValueOf(&actualConfig))
@@ -755,20 +756,7 @@ func testSetNilMapsToEmpties(curr reflect.Value) {
 	case reflect.Map:
 		for _, mapKey := range actualCurrValue.MapKeys() {
 			currMapValue := actualCurrValue.MapIndex(mapKey)
-			// our maps do not hold pointers to structs, they hold the structs themselves. This means that MapIndex returns the struct itself
-			// That in turn means that they have kinds of type.Struct, which is not a settable type. Because of this, we need to make new struct of that type
-			// copy all the data from the old value into the new value, then take the .addr of the new value to modify it in the next recursion.
-			// clear as mud
-			modifiableMapValue := reflect.New(currMapValue.Type()).Elem()
-			modifiableMapValue.Set(currMapValue)
-			if modifiableMapValue.Kind() == reflect.Struct {
-				modifiableMapValue = modifiableMapValue.Addr()
-			}
-			testSetNilMapsToEmpties(modifiableMapValue)
-			actualCurrValue.SetMapIndex(mapKey, reflect.Indirect(modifiableMapValue))
+			testSetNilMapsToEmpties(currMapValue)
 		}
 	case reflect.Struct:


@@ -21,6 +21,7 @@ import (
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"path/filepath"
"strings" "strings"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@@ -108,10 +109,14 @@ func (o createAuthInfoOptions) run() error {
 		return err
 	}
-	authInfo := o.modifyAuthInfo(config.AuthInfos[o.name])
-	config.AuthInfos[o.name] = authInfo
+	startingStanza, exists := config.AuthInfos[o.name]
+	if !exists {
+		startingStanza = clientcmdapi.NewAuthInfo()
+	}
+	authInfo := o.modifyAuthInfo(*startingStanza)
+	config.AuthInfos[o.name] = &authInfo

-	if err := ModifyConfig(o.configAccess, *config); err != nil {
+	if err := ModifyConfig(o.configAccess, *config, true); err != nil {
 		return err
 	}
@@ -130,6 +135,7 @@ func (o *createAuthInfoOptions) modifyAuthInfo(existingAuthInfo clientcmdapi.AuthInfo) clientcmdapi.AuthInfo {
 		modifiedAuthInfo.ClientCertificateData, _ = ioutil.ReadFile(certPath)
 		modifiedAuthInfo.ClientCertificate = ""
 	} else {
+		certPath, _ = filepath.Abs(certPath)
 		modifiedAuthInfo.ClientCertificate = certPath
 		if len(modifiedAuthInfo.ClientCertificate) > 0 {
 			modifiedAuthInfo.ClientCertificateData = nil
@@ -142,6 +148,7 @@ func (o *createAuthInfoOptions) modifyAuthInfo(existingAuthInfo clientcmdapi.AuthInfo) clientcmdapi.AuthInfo {
 		modifiedAuthInfo.ClientKeyData, _ = ioutil.ReadFile(keyPath)
 		modifiedAuthInfo.ClientKey = ""
 	} else {
+		keyPath, _ = filepath.Abs(keyPath)
 		modifiedAuthInfo.ClientKey = keyPath
 		if len(modifiedAuthInfo.ClientKey) > 0 {
 			modifiedAuthInfo.ClientKeyData = nil
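Unless the certificate or key is embedded, the path recorded in the kubeconfig is first normalized with filepath.Abs, so the stanza remains valid regardless of which directory kubectl is later invoked from (relativization then happens at write time, per the ModifyConfig change above). A small sketch of that normalization (the paths are examples):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// A relative path is resolved against the current working directory.
	abs, err := filepath.Abs("certs/client.crt")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	// Running from /home/me would print /home/me/certs/client.crt.
	fmt.Println(abs)
}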


@@ -21,6 +21,7 @@ import (
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"path/filepath"
"github.com/spf13/cobra" "github.com/spf13/cobra"
@@ -94,10 +95,14 @@ func (o createClusterOptions) run() error {
 		return err
 	}
-	cluster := o.modifyCluster(config.Clusters[o.name])
-	config.Clusters[o.name] = cluster
+	startingStanza, exists := config.Clusters[o.name]
+	if !exists {
+		startingStanza = clientcmdapi.NewCluster()
+	}
+	cluster := o.modifyCluster(*startingStanza)
+	config.Clusters[o.name] = &cluster

-	if err := ModifyConfig(o.configAccess, *config); err != nil {
+	if err := ModifyConfig(o.configAccess, *config, true); err != nil {
 		return err
 	}
@@ -129,6 +134,7 @@ func (o *createClusterOptions) modifyCluster(existingCluster clientcmdapi.Cluster) clientcmdapi.Cluster {
 		modifiedCluster.InsecureSkipTLSVerify = false
 		modifiedCluster.CertificateAuthority = ""
 	} else {
+		caPath, _ = filepath.Abs(caPath)
 		modifiedCluster.CertificateAuthority = caPath
 		// Specifying a certificate authority file clears certificate authority data and insecure mode
 		if caPath != "" {


@@ -81,10 +81,14 @@ func (o createContextOptions) run() error {
 		return err
 	}
-	context := o.modifyContext(config.Contexts[o.name])
-	config.Contexts[o.name] = context
+	startingStanza, exists := config.Contexts[o.name]
+	if !exists {
+		startingStanza = clientcmdapi.NewContext()
+	}
+	context := o.modifyContext(*startingStanza)
+	config.Contexts[o.name] = &context

-	if err := ModifyConfig(o.configAccess, *config); err != nil {
+	if err := ModifyConfig(o.configAccess, *config, true); err != nil {
 		return err
 	}


@@ -50,7 +50,7 @@ func newNavigationSteps(path string) (*navigationSteps, error) {
 	// store them as a single step. In order to do that, we need to determine what set of tokens is a legal step AFTER the name of the map key
 	// This set of reflective code pulls the type of the map values, uses that type to look up the set of legal tags. Those legal tags are used to
 	// walk the list of remaining parts until we find a match to a legal tag or the end of the string. That name is used to burn all the used parts.
-	mapValueType := currType.Elem()
+	mapValueType := currType.Elem().Elem()
 	mapValueOptions, err := getPotentialTypeValues(mapValueType)
 	if err != nil {
 		return nil, err
@@ -120,6 +120,10 @@ func findNameStep(parts []string, typeOptions util.StringSet) string {
 // getPotentialTypeValues takes a type and looks up the tags used to represent its fields when serialized.
 func getPotentialTypeValues(typeValue reflect.Type) (map[string]reflect.Type, error) {
+	if typeValue.Kind() == reflect.Ptr {
+		typeValue = typeValue.Elem()
+	}
 	if typeValue.Kind() != reflect.Struct {
 		return nil, fmt.Errorf("%v is not of type struct", typeValue)
 	}
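Both edits account for the new level of indirection: the element type of map[string]*Cluster is *Cluster, so one extra Elem() is needed to reach the struct, and any type being inspected may first need its pointer unwrapped. A small standalone demonstration of those reflect calls (the Cluster type is a stand-in):

package main

import (
	"fmt"
	"reflect"
)

type Cluster struct{ Server string }

func main() {
	m := map[string]*Cluster{}
	t := reflect.TypeOf(m)
	fmt.Println(t.Elem())        // *main.Cluster: the map's value type
	fmt.Println(t.Elem().Elem()) // main.Cluster: one more Elem unwraps the pointer

	// The same unwrapping guard used before inspecting struct fields:
	v := t.Elem()
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	fmt.Println(v.Kind() == reflect.Struct) // true
}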


@@ -36,7 +36,7 @@ func TestParseWithDots(t *testing.T) {
 			path: "clusters.my.dot.delimited.name.server",
 			expectedNavigationSteps: navigationSteps{
 				steps: []navigationStep{
-					{"clusters", reflect.TypeOf(make(map[string]clientcmdapi.Cluster))},
+					{"clusters", reflect.TypeOf(make(map[string]*clientcmdapi.Cluster))},
 					{"my.dot.delimited.name", reflect.TypeOf(clientcmdapi.Cluster{})},
 					{"server", reflect.TypeOf("")},
 				},
@@ -51,7 +51,7 @@ func TestParseWithDotsEndingWithName(t *testing.T) {
 			path: "contexts.10.12.12.12",
 			expectedNavigationSteps: navigationSteps{
 				steps: []navigationStep{
-					{"contexts", reflect.TypeOf(make(map[string]clientcmdapi.Context))},
+					{"contexts", reflect.TypeOf(make(map[string]*clientcmdapi.Context))},
 					{"10.12.12.12", reflect.TypeOf(clientcmdapi.Context{})},
 				},
 			},
@@ -91,5 +91,6 @@ func (test stepParserTest) run(t *testing.T) {
 	if !reflect.DeepEqual(test.expectedNavigationSteps, *actualSteps) {
 		t.Errorf("diff: %v", util.ObjectDiff(test.expectedNavigationSteps, *actualSteps))
+		t.Errorf("expected: %#v\n actual: %#v", test.expectedNavigationSteps, *actualSteps)
 	}
 }


@@ -82,7 +82,7 @@ func (o setOptions) run() error {
 		return err
 	}
-	if err := ModifyConfig(o.configAccess, *config); err != nil {
+	if err := ModifyConfig(o.configAccess, *config, false); err != nil {
 		return err
 	}
@@ -139,26 +139,15 @@ func modifyConfig(curr reflect.Value, steps *navigationSteps, propertyValue stri
 		needToSetNewMapValue := currMapValue.Kind() == reflect.Invalid
 		if needToSetNewMapValue {
-			currMapValue = reflect.New(mapValueType).Elem()
+			currMapValue = reflect.New(mapValueType.Elem()).Elem().Addr()
 			actualCurrValue.SetMapIndex(mapKey, currMapValue)
 		}
-		// our maps do not hold pointers to structs, they hold the structs themselves. This means that MapIndex returns the struct itself
-		// That in turn means that they have kinds of type.Struct, which is not a settable type. Because of this, we need to make new struct of that type
-		// copy all the data from the old value into the new value, then take the .addr of the new value to modify it in the next recursion.
-		// clear as mud
-		modifiableMapValue := reflect.New(currMapValue.Type()).Elem()
-		modifiableMapValue.Set(currMapValue)
-		if modifiableMapValue.Kind() == reflect.Struct {
-			modifiableMapValue = modifiableMapValue.Addr()
-		}
-		err := modifyConfig(modifiableMapValue, steps, propertyValue, unset)
+		err := modifyConfig(currMapValue, steps, propertyValue, unset)
 		if err != nil {
 			return err
 		}
-		actualCurrValue.SetMapIndex(mapKey, reflect.Indirect(modifiableMapValue))
 		return nil

 	case reflect.String:
@@ -213,5 +202,6 @@ func modifyConfig(curr reflect.Value, steps *navigationSteps, propertyValue string, unset bool) error {
 	}
-	return fmt.Errorf("Unrecognized type: %v", actualCurrValue)
+	panic(fmt.Errorf("Unrecognized type: %v", actualCurrValue))
+	return nil
 }
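With pointer map values, MapIndex returns a value that can be mutated through the pointer, which is why the old copy-out/copy-back block could be deleted outright. The remaining subtlety is minting a brand-new entry: reflect.New(mapValueType.Elem()).Elem().Addr() allocates a zero struct and takes its address so the recursion can fill it in. A compact sketch of that allocation pattern (stand-in type, not the clientcmdapi one):

package main

import (
	"fmt"
	"reflect"
)

type Cluster struct{ Server string }

func main() {
	m := map[string]*Cluster{}
	mv := reflect.ValueOf(m)
	mapValueType := mv.Type().Elem() // *main.Cluster

	// Allocate a zero Cluster and keep a pointer to it, mirroring
	// reflect.New(mapValueType.Elem()).Elem().Addr() in modifyConfig.
	newVal := reflect.New(mapValueType.Elem()).Elem().Addr()
	mv.SetMapIndex(reflect.ValueOf("big-cluster"), newVal)

	// Because the stored value is a pointer, it can be mutated in place.
	newVal.Elem().FieldByName("Server").SetString("http://example.invalid:8080")
	fmt.Println(m["big-cluster"].Server)
}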


@@ -75,7 +75,7 @@ func (o unsetOptions) run() error {
 		return err
 	}
-	if err := ModifyConfig(o.configAccess, *config); err != nil {
+	if err := ModifyConfig(o.configAccess, *config, false); err != nil {
 		return err
 	}


@@ -66,7 +66,7 @@ func (o useContextOptions) run() error {
 	config.CurrentContext = o.contextName
-	if err := ModifyConfig(o.configAccess, *config); err != nil {
+	if err := ModifyConfig(o.configAccess, *config, true); err != nil {
 		return err
 	}


@@ -27,7 +27,10 @@ import (
 )

 const (
-	stop_long = `Gracefully shut down a resource by name or filename.
+	stop_long = `Deprecated: Gracefully shut down a resource by name or filename.
+
+stop command is deprecated, all its functionalities are covered by delete command.
+See 'kubectl delete --help' for more details.

 Attempts to shut down and delete a resource that supports graceful termination.
 If the resource is scalable it will be scaled to 0 before deletion.`
@@ -50,7 +53,7 @@ func NewCmdStop(f *cmdutil.Factory, out io.Writer) *cobra.Command {
 	}{}
 	cmd := &cobra.Command{
 		Use:     "stop (-f FILENAME | RESOURCE (NAME | -l label | --all))",
-		Short:   "Gracefully shut down a resource by name or filename.",
+		Short:   "Deprecated: Gracefully shut down a resource by name or filename.",
 		Long:    stop_long,
 		Example: stop_example,
 		Run: func(cmd *cobra.Command, args []string) {


@@ -414,8 +414,7 @@ func TestTemplateStrings(t *testing.T) {
"true", "true",
}, },
} }
// The point of this test is to verify that the below template works. If you change this // The point of this test is to verify that the below template works.
// template, you need to update hack/e2e-suite/update.sh.
tmpl := `{{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "foo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}` tmpl := `{{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "foo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`
p, err := NewTemplatePrinter([]byte(tmpl)) p, err := NewTemplatePrinter([]byte(tmpl))
if err != nil { if err != nil {


@@ -74,19 +74,19 @@ func (c *fakeRc) Get(name string) (*api.ReplicationController, error) {
 	if len(c.responses) == 0 {
 		return nil, fmt.Errorf("Unexpected Action: %s", action)
 	}
-	c.Fake.Actions = append(c.Fake.Actions, action)
+	c.Fake.Invokes(action, nil)
 	result := c.responses[0]
 	c.responses = c.responses[1:]
 	return result.controller, result.err
 }

 func (c *fakeRc) Create(controller *api.ReplicationController) (*api.ReplicationController, error) {
-	c.Fake.Actions = append(c.Fake.Actions, testclient.FakeAction{Action: "create-controller", Value: controller.ObjectMeta.Name})
+	c.Fake.Invokes(testclient.FakeAction{Action: "create-controller", Value: controller.ObjectMeta.Name}, nil)
 	return controller, nil
 }

 func (c *fakeRc) Update(controller *api.ReplicationController) (*api.ReplicationController, error) {
-	c.Fake.Actions = append(c.Fake.Actions, testclient.FakeAction{Action: "update-controller", Value: controller.ObjectMeta.Name})
+	c.Fake.Invokes(testclient.FakeAction{Action: "update-controller", Value: controller.ObjectMeta.Name}, nil)
 	return controller, nil
 }
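Across these test files the fakes stop appending to an exported Actions slice and instead go through Fake.Invokes, with readers calling fake.Actions() (see the scaler and kubelet tests below). The Fake type itself is not shown in this diff, but the shape of the change suggests recording is being funneled through one method so it can be synchronized; a generic sketch of that recorder pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

// FakeAction mirrors the shape used in the tests above.
type FakeAction struct {
	Action string
	Value  interface{}
}

// fakeRecorder is a hypothetical stand-in for the testclient Fake:
// all writes go through Invokes, and Actions returns a copy, so
// concurrent test goroutines cannot race on the slice.
type fakeRecorder struct {
	mu      sync.Mutex
	actions []FakeAction
}

func (f *fakeRecorder) Invokes(action FakeAction, _ interface{}) {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.actions = append(f.actions, action)
}

func (f *fakeRecorder) Actions() []FakeAction {
	f.mu.Lock()
	defer f.mu.Unlock()
	return append([]FakeAction(nil), f.actions...)
}

func main() {
	f := &fakeRecorder{}
	f.Invokes(FakeAction{Action: "create-controller", Value: "foo"}, nil)
	fmt.Println(f.Actions())
}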


@@ -73,14 +73,15 @@ func TestReplicationControllerScale(t *testing.T) {
 	name := "foo"
 	scaler.Scale("default", name, count, &preconditions, nil, nil)

-	if len(fake.Actions) != 2 {
-		t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", fake.Actions)
+	actions := fake.Actions()
+	if len(actions) != 2 {
+		t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions)
 	}
-	if fake.Actions[0].Action != "get-replicationController" || fake.Actions[0].Value != name {
-		t.Errorf("unexpected action: %v, expected get-replicationController %s", fake.Actions[0], name)
+	if actions[0].Action != "get-replicationController" || actions[0].Value != name {
+		t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name)
 	}
-	if fake.Actions[1].Action != "update-replicationController" || fake.Actions[1].Value.(*api.ReplicationController).Spec.Replicas != int(count) {
-		t.Errorf("unexpected action %v, expected update-replicationController with replicas = %d", fake.Actions[1], count)
+	if actions[1].Action != "update-replicationController" || actions[1].Value.(*api.ReplicationController).Spec.Replicas != int(count) {
+		t.Errorf("unexpected action %v, expected update-replicationController with replicas = %d", actions[1], count)
 	}
 }
@@ -96,11 +97,12 @@ func TestReplicationControllerScaleFailsPreconditions(t *testing.T) {
 	name := "foo"
 	scaler.Scale("default", name, count, &preconditions, nil, nil)

-	if len(fake.Actions) != 1 {
-		t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", fake.Actions)
+	actions := fake.Actions()
+	if len(actions) != 1 {
+		t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions)
 	}
-	if fake.Actions[0].Action != "get-replicationController" || fake.Actions[0].Value != name {
-		t.Errorf("unexpected action: %v, expected get-replicationController %s", fake.Actions[0], name)
+	if actions[0].Action != "get-replicationController" || actions[0].Value != name {
+		t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name)
 	}
 }


@@ -47,12 +47,13 @@ func TestReplicationControllerStop(t *testing.T) {
 	if s != expected {
 		t.Errorf("expected %s, got %s", expected, s)
 	}
-	if len(fake.Actions) != 7 {
+	actions := fake.Actions()
+	if len(actions) != 7 {
 		t.Errorf("unexpected actions: %v, expected 6 actions (get, list, get, update, get, get, delete)", fake.Actions)
 	}
 	for i, action := range []string{"get", "list", "get", "update", "get", "get", "delete"} {
-		if fake.Actions[i].Action != action+"-replicationController" {
-			t.Errorf("unexpected action: %+v, expected %s-replicationController", fake.Actions[i], action)
+		if actions[i].Action != action+"-replicationController" {
+			t.Errorf("unexpected action: %+v, expected %s-replicationController", actions[i], action)
 		}
 	}
 }
@@ -159,10 +160,11 @@ func TestSimpleStop(t *testing.T) {
 			t.Errorf("unexpected return: %s (%s)", s, test.test)
 		}
 	}
-	if len(test.actions) != len(fake.Actions) {
+	actions := fake.Actions()
+	if len(test.actions) != len(actions) {
 		t.Errorf("unexpected actions: %v; expected %v (%s)", fake.Actions, test.actions, test.test)
 	}
-	for i, action := range fake.Actions {
+	for i, action := range actions {
 		testAction := test.actions[i]
 		if action.Action != testAction {
 			t.Errorf("unexpected action: %v; expected %v (%s)", action, testAction, test.test)


@@ -32,15 +32,18 @@ import (
 )

 type sourceURL struct {
 	url         string
+	header      http.Header
 	nodeName    string
 	updates     chan<- interface{}
 	data        []byte
+	failureLogs int
 }

-func NewSourceURL(url, nodeName string, period time.Duration, updates chan<- interface{}) {
+func NewSourceURL(url string, header http.Header, nodeName string, period time.Duration, updates chan<- interface{}) {
 	config := &sourceURL{
 		url:      url,
+		header:   header,
 		nodeName: nodeName,
 		updates:  updates,
 		data:     nil,
@@ -51,7 +54,19 @@ func NewSourceURL(url, nodeName string, period time.Duration, updates chan<- interface{}) {
 func (s *sourceURL) run() {
 	if err := s.extractFromURL(); err != nil {
-		glog.Errorf("Failed to read URL: %v", err)
+		// Don't log this multiple times per minute. The first few entries should be
+		// enough to get the point across.
+		if s.failureLogs < 3 {
+			glog.Warningf("Failed to read pods from URL: %v", err)
+		} else if s.failureLogs == 3 {
+			glog.Warningf("Failed to read pods from URL. Won't log this message anymore: %v", err)
+		}
+		s.failureLogs++
+	} else {
+		if s.failureLogs > 0 {
+			glog.Info("Successfully read pods from URL.")
+			s.failureLogs = 0
+		}
 	}
 }
@@ -60,7 +75,13 @@ func (s *sourceURL) applyDefaults(pod *api.Pod) error {
 }

 func (s *sourceURL) extractFromURL() error {
-	resp, err := http.Get(s.url)
+	req, err := http.NewRequest("GET", s.url, nil)
+	if err != nil {
+		return err
+	}
+	req.Header = s.header
+	client := &http.Client{}
+	resp, err := client.Do(req)
 	if err != nil {
 		return err
 	}
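Switching from http.Get to an explicit http.Request is the standard way to attach custom headers in Go's net/http, since http.Get offers no hook for them. A self-contained sketch of the same pattern (the URL and header values are examples):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func fetchWithHeader(url string, header http.Header) ([]byte, error) {
	// Build the request explicitly so headers can be set before sending.
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header = header // e.g. Metadata-Flavor: Google for GCE metadata endpoints
	resp, err := (&http.Client{}).Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body)
}

func main() {
	h := make(http.Header)
	h.Set("Metadata-Flavor", "Google")
	// example.com is a placeholder; any URL serving a pod manifest would do.
	body, err := fetchWithHeader("http://example.com/manifest", h)
	fmt.Println(len(body), err)
}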


@@ -18,6 +18,7 @@ package config
 import (
 	"encoding/json"
+	"net/http"
 	"net/http/httptest"
 	"testing"
 	"time"
@@ -33,7 +34,7 @@ import (
 func TestURLErrorNotExistNoUpdate(t *testing.T) {
 	ch := make(chan interface{})
-	NewSourceURL("http://localhost:49575/_not_found_", "localhost", time.Millisecond, ch)
+	NewSourceURL("http://localhost:49575/_not_found_", http.Header{}, "localhost", time.Millisecond, ch)
 	select {
 	case got := <-ch:
 		t.Errorf("Expected no update, Got %#v", got)
@@ -43,7 +44,7 @@ func TestURLErrorNotExistNoUpdate(t *testing.T) {
 func TestExtractFromHttpBadness(t *testing.T) {
 	ch := make(chan interface{}, 1)
-	c := sourceURL{"http://localhost:49575/_not_found_", "other", ch, nil}
+	c := sourceURL{"http://localhost:49575/_not_found_", http.Header{}, "other", ch, nil, 0}
 	if err := c.extractFromURL(); err == nil {
 		t.Errorf("Expected error")
 	}
@@ -112,7 +113,7 @@ func TestExtractInvalidPods(t *testing.T) {
 	testServer := httptest.NewServer(&fakeHandler)
 	defer testServer.Close()
 	ch := make(chan interface{}, 1)
-	c := sourceURL{testServer.URL, "localhost", ch, nil}
+	c := sourceURL{testServer.URL, http.Header{}, "localhost", ch, nil, 0}
 	if err := c.extractFromURL(); err == nil {
 		t.Errorf("%s: Expected error", testCase.desc)
 	}
@@ -259,7 +260,7 @@ func TestExtractPodsFromHTTP(t *testing.T) {
 	testServer := httptest.NewServer(&fakeHandler)
 	defer testServer.Close()
 	ch := make(chan interface{}, 1)
-	c := sourceURL{testServer.URL, "localhost", ch, nil}
+	c := sourceURL{testServer.URL, http.Header{}, hostname, ch, nil, 0}
 	if err := c.extractFromURL(); err != nil {
 		t.Errorf("%s: Unexpected error: %v", testCase.desc, err)
 		continue
@@ -276,3 +277,47 @@ func TestExtractPodsFromHTTP(t *testing.T) {
 		}
 	}
 }
+func TestURLWithHeader(t *testing.T) {
+	pod := &api.Pod{
+		TypeMeta: api.TypeMeta{
+			APIVersion: testapi.Version(),
+			Kind:       "Pod",
+		},
+		ObjectMeta: api.ObjectMeta{
+			Name:      "foo",
+			UID:       "111",
+			Namespace: "mynamespace",
+		},
+		Spec: api.PodSpec{
+			NodeName:   "localhost",
+			Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}},
+		},
+	}
+	data, err := json.Marshal(pod)
+	if err != nil {
+		t.Fatalf("Unexpected json marshalling error: %v", err)
+	}
+	fakeHandler := util.FakeHandler{
+		StatusCode:   200,
+		ResponseBody: string(data),
+	}
+	testServer := httptest.NewServer(&fakeHandler)
+	defer testServer.Close()
+	ch := make(chan interface{}, 1)
+	header := make(http.Header)
+	header.Set("Metadata-Flavor", "Google")
+	c := sourceURL{testServer.URL, header, "localhost", ch, nil, 0}
+	if err := c.extractFromURL(); err != nil {
+		t.Fatalf("Unexpected error extracting from URL: %v", err)
+	}
+	update := (<-ch).(kubelet.PodUpdate)
+
+	headerVal := fakeHandler.RequestReceived.Header["Metadata-Flavor"]
+	if len(headerVal) != 1 || headerVal[0] != "Google" {
+		t.Errorf("Header missing expected entry %v. Got %v", header, fakeHandler.RequestReceived.Header)
+	}
+	if len(update.Pods) != 1 {
+		t.Errorf("Received wrong number of pods, expected one: %v", update.Pods)
+	}
+}


@@ -2322,10 +2322,11 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 	if err := kubelet.updateNodeStatus(); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if len(kubeClient.Actions) != 2 || kubeClient.Actions[1].Action != "update-status-node" {
-		t.Fatalf("unexpected actions: %v", kubeClient.Actions)
+	actions := kubeClient.Actions()
+	if len(actions) != 2 || actions[1].Action != "update-status-node" {
+		t.Fatalf("unexpected actions: %v", actions)
 	}
-	updatedNode, ok := kubeClient.Actions[1].Value.(*api.Node)
+	updatedNode, ok := actions[1].Value.(*api.Node)
 	if !ok {
 		t.Errorf("unexpected object type")
 	}
@@ -2419,10 +2420,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 	if err := kubelet.updateNodeStatus(); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if len(kubeClient.Actions) != 2 {
-		t.Errorf("unexpected actions: %v", kubeClient.Actions)
+	actions := kubeClient.Actions()
+	if len(actions) != 2 {
+		t.Errorf("unexpected actions: %v", actions)
 	}
-	updatedNode, ok := kubeClient.Actions[1].Value.(*api.Node)
+	updatedNode, ok := actions[1].Value.(*api.Node)
 	if !ok {
 		t.Errorf("unexpected object type")
 	}
@@ -2506,10 +2508,11 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
 	if err := kubelet.updateNodeStatus(); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	if len(kubeClient.Actions) != 2 || kubeClient.Actions[1].Action != "update-status-node" {
-		t.Fatalf("unexpected actions: %v", kubeClient.Actions)
+	actions := kubeClient.Actions()
+	if len(actions) != 2 || actions[1].Action != "update-status-node" {
+		t.Fatalf("unexpected actions: %v", actions)
 	}
-	updatedNode, ok := kubeClient.Actions[1].Value.(*api.Node)
+	updatedNode, ok := actions[1].Value.(*api.Node)
 	if !ok {
 		t.Errorf("unexpected object type")
 	}
@@ -2536,8 +2539,8 @@ func TestUpdateNodeStatusError(t *testing.T) {
 	if err := kubelet.updateNodeStatus(); err == nil {
 		t.Errorf("unexpected non error: %v", err)
 	}
-	if len(testKubelet.fakeKubeClient.Actions) != nodeStatusUpdateRetry {
-		t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions)
+	if len(testKubelet.fakeKubeClient.Actions()) != nodeStatusUpdateRetry {
+		t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions())
 	}
 }

Some files were not shown because too many files have changed in this diff Show More