diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 4bb159e3fd5..d6862129e11 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -261,93 +261,93 @@ }, { "ImportPath": "github.com/google/cadvisor/api", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/cache/memory", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/collector", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/container", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/events", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/fs", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/healthz", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/http", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/info/v2", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/manager", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/metrics", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/pages", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/storage", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/summary", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/utils", - "Comment": 
"0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/validate", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/cadvisor/version", - "Comment": "0.16.0-81-g27fb6d5", - "Rev": "27fb6d593c6bffe274718119659815771e79e198" + "Comment": "0.16.0.2", + "Rev": "cefada41b87c35294533638733c563a349b95f05" }, { "ImportPath": "github.com/google/gofuzz", diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go b/Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go index ab5d93eef73..43839038576 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go @@ -19,6 +19,7 @@ import ( "net/http" "path" "strconv" + "time" "github.com/golang/glog" info "github.com/google/cadvisor/info/v1" @@ -449,8 +450,63 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma } } +func instCpuStats(last, cur *info.ContainerStats) (*v2.CpuInstStats, error) { + if last == nil { + return nil, nil + } + if !cur.Timestamp.After(last.Timestamp) { + return nil, fmt.Errorf("container stats move backwards in time") + } + if len(last.Cpu.Usage.PerCpu) != len(cur.Cpu.Usage.PerCpu) { + return nil, fmt.Errorf("different number of cpus") + } + timeDelta := cur.Timestamp.Sub(last.Timestamp) + if timeDelta <= 100*time.Millisecond { + return nil, fmt.Errorf("time delta unexpectedly small") + } + // Nanoseconds to gain precision and avoid having zero seconds if the + // difference between the timestamps is just under a second + timeDeltaNs := uint64(timeDelta.Nanoseconds()) + convertToRate := func(lastValue, curValue uint64) (uint64, error) { + if curValue < lastValue { + return 0, fmt.Errorf("cumulative stats decrease") + } + valueDelta := curValue - lastValue + return (valueDelta * 1e9) / timeDeltaNs, nil + } + total, err := convertToRate(last.Cpu.Usage.Total, cur.Cpu.Usage.Total) + if err != nil { + return nil, err + } + percpu := make([]uint64, len(last.Cpu.Usage.PerCpu)) + for i := range percpu { + var err error + percpu[i], err = convertToRate(last.Cpu.Usage.PerCpu[i], cur.Cpu.Usage.PerCpu[i]) + if err != nil { + return nil, err + } + } + user, err := convertToRate(last.Cpu.Usage.User, cur.Cpu.Usage.User) + if err != nil { + return nil, err + } + system, err := convertToRate(last.Cpu.Usage.System, cur.Cpu.Usage.System) + if err != nil { + return nil, err + } + return &v2.CpuInstStats{ + Usage: v2.CpuInstUsage{ + Total: total, + PerCpu: percpu, + User: user, + System: system, + }, + }, nil +} + func convertStats(cont *info.ContainerInfo) []v2.ContainerStats { - stats := []v2.ContainerStats{} + stats := make([]v2.ContainerStats, 0, len(cont.Stats)) + var last *info.ContainerStats for _, val := range cont.Stats { stat := v2.ContainerStats{ Timestamp: val.Timestamp, @@ -463,6 +519,13 @@ func convertStats(cont *info.ContainerInfo) []v2.ContainerStats { } if stat.HasCpu { stat.Cpu = val.Cpu + cpuInst, err := instCpuStats(last, val) + if err != nil { + glog.Warningf("Could not get instant cpu stats: %v", err) + } else { + stat.CpuInst = cpuInst + } + last = val } if stat.HasMemory { stat.Memory = val.Memory diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/api/versions_test.go 
b/Godeps/_workspace/src/github.com/google/cadvisor/api/versions_test.go index 0a107858a1b..82675703f29 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/api/versions_test.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/api/versions_test.go @@ -19,9 +19,11 @@ import ( "net/http" "reflect" "testing" + "time" "github.com/google/cadvisor/events" info "github.com/google/cadvisor/info/v1" + "github.com/google/cadvisor/info/v2" "github.com/stretchr/testify/assert" ) @@ -78,3 +80,170 @@ func TestGetEventRequestDoubleArgument(t *testing.T) { assert.True(t, stream) assert.Nil(t, err) } + +func TestInstCpuStats(t *testing.T) { + tests := []struct { + last *info.ContainerStats + cur *info.ContainerStats + want *v2.CpuInstStats + }{ + // Last is missing + { + nil, + &info.ContainerStats{}, + nil, + }, + // Goes back in time + { + &info.ContainerStats{ + Timestamp: time.Unix(100, 0).Add(time.Second), + }, + &info.ContainerStats{ + Timestamp: time.Unix(100, 0), + }, + nil, + }, + // Zero time delta + { + &info.ContainerStats{ + Timestamp: time.Unix(100, 0), + }, + &info.ContainerStats{ + Timestamp: time.Unix(100, 0), + }, + nil, + }, + // Unexpectedly small time delta + { + &info.ContainerStats{ + Timestamp: time.Unix(100, 0), + }, + &info.ContainerStats{ + Timestamp: time.Unix(100, 0).Add(30 * time.Millisecond), + }, + nil, + }, + // Different number of cpus + { + &info.ContainerStats{ + Timestamp: time.Unix(100, 0), + Cpu: info.CpuStats{ + Usage: info.CpuUsage{ + PerCpu: []uint64{100, 200}, + }, + }, + }, + &info.ContainerStats{ + Timestamp: time.Unix(100, 0).Add(time.Second), + Cpu: info.CpuStats{ + Usage: info.CpuUsage{ + PerCpu: []uint64{100, 200, 300}, + }, + }, + }, + nil, + }, + // Stat numbers decrease + { + &info.ContainerStats{ + Timestamp: time.Unix(100, 0), + Cpu: info.CpuStats{ + Usage: info.CpuUsage{ + Total: 300, + PerCpu: []uint64{100, 200}, + User: 250, + System: 50, + }, + }, + }, + &info.ContainerStats{ + Timestamp: time.Unix(100, 0).Add(time.Second), + Cpu: info.CpuStats{ + Usage: info.CpuUsage{ + Total: 200, + PerCpu: []uint64{100, 100}, + User: 150, + System: 50, + }, + }, + }, + nil, + }, + // One second elapsed + { + &info.ContainerStats{ + Timestamp: time.Unix(100, 0), + Cpu: info.CpuStats{ + Usage: info.CpuUsage{ + Total: 300, + PerCpu: []uint64{100, 200}, + User: 250, + System: 50, + }, + }, + }, + &info.ContainerStats{ + Timestamp: time.Unix(100, 0).Add(time.Second), + Cpu: info.CpuStats{ + Usage: info.CpuUsage{ + Total: 500, + PerCpu: []uint64{200, 300}, + User: 400, + System: 100, + }, + }, + }, + &v2.CpuInstStats{ + Usage: v2.CpuInstUsage{ + Total: 200, + PerCpu: []uint64{100, 100}, + User: 150, + System: 50, + }, + }, + }, + // Two seconds elapsed + { + &info.ContainerStats{ + Timestamp: time.Unix(100, 0), + Cpu: info.CpuStats{ + Usage: info.CpuUsage{ + Total: 300, + PerCpu: []uint64{100, 200}, + User: 250, + System: 50, + }, + }, + }, + &info.ContainerStats{ + Timestamp: time.Unix(100, 0).Add(2 * time.Second), + Cpu: info.CpuStats{ + Usage: info.CpuUsage{ + Total: 500, + PerCpu: []uint64{200, 300}, + User: 400, + System: 100, + }, + }, + }, + &v2.CpuInstStats{ + Usage: v2.CpuInstUsage{ + Total: 100, + PerCpu: []uint64{50, 50}, + User: 75, + System: 25, + }, + }, + }, + } + for _, c := range tests { + got, err := instCpuStats(c.last, c.cur) + if err != nil { + if c.want == nil { + continue + } + t.Errorf("Unexpected error: %v", err) + } + assert.Equal(t, c.want, got) + } +} diff --git 
a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go index 7dd3b1edb89..e449586edf2 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go @@ -96,7 +96,7 @@ func (self *dockerFactory) String() string { return DockerNamespace } -func (self *dockerFactory) NewContainerHandler(name string) (handler container.ContainerHandler, err error) { +func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool) (handler container.ContainerHandler, err error) { client, err := docker.NewClient(*ArgDockerEndpoint) if err != nil { return @@ -108,6 +108,7 @@ func (self *dockerFactory) NewContainerHandler(name string) (handler container.C self.fsInfo, self.usesAufsDriver, &self.cgroupSubsystems, + inHostNamespace, ) return } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go index 2ad1f306c1f..c2ff1af697d 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go @@ -71,6 +71,18 @@ type dockerContainerHandler struct { // Metadata labels associated with the container. labels map[string]string + + // The container PID used to switch namespaces as required + pid int + + // Image name used for this container. + image string + + // The host root FS to read + rootFs string + + // The network mode of the container + networkMode string } func newDockerContainerHandler( @@ -80,6 +92,7 @@ func newDockerContainerHandler( fsInfo fs.FsInfo, usesAufsDriver bool, cgroupSubsystems *containerLibcontainer.CgroupSubsystems, + inHostNamespace bool, ) (container.ContainerHandler, error) { // Create the cgroup paths. cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints)) @@ -95,6 +108,11 @@ func newDockerContainerHandler( Paths: cgroupPaths, } + rootFs := "/" + if !inHostNamespace { + rootFs = "/rootfs" + } + id := ContainerNameToDockerId(name) handler := &dockerContainerHandler{ id: id, @@ -105,6 +123,7 @@ func newDockerContainerHandler( cgroupManager: cgroupManager, usesAufsDriver: usesAufsDriver, fsInfo: fsInfo, + rootFs: rootFs, } handler.storageDirs = append(handler.storageDirs, path.Join(*dockerRootDir, pathToAufsDir, id)) @@ -114,11 +133,14 @@ func newDockerContainerHandler( return nil, fmt.Errorf("failed to inspect container %q: %v", id, err) } handler.creationTime = ctnr.Created + handler.pid = ctnr.State.Pid // Add the name and bare ID as aliases of the container. handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/")) handler.aliases = append(handler.aliases, id) handler.labels = ctnr.Config.Labels + handler.image = ctnr.Config.Image + handler.networkMode = ctnr.HostConfig.NetworkMode return handler, nil } @@ -167,21 +189,23 @@ func libcontainerConfigToContainerSpec(config *libcontainerConfigs.Config, mi *i } spec.Cpu.Mask = utils.FixCpuMask(config.Cgroups.CpusetCpus, mi.NumCores) - // Docker reports a loop device for containers with --net=host. Ignore - // those too. 
- networkCount := 0 - for _, n := range config.Networks { - if n.Type != "loopback" { - networkCount += 1 - } - } - - spec.HasNetwork = networkCount > 0 spec.HasDiskIo = true return spec } +var ( + hasNetworkModes = map[string]bool{ + "host": true, + "bridge": true, + "default": true, + } +) + +func hasNet(networkMode string) bool { + return hasNetworkModes[networkMode] +} + func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) { mi, err := self.machineInfoFactory.GetMachineInfo() if err != nil { @@ -198,6 +222,8 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) { spec.HasFilesystem = true } spec.Labels = self.labels + spec.Image = self.image + spec.HasNetwork = hasNet(self.networkMode) return spec, err } @@ -247,32 +273,16 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error // TODO(vmarmol): Get from libcontainer API instead of cgroup manager when we don't have to support older Dockers. func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) { - config, err := self.readLibcontainerConfig() - if err != nil { - return nil, err - } - - var networkInterfaces []string - if len(config.Networks) > 0 { - // ContainerStats only reports stat for one network device. - // TODO(vmarmol): Handle multiple physical network devices. - for _, n := range config.Networks { - // Take the first non-loopback. - if n.Type != "loopback" { - networkInterfaces = []string{n.HostInterfaceName} - break - } - } - } - stats, err := containerLibcontainer.GetStats(self.cgroupManager, networkInterfaces) + stats, err := containerLibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid) if err != nil { return stats, err } - - // TODO(rjnagal): Remove the conversion when network stats are read from libcontainer. - convertInterfaceStats(&stats.Network.InterfaceStats) - for i := range stats.Network.Interfaces { - convertInterfaceStats(&stats.Network.Interfaces[i]) + // Clean up stats for containers that don't have their own network - this + // includes containers running in Kubernetes pods that use the network of the + // infrastructure container. This stops metrics being reported multiple times + // for each container in a pod. + if !hasNet(self.networkMode) { + stats.Network = info.NetworkStats{} } // Get filesystem stats. @@ -284,21 +294,6 @@ func (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) { return stats, nil } -func convertInterfaceStats(stats *info.InterfaceStats) { - net := *stats - - // Ingress for host veth is from the container. - // Hence tx_bytes stat on the host veth is actually number of bytes received by the container. 
- stats.RxBytes = net.TxBytes - stats.RxPackets = net.TxPackets - stats.RxErrors = net.TxErrors - stats.RxDropped = net.TxDropped - stats.TxBytes = net.RxBytes - stats.TxPackets = net.RxPackets - stats.TxErrors = net.RxErrors - stats.TxDropped = net.RxDropped -} - func (self *dockerContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) { if self.name != "/docker" { return []info.ContainerReference{}, nil diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/factory.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/factory.go index d5ab8290359..9a120f7caa2 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/factory.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/factory.go @@ -23,7 +23,7 @@ import ( type ContainerHandlerFactory interface { // Create a new ContainerHandler using this factory. CanHandleAndAccept() must have returned true. - NewContainerHandler(name string) (c ContainerHandler, err error) + NewContainerHandler(name string, inHostNamespace bool) (c ContainerHandler, err error) // Returns whether this factory can handle and accept the specified container. CanHandleAndAccept(name string) (handle bool, accept bool, err error) @@ -60,7 +60,7 @@ func HasFactories() bool { } // Create a new ContainerHandler for the specified container. -func NewContainerHandler(name string) (ContainerHandler, bool, error) { +func NewContainerHandler(name string, inHostNamespace bool) (ContainerHandler, bool, error) { factoriesLock.RLock() defer factoriesLock.RUnlock() @@ -76,7 +76,7 @@ func NewContainerHandler(name string) (ContainerHandler, bool, error) { return nil, false, nil } glog.V(3).Infof("Using factory %q for container %q", factory, name) - handle, err := factory.NewContainerHandler(name) + handle, err := factory.NewContainerHandler(name, inHostNamespace) return handle, canAccept, err } else { glog.V(4).Infof("Factory %q was unable to handle container %q", factory, name) diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/factory_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/factory_test.go index 991c365ab3b..8c4bb0429d7 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/factory_test.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/factory_test.go @@ -39,7 +39,7 @@ func (self *mockContainerHandlerFactory) CanHandleAndAccept(name string) (bool, return self.CanHandleValue, self.CanAcceptValue, nil } -func (self *mockContainerHandlerFactory) NewContainerHandler(name string) (ContainerHandler, error) { +func (self *mockContainerHandlerFactory) NewContainerHandler(name string, isHostNamespace bool) (ContainerHandler, error) { args := self.Called(name) return args.Get(0).(ContainerHandler), args.Error(1) } @@ -60,13 +60,13 @@ func TestNewContainerHandler_FirstMatches(t *testing.T) { RegisterContainerHandlerFactory(allwaysYes) // The yes factory should be asked to create the ContainerHandler. 
- mockContainer, err := mockFactory.NewContainerHandler(testContainerName) + mockContainer, err := mockFactory.NewContainerHandler(testContainerName, true) if err != nil { t.Error(err) } allwaysYes.On("NewContainerHandler", testContainerName).Return(mockContainer, nil) - cont, _, err := NewContainerHandler(testContainerName) + cont, _, err := NewContainerHandler(testContainerName, true) if err != nil { t.Error(err) } @@ -93,13 +93,13 @@ func TestNewContainerHandler_SecondMatches(t *testing.T) { RegisterContainerHandlerFactory(allwaysYes) // The yes factory should be asked to create the ContainerHandler. - mockContainer, err := mockFactory.NewContainerHandler(testContainerName) + mockContainer, err := mockFactory.NewContainerHandler(testContainerName, true) if err != nil { t.Error(err) } allwaysYes.On("NewContainerHandler", testContainerName).Return(mockContainer, nil) - cont, _, err := NewContainerHandler(testContainerName) + cont, _, err := NewContainerHandler(testContainerName, true) if err != nil { t.Error(err) } @@ -125,7 +125,7 @@ func TestNewContainerHandler_NoneMatch(t *testing.T) { } RegisterContainerHandlerFactory(allwaysNo2) - _, _, err := NewContainerHandler(testContainerName) + _, _, err := NewContainerHandler(testContainerName, true) if err == nil { t.Error("Expected NewContainerHandler to fail") } @@ -148,7 +148,7 @@ func TestNewContainerHandler_Accept(t *testing.T) { } RegisterContainerHandlerFactory(cannotAccept) - _, accept, err := NewContainerHandler(testContainerName) + _, accept, err := NewContainerHandler(testContainerName, true) if err != nil { t.Error("Expected NewContainerHandler to succeed") } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go index 0cd4c119b9d..a3b2e50a1f6 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go @@ -15,14 +15,19 @@ package libcontainer import ( + "bufio" "fmt" + "io/ioutil" "path" + "regexp" + "strconv" + "strings" "time" "github.com/docker/libcontainer" "github.com/docker/libcontainer/cgroups" + "github.com/golang/glog" info "github.com/google/cadvisor/info/v1" - "github.com/google/cadvisor/utils/sysinfo" ) type CgroupSubsystems struct { @@ -74,7 +79,7 @@ var supportedSubsystems map[string]struct{} = map[string]struct{}{ } // Get cgroup and networking stats of the specified container -func GetStats(cgroupManager cgroups.Manager, networkInterfaces []string) (*info.ContainerStats, error) { +func GetStats(cgroupManager cgroups.Manager, rootFs string, pid int) (*info.ContainerStats, error) { cgroupStats, err := cgroupManager.GetStats() if err != nil { return nil, err @@ -84,23 +89,90 @@ func GetStats(cgroupManager cgroups.Manager, networkInterfaces []string) (*info. } stats := toContainerStats(libcontainerStats) - // TODO(rjnagal): Use networking stats directly from libcontainer. 
- stats.Network.Interfaces = make([]info.InterfaceStats, len(networkInterfaces)) - for i := range networkInterfaces { - interfaceStats, err := sysinfo.GetNetworkStats(networkInterfaces[i]) + // If we know the pid, then get network stats from /proc/<pid>/net/dev + if pid > 0 { + netStats, err := networkStatsFromProc(rootFs, pid) if err != nil { - return stats, err + glog.V(2).Infof("Unable to get network stats from pid %d: %v", pid, err) + } else { + stats.Network.Interfaces = append(stats.Network.Interfaces, netStats...) } - stats.Network.Interfaces[i] = interfaceStats } + // For backwards compatibility. - if len(networkInterfaces) > 0 { + if len(stats.Network.Interfaces) > 0 { stats.Network.InterfaceStats = stats.Network.Interfaces[0] } return stats, nil } +func networkStatsFromProc(rootFs string, pid int) ([]info.InterfaceStats, error) { + netStatsFile := path.Join(rootFs, "proc", strconv.Itoa(pid), "/net/dev") + + ifaceStats, err := scanInterfaceStats(netStatsFile) + if err != nil { + return []info.InterfaceStats{}, fmt.Errorf("couldn't read network stats: %v", err) + } + + return ifaceStats, nil +} + +var ( + ignoredDevicePrefixes = []string{"lo", "veth", "docker"} + netStatLineRE = regexp.MustCompile("[ ]*(.+):([ ]+[0-9]+){16}") +) + +func isIgnoredDevice(ifName string) bool { + for _, prefix := range ignoredDevicePrefixes { + if strings.HasPrefix(strings.ToLower(ifName), prefix) { + return true + } + } + return false +} + +func scanInterfaceStats(netStatsFile string) ([]info.InterfaceStats, error) { + var ( + bkt uint64 + ) + + stats := []info.InterfaceStats{} + + data, err := ioutil.ReadFile(netStatsFile) + if err != nil { + return stats, fmt.Errorf("failure opening %s: %v", netStatsFile, err) + } + + reader := strings.NewReader(string(data)) + scanner := bufio.NewScanner(reader) + + scanner.Split(bufio.ScanLines) + + for scanner.Scan() { + line := scanner.Text() + if netStatLineRE.MatchString(line) { + line = strings.Replace(line, ":", "", -1) + + i := info.InterfaceStats{} + + _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d", + &i.Name, &i.RxBytes, &i.RxPackets, &i.RxErrors, &i.RxDropped, &bkt, &bkt, &bkt, + &bkt, &i.TxBytes, &i.TxPackets, &i.TxErrors, &i.TxDropped, &bkt, &bkt, &bkt, &bkt) + + if err != nil { + return stats, fmt.Errorf("failure parsing %s: %v", netStatsFile, err) + } + + if !isIgnoredDevice(i.Name) { + stats = append(stats, i) + } + } + } + + return stats, nil +} + func GetProcesses(cgroupManager cgroups.Manager) ([]int, error) { pids, err := cgroupManager.GetPids() if err != nil { diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go index 7422b3ddbc1..f949c57908a 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go @@ -109,7 +109,7 @@ func (self *FactoryForMockContainerHandler) String() string { return self.Name } -func (self *FactoryForMockContainerHandler) NewContainerHandler(name string) (ContainerHandler, error) { +func (self *FactoryForMockContainerHandler) NewContainerHandler(name string, inHostNamespace bool) (ContainerHandler, error) { handler := &MockContainerHandler{} if self.PrepareContainerHandlerFunc != nil { self.PrepareContainerHandlerFunc(name, handler) diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/factory.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/factory.go index
be1c799b75f..5b45ee923c8 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/factory.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/factory.go @@ -45,8 +45,12 @@ func (self *rawFactory) String() string { return "raw" } -func (self *rawFactory) NewContainerHandler(name string) (container.ContainerHandler, error) { - return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher) +func (self *rawFactory) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) { + rootFs := "/" + if !inHostNamespace { + rootFs = "/rootfs" + } + return newRawContainerHandler(name, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, self.watcher, rootFs) } // The raw factory can handle any container. If --docker_only is set to false, non-docker containers are ignored. diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go index 51405db74f7..8223b6e50f5 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go @@ -61,9 +61,11 @@ type rawContainerHandler struct { fsInfo fs.FsInfo externalMounts []mount + + rootFs string } -func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *InotifyWatcher) (container.ContainerHandler, error) { +func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSubsystems, machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, watcher *InotifyWatcher, rootFs string) (container.ContainerHandler, error) { // Create the cgroup paths. cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints)) for key, val := range cgroupSubsystems.MountPoints { @@ -108,6 +110,7 @@ func newRawContainerHandler(name string, cgroupSubsystems *libcontainer.CgroupSu hasNetwork: hasNetwork, externalMounts: externalMounts, watcher: watcher, + rootFs: rootFs, }, nil } @@ -326,15 +329,7 @@ func (self *rawContainerHandler) getFsStats(stats *info.ContainerStats) error { } func (self *rawContainerHandler) GetStats() (*info.ContainerStats, error) { - nd, err := self.GetRootNetworkDevices() - if err != nil { - return new(info.ContainerStats), err - } - networkInterfaces := make([]string, len(nd)) - for i := range nd { - networkInterfaces[i] = nd[i].Name - } - stats, err := libcontainer.GetStats(self.cgroupManager, networkInterfaces) + stats, err := libcontainer.GetStats(self.cgroupManager, self.rootFs, os.Getpid()) if err != nil { return stats, err } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go index 91512eca296..3ca17c461f6 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go @@ -61,6 +61,9 @@ type ContainerSpec struct { HasCustomMetrics bool `json:"has_custom_metrics"` CustomMetrics []MetricSpec `json:"custom_metrics,omitempty"` + + // Image name used for this container. 
+ Image string `json:"image,omitempty"` } // Container reference contains enough information to uniquely identify a container diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go index 74446890cac..cbf8c2525fa 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go @@ -80,14 +80,20 @@ type ContainerSpec struct { HasNetwork bool `json:"has_network"` HasFilesystem bool `json:"has_filesystem"` HasDiskIo bool `json:"has_diskio"` + + // Image name used for this container. + Image string `json:"image,omitempty"` } type ContainerStats struct { // The time of this stat point. Timestamp time.Time `json:"timestamp"` // CPU statistics - HasCpu bool `json:"has_cpu"` - Cpu v1.CpuStats `json:"cpu,omitempty"` + HasCpu bool `json:"has_cpu"` + // In nanoseconds (aggregated) + Cpu v1.CpuStats `json:"cpu,omitempty"` + // In nanocores per second (instantaneous) + CpuInst *CpuInstStats `json:"cpu_inst,omitempty"` // Disk IO statistics HasDiskIo bool `json:"has_diskio"` DiskIo v1.DiskIoStats `json:"diskio,omitempty"` @@ -204,3 +210,27 @@ type NetworkStats struct { // Network stats by interface. Interfaces []v1.InterfaceStats `json:"interfaces,omitempty"` } + +// Instantaneous CPU stats +type CpuInstStats struct { + Usage CpuInstUsage `json:"usage"` +} + +// CPU usage time statistics. +type CpuInstUsage struct { + // Total CPU usage. + // Units: nanocores per second + Total uint64 `json:"total"` + + // Per CPU/core usage of the container. + // Unit: nanocores per second + PerCpu []uint64 `json:"per_cpu_usage,omitempty"` + + // Time spent in user space. + // Unit: nanocores per second + User uint64 `json:"user"` + + // Time spent in kernel space. + // Unit: nanocores per second + System uint64 `json:"system"` +} diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go index df360263016..8526bc450ce 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go @@ -380,6 +380,7 @@ func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec { HasNetwork: specV1.HasNetwork, HasDiskIo: specV1.HasDiskIo, HasCustomMetrics: specV1.HasCustomMetrics, + Image: specV1.Image, } if specV1.HasCpu { specV2.Cpu.Limit = specV1.Cpu.Limit @@ -736,7 +737,7 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c // Create a container. 
func (m *manager) createContainer(containerName string) error { - handler, accept, err := container.NewContainerHandler(containerName) + handler, accept, err := container.NewContainerHandler(containerName, m.inHostNamespace) if err != nil { return err } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus.go b/Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus.go index f957c7c9cbb..a0c05779b45 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus.go @@ -61,7 +61,7 @@ type containerMetric struct { } func (cm *containerMetric) desc() *prometheus.Desc { - return prometheus.NewDesc(cm.name, cm.help, append([]string{"name", "id"}, cm.extraLabels...), nil) + return prometheus.NewDesc(cm.name, cm.help, append([]string{"name", "id", "image"}, cm.extraLabels...), nil) } // PrometheusCollector implements prometheus.Collector. @@ -287,60 +287,124 @@ func NewPrometheusCollector(infoProvider subcontainersInfoProvider) *PrometheusC }) }, }, { - name: "container_network_receive_bytes_total", - help: "Cumulative count of bytes received", - valueType: prometheus.CounterValue, + name: "container_network_receive_bytes_total", + help: "Cumulative count of bytes received", + valueType: prometheus.CounterValue, + extraLabels: []string{"interface"}, getValues: func(s *info.ContainerStats) metricValues { - return metricValues{{value: float64(s.Network.RxBytes)}} + values := make(metricValues, 0, len(s.Network.Interfaces)) + for _, value := range s.Network.Interfaces { + values = append(values, metricValue{ + value: float64(value.RxBytes), + labels: []string{value.Name}, + }) + } + return values }, }, { - name: "container_network_receive_packets_total", - help: "Cumulative count of packets received", - valueType: prometheus.CounterValue, + name: "container_network_receive_packets_total", + help: "Cumulative count of packets received", + valueType: prometheus.CounterValue, + extraLabels: []string{"interface"}, getValues: func(s *info.ContainerStats) metricValues { - return metricValues{{value: float64(s.Network.RxPackets)}} + values := make(metricValues, 0, len(s.Network.Interfaces)) + for _, value := range s.Network.Interfaces { + values = append(values, metricValue{ + value: float64(value.RxPackets), + labels: []string{value.Name}, + }) + } + return values }, }, { - name: "container_network_receive_packets_dropped_total", - help: "Cumulative count of packets dropped while receiving", - valueType: prometheus.CounterValue, + name: "container_network_receive_packets_dropped_total", + help: "Cumulative count of packets dropped while receiving", + valueType: prometheus.CounterValue, + extraLabels: []string{"interface"}, getValues: func(s *info.ContainerStats) metricValues { - return metricValues{{value: float64(s.Network.RxDropped)}} + values := make(metricValues, 0, len(s.Network.Interfaces)) + for _, value := range s.Network.Interfaces { + values = append(values, metricValue{ + value: float64(value.RxDropped), + labels: []string{value.Name}, + }) + } + return values }, }, { - name: "container_network_receive_errors_total", - help: "Cumulative count of errors encountered while receiving", - valueType: prometheus.CounterValue, + name: "container_network_receive_errors_total", + help: "Cumulative count of errors encountered while receiving", + valueType: prometheus.CounterValue, + extraLabels: []string{"interface"}, getValues: func(s *info.ContainerStats) metricValues { - 
return metricValues{{value: float64(s.Network.RxErrors)}} + values := make(metricValues, 0, len(s.Network.Interfaces)) + for _, value := range s.Network.Interfaces { + values = append(values, metricValue{ + value: float64(value.RxErrors), + labels: []string{value.Name}, + }) + } + return values }, }, { - name: "container_network_transmit_bytes_total", - help: "Cumulative count of bytes transmitted", - valueType: prometheus.CounterValue, + name: "container_network_transmit_bytes_total", + help: "Cumulative count of bytes transmitted", + valueType: prometheus.CounterValue, + extraLabels: []string{"interface"}, getValues: func(s *info.ContainerStats) metricValues { - return metricValues{{value: float64(s.Network.TxBytes)}} + values := make(metricValues, 0, len(s.Network.Interfaces)) + for _, value := range s.Network.Interfaces { + values = append(values, metricValue{ + value: float64(value.TxBytes), + labels: []string{value.Name}, + }) + } + return values }, }, { - name: "container_network_transmit_packets_total", - help: "Cumulative count of packets transmitted", - valueType: prometheus.CounterValue, + name: "container_network_transmit_packets_total", + help: "Cumulative count of packets transmitted", + valueType: prometheus.CounterValue, + extraLabels: []string{"interface"}, getValues: func(s *info.ContainerStats) metricValues { - return metricValues{{value: float64(s.Network.TxPackets)}} + values := make(metricValues, 0, len(s.Network.Interfaces)) + for _, value := range s.Network.Interfaces { + values = append(values, metricValue{ + value: float64(value.TxPackets), + labels: []string{value.Name}, + }) + } + return values }, }, { - name: "container_network_transmit_packets_dropped_total", - help: "Cumulative count of packets dropped while transmitting", - valueType: prometheus.CounterValue, + name: "container_network_transmit_packets_dropped_total", + help: "Cumulative count of packets dropped while transmitting", + valueType: prometheus.CounterValue, + extraLabels: []string{"interface"}, getValues: func(s *info.ContainerStats) metricValues { - return metricValues{{value: float64(s.Network.TxDropped)}} + values := make(metricValues, 0, len(s.Network.Interfaces)) + for _, value := range s.Network.Interfaces { + values = append(values, metricValue{ + value: float64(value.TxDropped), + labels: []string{value.Name}, + }) + } + return values }, }, { - name: "container_network_transmit_errors_total", - help: "Cumulative count of errors encountered while transmitting", - valueType: prometheus.CounterValue, + name: "container_network_transmit_errors_total", + help: "Cumulative count of errors encountered while transmitting", + valueType: prometheus.CounterValue, + extraLabels: []string{"interface"}, getValues: func(s *info.ContainerStats) metricValues { - return metricValues{{value: float64(s.Network.TxErrors)}} + values := make(metricValues, 0, len(s.Network.Interfaces)) + for _, value := range s.Network.Interfaces { + values = append(values, metricValue{ + value: float64(value.TxErrors), + labels: []string{value.Name}, + }) + } + return values }, }, { name: "container_tasks_state", @@ -401,12 +465,13 @@ func (c *PrometheusCollector) Collect(ch chan<- prometheus.Metric) { if len(container.Aliases) > 0 { name = container.Aliases[0] } + image := container.Spec.Image stats := container.Stats[0] for _, cm := range c.containerMetrics { desc := cm.desc() for _, metricValue := range cm.getValues(stats) { - ch <- prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value), 
append([]string{name, id}, metricValue.labels...)...) + ch <- prometheus.MustNewConstMetric(desc, cm.valueType, float64(metricValue.value), append([]string{name, id, image}, metricValue.labels...)...) } } } diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus_test.go b/Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus_test.go index 3efbe043cc7..0bb6c5e1c87 100644 --- a/Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus_test.go +++ b/Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus_test.go @@ -34,6 +34,9 @@ func (p testSubcontainersInfoProvider) SubcontainersInfo(string, *info.Container ContainerReference: info.ContainerReference{ Name: "testcontainer", }, + Spec: info.ContainerSpec{ + Image: "test", + }, Stats: []*info.ContainerStats{ { Cpu: info.CpuStats{ @@ -68,6 +71,19 @@ func (p testSubcontainersInfoProvider) SubcontainersInfo(string, *info.Container TxErrors: 20, TxDropped: 21, }, + Interfaces: []info.InterfaceStats{ + { + Name: "eth0", + RxBytes: 14, + RxPackets: 15, + RxErrors: 16, + RxDropped: 17, + TxBytes: 18, + TxPackets: 19, + TxErrors: 20, + TxDropped: 21, + }, + }, }, Filesystem: []info.FsStats{ { diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/metrics/testdata/prometheus_metrics b/Godeps/_workspace/src/github.com/google/cadvisor/metrics/testdata/prometheus_metrics deleted file mode 100644 index 70d5d977a11..00000000000 --- a/Godeps/_workspace/src/github.com/google/cadvisor/metrics/testdata/prometheus_metrics +++ /dev/null @@ -1,155 +0,0 @@ -# HELP container_cpu_system_seconds_total Cumulative system cpu time consumed in seconds. -# TYPE container_cpu_system_seconds_total counter -container_cpu_system_seconds_total{id="testcontainer",name="testcontainer"} 7e-09 -# HELP container_cpu_usage_seconds_total Cumulative cpu time consumed per cpu in seconds. -# TYPE container_cpu_usage_seconds_total counter -container_cpu_usage_seconds_total{cpu="cpu00",id="testcontainer",name="testcontainer"} 2e-09 -container_cpu_usage_seconds_total{cpu="cpu01",id="testcontainer",name="testcontainer"} 3e-09 -container_cpu_usage_seconds_total{cpu="cpu02",id="testcontainer",name="testcontainer"} 4e-09 -container_cpu_usage_seconds_total{cpu="cpu03",id="testcontainer",name="testcontainer"} 5e-09 -# HELP container_cpu_user_seconds_total Cumulative user cpu time consumed in seconds. 
-# TYPE container_cpu_user_seconds_total counter -container_cpu_user_seconds_total{id="testcontainer",name="testcontainer"} 6e-09 -# HELP container_fs_io_current Number of I/Os currently in progress -# TYPE container_fs_io_current gauge -container_fs_io_current{device="sda1",id="testcontainer",name="testcontainer"} 42 -container_fs_io_current{device="sda2",id="testcontainer",name="testcontainer"} 47 -# HELP container_fs_io_time_seconds_total Cumulative count of seconds spent doing I/Os -# TYPE container_fs_io_time_seconds_total counter -container_fs_io_time_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 4.3e-08 -container_fs_io_time_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.8e-08 -# HELP container_fs_io_time_weighted_seconds_total Cumulative weighted I/O time in seconds -# TYPE container_fs_io_time_weighted_seconds_total counter -container_fs_io_time_weighted_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 4.4e-08 -container_fs_io_time_weighted_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.9e-08 -# HELP container_fs_limit_bytes Number of bytes that can be consumed by the container on this filesystem. -# TYPE container_fs_limit_bytes gauge -container_fs_limit_bytes{device="sda1",id="testcontainer",name="testcontainer"} 22 -container_fs_limit_bytes{device="sda2",id="testcontainer",name="testcontainer"} 37 -# HELP container_fs_read_seconds_total Cumulative count of seconds spent reading -# TYPE container_fs_read_seconds_total counter -container_fs_read_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 2.7e-08 -container_fs_read_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.2e-08 -# HELP container_fs_reads_merged_total Cumulative count of reads merged -# TYPE container_fs_reads_merged_total counter -container_fs_reads_merged_total{device="sda1",id="testcontainer",name="testcontainer"} 25 -container_fs_reads_merged_total{device="sda2",id="testcontainer",name="testcontainer"} 40 -# HELP container_fs_reads_total Cumulative count of reads completed -# TYPE container_fs_reads_total counter -container_fs_reads_total{device="sda1",id="testcontainer",name="testcontainer"} 24 -container_fs_reads_total{device="sda2",id="testcontainer",name="testcontainer"} 39 -# HELP container_fs_sector_reads_total Cumulative count of sector reads completed -# TYPE container_fs_sector_reads_total counter -container_fs_sector_reads_total{device="sda1",id="testcontainer",name="testcontainer"} 26 -container_fs_sector_reads_total{device="sda2",id="testcontainer",name="testcontainer"} 41 -# HELP container_fs_sector_writes_total Cumulative count of sector writes completed -# TYPE container_fs_sector_writes_total counter -container_fs_sector_writes_total{device="sda1",id="testcontainer",name="testcontainer"} 40 -container_fs_sector_writes_total{device="sda2",id="testcontainer",name="testcontainer"} 45 -# HELP container_fs_usage_bytes Number of bytes that are consumed by the container on this filesystem. 
-# TYPE container_fs_usage_bytes gauge -container_fs_usage_bytes{device="sda1",id="testcontainer",name="testcontainer"} 23 -container_fs_usage_bytes{device="sda2",id="testcontainer",name="testcontainer"} 38 -# HELP container_fs_write_seconds_total Cumulative count of seconds spent writing -# TYPE container_fs_write_seconds_total counter -container_fs_write_seconds_total{device="sda1",id="testcontainer",name="testcontainer"} 4.1e-08 -container_fs_write_seconds_total{device="sda2",id="testcontainer",name="testcontainer"} 4.6e-08 -# HELP container_fs_writes_merged_total Cumulative count of writes merged -# TYPE container_fs_writes_merged_total counter -container_fs_writes_merged_total{device="sda1",id="testcontainer",name="testcontainer"} 39 -container_fs_writes_merged_total{device="sda2",id="testcontainer",name="testcontainer"} 44 -# HELP container_fs_writes_total Cumulative count of writes completed -# TYPE container_fs_writes_total counter -container_fs_writes_total{device="sda1",id="testcontainer",name="testcontainer"} 28 -container_fs_writes_total{device="sda2",id="testcontainer",name="testcontainer"} 43 -# HELP container_last_seen Last time a container was seen by the exporter -# TYPE container_last_seen gauge -container_last_seen{id="testcontainer",name="testcontainer"} 1.426203694e+09 -# HELP container_memory_failures_total Cumulative count of memory allocation failures. -# TYPE container_memory_failures_total counter -container_memory_failures_total{id="testcontainer",name="testcontainer",scope="container",type="pgfault"} 10 -container_memory_failures_total{id="testcontainer",name="testcontainer",scope="container",type="pgmajfault"} 11 -container_memory_failures_total{id="testcontainer",name="testcontainer",scope="hierarchy",type="pgfault"} 12 -container_memory_failures_total{id="testcontainer",name="testcontainer",scope="hierarchy",type="pgmajfault"} 13 -# HELP container_memory_usage_bytes Current memory usage in bytes. -# TYPE container_memory_usage_bytes gauge -container_memory_usage_bytes{id="testcontainer",name="testcontainer"} 8 -# HELP container_memory_working_set_bytes Current working set in bytes. 
-# TYPE container_memory_working_set_bytes gauge -container_memory_working_set_bytes{id="testcontainer",name="testcontainer"} 9 -# HELP container_network_receive_bytes_total Cumulative count of bytes received -# TYPE container_network_receive_bytes_total counter -container_network_receive_bytes_total{id="testcontainer",name="testcontainer"} 14 -# HELP container_network_receive_errors_total Cumulative count of errors encountered while receiving -# TYPE container_network_receive_errors_total counter -container_network_receive_errors_total{id="testcontainer",name="testcontainer"} 16 -# HELP container_network_receive_packets_dropped_total Cumulative count of packets dropped while receiving -# TYPE container_network_receive_packets_dropped_total counter -container_network_receive_packets_dropped_total{id="testcontainer",name="testcontainer"} 17 -# HELP container_network_receive_packets_total Cumulative count of packets received -# TYPE container_network_receive_packets_total counter -container_network_receive_packets_total{id="testcontainer",name="testcontainer"} 15 -# HELP container_network_transmit_bytes_total Cumulative count of bytes transmitted -# TYPE container_network_transmit_bytes_total counter -container_network_transmit_bytes_total{id="testcontainer",name="testcontainer"} 18 -# HELP container_network_transmit_errors_total Cumulative count of errors encountered while transmitting -# TYPE container_network_transmit_errors_total counter -container_network_transmit_errors_total{id="testcontainer",name="testcontainer"} 20 -# HELP container_network_transmit_packets_dropped_total Cumulative count of packets dropped while transmitting -# TYPE container_network_transmit_packets_dropped_total counter -container_network_transmit_packets_dropped_total{id="testcontainer",name="testcontainer"} 21 -# HELP container_network_transmit_packets_total Cumulative count of packets transmitted -# TYPE container_network_transmit_packets_total counter -container_network_transmit_packets_total{id="testcontainer",name="testcontainer"} 19 -# HELP container_scrape_error 1 if there was an error while getting container metrics, 0 otherwise -# TYPE container_scrape_error gauge -container_scrape_error 0 -# HELP container_tasks_state Number of tasks in given state -# TYPE container_tasks_state gauge -container_tasks_state{id="testcontainer",name="testcontainer",state="iowaiting"} 54 -container_tasks_state{id="testcontainer",name="testcontainer",state="running"} 51 -container_tasks_state{id="testcontainer",name="testcontainer",state="sleeping"} 50 -container_tasks_state{id="testcontainer",name="testcontainer",state="stopped"} 52 -container_tasks_state{id="testcontainer",name="testcontainer",state="uninterruptible"} 53 -# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. -# TYPE http_request_duration_microseconds summary -http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 0 -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 0 -http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 0 -http_request_duration_microseconds_sum{handler="prometheus"} 0 -http_request_duration_microseconds_count{handler="prometheus"} 0 -# HELP http_request_size_bytes The HTTP request sizes in bytes. 
-# TYPE http_request_size_bytes summary -http_request_size_bytes{handler="prometheus",quantile="0.5"} 0 -http_request_size_bytes{handler="prometheus",quantile="0.9"} 0 -http_request_size_bytes{handler="prometheus",quantile="0.99"} 0 -http_request_size_bytes_sum{handler="prometheus"} 0 -http_request_size_bytes_count{handler="prometheus"} 0 -# HELP http_response_size_bytes The HTTP response sizes in bytes. -# TYPE http_response_size_bytes summary -http_response_size_bytes{handler="prometheus",quantile="0.5"} 0 -http_response_size_bytes{handler="prometheus",quantile="0.9"} 0 -http_response_size_bytes{handler="prometheus",quantile="0.99"} 0 -http_response_size_bytes_sum{handler="prometheus"} 0 -http_response_size_bytes_count{handler="prometheus"} 0 -# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds. -# TYPE process_cpu_seconds_total counter -process_cpu_seconds_total 0 -# HELP process_goroutines Number of goroutines that currently exist. -# TYPE process_goroutines gauge -process_goroutines 16 -# HELP process_max_fds Maximum number of open file descriptors. -# TYPE process_max_fds gauge -process_max_fds 1024 -# HELP process_open_fds Number of open file descriptors. -# TYPE process_open_fds gauge -process_open_fds 4 -# HELP process_resident_memory_bytes Resident memory size in bytes. -# TYPE process_resident_memory_bytes gauge -process_resident_memory_bytes 7.74144e+06 -# HELP process_start_time_seconds Start time of the process since unix epoch in seconds. -# TYPE process_start_time_seconds gauge -process_start_time_seconds 1.42620369439e+09 -# HELP process_virtual_memory_bytes Virtual memory size in bytes. -# TYPE process_virtual_memory_bytes gauge -process_virtual_memory_bytes 1.16420608e+08 diff --git a/Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/testdata/cpuinfo b/Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/testdata/cpuinfo deleted file mode 100644 index ca2b722a560..00000000000 --- a/Godeps/_workspace/src/github.com/google/cadvisor/utils/machine/testdata/cpuinfo +++ /dev/null @@ -1,251 +0,0 @@ -processor : 0 -cpu family : 6 -stepping : 2 -microcode : 0x10 -cpu MHz : 1596.000 -cache size : 12288 KB -physical id : 0 -siblings : 6 -core id : 0 -cpu cores : 6 -apicid : 0 -initial apicid : 0 -fpu : yes -fpu_exception : yes -cpuid level : 11 -wp : yes -bogomips : 5333.60 -clflush size : 64 -cache_alignment : 64 -address sizes : 40 bits physical, 48 bits virtual - -processor : 1 -cpu family : 6 -stepping : 2 -microcode : 0x10 -cpu MHz : 1596.000 -cache size : 12288 KB -physical id : 0 -siblings : 6 -core id : 1 -cpu cores : 6 -apicid : 2 -initial apicid : 2 -fpu : yes -fpu_exception : yes -cpuid level : 11 -wp : yes -bogomips : 5333.60 -clflush size : 64 -cache_alignment : 64 -address sizes : 40 bits physical, 48 bits virtual - -processor : 2 -cpu family : 6 -stepping : 2 -microcode : 0x10 -cpu MHz : 1596.000 -cache size : 12288 KB -physical id : 0 -siblings : 6 -core id : 2 -cpu cores : 6 -apicid : 4 -initial apicid : 4 -fpu : yes -fpu_exception : yes -cpuid level : 11 -wp : yes -bogomips : 5333.60 -clflush size : 64 -cache_alignment : 64 -address sizes : 40 bits physical, 48 bits virtual - -processor : 3 -cpu family : 6 -stepping : 2 -microcode : 0x10 -cpu MHz : 1596.000 -cache size : 12288 KB -physical id : 1 -siblings : 6 -core id : 3 -cpu cores : 6 -apicid : 16 -initial apicid : 16 -fpu : yes -fpu_exception : yes -cpuid level : 11 -wp : yes -bogomips : 5333.60 -clflush size : 64 -cache_alignment : 64 -address sizes : 40 
bits physical, 48 bits virtual - -processor : 4 -cpu family : 6 -stepping : 2 -microcode : 0x10 -cpu MHz : 1596.000 -cache size : 12288 KB -physical id : 1 -siblings : 6 -core id : 4 -cpu cores : 6 -apicid : 18 -initial apicid : 18 -fpu : yes -fpu_exception : yes -cpuid level : 11 -wp : yes -bogomips : 5333.60 -clflush size : 64 -cache_alignment : 64 -address sizes : 40 bits physical, 48 bits virtual - -processor : 5 -cpu family : 6 -stepping : 2 -microcode : 0x10 -cpu MHz : 1596.000 -cache size : 12288 KB -physical id : 1 -siblings : 6 -core id : 5 -cpu cores : 6 -apicid : 20 -initial apicid : 20 -fpu : yes -fpu_exception : yes -cpuid level : 11 -wp : yes -bogomips : 5333.60 -clflush size : 64 -cache_alignment : 64 -address sizes : 40 bits physical, 48 bits virtual - -processor : 6 -cpu family : 6 -stepping : 2 -microcode : 0x10 -cpu MHz : 2661.000 -cache size : 12288 KB -physical id : 0 -siblings : 6 -core id : 0 -cpu cores : 6 -apicid : 1 -initial apicid : 1 -fpu : yes -fpu_exception : yes -cpuid level : 11 -wp : yes -bogomips : 5333.60 -clflush size : 64 -cache_alignment : 64 -address sizes : 40 bits physical, 48 bits virtual - -processor : 7 -cpu family : 6 -stepping : 2 -microcode : 0x10 -cpu MHz : 2661.000 -cache size : 12288 KB -physical id : 0 -siblings : 6 -core id : 1 -cpu cores : 6 -apicid : 3 -initial apicid : 3 -fpu : yes -fpu_exception : yes -cpuid level : 11 -wp : yes -bogomips : 5333.60 -clflush size : 64 -cache_alignment : 64 -address sizes : 40 bits physical, 48 bits virtual - -processor : 8 -cpu family : 6 -stepping : 2 -microcode : 0x10 -cpu MHz : 1596.000 -cache size : 12288 KB -physical id : 0 -siblings : 6 -core id : 2 -cpu cores : 6 -apicid : 5 -initial apicid : 5 -fpu : yes -fpu_exception : yes -cpuid level : 11 -wp : yes -bogomips : 5333.60 -clflush size : 64 -cache_alignment : 64 -address sizes : 40 bits physical, 48 bits virtual - -processor : 9 -cpu family : 6 -stepping : 2 -microcode : 0x10 -cpu MHz : 2661.000 -cache size : 12288 KB -physical id : 1 -siblings : 6 -core id : 3 -cpu cores : 6 -apicid : 17 -initial apicid : 17 -fpu : yes -fpu_exception : yes -cpuid level : 11 -wp : yes -bogomips : 5333.60 -clflush size : 64 -cache_alignment : 64 -address sizes : 40 bits physical, 48 bits virtual - -processor : 10 -cpu family : 6 -stepping : 2 -microcode : 0x10 -cpu MHz : 1596.000 -cache size : 12288 KB -physical id : 1 -siblings : 6 -core id : 4 -cpu cores : 6 -apicid : 19 -initial apicid : 19 -fpu : yes -fpu_exception : yes -cpuid level : 11 -wp : yes -bogomips : 5333.60 -clflush size : 64 -cache_alignment : 64 -address sizes : 40 bits physical, 48 bits virtual -processor : 11 -cpu family : 6 -stepping : 2 -microcode : 0x10 -cpu MHz : 2661.000 -cache size : 12288 KB -physical id : 1 -siblings : 6 -core id : 5 -cpu cores : 6 -apicid : 21 -initial apicid : 21 -fpu : yes -fpu_exception : yes -cpuid level : 11 -wp : yes -bogomips : 5333.60 -clflush size : 64 -cache_alignment : 64 -address sizes : 40 bits physical, 48 bits virtual - diff --git a/README.md b/README.md index d94a9a2e831..8604bf4c5a1 100644 --- a/README.md +++ b/README.md @@ -101,6 +101,10 @@ Do you want to help "shape the evolution of technologies that are container pack You should consider joining the [Cloud Native Computing Foundation](https://cncf.io/about). For details about who's involved and how Kubernetes plays a role, read [their announcement](https://cncf.io/news/announcement/2015/07/new-cloud-native-computing-foundation-drive-alignment-among-container). 
+### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). + +#### Are you ready to add to the discussion? We have presence on: diff --git a/cluster/centos/.gitignore b/cluster/centos/.gitignore new file mode 100644 index 00000000000..c97ce235be4 --- /dev/null +++ b/cluster/centos/.gitignore @@ -0,0 +1,12 @@ +binaries + +master/bin/etcd +master/bin/etcdctl +master/bin/kube* + +node/bin/docker +node/bin/etcd +node/bin/etcdctl +node/bin/flanneld +node/bin/kube* +local-test.sh diff --git a/cluster/centos/build.sh b/cluster/centos/build.sh new file mode 100755 index 00000000000..55a675fad0d --- /dev/null +++ b/cluster/centos/build.sh @@ -0,0 +1,135 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Download the flannel, etcd, docker, bridge-utils and K8s binaries automatically +# and store them in the binaries directory. +# Must be run with sudo privileges. + +# author @kevin-wangzefeng + +set -o errexit +set -o nounset +set -o pipefail + +readonly ROOT=$(dirname "${BASH_SOURCE}") +source ${ROOT}/config-build.sh + +# ensure $RELEASES_DIR is an absolute file path +mkdir -p ${RELEASES_DIR} +RELEASES_DIR=$(cd ${RELEASES_DIR}; pwd) + +# get absolute file path of binaries +BINARY_DIR=$(cd ${ROOT}; pwd)/binaries + +function clean-up() { + rm -rf ${RELEASES_DIR} + rm -rf ${BINARY_DIR} +} + +function download-releases() { + rm -rf ${RELEASES_DIR} + mkdir -p ${RELEASES_DIR} + + echo "Download flannel release v${FLANNEL_VERSION} ..." + curl -L ${FLANNEL_DOWNLOAD_URL} -o ${RELEASES_DIR}/flannel.tar.gz + + echo "Download etcd release v${ETCD_VERSION} ..." + curl -L ${ETCD_DOWNLOAD_URL} -o ${RELEASES_DIR}/etcd.tar.gz + + echo "Download kubernetes release v${K8S_VERSION} ..." + curl -L ${K8S_DOWNLOAD_URL} -o ${RELEASES_DIR}/kubernetes.tar.gz + + echo "Download docker-latest ..."
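+ # The docker binary is saved as ${RELEASES_DIR}/docker; unpack-releases below copies it into node/bin.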
+ curl -L https://get.docker.com/builds/Linux/x86_64/docker-latest -o ${RELEASES_DIR}/docker +} + +function unpack-releases() { + rm -rf ${BINARY_DIR} + mkdir -p ${BINARY_DIR}/master/bin + mkdir -p ${BINARY_DIR}/node/bin + + # flannel + if [[ -f ${RELEASES_DIR}/flannel.tar.gz ]] ; then + tar xzf ${RELEASES_DIR}/flannel.tar.gz -C ${RELEASES_DIR} + cp ${RELEASES_DIR}/flannel-${FLANNEL_VERSION}/flanneld ${BINARY_DIR}/master/bin + cp ${RELEASES_DIR}/flannel-${FLANNEL_VERSION}/flanneld ${BINARY_DIR}/node/bin + fi + + # etcd + if [[ -f ${RELEASES_DIR}/etcd.tar.gz ]] ; then + tar xzf ${RELEASES_DIR}/etcd.tar.gz -C ${RELEASES_DIR} + ETCD="etcd-v${ETCD_VERSION}-linux-amd64" + cp ${RELEASES_DIR}/$ETCD/etcd \ + ${RELEASES_DIR}/$ETCD/etcdctl ${BINARY_DIR}/master/bin + cp ${RELEASES_DIR}/$ETCD/etcd \ + ${RELEASES_DIR}/$ETCD/etcdctl ${BINARY_DIR}/node/bin + fi + + # k8s + if [[ -f ${RELEASES_DIR}/kubernetes.tar.gz ]] ; then + tar xzf ${RELEASES_DIR}/kubernetes.tar.gz -C ${RELEASES_DIR} + + pushd ${RELEASES_DIR}/kubernetes/server + tar xzf kubernetes-server-linux-amd64.tar.gz + popd + cp ${RELEASES_DIR}/kubernetes/server/kubernetes/server/bin/kube-apiserver \ + ${RELEASES_DIR}/kubernetes/server/kubernetes/server/bin/kube-controller-manager \ + ${RELEASES_DIR}/kubernetes/server/kubernetes/server/bin/kube-scheduler ${BINARY_DIR}/master/bin + + cp ${RELEASES_DIR}/kubernetes/server/kubernetes/server/bin/kubelet \ + ${RELEASES_DIR}/kubernetes/server/kubernetes/server/bin/kube-proxy ${BINARY_DIR}/node/bin + + cp ${RELEASES_DIR}/kubernetes/server/kubernetes/server/bin/kubectl ${BINARY_DIR} + fi + + if [[ -f ${RELEASES_DIR}/docker ]]; then + cp ${RELEASES_DIR}/docker ${BINARY_DIR}/node/bin + fi + + chmod -R +x ${BINARY_DIR} + echo "Done! All binaries are stored in ${BINARY_DIR}" +} + +function parse-opt() { + local opt=${1-} + + case $opt in + download) + download-releases + ;; + unpack) + unpack-releases + ;; + clean) + clean-up + ;; + all) + download-releases + unpack-releases + ;; + *) + echo "Usage:" + echo "  build.sh <command>" + echo "Commands:" + echo " clean Clean up downloaded releases and unpacked binaries." + echo " download Download releases to \"${RELEASES_DIR}\"." + echo " unpack Unpack releases downloaded in \"${RELEASES_DIR}\", and copy binaries to \"${BINARY_DIR}\"." + echo " all Download releases and unpack them." + ;; + esac +} + +parse-opt $@ diff --git a/cluster/centos/config-build.sh b/cluster/centos/config-build.sh new file mode 100755 index 00000000000..879a7e0cad3 --- /dev/null +++ b/cluster/centos/config-build.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +## Contains configuration values for downloading and unpacking the binaries. + +# Directory to store release packages that will be downloaded. +RELEASES_DIR=${RELEASES_DIR:-/tmp/downloads} + +# Define flannel version to use. +FLANNEL_VERSION=${FLANNEL_VERSION:-"0.5.3"} + +# Define etcd version to use.
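+# (Each of these version defaults can be overridden from the environment when invoking build.sh, e.g. ETCD_VERSION=2.0.13 ./build.sh all — the value here is only illustrative.)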
+ETCD_VERSION=${ETCD_VERSION:-"2.0.12"} + +# Define k8s version to use. +K8S_VERSION=${K8S_VERSION:-"1.0.4"} + +FLANNEL_DOWNLOAD_URL=\ +"https://github.com/coreos/flannel/releases/download/v${FLANNEL_VERSION}/flannel-${FLANNEL_VERSION}-linux-amd64.tar.gz" + +ETCD_DOWNLOAD_URL=\ +"https://github.com/coreos/etcd/releases/download/v${ETCD_VERSION}/etcd-v${ETCD_VERSION}-linux-amd64.tar.gz" + +K8S_DOWNLOAD_URL=\ +"https://github.com/kubernetes/kubernetes/releases/download/v${K8S_VERSION}/kubernetes.tar.gz" diff --git a/cluster/centos/config-default.sh b/cluster/centos/config-default.sh new file mode 100755 index 00000000000..beb0841bf78 --- /dev/null +++ b/cluster/centos/config-default.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +## Contains configuration values for the CentOS cluster. +# The user should have sudo privileges. +export MASTER=${MASTER:-"centos@172.10.0.11"} +export MASTER_IP=${MASTER#*@} + +# Define all your minion nodes, separated by blank spaces, +# e.g. <user_1@ip_1> <user_2@ip_2>. +# The user should have sudo privileges. +export MINIONS=${MINIONS:-"centos@172.10.0.12 centos@172.10.0.13"} +# It is practically impossible to set an array as an environment variable +# from a script, so assume the variable is a string and convert it to an array. +export MINIONS_ARRAY=($MINIONS) + +# Number of nodes in your cluster. +export NUM_MINIONS=${NUM_MINIONS:-2} + +# By default, the cluster will use the etcd installed on the master. +export ETCD_SERVERS=${ETCD_SERVERS:-"http://$MASTER_IP:4001"} + +# Define the IP range used for service cluster IPs. +# Choose a private IP range here, per RFC 1918 (ref: https://tools.ietf.org/html/rfc1918). +export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-"192.168.3.0/24"} + +# Define the IP range used for the flannel overlay network; it should not conflict with SERVICE_CLUSTER_IP_RANGE above. +export FLANNEL_NET=${FLANNEL_NET:-"172.16.0.0/16"} + +# Admission controllers to invoke prior to persisting objects in the cluster. +export ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,ResourceQuota,SecurityContextDeny + +# Extra options to set on the Docker command line. +# This is useful for setting --insecure-registry for local registries. +export DOCKER_OPTS=${DOCKER_OPTS:-""} + + +# Timeouts for process checking on master and minion +export PROCESS_CHECK_TIMEOUT=${PROCESS_CHECK_TIMEOUT:-180} # seconds. diff --git a/cluster/centos/master/scripts/apiserver.sh b/cluster/centos/master/scripts/apiserver.sh new file mode 100755 index 00000000000..53c4e5fcce3 --- /dev/null +++ b/cluster/centos/master/scripts/apiserver.sh @@ -0,0 +1,102 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +MASTER_ADDRESS=${1:-"8.8.8.18"} +ETCD_SERVERS=${2:-"http://8.8.8.18:4001"} +SERVICE_CLUSTER_IP_RANGE=${3:-"10.10.10.0/24"} +ADMISSION_CONTROL=${4:-""} + +cat <<EOF >/opt/kubernetes/cfg/kube-apiserver +# --logtostderr=true: log to standard error instead of files +KUBE_LOGTOSTDERR="--logtostderr=true" + +# --v=0: log level for V logs +KUBE_LOG_LEVEL="--v=4" + +# --etcd-servers=[]: List of etcd servers to watch (http://ip:port), +# comma separated. Mutually exclusive with -etcd-config +KUBE_ETCD_SERVERS="--etcd-servers=${ETCD_SERVERS}" + +# --address=127.0.0.1: DEPRECATED: see --insecure-bind-address instead +KUBE_API_ADDRESS="--address=${MASTER_ADDRESS}" + +# --port=8080: DEPRECATED: see --insecure-port instead +KUBE_API_PORT="--port=8080" + +# --kubelet-port=10250: Kubelet port +MINION_PORT="--kubelet-port=10250" + +# --allow-privileged=false: If true, allow privileged containers. +KUBE_ALLOW_PRIV="--allow-privileged=false" + +# --service-cluster-ip-range=: A CIDR notation IP range from which to assign service cluster IPs. +# This must not overlap with any IP ranges assigned to nodes for pods. +KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" + +# --admission-control="AlwaysAdmit": Ordered list of plug-ins +# to do admission control of resources into cluster. +# Comma-delimited list of: +# LimitRanger, AlwaysDeny, SecurityContextDeny, NamespaceExists, +# NamespaceLifecycle, NamespaceAutoProvision, DenyExecOnPrivileged, +# AlwaysAdmit, ServiceAccount, ResourceQuota +#KUBE_ADMISSION_CONTROL="--admission-control=\"${ADMISSION_CONTROL}\"" + +# --client-ca-file="": If set, any request presenting a client certificate signed +# by one of the authorities in the client-ca-file is authenticated with an identity +# corresponding to the CommonName of the client certificate. +KUBE_API_CLIENT_CA_FILE="--client-ca-file=/srv/kubernetes/ca.crt" + +# --tls-cert-file="": File containing x509 Certificate for HTTPS. (CA cert, if any, +# concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file +# and --tls-private-key-file are not provided, a self-signed certificate and key are +# generated for the public address and saved to /var/run/kubernetes. +KUBE_API_TLS_CERT_FILE="--tls-cert-file=/srv/kubernetes/server.cert" + +# --tls-private-key-file="": File containing x509 private key matching --tls-cert-file.
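+# (The /srv/kubernetes cert and key referenced here are generated by make-ca-cert.sh when util.sh provisions the master.)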
+KUBE_API_TLS_PRIVATE_KEY_FILE="--tls-private-key-file=/srv/kubernetes/server.key" +EOF + +KUBE_APISERVER_OPTS=" \${KUBE_LOGTOSTDERR} \\ + \${KUBE_LOG_LEVEL} \\ + \${KUBE_ETCD_SERVERS} \\ + \${KUBE_API_ADDRESS} \\ + \${KUBE_API_PORT} \\ + \${MINION_PORT} \\ + \${KUBE_ALLOW_PRIV} \\ + \${KUBE_SERVICE_ADDRESSES} \\ + \${KUBE_API_CLIENT_CA_FILE} \\ + \${KUBE_API_TLS_CERT_FILE} \\ + \${KUBE_API_TLS_PRIVATE_KEY_FILE}" + + +cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service +[Unit] +Description=Kubernetes API Server +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver +ExecStart=/opt/kubernetes/bin/kube-apiserver ${KUBE_APISERVER_OPTS} +Restart=on-failure + +[Install] +WantedBy=multi-user.target +EOF + +systemctl daemon-reload +systemctl enable kube-apiserver +systemctl start kube-apiserver diff --git a/cluster/centos/master/scripts/controller-manager.sh b/cluster/centos/master/scripts/controller-manager.sh new file mode 100755 index 00000000000..b6fb216c8de --- /dev/null +++ b/cluster/centos/master/scripts/controller-manager.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +MASTER_ADDRESS=${1:-"8.8.8.18"} + +cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager +KUBE_LOGTOSTDERR="--logtostderr=true" +KUBE_LOG_LEVEL="--v=4" +KUBE_MASTER="--master=${MASTER_ADDRESS}:8080" + +# --root-ca-file="": If set, this root certificate authority will be included in +# service account's token secret. This must be a valid PEM-encoded CA bundle. +KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE="--root-ca-file=/srv/kubernetes/ca.crt" + +# --service-account-private-key-file="": Filename containing a PEM-encoded private +# RSA key used to sign service account tokens. +KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE="--service-account-private-key-file=/srv/kubernetes/server.key" +EOF + +KUBE_CONTROLLER_MANAGER_OPTS=" \${KUBE_LOGTOSTDERR} \\ + \${KUBE_LOG_LEVEL} \\ + \${KUBE_MASTER} \\ + \${KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE} \\ + \${KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE}" + +cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service +[Unit] +Description=Kubernetes Controller Manager +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager +ExecStart=/opt/kubernetes/bin/kube-controller-manager ${KUBE_CONTROLLER_MANAGER_OPTS} +Restart=on-failure + +[Install] +WantedBy=multi-user.target +EOF + +systemctl daemon-reload +systemctl enable kube-controller-manager +systemctl start kube-controller-manager diff --git a/cluster/centos/master/scripts/etcd.sh b/cluster/centos/master/scripts/etcd.sh new file mode 100755 index 00000000000..31c458538d8 --- /dev/null +++ b/cluster/centos/master/scripts/etcd.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +## Create etcd.conf, etcd.service, and start etcd service. + + +etcd_data_dir=/var/lib/etcd/ +mkdir -p ${etcd_data_dir} + +cat <<EOF >/opt/kubernetes/cfg/etcd.conf +# [member] +ETCD_NAME=default +ETCD_DATA_DIR="${etcd_data_dir}/default.etcd" +#ETCD_SNAPSHOT_COUNTER="10000" +#ETCD_HEARTBEAT_INTERVAL="100" +#ETCD_ELECTION_TIMEOUT="1000" +#ETCD_LISTEN_PEER_URLS="http://localhost:2380,http://localhost:7001" +ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:4001" +#ETCD_MAX_SNAPSHOTS="5" +#ETCD_MAX_WALS="5" +#ETCD_CORS="" +# +#[cluster] +#ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380,http://localhost:7001" +# if you use different ETCD_NAME (e.g. test), +# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..." +#ETCD_INITIAL_CLUSTER="default=http://localhost:2380,default=http://localhost:7001" +#ETCD_INITIAL_CLUSTER_STATE="new" +#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" +ETCD_ADVERTISE_CLIENT_URLS="http://localhost:2379,http://localhost:4001" +#ETCD_DISCOVERY="" +#ETCD_DISCOVERY_SRV="" +#ETCD_DISCOVERY_FALLBACK="proxy" +#ETCD_DISCOVERY_PROXY="" +# +#[proxy] +#ETCD_PROXY="off" +# +#[security] +#ETCD_CA_FILE="" +#ETCD_CERT_FILE="" +#ETCD_KEY_FILE="" +#ETCD_PEER_CA_FILE="" +#ETCD_PEER_CERT_FILE="" +#ETCD_PEER_KEY_FILE="" +EOF + +cat <<EOF >/usr/lib/systemd/system/etcd.service +[Unit] +Description=Etcd Server +After=network.target + +[Service] +Type=simple +WorkingDirectory=${etcd_data_dir} +EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf +# set GOMAXPROCS to number of processors +ExecStart=/bin/bash -c "GOMAXPROCS=\$(nproc) /opt/kubernetes/bin/etcd" + +[Install] +WantedBy=multi-user.target +EOF + +systemctl daemon-reload +systemctl enable etcd +systemctl start etcd diff --git a/cluster/centos/master/scripts/scheduler.sh b/cluster/centos/master/scripts/scheduler.sh new file mode 100755 index 00000000000..beafd8c9278 --- /dev/null +++ b/cluster/centos/master/scripts/scheduler.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +MASTER_ADDRESS=${1:-"8.8.8.18"} + +cat <<EOF >/opt/kubernetes/cfg/kube-scheduler +### +# kubernetes scheduler config + +# --logtostderr=true: log to standard error instead of files +KUBE_LOGTOSTDERR="--logtostderr=true" + +# --v=0: log level for V logs +KUBE_LOG_LEVEL="--v=4" + +KUBE_MASTER="--master=${MASTER_ADDRESS}:8080" + +# Add your own!
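+# e.g. KUBE_SCHEDULER_ARGS="--address=127.0.0.1" (an illustrative example only; any extra kube-scheduler flags can be passed through here)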
+KUBE_SCHEDULER_ARGS="" + +EOF + +KUBE_SCHEDULER_OPTS=" \${KUBE_LOGTOSTDERR} \\ + \${KUBE_LOG_LEVEL} \\ + \${KUBE_MASTER} \\ + \${KUBE_SCHEDULER_ARGS}" + +cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service +[Unit] +Description=Kubernetes Scheduler +Documentation=https://github.com/GoogleCloudPlatform/kubernetes + +[Service] +EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler +ExecStart=/opt/kubernetes/bin/kube-scheduler ${KUBE_SCHEDULER_OPTS} +Restart=on-failure + +[Install] +WantedBy=multi-user.target +EOF + +systemctl daemon-reload +systemctl enable kube-scheduler +systemctl start kube-scheduler diff --git a/cluster/centos/node/bin/mk-docker-opts.sh b/cluster/centos/node/bin/mk-docker-opts.sh new file mode 100755 index 00000000000..f3c50531bda --- /dev/null +++ b/cluster/centos/node/bin/mk-docker-opts.sh @@ -0,0 +1,108 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generate Docker daemon options based on flannel env file. + +# exit on any error +set -e + +usage() { + echo "$0 [-f FLANNEL-ENV-FILE] [-d DOCKER-ENV-FILE] [-i] [-c] [-m] [-k COMBINED-KEY] + +Generate Docker daemon options based on flannel env file +OPTIONS: + -f Path to flannel env file. Defaults to /run/flannel/subnet.env + -d Path to Docker env file to write to. Defaults to /run/docker_opts.env + -i Output each Docker option as individual var. e.g. DOCKER_OPT_MTU=1500 + -c Output combined Docker options into DOCKER_OPTS var + -k Set the combined options key to this value (default DOCKER_OPTS=) + -m Do not output --ip-masq (useful for older Docker version) +" >/dev/stderr + exit 1 +} + +flannel_env="/run/flannel/subnet.env" +docker_env="/run/docker_opts.env" +combined_opts_key="DOCKER_OPTS" +indiv_opts=false +combined_opts=false +ipmasq=true + +while getopts "f:d:icmk:" opt; do + case $opt in + f) + flannel_env=$OPTARG + ;; + d) + docker_env=$OPTARG + ;; + i) + indiv_opts=true + ;; + c) + combined_opts=true + ;; + m) + ipmasq=false + ;; + k) + combined_opts_key=$OPTARG + ;; + \?)
+ usage + ;; + esac +done + +if [[ $indiv_opts = false ]] && [[ $combined_opts = false ]]; then + indiv_opts=true + combined_opts=true +fi + +if [[ -f "$flannel_env" ]]; then + source $flannel_env +fi + +if [[ -n "$FLANNEL_SUBNET" ]]; then + DOCKER_OPT_BIP="--bip=$FLANNEL_SUBNET" +fi + +if [[ -n "$FLANNEL_MTU" ]]; then + DOCKER_OPT_MTU="--mtu=$FLANNEL_MTU" +fi + +if [[ "$FLANNEL_IPMASQ" = true ]] && [[ $ipmasq = true ]]; then + DOCKER_OPT_IPMASQ="--ip-masq=false" +fi + +eval docker_opts="\$${combined_opts_key}" +docker_opts+=" " + +echo -n "" >$docker_env +for opt in $(compgen -v DOCKER_OPT_); do + eval val=\$$opt + + if [[ "$indiv_opts" = true ]]; then + echo "$opt=\"$val\"" >>$docker_env + fi + + docker_opts+="$val " +done + +if [[ "$combined_opts" = true ]]; then + echo "${combined_opts_key}=\"${docker_opts}\"" >>$docker_env +fi + diff --git a/cluster/centos/node/bin/remove-docker0.sh b/cluster/centos/node/bin/remove-docker0.sh new file mode 100755 index 00000000000..31a90c50c79 --- /dev/null +++ b/cluster/centos/node/bin/remove-docker0.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Delete default docker bridge, so that docker can start with flannel network. + +# exit on any error +set -e + +rc=0 +ip link show docker0 >/dev/null 2>&1 || rc="$?" +if [[ "$rc" -eq "0" ]]; then + ip link set dev docker0 down + ip link delete docker0 +fi diff --git a/cluster/centos/node/scripts/docker.sh b/cluster/centos/node/scripts/docker.sh new file mode 100755 index 00000000000..5d1b6ebfd36 --- /dev/null +++ b/cluster/centos/node/scripts/docker.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
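+# Write the docker daemon config and systemd unit: docker starts after flannel and picks up the bridge/MTU options that mk-docker-opts.sh writes to /run/flannel/docker.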
+ + +DOCKER_OPTS=${1:-""} + +DOCKER_CONFIG=/opt/kubernetes/cfg/docker + +cat <<EOF >$DOCKER_CONFIG +DOCKER_OPTS="-H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock -s devicemapper --selinux-enabled=false ${DOCKER_OPTS}" +EOF + +cat <<EOF >/usr/lib/systemd/system/docker.service +[Unit] +Description=Docker Application Container Engine +Documentation=http://docs.docker.com +After=network.target flannel.service +Requires=flannel.service + +[Service] +Type=notify +EnvironmentFile=-/run/flannel/docker +EnvironmentFile=-/opt/kubernetes/cfg/docker +WorkingDirectory=/opt/kubernetes/bin +ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh +ExecStart=/opt/kubernetes/bin/docker daemon \$DOCKER_OPT_BIP \$DOCKER_OPT_MTU \$DOCKER_OPTS +LimitNOFILE=1048576 +LimitNPROC=1048576 + +[Install] +WantedBy=multi-user.target +EOF + +systemctl daemon-reload +systemctl enable docker +systemctl start docker diff --git a/cluster/centos/node/scripts/flannel.sh b/cluster/centos/node/scripts/flannel.sh new file mode 100755 index 00000000000..c19480d93a3 --- /dev/null +++ b/cluster/centos/node/scripts/flannel.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +ETCD_SERVERS=${1:-"http://8.8.8.18:4001"} +FLANNEL_NET=${2:-"172.16.0.0/16"} + + +cat <<EOF >/opt/kubernetes/cfg/flannel +FLANNEL_ETCD="-etcd-endpoints=${ETCD_SERVERS}" +FLANNEL_ETCD_KEY="-etcd-prefix=/coreos.com/network" +EOF + +cat <<EOF >/usr/lib/systemd/system/flannel.service +[Unit] +Description=Flanneld overlay address etcd agent +After=network.target +Before=docker.service + +[Service] +EnvironmentFile=-/opt/kubernetes/cfg/flannel +ExecStart=/opt/kubernetes/bin/flanneld \${FLANNEL_ETCD} \${FLANNEL_ETCD_KEY} +ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker + +Type=notify + +[Install] +WantedBy=multi-user.target +RequiredBy=docker.service +EOF + +# Store FLANNEL_NET to etcd. +attempt=0 +while true; do + /opt/kubernetes/bin/etcdctl --no-sync -C ${ETCD_SERVERS} \ + get /coreos.com/network/config >/dev/null 2>&1 + if [[ "$?" == 0 ]]; then + break + else + if (( attempt > 600 )); then + echo "timed out waiting for network config" > ~/kube/err.log + exit 2 + fi + + /opt/kubernetes/bin/etcdctl --no-sync -C ${ETCD_SERVERS} \ + mk /coreos.com/network/config "{\"Network\":\"${FLANNEL_NET}\"}" >/dev/null 2>&1 + attempt=$((attempt+1)) + sleep 3 + fi +done +wait + +systemctl daemon-reload \ No newline at end of file diff --git a/cluster/centos/node/scripts/kubelet.sh b/cluster/centos/node/scripts/kubelet.sh new file mode 100755 index 00000000000..c186c36939e --- /dev/null +++ b/cluster/centos/node/scripts/kubelet.sh @@ -0,0 +1,75 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +MASTER_ADDRESS=${1:-"8.8.8.18"} +NODE_ADDRESS=${2:-"8.8.8.20"} + + +cat <<EOF >/opt/kubernetes/cfg/kubelet +# --logtostderr=true: log to standard error instead of files +KUBE_LOGTOSTDERR="--logtostderr=true" + +# --v=0: log level for V logs +KUBE_LOG_LEVEL="--v=4" + +# --address=0.0.0.0: The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces) +MINION_ADDRESS="--address=${NODE_ADDRESS}" + +# --port=10250: The port for the Kubelet to serve on. Note that "kubectl logs" will not work if you set this flag. +MINION_PORT="--port=10250" + +# --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname. +MINION_HOSTNAME="--hostname-override=${NODE_ADDRESS}" + +# --api-servers=[]: List of Kubernetes API servers for publishing events, +# and reading pods and services. (ip:port), comma separated. +KUBELET_API_SERVER="--api-servers=${MASTER_ADDRESS}:8080" + +# --allow-privileged=false: If true, allow containers to request privileged mode. [default=false] +KUBE_ALLOW_PRIV="--allow-privileged=false" + +# Add your own! +KUBELET_ARGS="" +EOF + +KUBELET_OPTS=" \${KUBE_LOGTOSTDERR} \\ + \${KUBE_LOG_LEVEL} \\ + \${MINION_ADDRESS} \\ + \${MINION_PORT} \\ + \${MINION_HOSTNAME} \\ + \${KUBELET_API_SERVER} \\ + \${KUBE_ALLOW_PRIV} \\ + \${KUBELET_ARGS}" + +cat <<EOF >/usr/lib/systemd/system/kubelet.service +[Unit] +Description=Kubernetes Kubelet +After=docker.service +Requires=docker.service + +[Service] +EnvironmentFile=-/opt/kubernetes/cfg/kubelet +ExecStart=/opt/kubernetes/bin/kubelet ${KUBELET_OPTS} +Restart=on-failure + +[Install] +WantedBy=multi-user.target +EOF + +systemctl daemon-reload +systemctl enable kubelet +systemctl start kubelet \ No newline at end of file diff --git a/cluster/centos/node/scripts/proxy.sh b/cluster/centos/node/scripts/proxy.sh new file mode 100755 index 00000000000..5e4181f2c1b --- /dev/null +++ b/cluster/centos/node/scripts/proxy.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# Copyright 2014 The Kubernetes Authors All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
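+# Generate the kube-proxy config file and systemd unit on the node, then enable and start the service.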
+ + +MASTER_ADDRESS=${1:-"8.8.8.18"} + +cat <<EOF >/opt/kubernetes/cfg/kube-proxy +# --logtostderr=true: log to standard error instead of files +KUBE_LOGTOSTDERR="--logtostderr=true" + +# --v=0: log level for V logs +KUBE_LOG_LEVEL="--v=4" + +# --master="": The address of the Kubernetes API server (overrides any value in kubeconfig) +KUBE_MASTER="--master=http://${MASTER_ADDRESS}:8080" +EOF + +KUBE_PROXY_OPTS=" \${KUBE_LOGTOSTDERR} \\ + \${KUBE_LOG_LEVEL} \\ + \${KUBE_MASTER}" + +cat <<EOF >/usr/lib/systemd/system/kube-proxy.service +[Unit] +Description=Kubernetes Proxy +After=network.target + +[Service] +EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy +ExecStart=/opt/kubernetes/bin/kube-proxy ${KUBE_PROXY_OPTS} +Restart=on-failure + +[Install] +WantedBy=multi-user.target +EOF + +systemctl daemon-reload +systemctl enable kube-proxy +systemctl start kube-proxy \ No newline at end of file diff --git a/cluster/centos/util.sh b/cluster/centos/util.sh new file mode 100755 index 00000000000..9242d9b1904 --- /dev/null +++ b/cluster/centos/util.sh @@ -0,0 +1,315 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts. + +# exit on any error +set -e + +SSH_OPTS="-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oLogLevel=ERROR" + +# Use the config file specified in $KUBE_CONFIG_FILE, or default to +# config-default.sh. +KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. +readonly ROOT=$(dirname "${BASH_SOURCE}") +source "${ROOT}/${KUBE_CONFIG_FILE:-"config-default.sh"}" +source "$KUBE_ROOT/cluster/common.sh" + + +KUBECTL_PATH=${KUBE_ROOT}/cluster/centos/binaries/kubectl + +# Directory to be used for master and minion provisioning. +KUBE_TEMP="~/kube_temp" + + +# Must ensure that the following ENV vars are set +function detect-master() { + KUBE_MASTER=$MASTER + KUBE_MASTER_IP=${MASTER#*@} + echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2 + echo "KUBE_MASTER: ${MASTER}" 1>&2 +} + +# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[] +function detect-minions() { + KUBE_MINION_IP_ADDRESSES=() + for minion in ${MINIONS}; do + KUBE_MINION_IP_ADDRESSES+=("${minion#*@}") + done + echo "KUBE_MINION_IP_ADDRESSES: [${KUBE_MINION_IP_ADDRESSES[*]}]" 1>&2 +} + +# Verify prereqs on host machine +function verify-prereqs() { + local rc + rc=0 + ssh-add -L 1> /dev/null 2> /dev/null || rc="$?" + # "Could not open a connection to your authentication agent." + if [[ "${rc}" -eq 2 ]]; then + eval "$(ssh-agent)" > /dev/null + trap-add "kill ${SSH_AGENT_PID}" EXIT + fi + rc=0 + ssh-add -L 1> /dev/null 2> /dev/null || rc="$?" + # "The agent has no identities." + if [[ "${rc}" -eq 1 ]]; then + # Try adding one of the default identities, with or without passphrase. + ssh-add || true + fi + rc=0 + # Expect at least one identity to be available. + if !
ssh-add -L 1> /dev/null 2> /dev/null; then + echo "Could not find or add an SSH identity." + echo "Please start ssh-agent, add your identity, and retry." + exit 1 + fi +} + +# Install handler for signal trap +function trap-add { + local handler="$1" + local signal="${2-EXIT}" + local cur + + cur="$(eval "sh -c 'echo \$3' -- $(trap -p ${signal})")" + if [[ -n "${cur}" ]]; then + handler="${cur}; ${handler}" + fi + + trap "${handler}" ${signal} +} + +# Validate a kubernetes cluster +function validate-cluster() { + # by default call the generic validate-cluster.sh script, customizable by + # any cluster provider if this does not fit. + "${KUBE_ROOT}/cluster/validate-cluster.sh" +} + +# Instantiate a kubernetes cluster +function kube-up() { + provision-master + + for minion in ${MINIONS}; do + provision-minion ${minion} + done + + verify-master + for minion in ${MINIONS}; do + verify-minion ${minion} + done + + detect-master + + # set CONTEXT and KUBE_SERVER values for create-kubeconfig() and get-password() + export CONTEXT="centos" + export KUBE_SERVER="http://${KUBE_MASTER_IP}:8080" + source "${KUBE_ROOT}/cluster/common.sh" + + # set kubernetes user and password + get-password + create-kubeconfig +} + +# Delete a kubernetes cluster +function kube-down() { + tear-down-master + for minion in ${MINIONS}; do + tear-down-minion ${minion} + done +} + + +function verify-master() { + # verify master has all required daemons + printf "[INFO] Validating master ${MASTER}" + local -a required_daemon=("kube-apiserver" "kube-controller-manager" "kube-scheduler") + local validated="1" + local try_count=0 + until [[ "$validated" == "0" ]]; do + validated="0" + local daemon + for daemon in "${required_daemon[@]}"; do + local rc=0 + kube-ssh "${MASTER}" "sudo pgrep -f ${daemon}" >/dev/null 2>&1 || rc="$?" + if [[ "${rc}" -ne "0" ]]; then + printf "." + validated="1" + ((try_count=try_count+2)) + if [[ ${try_count} -gt ${PROCESS_CHECK_TIMEOUT} ]]; then + printf "\nWarning: Process \"${daemon}\" failed to run on ${MASTER}, please check.\n" + exit 1 + fi + sleep 2 + fi + done + done + printf "\n" + +} + +function verify-minion() { + # verify minion has all required daemons + printf "[INFO] Validating minion ${1}" + local -a required_daemon=("kube-proxy" "kubelet" "docker") + local validated="1" + local try_count=0 + until [[ "$validated" == "0" ]]; do + validated="0" + local daemon + for daemon in "${required_daemon[@]}"; do + local rc=0 + kube-ssh "${1}" "sudo pgrep -f ${daemon}" >/dev/null 2>&1 || rc="$?" + if [[ "${rc}" -ne "0" ]]; then + printf "." 
+ validated="1" + ((try_count=try_count+2)) + if [[ ${try_count} -gt ${PROCESS_CHECK_TIMEOUT} ]] ; then + printf "\nWarning: Process \"${daemon}\" failed to run on ${1}, please check.\n" + exit 1 + fi + sleep 2 + fi + done + done + printf "\n" +} + +# Clean up on master +function tear-down-master() { +echo "[INFO] tear-down-master on ${MASTER}" + for service_name in etcd kube-apiserver kube-controller-manager kube-scheduler ; do + service_file="/usr/lib/systemd/system/${service_name}.service" + kube-ssh "$MASTER" " \ + if [[ -f $service_file ]]; then \ + sudo systemctl stop $service_name; \ + sudo systemctl disable $service_name; \ + sudo rm -f $service_file; \ + fi" + done + kube-ssh "${MASTER}" "sudo rm -rf /opt/kubernetes" + kube-ssh "${MASTER}" "sudo rm -rf ${KUBE_TEMP}" + kube-ssh "${MASTER}" "sudo rm -rf /var/lib/etcd" +} + +# Clean up on minion +function tear-down-minion() { +echo "[INFO] tear-down-minion on $1" + for service_name in kube-proxy kubelet docker flannel ; do + service_file="/usr/lib/systemd/system/${service_name}.service" + kube-ssh "$1" " \ + if [[ -f $service_file ]]; then \ + sudo systemctl stop $service_name; \ + sudo systemctl disable $service_name; \ + sudo rm -f $service_file; \ + fi" + done + kube-ssh "$1" "sudo rm -rf /run/flannel" + kube-ssh "$1" "sudo rm -rf /opt/kubernetes" + kube-ssh "$1" "sudo rm -rf ${KUBE_TEMP}" +} + +# Provision master +# +# Assumed vars: +# MASTER +# KUBE_TEMP +# ETCD_SERVERS +# SERVICE_CLUSTER_IP_RANGE +function provision-master() { + echo "[INFO] Provision master on ${MASTER}" + local master_ip=${MASTER#*@} + ensure-setup-dir ${MASTER} + + # scp -r ${SSH_OPTS} master config-default.sh copy-files.sh util.sh "${MASTER}:${KUBE_TEMP}" + kube-scp ${MASTER} "${ROOT}/../saltbase/salt/generate-cert/make-ca-cert.sh ${ROOT}/binaries/master ${ROOT}/master ${ROOT}/config-default.sh ${ROOT}/util.sh" "${KUBE_TEMP}" + kube-ssh "${MASTER}" " \ + sudo cp -r ${KUBE_TEMP}/master/bin /opt/kubernetes; \ + sudo chmod -R +x /opt/kubernetes/bin; \ + sudo bash ${KUBE_TEMP}/make-ca-cert.sh ${master_ip} IP:${master_ip},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \ + sudo bash ${KUBE_TEMP}/master/scripts/etcd.sh; \ + sudo bash ${KUBE_TEMP}/master/scripts/apiserver.sh ${master_ip} ${ETCD_SERVERS} ${SERVICE_CLUSTER_IP_RANGE} ${ADMISSION_CONTROL}; \ + sudo bash ${KUBE_TEMP}/master/scripts/controller-manager.sh ${master_ip}; \ + sudo bash ${KUBE_TEMP}/master/scripts/scheduler.sh ${master_ip}" +} + + +# Provision minion +# +# Assumed vars: +# $1 (minion) +# MASTER +# KUBE_TEMP +# ETCD_SERVERS +# FLANNEL_NET +# DOCKER_OPTS +function provision-minion() { + echo "[INFO] Provision minion on $1" + local master_ip=${MASTER#*@} + local minion=$1 + local minion_ip=${minion#*@} + ensure-setup-dir ${minion} + + # scp -r ${SSH_OPTS} minion config-default.sh copy-files.sh util.sh "${minion_ip}:${KUBE_TEMP}" + kube-scp ${minion} "${ROOT}/binaries/node ${ROOT}/node ${ROOT}/config-default.sh ${ROOT}/util.sh" ${KUBE_TEMP} + kube-ssh "${minion}" " \ + sudo cp -r ${KUBE_TEMP}/node/bin /opt/kubernetes; \ + sudo chmod -R +x /opt/kubernetes/bin; \ + sudo bash ${KUBE_TEMP}/node/scripts/flannel.sh ${ETCD_SERVERS} ${FLANNEL_NET}; \ + sudo bash ${KUBE_TEMP}/node/scripts/docker.sh \"${DOCKER_OPTS}\"; \ + sudo bash ${KUBE_TEMP}/node/scripts/kubelet.sh ${master_ip} ${minion_ip}; \ + sudo bash ${KUBE_TEMP}/node/scripts/proxy.sh ${master_ip}" +} + +# Create dirs that'll be used during setup on 
target machine. +# +# Assumed vars: +# KUBE_TEMP +function ensure-setup-dir() { + kube-ssh "${1}" "mkdir -p ${KUBE_TEMP}; \ + sudo mkdir -p /opt/kubernetes/bin; \ + sudo mkdir -p /opt/kubernetes/cfg" +} + +# Run command over ssh +function kube-ssh() { + local host="$1" + shift + ssh ${SSH_OPTS} -t "${host}" "$@" >/dev/null 2>&1 +} + +# Copy file recursively over ssh +function kube-scp() { + local host="$1" + local src=($2) + local dst="$3" + scp -r ${SSH_OPTS} ${src[*]} "${host}:${dst}" +} + +# Ensure that we have a password created for validating to the master. Will +# read from kubeconfig if available. +# +# Vars set: +# KUBE_USER +# KUBE_PASSWORD +function get-password { + get-kubeconfig-basicauth + if [[ -z "${KUBE_USER}" || -z "${KUBE_PASSWORD}" ]]; then + KUBE_USER=admin + KUBE_PASSWORD=$(python -c 'import string,random; \ + print "".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16))') + fi +} diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh index ac537bf52f1..a45477a3e7d 100755 --- a/cluster/gce/config-test.sh +++ b/cluster/gce/config-test.sh @@ -58,7 +58,7 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}" TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}" KUBELET_TEST_ARGS="--max-pods=100 $TEST_CLUSTER_LOG_LEVEL" -APISERVER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}" +APISERVER_TEST_ARGS="--runtime-config=experimental/v1 ${TEST_CLUSTER_LOG_LEVEL}" CONTROLLER_MANAGER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}" SCHEDULER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}" KUBEPROXY_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}" diff --git a/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml b/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml index caf90526c26..c2105b29476 100644 --- a/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml +++ b/cluster/saltbase/salt/fluentd-es/fluentd-es.yaml @@ -11,7 +11,7 @@ spec: limits: cpu: 100m args: - - -qq + - -q volumeMounts: - name: varlog mountPath: /var/log diff --git a/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml b/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml index 6eae4c8f699..a7749b75801 100644 --- a/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml +++ b/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml @@ -19,6 +19,7 @@ spec: mountPath: /varlog - name: containers mountPath: /var/lib/docker/containers + readOnly: true terminationGracePeriodSeconds: 30 volumes: - name: varlog diff --git a/cmd/genconversion/conversion.go b/cmd/genconversion/conversion.go index 69dfbd08a2b..b89ecd5b5ce 100644 --- a/cmd/genconversion/conversion.go +++ b/cmd/genconversion/conversion.go @@ -30,7 +30,7 @@ import ( _ "k8s.io/kubernetes/pkg/expapi" _ "k8s.io/kubernetes/pkg/expapi/v1" pkg_runtime "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "github.com/golang/glog" flag "github.com/spf13/pflag" @@ -84,7 +84,7 @@ func main() { glog.Errorf("error while generating conversion functions for %v: %v", knownType, err) } } - generator.RepackImports(util.NewStringSet()) + generator.RepackImports(sets.NewString()) if err := generator.WriteImports(data); err != nil { glog.Fatalf("error while writing imports: %v", err) } diff --git a/cmd/gendeepcopy/deep_copy.go b/cmd/gendeepcopy/deep_copy.go index 7c10aa9fd5c..7cfb05d9332 100644 --- a/cmd/gendeepcopy/deep_copy.go +++ b/cmd/gendeepcopy/deep_copy.go @@ -30,7 +30,7 @@ import ( _ "k8s.io/kubernetes/pkg/expapi" _ "k8s.io/kubernetes/pkg/expapi/v1" pkg_runtime "k8s.io/kubernetes/pkg/runtime" - 
"k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "github.com/golang/glog" flag "github.com/spf13/pflag" @@ -80,7 +80,7 @@ func main() { } versionPath := path.Join(pkgBase, group, version) - generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), versionPath, util.NewStringSet("k8s.io/kubernetes")) + generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), versionPath, sets.NewString("k8s.io/kubernetes")) generator.AddImport(path.Join(pkgBase, "api")) if len(*overwrites) > 0 { diff --git a/cmd/integration/integration.go b/cmd/integration/integration.go index 1acd1a8cc8d..82d9357243b 100644 --- a/cmd/integration/integration.go +++ b/cmd/integration/integration.go @@ -55,6 +55,7 @@ import ( "k8s.io/kubernetes/pkg/master" "k8s.io/kubernetes/pkg/tools/etcdtest" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/volume/empty_dir" "k8s.io/kubernetes/plugin/pkg/admission/admit" @@ -204,7 +205,29 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string configFilePath := makeTempDirOrDie("config", testRootDir) glog.Infof("Using %s as root dir for kubelet #1", testRootDir) fakeDocker1.VersionInfo = docker.Env{"ApiVersion=1.15"} - kcfg := kubeletapp.SimpleKubelet(cl, &fakeDocker1, "localhost", testRootDir, firstManifestURL, "127.0.0.1", 10250, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface, configFilePath, nil, kubecontainer.FakeOS{}) + + kcfg := kubeletapp.SimpleKubelet( + cl, + &fakeDocker1, + "localhost", + testRootDir, + firstManifestURL, + "127.0.0.1", + 10250, /* KubeletPort */ + 0, /* ReadOnlyPort */ + api.NamespaceDefault, + empty_dir.ProbeVolumePlugins(), + nil, + cadvisorInterface, + configFilePath, + nil, + kubecontainer.FakeOS{}, + 1*time.Second, /* FileCheckFrequency */ + 1*time.Second, /* HTTPCheckFrequency */ + 10*time.Second, /* MinimumGCAge */ + 3*time.Second, /* NodeStatusUpdateFrequency */ + 10*time.Second /* SyncFrequency */) + kubeletapp.RunKubelet(kcfg, nil) // Kubelet (machine) // Create a second kubelet so that the guestbook example's two redis slaves both @@ -212,7 +235,29 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string testRootDir = makeTempDirOrDie("kubelet_integ_2.", "") glog.Infof("Using %s as root dir for kubelet #2", testRootDir) fakeDocker2.VersionInfo = docker.Env{"ApiVersion=1.15"} - kcfg = kubeletapp.SimpleKubelet(cl, &fakeDocker2, "127.0.0.1", testRootDir, secondManifestURL, "127.0.0.1", 10251, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface, "", nil, kubecontainer.FakeOS{}) + + kcfg = kubeletapp.SimpleKubelet( + cl, + &fakeDocker2, + "127.0.0.1", + testRootDir, + secondManifestURL, + "127.0.0.1", + 10251, /* KubeletPort */ + 0, /* ReadOnlyPort */ + api.NamespaceDefault, + empty_dir.ProbeVolumePlugins(), + nil, + cadvisorInterface, + "", + nil, + kubecontainer.FakeOS{}, + 1*time.Second, /* FileCheckFrequency */ + 1*time.Second, /* HTTPCheckFrequency */ + 10*time.Second, /* MinimumGCAge */ + 3*time.Second, /* NodeStatusUpdateFrequency */ + 10*time.Second /* SyncFrequency */) + kubeletapp.RunKubelet(kcfg, nil) return apiServer.URL, configFilePath } @@ -694,7 +739,7 @@ func runMasterServiceTest(client *client.Client) { glog.Fatalf("unexpected error listing services: %v", err) } var foundRW bool - found := util.StringSet{} + found := sets.String{} for i := range svcList.Items { found.Insert(svcList.Items[i].Name) if svcList.Items[i].Name == 
"kubernetes" { @@ -820,7 +865,7 @@ func runServiceTest(client *client.Client) { if err != nil { glog.Fatalf("Failed to list services across namespaces: %v", err) } - names := util.NewStringSet() + names := sets.NewString() for _, svc := range svcList.Items { names.Insert(fmt.Sprintf("%s/%s", svc.Namespace, svc.Name)) } @@ -967,7 +1012,7 @@ func main() { // Check that kubelet tried to make the containers. // Using a set to list unique creation attempts. Our fake is // really stupid, so kubelet tries to create these multiple times. - createdConts := util.StringSet{} + createdConts := sets.String{} for _, p := range fakeDocker1.Created { // The last 8 characters are random, so slice them off. if n := len(p); n > 8 { diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index f4d5cacdabf..5a18f73ace7 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -57,7 +57,7 @@ const ( // Set to a value larger than the timeouts in each watch server. ReadWriteTimeout = time.Minute * 60 //TODO: This can be tightened up. It still matches objects named watch or proxy. - defaultLongRunningRequestRE = "(/|^)((watch|proxy)(/|$)|(logs|portforward|exec)/?$)" + defaultLongRunningRequestRE = "(/|^)((watch|proxy)(/|$)|(logs?|portforward|exec|attach)/?$)" ) // APIServer runs a kubernetes api server. @@ -499,23 +499,24 @@ func (s *APIServer) Run(_ []string) error { } glog.Infof("Serving securely on %s", secureLocation) + if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" { + s.TLSCertFile = path.Join(s.CertDirectory, "apiserver.crt") + s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "apiserver.key") + // TODO (cjcullen): Is PublicAddress the right address to sign a cert with? + alternateIPs := []net.IP{config.ServiceReadWriteIP} + alternateDNS := []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"} + // It would be nice to set a fqdn subject alt name, but only the kubelets know, the apiserver is clueless + // alternateDNS = append(alternateDNS, "kubernetes.default.svc.CLUSTER.DNS.NAME") + if err := util.GenerateSelfSignedCert(config.PublicAddress.String(), s.TLSCertFile, s.TLSPrivateKeyFile, alternateIPs, alternateDNS); err != nil { + glog.Errorf("Unable to generate self signed cert: %v", err) + } else { + glog.Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile) + } + } + go func() { defer util.HandleCrash() for { - if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" { - s.TLSCertFile = path.Join(s.CertDirectory, "apiserver.crt") - s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "apiserver.key") - // TODO (cjcullen): Is PublicAddress the right address to sign a cert with? 
- alternateIPs := []net.IP{config.ServiceReadWriteIP} - alternateDNS := []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"} - // It would be nice to set a fqdn subject alt name, but only the kubelets know, the apiserver is clueless - // alternateDNS = append(alternateDNS, "kubernetes.default.svc.CLUSTER.DNS.NAME") - if err := util.GenerateSelfSignedCert(config.PublicAddress.String(), s.TLSCertFile, s.TLSPrivateKeyFile, alternateIPs, alternateDNS); err != nil { - glog.Errorf("Unable to generate self signed cert: %v", err) - } else { - glog.Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile) - } - } // err == systemd.SdNotifyNoSocket when not running on a systemd system if err := systemd.SdNotify("READY=1\n"); err != nil && err != systemd.SdNotifyNoSocket { glog.Errorf("Unable to send systemd daemon successful start message: %v\n", err) diff --git a/cmd/kube-apiserver/app/server_test.go b/cmd/kube-apiserver/app/server_test.go index 04ab483c5a6..c6bca513293 100644 --- a/cmd/kube-apiserver/app/server_test.go +++ b/cmd/kube-apiserver/app/server_test.go @@ -38,12 +38,16 @@ func TestLongRunningRequestRegexp(t *testing.T) { "/api/v1/watch/stuff", "/api/v1/default/service/proxy", "/api/v1/pods/proxy/path/to/thing", + "/api/v1/namespaces/myns/pods/mypod/log", "/api/v1/namespaces/myns/pods/mypod/logs", "/api/v1/namespaces/myns/pods/mypod/portforward", "/api/v1/namespaces/myns/pods/mypod/exec", + "/api/v1/namespaces/myns/pods/mypod/attach", + "/api/v1/namespaces/myns/pods/mypod/log/", "/api/v1/namespaces/myns/pods/mypod/logs/", "/api/v1/namespaces/myns/pods/mypod/portforward/", "/api/v1/namespaces/myns/pods/mypod/exec/", + "/api/v1/namespaces/myns/pods/mypod/attach/", "/api/v1/watch/namespaces/myns/pods", } for _, path := range dontMatch { diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go index 30d7fba666f..7a7e7aecd63 100644 --- a/cmd/kube-controller-manager/app/controllermanager.go +++ b/cmd/kube-controller-manager/app/controllermanager.go @@ -242,9 +242,16 @@ func (s *CMServer) Run(_ []string) error { resourceQuotaController := resourcequotacontroller.NewResourceQuotaController(kubeClient) resourceQuotaController.Run(s.ResourceQuotaSyncPeriod) - namespaceController := namespacecontroller.NewNamespaceController(kubeClient, s.NamespaceSyncPeriod) + // An OR of all flags to enable/disable experimental features + experimentalMode := s.EnableHorizontalPodAutoscaler + namespaceController := namespacecontroller.NewNamespaceController(kubeClient, experimentalMode, s.NamespaceSyncPeriod) namespaceController.Run() + if s.EnableHorizontalPodAutoscaler { + horizontalPodAutoscalerController := autoscalercontroller.New(kubeClient, metrics.NewHeapsterMetricsClient(kubeClient)) + horizontalPodAutoscalerController.Run(s.HorizontalPodAutoscalerSyncPeriod) + } + pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod) pvclaimBinder.Run() pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags)) @@ -287,15 +294,5 @@ func (s *CMServer) Run(_ []string) error { serviceaccount.DefaultServiceAccountsControllerOptions(), ).Run() - if s.EnableHorizontalPodAutoscaler { - expClient, err := client.NewExperimental(kubeconfig) - if err != nil { - glog.Fatalf("Invalid API configuration: %v", err) - } - horizontalPodAutoscalerController := autoscalercontroller.New(kubeClient, 
expClient, - metrics.NewHeapsterMetricsClient(kubeClient)) - horizontalPodAutoscalerController.Run(s.HorizontalPodAutoscalerSyncPeriod) - } - select {} } diff --git a/cmd/kube-proxy/app/server.go b/cmd/kube-proxy/app/server.go index 1036d118623..a7f98813fa3 100644 --- a/cmd/kube-proxy/app/server.go +++ b/cmd/kube-proxy/app/server.go @@ -165,12 +165,17 @@ func (s *ProxyServer) Run(_ []string) error { var proxier proxy.ProxyProvider var endpointsHandler config.EndpointsConfigHandler - // guaranteed false on error, error only necessary for debugging - shouldUseIptables, err := iptables.ShouldUseIptablesProxier() - if err != nil { - glog.Errorf("Can't determine whether to use iptables or userspace, using userspace proxier: %v", err) + shouldUseIptables := false + if !s.ForceUserspaceProxy { + var err error + // guaranteed false on error, error only necessary for debugging + shouldUseIptables, err = iptables.ShouldUseIptablesProxier() + if err != nil { + glog.Errorf("Can't determine whether to use iptables proxy, using userspace proxier: %v", err) + } } - if !s.ForceUserspaceProxy && shouldUseIptables { + + if shouldUseIptables { glog.V(2).Info("Using iptables Proxier.") execer := exec.New() diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 3eb671c148f..225d756f26a 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -66,73 +66,75 @@ const defaultRootDir = "/var/lib/kubelet" // KubeletServer encapsulates all of the parameters necessary for starting up // a kubelet. These can either be set via command line or directly. type KubeletServer struct { - Config string - SyncFrequency time.Duration - FileCheckFrequency time.Duration - HTTPCheckFrequency time.Duration - ManifestURL string - ManifestURLHeader string - EnableServer bool Address net.IP - Port uint - ReadOnlyPort uint - HostnameOverride string - PodInfraContainerImage string - DockerEndpoint string - RootDirectory string AllowPrivileged bool - HostNetworkSources string - RegistryPullQPS float64 - RegistryBurst int - RunOnce bool - EnableDebuggingHandlers bool - MinimumGCAge time.Duration - MaxPerPodContainerCount int - MaxContainerCount int - AuthPath util.StringFlag // Deprecated -- use KubeConfig instead - KubeConfig util.StringFlag - CadvisorPort uint - HealthzPort int - HealthzBindAddress net.IP - OOMScoreAdj int APIServerList []string - RegisterNode bool - StandaloneMode bool - ClusterDomain string - MasterServiceNamespace string + AuthPath util.StringFlag // Deprecated -- use KubeConfig instead + CadvisorPort uint + CertDirectory string + CgroupRoot string + CloudConfigFile string + CloudProvider string ClusterDNS net.IP - StreamingConnectionIdleTimeout time.Duration + ClusterDomain string + Config string + ConfigureCBR0 bool + ContainerRuntime string + CPUCFSQuota bool + DockerDaemonContainer string + DockerEndpoint string + DockerExecHandlerName string + EnableDebuggingHandlers bool + EnableServer bool + EventBurst int + EventRecordQPS float32 + FileCheckFrequency time.Duration + HealthzBindAddress net.IP + HealthzPort int + HostnameOverride string + HostNetworkSources string + HTTPCheckFrequency time.Duration ImageGCHighThresholdPercent int ImageGCLowThresholdPercent int + KubeConfig util.StringFlag LowDiskSpaceThresholdMB int - NetworkPluginName string + ManifestURL string + ManifestURLHeader string + MasterServiceNamespace string + MaxContainerCount int + MaxPerPodContainerCount int + MaxPods int + MinimumGCAge time.Duration NetworkPluginDir string - CloudProvider string - 
CloudConfigFile string + NetworkPluginName string + NodeStatusUpdateFrequency time.Duration + OOMScoreAdj int + PodCIDR string + PodInfraContainerImage string + Port uint + ReadOnlyPort uint + RegisterNode bool + RegistryBurst int + RegistryPullQPS float64 + ResolverConfig string + ResourceContainer string + RktPath string + RootDirectory string + RunOnce bool + StandaloneMode bool + StreamingConnectionIdleTimeout time.Duration + SyncFrequency time.Duration + SystemContainer string TLSCertFile string TLSPrivateKeyFile string - CertDirectory string - NodeStatusUpdateFrequency time.Duration - ResourceContainer string - CgroupRoot string - ContainerRuntime string - RktPath string - DockerDaemonContainer string - SystemContainer string - ConfigureCBR0 bool - PodCIDR string - MaxPods int - DockerExecHandlerName string - ResolverConfig string - CPUCFSQuota bool - // Flags intended for testing - // Crash immediately, rather than eating panics. - ReallyCrashForTesting bool - // Insert a probability of random errors during calls to the master. - ChaosChance float64 + // Flags intended for testing // Is the kubelet containerized? Containerized bool + // Insert a probability of random errors during calls to the master. + ChaosChance float64 + // Crash immediately, rather than eating panics. + ReallyCrashForTesting bool } // bootstrapping interface for kubelet, targets the initialization protocol @@ -151,45 +153,45 @@ type KubeletBuilder func(kc *KubeletConfig) (KubeletBootstrap, *config.PodConfig // NewKubeletServer will create a new KubeletServer with default values. func NewKubeletServer() *KubeletServer { return &KubeletServer{ - SyncFrequency: 10 * time.Second, - FileCheckFrequency: 20 * time.Second, - HTTPCheckFrequency: 20 * time.Second, - EnableServer: true, Address: net.ParseIP("0.0.0.0"), - Port: ports.KubeletPort, - ReadOnlyPort: ports.KubeletReadOnlyPort, - PodInfraContainerImage: dockertools.PodInfraContainerImage, - RootDirectory: defaultRootDir, - RegistryBurst: 10, - EnableDebuggingHandlers: true, - MinimumGCAge: 1 * time.Minute, - MaxPerPodContainerCount: 2, - MaxContainerCount: 100, AuthPath: util.NewStringFlag("/var/lib/kubelet/kubernetes_auth"), // deprecated - KubeConfig: util.NewStringFlag("/var/lib/kubelet/kubeconfig"), CadvisorPort: 4194, - HealthzPort: 10248, + CertDirectory: "/var/run/kubernetes", + CgroupRoot: "", + ConfigureCBR0: false, + ContainerRuntime: "docker", + CPUCFSQuota: false, + DockerDaemonContainer: "/docker-daemon", + DockerExecHandlerName: "native", + EnableDebuggingHandlers: true, + EnableServer: true, + FileCheckFrequency: 20 * time.Second, HealthzBindAddress: net.ParseIP("127.0.0.1"), - RegisterNode: true, // will be ignored if no apiserver is configured - OOMScoreAdj: qos.KubeletOomScoreAdj, - MasterServiceNamespace: api.NamespaceDefault, + HealthzPort: 10248, + HostNetworkSources: kubelet.FileSource, + HTTPCheckFrequency: 20 * time.Second, ImageGCHighThresholdPercent: 90, ImageGCLowThresholdPercent: 80, + KubeConfig: util.NewStringFlag("/var/lib/kubelet/kubeconfig"), LowDiskSpaceThresholdMB: 256, - NetworkPluginName: "", + MasterServiceNamespace: api.NamespaceDefault, + MaxContainerCount: 100, + MaxPerPodContainerCount: 2, + MinimumGCAge: 1 * time.Minute, NetworkPluginDir: "/usr/libexec/kubernetes/kubelet-plugins/net/exec/", - HostNetworkSources: kubelet.FileSource, - CertDirectory: "/var/run/kubernetes", + NetworkPluginName: "", NodeStatusUpdateFrequency: 10 * time.Second, - ResourceContainer: "/kubelet", - CgroupRoot: "", - ContainerRuntime: "docker", 
- RktPath: "", - DockerDaemonContainer: "/docker-daemon", - SystemContainer: "", - ConfigureCBR0: false, - DockerExecHandlerName: "native", - CPUCFSQuota: false, + OOMScoreAdj: qos.KubeletOomScoreAdj, + PodInfraContainerImage: dockertools.PodInfraContainerImage, + Port: ports.KubeletPort, + ReadOnlyPort: ports.KubeletReadOnlyPort, + RegisterNode: true, // will be ignored if no apiserver is configured + RegistryBurst: 10, + ResourceContainer: "/kubelet", + RktPath: "", + RootDirectory: defaultRootDir, + SyncFrequency: 10 * time.Second, + SystemContainer: "", } } @@ -220,6 +222,8 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.HostNetworkSources, "host-network-sources", s.HostNetworkSources, "Comma-separated list of sources from which the Kubelet allows pods to use of host network. For all sources use \"*\" [default=\"file\"]") fs.Float64Var(&s.RegistryPullQPS, "registry-qps", s.RegistryPullQPS, "If > 0, limit registry pull QPS to this value. If 0, unlimited. [default=0.0]") fs.IntVar(&s.RegistryBurst, "registry-burst", s.RegistryBurst, "Maximum size of a bursty pulls, temporarily allows pulls to burst to this number, while still not exceeding registry-qps. Only used if --registry-qps > 0") + fs.Float32Var(&s.EventRecordQPS, "event-qps", s.EventRecordQPS, "If > 0, limit event creations per second to this value. If 0, unlimited. [default=0.0]") + fs.IntVar(&s.EventBurst, "event-burst", s.EventBurst, "Maximum size of a bursty event records, temporarily allows event records to burst to this number, while still not exceeding event-qps. Only used if --event-qps > 0") fs.BoolVar(&s.RunOnce, "runonce", s.RunOnce, "If true, exit after spawning pods from local manifests or remote urls. Exclusive with --api-servers, and --enable-server") fs.BoolVar(&s.EnableDebuggingHandlers, "enable-debugging-handlers", s.EnableDebuggingHandlers, "Enables server endpoints for log collection and local running of containers and commands") fs.DurationVar(&s.MinimumGCAge, "minimum-container-ttl-duration", s.MinimumGCAge, "Minimum age for a finished container before it is garbage collected. 
Examples: '300ms', '10s' or '2h45m'") @@ -313,58 +317,61 @@ func (s *KubeletServer) KubeletConfig() (*KubeletConfig, error) { } return &KubeletConfig{ - Address: s.Address, - AllowPrivileged: s.AllowPrivileged, - HostNetworkSources: hostNetworkSources, - HostnameOverride: s.HostnameOverride, - RootDirectory: s.RootDirectory, - ConfigFile: s.Config, - ManifestURL: s.ManifestURL, - ManifestURLHeader: manifestURLHeader, - FileCheckFrequency: s.FileCheckFrequency, - HTTPCheckFrequency: s.HTTPCheckFrequency, - PodInfraContainerImage: s.PodInfraContainerImage, - SyncFrequency: s.SyncFrequency, - RegistryPullQPS: s.RegistryPullQPS, - RegistryBurst: s.RegistryBurst, - MinimumGCAge: s.MinimumGCAge, - MaxPerPodContainerCount: s.MaxPerPodContainerCount, - MaxContainerCount: s.MaxContainerCount, - RegisterNode: s.RegisterNode, - StandaloneMode: (len(s.APIServerList) == 0), - ClusterDomain: s.ClusterDomain, - ClusterDNS: s.ClusterDNS, - Runonce: s.RunOnce, + Address: s.Address, + AllowPrivileged: s.AllowPrivileged, + CadvisorInterface: nil, // launches background processes, not set here + CgroupRoot: s.CgroupRoot, + Cloud: nil, // cloud provider might start background processes + ClusterDNS: s.ClusterDNS, + ClusterDomain: s.ClusterDomain, + ConfigFile: s.Config, + ConfigureCBR0: s.ConfigureCBR0, + ContainerRuntime: s.ContainerRuntime, + CPUCFSQuota: s.CPUCFSQuota, + DiskSpacePolicy: diskSpacePolicy, + DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint), + DockerDaemonContainer: s.DockerDaemonContainer, + DockerExecHandler: dockerExecHandler, + EnableDebuggingHandlers: s.EnableDebuggingHandlers, + EnableServer: s.EnableServer, + EventBurst: s.EventBurst, + EventRecordQPS: s.EventRecordQPS, + FileCheckFrequency: s.FileCheckFrequency, + HostnameOverride: s.HostnameOverride, + HostNetworkSources: hostNetworkSources, + HTTPCheckFrequency: s.HTTPCheckFrequency, + ImageGCPolicy: imageGCPolicy, + KubeClient: nil, + ManifestURL: s.ManifestURL, + ManifestURLHeader: manifestURLHeader, + MasterServiceNamespace: s.MasterServiceNamespace, + MaxContainerCount: s.MaxContainerCount, + MaxPerPodContainerCount: s.MaxPerPodContainerCount, + MaxPods: s.MaxPods, + MinimumGCAge: s.MinimumGCAge, + Mounter: mounter, + NetworkPluginName: s.NetworkPluginName, + NetworkPlugins: ProbeNetworkPlugins(s.NetworkPluginDir), + NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency, + OSInterface: kubecontainer.RealOS{}, + PodCIDR: s.PodCIDR, + PodInfraContainerImage: s.PodInfraContainerImage, Port: s.Port, ReadOnlyPort: s.ReadOnlyPort, - CadvisorInterface: nil, // launches background processes, not set here - EnableServer: s.EnableServer, - EnableDebuggingHandlers: s.EnableDebuggingHandlers, - DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint), - KubeClient: nil, - MasterServiceNamespace: s.MasterServiceNamespace, - VolumePlugins: ProbeVolumePlugins(), - NetworkPlugins: ProbeNetworkPlugins(s.NetworkPluginDir), - NetworkPluginName: s.NetworkPluginName, + RegisterNode: s.RegisterNode, + RegistryBurst: s.RegistryBurst, + RegistryPullQPS: s.RegistryPullQPS, + ResolverConfig: s.ResolverConfig, + ResourceContainer: s.ResourceContainer, + RktPath: s.RktPath, + RootDirectory: s.RootDirectory, + Runonce: s.RunOnce, + StandaloneMode: (len(s.APIServerList) == 0), StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout, + SyncFrequency: s.SyncFrequency, + SystemContainer: s.SystemContainer, TLSOptions: tlsOptions, - ImageGCPolicy: imageGCPolicy, - DiskSpacePolicy: diskSpacePolicy, - Cloud: nil, // cloud provider 
might start background processes - NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency, - ResourceContainer: s.ResourceContainer, - CgroupRoot: s.CgroupRoot, - ContainerRuntime: s.ContainerRuntime, - RktPath: s.RktPath, - Mounter: mounter, - DockerDaemonContainer: s.DockerDaemonContainer, - SystemContainer: s.SystemContainer, - ConfigureCBR0: s.ConfigureCBR0, - PodCIDR: s.PodCIDR, - MaxPods: s.MaxPods, - DockerExecHandler: dockerExecHandler, - ResolverConfig: s.ResolverConfig, - CPUCFSQuota: s.CPUCFSQuota, + VolumePlugins: ProbeVolumePlugins(), }, nil } @@ -554,13 +561,15 @@ func SimpleKubelet(client *client.Client, dockerClient dockertools.DockerInterface, hostname, rootDir, manifestURL, address string, port uint, + readOnlyPort uint, masterServiceNamespace string, volumePlugins []volume.VolumePlugin, tlsOptions *kubelet.TLSOptions, cadvisorInterface cadvisor.Interface, configFilePath string, cloud cloudprovider.Interface, - osInterface kubecontainer.OSInterface) *KubeletConfig { + osInterface kubecontainer.OSInterface, + fileCheckFrequency, httpCheckFrequency, minimumGCAge, nodeStatusUpdateFrequency, syncFrequency time.Duration) *KubeletConfig { imageGCPolicy := kubelet.ImageGCPolicy{ HighThresholdPercent: 90, @@ -571,43 +580,44 @@ func SimpleKubelet(client *client.Client, RootFreeDiskMB: 256, } kcfg := KubeletConfig{ - KubeClient: client, - DockerClient: dockerClient, - HostnameOverride: hostname, - RootDirectory: rootDir, - ManifestURL: manifestURL, - PodInfraContainerImage: dockertools.PodInfraContainerImage, - Port: port, - Address: net.ParseIP(address), - EnableServer: true, - EnableDebuggingHandlers: true, - HTTPCheckFrequency: 1 * time.Second, - FileCheckFrequency: 1 * time.Second, - SyncFrequency: 3 * time.Second, - MinimumGCAge: 10 * time.Second, - MaxPerPodContainerCount: 2, - MaxContainerCount: 100, - RegisterNode: true, - MasterServiceNamespace: masterServiceNamespace, - VolumePlugins: volumePlugins, - TLSOptions: tlsOptions, - CadvisorInterface: cadvisorInterface, - ConfigFile: configFilePath, - ImageGCPolicy: imageGCPolicy, - DiskSpacePolicy: diskSpacePolicy, - Cloud: cloud, - NodeStatusUpdateFrequency: 10 * time.Second, - ResourceContainer: "/kubelet", - OSInterface: osInterface, + Address: net.ParseIP(address), + CadvisorInterface: cadvisorInterface, CgroupRoot: "", + Cloud: cloud, + ConfigFile: configFilePath, ContainerRuntime: "docker", - Mounter: mount.New(), - DockerDaemonContainer: "/docker-daemon", - SystemContainer: "", - MaxPods: 32, - DockerExecHandler: &dockertools.NativeExecHandler{}, - ResolverConfig: kubelet.ResolvConfDefault, CPUCFSQuota: false, + DiskSpacePolicy: diskSpacePolicy, + DockerClient: dockerClient, + DockerDaemonContainer: "/docker-daemon", + DockerExecHandler: &dockertools.NativeExecHandler{}, + EnableDebuggingHandlers: true, + EnableServer: true, + FileCheckFrequency: fileCheckFrequency, + HostnameOverride: hostname, + HTTPCheckFrequency: httpCheckFrequency, + ImageGCPolicy: imageGCPolicy, + KubeClient: client, + ManifestURL: manifestURL, + MasterServiceNamespace: masterServiceNamespace, + MaxContainerCount: 100, + MaxPerPodContainerCount: 2, + MaxPods: 32, + MinimumGCAge: minimumGCAge, + Mounter: mount.New(), + NodeStatusUpdateFrequency: nodeStatusUpdateFrequency, + OSInterface: osInterface, + PodInfraContainerImage: dockertools.PodInfraContainerImage, + Port: port, + ReadOnlyPort: readOnlyPort, + RegisterNode: true, + ResolverConfig: kubelet.ResolvConfDefault, + ResourceContainer: "/kubelet", + RootDirectory: rootDir, + SyncFrequency: 
syncFrequency, + SystemContainer: "", + TLSOptions: tlsOptions, + VolumePlugins: volumePlugins, } return &kcfg } @@ -646,7 +656,13 @@ func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error { eventBroadcaster.StartLogging(glog.V(3).Infof) if kcfg.KubeClient != nil { glog.V(4).Infof("Sending events to api server.") - eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events("")) + if kcfg.EventRecordQPS == 0.0 { + eventBroadcaster.StartRecordingToSink(kcfg.KubeClient.Events("")) + } else { + eventClient := *kcfg.KubeClient + eventClient.Throttle = util.NewTokenBucketRateLimiter(kcfg.EventRecordQPS, kcfg.EventBurst) + eventBroadcaster.StartRecordingToSink(eventClient.Events("")) + } } else { glog.Warning("No api server defined - no events will be sent to API server.") } @@ -723,62 +739,64 @@ func makePodSourceConfig(kc *KubeletConfig) *config.PodConfig { // KubeletConfig is all of the parameters necessary for running a kubelet. // TODO: This should probably be merged with KubeletServer. The extra object is a consequence of refactoring. type KubeletConfig struct { - KubeClient *client.Client - DockerClient dockertools.DockerInterface - CadvisorInterface cadvisor.Interface Address net.IP AllowPrivileged bool - HostNetworkSources []string - HostnameOverride string - RootDirectory string + CadvisorInterface cadvisor.Interface + CgroupRoot string + Cloud cloudprovider.Interface + ClusterDNS net.IP + ClusterDomain string ConfigFile string + ConfigureCBR0 bool + ContainerRuntime string + CPUCFSQuota bool + DiskSpacePolicy kubelet.DiskSpacePolicy + DockerClient dockertools.DockerInterface + DockerDaemonContainer string + DockerExecHandler dockertools.ExecHandler + EnableDebuggingHandlers bool + EnableServer bool + EventBurst int + EventRecordQPS float32 + FileCheckFrequency time.Duration + Hostname string + HostnameOverride string + HostNetworkSources []string + HTTPCheckFrequency time.Duration + ImageGCPolicy kubelet.ImageGCPolicy + KubeClient *client.Client ManifestURL string ManifestURLHeader http.Header - FileCheckFrequency time.Duration - HTTPCheckFrequency time.Duration - Hostname string - NodeName string - PodInfraContainerImage string - SyncFrequency time.Duration - RegistryPullQPS float64 - RegistryBurst int - MinimumGCAge time.Duration - MaxPerPodContainerCount int + MasterServiceNamespace string MaxContainerCount int - RegisterNode bool - StandaloneMode bool - ClusterDomain string - ClusterDNS net.IP - EnableServer bool - EnableDebuggingHandlers bool + MaxPerPodContainerCount int + MaxPods int + MinimumGCAge time.Duration + Mounter mount.Interface + NetworkPluginName string + NetworkPlugins []network.NetworkPlugin + NodeName string + NodeStatusUpdateFrequency time.Duration + OSInterface kubecontainer.OSInterface + PodCIDR string + PodInfraContainerImage string Port uint ReadOnlyPort uint - Runonce bool - MasterServiceNamespace string - VolumePlugins []volume.VolumePlugin - NetworkPlugins []network.NetworkPlugin - NetworkPluginName string - StreamingConnectionIdleTimeout time.Duration Recorder record.EventRecorder - TLSOptions *kubelet.TLSOptions - ImageGCPolicy kubelet.ImageGCPolicy - DiskSpacePolicy kubelet.DiskSpacePolicy - Cloud cloudprovider.Interface - NodeStatusUpdateFrequency time.Duration - ResourceContainer string - OSInterface kubecontainer.OSInterface - CgroupRoot string - ContainerRuntime string - RktPath string - Mounter mount.Interface - DockerDaemonContainer string - SystemContainer string - ConfigureCBR0 bool - PodCIDR string - MaxPods int - DockerExecHandler 
dockertools.ExecHandler + RegisterNode bool + RegistryBurst int + RegistryPullQPS float64 ResolverConfig string - CPUCFSQuota bool + ResourceContainer string + RktPath string + RootDirectory string + Runonce bool + StandaloneMode bool + StreamingConnectionIdleTimeout time.Duration + SyncFrequency time.Duration + SystemContainer string + TLSOptions *kubelet.TLSOptions + VolumePlugins []volume.VolumePlugin } func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) { @@ -809,6 +827,8 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod kc.SyncFrequency, float32(kc.RegistryPullQPS), kc.RegistryBurst, + kc.EventRecordQPS, + kc.EventBurst, gcPolicy, pc.SeenAllSources, kc.RegisterNode, diff --git a/cmd/mungedocs/example_syncer.go b/cmd/mungedocs/example_syncer.go index 8df6a968e45..ba153b3c46d 100644 --- a/cmd/mungedocs/example_syncer.go +++ b/cmd/mungedocs/example_syncer.go @@ -43,7 +43,7 @@ var exampleMungeTagRE = regexp.MustCompile(beginMungeTag(fmt.Sprintf("%s %s", ex // bar: // ``` // -// [Download example](../../examples/guestbook/frontend-controller.yaml) +// [Download example](../../examples/guestbook/frontend-controller.yaml?raw=true) // func syncExamples(filePath string, mlines mungeLines) (mungeLines, error) { var err error @@ -108,7 +108,7 @@ func exampleContent(filePath, linkPath, fileType string) (mungeLines, error) { // remove leading and trailing spaces and newlines trimmedFileContent := strings.TrimSpace(string(dat)) - content := fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, fileRel) + content := fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s?raw=true)", fileType, trimmedFileContent, fileRel) out := getMungeLines(content) return out, nil } diff --git a/cmd/mungedocs/example_syncer_test.go b/cmd/mungedocs/example_syncer_test.go index 84fd8854a1e..72c1514a1ff 100644 --- a/cmd/mungedocs/example_syncer_test.go +++ b/cmd/mungedocs/example_syncer_test.go @@ -41,11 +41,11 @@ spec: {"", ""}, { "\n\n", - "\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml)\n\n", + "\n\n```yaml\n" + podExample + "```\n\n[Download example](testdata/pod.yaml?raw=true)\n\n", }, { "\n\n", - "\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml)\n\n", + "\n\n```yaml\n" + podExample + "```\n\n[Download example](../mungedocs/testdata/pod.yaml?raw=true)\n\n", }, } repoRoot = "" diff --git a/code-of-conduct.md b/code-of-conduct.md new file mode 100644 index 00000000000..0552eb7f6b8 --- /dev/null +++ b/code-of-conduct.md @@ -0,0 +1,59 @@ +## Kubernetes Community Code of Conduct + +### Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of fostering +an open and welcoming community, we pledge to respect all people who contribute +through reporting issues, posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project a harassment-free experience for +everyone, regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, body size, race, ethnicity, age, +religion, or nationality. 
+ +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, such as physical or electronic addresses, + without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are not +aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers +commit themselves to fairly and consistently applying these principles to every aspect +of managing this project. Project maintainers who do not follow or enforce the Code of +Conduct may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by +opening an issue or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the Contributor Covenant +(http://contributor-covenant.org), version 1.2.0, available at +http://contributor-covenant.org/version/1/2/0/ + +### Kubernetes Events Code of Conduct +Kubernetes events are working conferences intended for professional networking and collaboration in the +Kubernetes community. Attendees are expected to behave according to professional standards and in accordance +with their employer's policies on appropriate workplace behavior. + +While at Kubernetes events or related social networking opportunities, attendees should not engage in +discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should +be especially aware of these concerns. + +The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes +team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to +be engaging in discriminatory or offensive speech or actions. 
+ +Please bring any concerns to the immediate attention of Kubernetes event staff. + + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/code-of-conduct.md?pixel)]() diff --git a/contrib/mesos/pkg/controllermanager/controllermanager.go b/contrib/mesos/pkg/controllermanager/controllermanager.go index e7ac60e92e4..ad44a1c0e2b 100644 --- a/contrib/mesos/pkg/controllermanager/controllermanager.go +++ b/contrib/mesos/pkg/controllermanager/controllermanager.go @@ -144,7 +144,7 @@ func (s *CMServer) Run(_ []string) error { resourceQuotaController := resourcequotacontroller.NewResourceQuotaController(kubeClient) resourceQuotaController.Run(s.ResourceQuotaSyncPeriod) - namespaceController := namespacecontroller.NewNamespaceController(kubeClient, s.NamespaceSyncPeriod) + namespaceController := namespacecontroller.NewNamespaceController(kubeClient, false, s.NamespaceSyncPeriod) namespaceController.Run() pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod) diff --git a/contrib/mesos/pkg/executor/service/service.go b/contrib/mesos/pkg/executor/service/service.go index fe5cab5b89a..5508d78ee6e 100644 --- a/contrib/mesos/pkg/executor/service/service.go +++ b/contrib/mesos/pkg/executor/service/service.go @@ -301,6 +301,8 @@ func (ks *KubeletExecutorServer) createAndInitKubelet( kc.SyncFrequency, float32(kc.RegistryPullQPS), kc.RegistryBurst, + kc.EventRecordQPS, + kc.EventBurst, gcPolicy, pc.SeenAllSources, kc.RegisterNode, diff --git a/contrib/mesos/pkg/offers/offers.go b/contrib/mesos/pkg/offers/offers.go index d91a6806f1d..962fc81663c 100644 --- a/contrib/mesos/pkg/offers/offers.go +++ b/contrib/mesos/pkg/offers/offers.go @@ -30,7 +30,7 @@ import ( "k8s.io/kubernetes/contrib/mesos/pkg/queue" "k8s.io/kubernetes/contrib/mesos/pkg/runtime" "k8s.io/kubernetes/pkg/client/unversioned/cache" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) const ( @@ -453,7 +453,7 @@ func (s *offerStorage) nextListener() *offerListener { // notify listeners if we find an acceptable offer for them. listeners // are garbage collected after a certain age (see offerListenerMaxAge). // ids lists offer IDs that are retrievable from offer storage.
-func (s *offerStorage) notifyListeners(ids func() (util.StringSet, uint64)) { +func (s *offerStorage) notifyListeners(ids func() (sets.String, uint64)) { listener := s.nextListener() // blocking offerIds, version := ids() @@ -493,8 +493,8 @@ func (s *offerStorage) Init(done <-chan struct{}) { // cached offer ids for the purposes of listener notification idCache := &stringsCache{ - refill: func() util.StringSet { - result := util.NewStringSet() + refill: func() sets.String { + result := sets.NewString() for _, v := range s.offers.List() { if offer, ok := v.(Perishable); ok { result.Insert(offer.Id()) @@ -510,14 +510,14 @@ type stringsCache struct { expiresAt time.Time - cached util.StringSet + cached sets.String ttl time.Duration - refill func() util.StringSet + refill func() sets.String version uint64 } // not thread-safe -func (c *stringsCache) Strings() (util.StringSet, uint64) { +func (c *stringsCache) Strings() (sets.String, uint64) { now := time.Now() if c.expiresAt.Before(now) { old := c.cached @@ -549,8 +549,8 @@ func (self *slaveStorage) add(slaveId, offerId string) { } // delete the slave-offer mappings for slaveId, returns the IDs of the offers that were unmapped -func (self *slaveStorage) deleteSlave(slaveId string) util.StringSet { - offerIds := util.NewStringSet() +func (self *slaveStorage) deleteSlave(slaveId string) sets.String { + offerIds := sets.NewString() self.Lock() defer self.Unlock() for oid, sid := range self.index { diff --git a/contrib/mesos/pkg/queue/delay.go b/contrib/mesos/pkg/queue/delay.go index 6ec436b71e8..be240223ade 100644 --- a/contrib/mesos/pkg/queue/delay.go +++ b/contrib/mesos/pkg/queue/delay.go @@ -21,7 +21,7 @@ import ( "sync" "time" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) type qitem struct { @@ -277,13 +277,13 @@ func (f *DelayFIFO) List() []UniqueID { return list } -// ContainedIDs returns a util.StringSet containing all IDs of the stored items. +// ContainedIDs returns a sets.String containing all IDs of the stored items. // This is a snapshot of a moment in time, and one should keep in mind that // other go routines can add or remove items after you call this. -func (c *DelayFIFO) ContainedIDs() util.StringSet { +func (c *DelayFIFO) ContainedIDs() sets.String { c.rlock() defer c.runlock() - set := util.StringSet{} + set := sets.String{} for id := range c.items { set.Insert(id) } diff --git a/contrib/mesos/pkg/queue/historical.go b/contrib/mesos/pkg/queue/historical.go index 6fe65f21e2b..a9021c14b4d 100644 --- a/contrib/mesos/pkg/queue/historical.go +++ b/contrib/mesos/pkg/queue/historical.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) type entry struct { @@ -177,13 +177,13 @@ func (f *HistoricalFIFO) ListKeys() []string { return list } -// ContainedIDs returns a util.StringSet containing all IDs of the stored items. +// ContainedIDs returns a sets.String containing all IDs of the stored items. // This is a snapshot of a moment in time, and one should keep in mind that // other go routines can add or remove items after you call this.
-func (c *HistoricalFIFO) ContainedIDs() util.StringSet { +func (c *HistoricalFIFO) ContainedIDs() sets.String { c.lock.RLock() defer c.lock.RUnlock() - set := util.StringSet{} + set := sets.String{} for id, entry := range c.items { if entry.Is(DELETE_EVENT | POP_EVENT) { continue diff --git a/contrib/mesos/pkg/scheduler/scheduler.go b/contrib/mesos/pkg/scheduler/scheduler.go index 0148e0d9368..fc722a5370c 100644 --- a/contrib/mesos/pkg/scheduler/scheduler.go +++ b/contrib/mesos/pkg/scheduler/scheduler.go @@ -47,7 +47,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/tools" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) type Slave struct { @@ -711,7 +711,7 @@ func (k *KubernetesScheduler) explicitlyReconcileTasks(driver bindings.Scheduler // tell mesos to send us the latest status updates for all the non-terminal tasks that we know about statusList := []*mesos.TaskStatus{} - remaining := util.KeySet(reflect.ValueOf(taskToSlave)) + remaining := sets.KeySet(reflect.ValueOf(taskToSlave)) for taskId, slaveId := range taskToSlave { if slaveId == "" { delete(taskToSlave, taskId) diff --git a/contrib/mesos/pkg/service/endpoints_controller.go b/contrib/mesos/pkg/service/endpoints_controller.go index c38f692af9e..589aa0dfe3c 100644 --- a/contrib/mesos/pkg/service/endpoints_controller.go +++ b/contrib/mesos/pkg/service/endpoints_controller.go @@ -34,6 +34,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/workqueue" "k8s.io/kubernetes/pkg/watch" @@ -132,8 +133,8 @@ func (e *endpointController) Run(workers int, stopCh <-chan struct{}) { e.queue.ShutDown() } -func (e *endpointController) getPodServiceMemberships(pod *api.Pod) (util.StringSet, error) { - set := util.StringSet{} +func (e *endpointController) getPodServiceMemberships(pod *api.Pod) (sets.String, error) { + set := sets.String{} services, err := e.serviceStore.GetPodServices(pod) if err != nil { // don't log this error because this function makes pointless diff --git a/docs/admin/namespaces/README.md b/docs/admin/namespaces/README.md index f15c29baa00..c866b1e20d5 100644 --- a/docs/admin/namespaces/README.md +++ b/docs/admin/namespaces/README.md @@ -98,7 +98,7 @@ Use the file [`namespace-dev.json`](namespace-dev.json) which describes a develo } ``` -[Download example](namespace-dev.json) +[Download example](namespace-dev.json?raw=true) Create the development namespace using kubectl. diff --git a/docs/devel/coding-conventions.md b/docs/devel/coding-conventions.md index 1569d1aa0d8..8ddf000e205 100644 --- a/docs/devel/coding-conventions.md +++ b/docs/devel/coding-conventions.md @@ -50,6 +50,7 @@ Code conventions - so pkg/controllers/autoscaler/foo.go should say `package autoscaler` not `package autoscalercontroller`. - Unless there's a good reason, the `package foo` line should match the name of the directory in which the .go file exists. - Importers can use a different name if they need to disambiguate. + - Locks should be called `lock` and should never be embedded (always `lock sync.Mutex`). When multiple locks are present, give each lock a distinct name following Go conventions - `stateLock`, `mapLock` etc. 
- API conventions - [API changes](api_changes.md) - [API conventions](api-conventions.md) diff --git a/docs/devel/development.md b/docs/devel/development.md index fc14333b83b..75cb2365e85 100644 --- a/docs/devel/development.md +++ b/docs/devel/development.md @@ -96,7 +96,7 @@ git push -f origin myfeature ### Creating a pull request -1. Visit http://github.com/$YOUR_GITHUB_USERNAME/kubernetes +1. Visit https://github.com/$YOUR_GITHUB_USERNAME/kubernetes 2. Click the "Compare and pull request" button next to your "myfeature" branch. 3. Check out the pull request [process](pull-requests.md) for more details diff --git a/docs/devel/git_workflow.png b/docs/devel/git_workflow.png index e3bd70da02c..80a66248fb8 100644 Binary files a/docs/devel/git_workflow.png and b/docs/devel/git_workflow.png differ diff --git a/docs/getting-started-guides/aws.md b/docs/getting-started-guides/aws.md index a55c1f92b3d..72a6bb8d13c 100644 --- a/docs/getting-started-guides/aws.md +++ b/docs/getting-started-guides/aws.md @@ -76,7 +76,7 @@ using [cluster/aws/config-default.sh](http://releases.k8s.io/HEAD/cluster/aws/co This process takes about 5 to 10 minutes. Once the cluster is up, the IP addresses of your master and node(s) will be printed, as well as information about the default services running in the cluster (monitoring, logging, dns). User credentials and security -tokens are written in `~/.kube/kubeconfig`, they will be necessary to use the CLI or the HTTP Basic Auth. +tokens are written in `~/.kube/config`, they will be necessary to use the CLI or the HTTP Basic Auth. By default, the script will provision a new VPC and a 4 node k8s cluster in us-west-2a (Oregon) with `t2.micro` instances running on Ubuntu. You can override the variables defined in [config-default.sh](http://releases.k8s.io/HEAD/cluster/aws/config-default.sh) to change this behavior as follows: diff --git a/docs/getting-started-guides/azure.md b/docs/getting-started-guides/azure.md index 1983bc8773d..9918194a630 100644 --- a/docs/getting-started-guides/azure.md +++ b/docs/getting-started-guides/azure.md @@ -43,7 +43,7 @@ Getting started on Microsoft Azure ## Prerequisites -** Azure Prerequisites** +**Azure Prerequisites** 1. You need an Azure account. Visit http://azure.microsoft.com/ to get started. 2. Install and configure the Azure cross-platform command-line interface. http://azure.microsoft.com/en-us/documentation/articles/xplat-cli/ diff --git a/docs/getting-started-guides/fedora/fedora_manual_config.md b/docs/getting-started-guides/fedora/fedora_manual_config.md index f306cd80549..bba7c2f3cfe 100644 --- a/docs/getting-started-guides/fedora/fedora_manual_config.md +++ b/docs/getting-started-guides/fedora/fedora_manual_config.md @@ -62,7 +62,7 @@ fed-node = 192.168.121.65 **Prepare the hosts:** * Install Kubernetes on all hosts - fed-{master,node}. This will also pull in docker. Also install etcd on fed-master. This guide has been tested with kubernetes-0.18 and beyond. -* The [--enablerepo=update-testing](https://fedoraproject.org/wiki/QA:Updates_Testing) directive in the yum command below will ensure that the most recent Kubernetes version that is scheduled for pre-release will be installed. This should be a more recent version than the Fedora "stable" release for Kubernetes that you would get without adding the directive. 
+* The [--enablerepo=updates-testing](https://fedoraproject.org/wiki/QA:Updates_Testing) directive in the yum command below will ensure that the most recent Kubernetes version that is scheduled for pre-release will be installed. This should be a more recent version than the Fedora "stable" release for Kubernetes that you would get without adding the directive. * If you want the very latest Kubernetes release [you can download and yum install the RPM directly from Fedora Koji](http://koji.fedoraproject.org/koji/packageinfo?packageID=19202) instead of using the yum install command below. ```sh diff --git a/docs/getting-started-guides/logging.md b/docs/getting-started-guides/logging.md index 95460f9de1d..82ab815eec3 100644 --- a/docs/getting-started-guides/logging.md +++ b/docs/getting-started-guides/logging.md @@ -73,7 +73,7 @@ spec: 'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done'] ``` -[Download example](../../examples/blog-logging/counter-pod.yaml) +[Download example](../../examples/blog-logging/counter-pod.yaml?raw=true) This pod specification has one container which runs a bash script when the container is born. This script simply writes out the value of a counter and the date once per second and runs indefinitely. Let’s create the pod in the default @@ -182,6 +182,7 @@ spec: mountPath: /varlog - name: containers mountPath: /var/lib/docker/containers + readOnly: true terminationGracePeriodSeconds: 30 volumes: - name: varlog @@ -192,7 +193,7 @@ spec: path: /var/lib/docker/containers ``` -[Download example](../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml) +[Download example](../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml?raw=true) This pod specification maps the directory on the host containing the Docker log files, `/var/lib/docker/containers`, to a directory inside the container which has the same path. The pod runs one image, `gcr.io/google_containers/fluentd-gcp:1.6`, which is configured to collect the Docker log files from the logs directory and ingest them into Google Cloud Logging. One instance of this pod runs on each node of the cluster. Kubernetes will notice if this pod fails and automatically restart it. diff --git a/docs/man/man1/kubectl-describe.1 b/docs/man/man1/kubectl-describe.1 index 9633eb1da5f..6dfa016aafc 100644 --- a/docs/man/man1/kubectl-describe.1 +++ b/docs/man/man1/kubectl-describe.1 @@ -30,7 +30,7 @@ exists, it will output details for every resource that has a name prefixed with Possible resource types include (case insensitive): pods (po), services (svc), replicationcontrollers (rc), nodes (no), events (ev), limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), resourcequotas (quota), -namespaces (ns) or secrets. +namespaces (ns), serviceaccounts or secrets. .SH OPTIONS diff --git a/docs/man/man1/kubectl-get.1 b/docs/man/man1/kubectl-get.1 index 63ecbb104d6..2a8965284ec 100644 --- a/docs/man/man1/kubectl-get.1 +++ b/docs/man/man1/kubectl-get.1 @@ -19,7 +19,7 @@ Display one or many resources. Possible resource types include (case insensitive): pods (po), services (svc), replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs), limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), -resourcequotas (quota), namespaces (ns), endpoints (ep) or secrets. +resourcequotas (quota), namespaces (ns), endpoints (ep), serviceaccounts or secrets. 
.PP By specifying the output as 'template' and providing a Go template as the value diff --git a/docs/proposals/Kubemark_architecture.png b/docs/proposals/Kubemark_architecture.png new file mode 100644 index 00000000000..479ad8b11f4 Binary files /dev/null and b/docs/proposals/Kubemark_architecture.png differ diff --git a/docs/proposals/api-group.md b/docs/proposals/api-group.md new file mode 100644 index 00000000000..53531d43260 --- /dev/null +++ b/docs/proposals/api-group.md @@ -0,0 +1,152 @@ + + + + +WARNING +WARNING +WARNING +WARNING +WARNING + +

PLEASE NOTE: This document applies to the HEAD of the source tree

+ +If you are using a released version of Kubernetes, you should +refer to the docs that go with that version. + +The latest 1.0.x release of this document can be found +[here](http://releases.k8s.io/release-1.0/docs/proposals/api-group.md). + +Documentation for other releases can be found at +[releases.k8s.io](http://releases.k8s.io). + +-- + + + + + +# Supporting multiple API groups + +## Goal + +1. Breaking the monolithic v1 API into modular groups and allowing groups to be enabled/disabled individually. This allows us to break the monolithic API server into smaller components in the future. + +2. Supporting different versions in different groups. This allows different groups to evolve at different speeds. + +3. Supporting identically named kinds to exist in different groups. This is useful when we experiment with new features of an API in the experimental group while supporting the stable API in the original group at the same time. + +4. Exposing the API groups and versions supported by the server. This is required to develop a dynamic client. + +5. Laying the basis for [API Plugin](../../docs/design/extending-api.md). + +6. Keeping the user interaction easy. For example, we should allow users to omit the group name when using kubectl if there is no ambiguity. + + +## Bookkeeping for groups + +1. No changes to TypeMeta: + + Currently many internal structures, such as RESTMapper and Scheme, are indexed and retrieved by APIVersion. For a fast implementation targeting the v1.1 deadline, we will concatenate group with version, in the form of "group/version", and use it where a version string is expected, so that much code can be reused. This implies we will not add a new field to TypeMeta; we will use TypeMeta.APIVersion to hold "group/version". + + For backward compatibility, v1 objects belong to the group with an empty name, so existing v1 config files will remain valid. + +2. /pkg/conversion#Scheme: + + The key of /pkg/conversion#Scheme.versionMap for versioned types will be "group/version". For now, the internal version types of all groups will be registered to versionMap[""], as we don't have any identically named kinds in different groups yet. In the near future, internal version types will be registered to versionMap["group/"], and pkg/conversion#Scheme.InternalVersion will have type []string. + + We will need a mechanism to express whether two kinds in different groups (e.g., compute/pods and experimental/pods) are convertible, and auto-generate the conversions if they are. + +3. meta.RESTMapper: + + Each group will have its own RESTMapper (of type DefaultRESTMapper), and these mappers will be registered to pkg/api#RESTMapper (of type MultiRESTMapper). + + To support identically named kinds in different groups, we need to expand the input of RESTMapper.VersionAndKindForResource from (resource string) to (group, resource string). If group is not specified and there is ambiguity (i.e., the resource exists in multiple groups), an error should be returned to force the user to specify the group. + +## Server-side implementation + +1. resource handlers' URL: + + We will force the URL to be in the form of prefix/group/version/... + + Prefix is used to differentiate API paths from other paths like /healthz. All groups will use the same prefix="apis", except when backward compatibility requires otherwise. No "/" is allowed in prefix, group, or version.
Specifically, + + * for /api/v1, we set the prefix="api" (which is populated from cmd/kube-apiserver/app#APIServer.APIPrefix), group="", version="v1", so the URL remains /api/v1. + + * for new kube API groups, we will set the prefix="apis" (we will add a field in type APIServer to hold this prefix), group=GROUP_NAME, version=VERSION. For example, the URL of the experimental resources will be /apis/experimental/v1alpha1. + + * for OpenShift v1 API, because it's currently registered at /oapi/v1, to be backward compatible, OpenShift may set prefix="oapi", group="". + + * for other new third-party APIs, they should also use the prefix="apis" and choose the group and version. This can be done through the third-party API plugin mechanism in [13000](http://pr.k8s.io/13000). + +2. supporting API discovery: + + * At /prefix (e.g., /apis), API server will return the supported groups and their versions using pkg/api/unversioned#APIVersions type, setting the Versions field to "group/version". This is backward compatible, because currently API server does return "v1" encoded in pkg/api/unversioned#APIVersions at /api. (We will also rename the JSON field name from `versions` to `apiVersions`, to be consistent with the pkg/api#TypeMeta.APIVersion field) + + * At /prefix/group, API server will return all supported versions of the group. We will create a new type VersionList (name is open to discussion) in pkg/api/unversioned as the API. + + * At /prefix/group/version, API server will return all supported resources in this group, and whether each resource is namespaced. We will create a new type APIResourceList (name is open to discussion) in pkg/api/unversioned as the API. + + We will design how to handle deeper paths in other proposals. + + * At /swaggerapi/swagger-version/prefix/group/version, API server will return the Swagger spec of that group/version in `swagger-version` (e.g. we may support both Swagger v1.2 and v2.0). + +3. handling common API objects: + + * top-level common API objects: + + To handle the top-level API objects that are used by all groups, we either have to register them to all schemes, or we can choose not to encode them to a version. We plan to take the latter approach and place such types in a new package called `unversioned`, because many of the common top-level objects, such as APIVersions, VersionList, and APIResourceList, which are used in the API discovery, and pkg/api#Status, are part of the protocol between client and server, and do not belong to the domain-specific parts of the API, which will evolve independently over time. + + Types in the unversioned package will not have the APIVersion field, but may retain the Kind field. + + For backward compatibility, when handling the Status, the server will encode it to v1 if the client expects the Status to be encoded in v1; otherwise the server will send the unversioned#Status. If an error occurs before the version can be determined, the server will send the unversioned#Status. + + * non-top-level common API objects: + + Assuming object o belonging to group X is used as a field in an object belonging to group Y, currently genconversion will generate the conversion functions for o in package Y. Hence, we don't need any special treatment for non-top-level common API objects. + + TypeMeta is an exception, because it is a common object that is used by objects in all groups but does not logically belong to any group. We plan to move it to the package `unversioned`. + +## Client-side implementation + +1. 
clients: + + Currently we have structured (pkg/client/unversioned#ExperimentalClient, pkg/client/unversioned#Client) and unstructured (pkg/kubectl/resource#Helper) clients. The structured clients are not scalable because each of them implements a specific interface, e.g., [here](../../pkg/client/unversioned/client.go#L32). Only the unstructured clients are scalable. We should either auto-generate the code for structured clients or migrate to use the unstructured clients as much as possible. + + We should also move the unstructured client to pkg/client/. + +2. Spelling the URL: + + The URL is in the form of prefix/group/version/. The prefix is hard-coded in the client/unversioned.Config (see [here](../../pkg/client/unversioned/experimental.go#L101)). The client should be able to figure out `group` and `version` using the RESTMapper. For a third-party client which does not have access to the RESTMapper, it should discover the mapping of `group`, `version` and `kind` by querying the server as described in point 2 of #server-side-implementation. + +3. kubectl: + + kubectl should accept arguments like `group/resource`, `group/resource/name`. Nevertheless, the user can omit the `group`; kubectl shall then rely on RESTMapper.VersionAndKindForResource() to figure out the default group/version of the resource. For example, for resources (like `node`) that exist in both k8s v1 API and k8s modularized API (like `infra/v2`), we should set the kubectl default to use one of them. If there is no default group, kubectl should return an error for the ambiguity. + + When kubectl is used with a single resource type, the --api-version and --output-version flags of kubectl should accept values in the form of `group/version`, and they should work as they do today. For multi-resource operations, we will disable these two flags initially. + + Currently, by setting pkg/client/unversioned/clientcmd/api/v1#Config.NamedCluster[x].Cluster.APIVersion ([here](../../pkg/client/unversioned/clientcmd/api/v1/types.go#L58)), the user can configure the default apiVersion used by kubectl to talk to the server. It does not make sense to set a global version used by kubectl when there are multiple groups, so we plan to deprecate this field. We may extend the version negotiation function to negotiate the preferred version of each group. Details will be in another proposal. + +## OpenShift integration + +OpenShift can take a similar approach to break up its monolithic v1 API: keeping the v1 objects where they are, and gradually adding groups. + +For the v1 objects in OpenShift, they should keep doing what they do now: they should remain registered to the Scheme.versionMap["v1"] scheme and keep being added to originMapper. + +For new OpenShift groups, they should do the same as native Kubernetes groups would do: each group should register to Scheme.versionMap["group/version"], and each should have a separate RESTMapper and register it with the MultiRESTMapper. + +To expose a list of the supported OpenShift groups to clients, OpenShift just has to call pkg/cmd/server/origin#initAPIVersionRoute() as it does now, passing in the supported "group/versions" instead of "versions". + + +## Future work + +1. Dependencies between groups: we need an interface to register the dependencies between groups. It is not our priority now as the use cases are not clear yet.
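To make the "group/version" bookkeeping and the prefix/group/version URL layout above concrete, here is a minimal, self-contained Go sketch (illustrative only, not code from this PR; the helper names are invented for the example). It treats the legacy v1 API as the group with an empty name, exactly as the proposal describes:

```go
package main

import (
	"fmt"
	"strings"
)

// splitGroupVersion splits an APIVersion string of the form "group/version".
// A bare version such as "v1" belongs to the legacy group with an empty name.
func splitGroupVersion(apiVersion string) (group, version string) {
	if i := strings.Index(apiVersion, "/"); i >= 0 {
		return apiVersion[:i], apiVersion[i+1:]
	}
	return "", apiVersion
}

// resourcePath builds the prefix/group/version path described in the
// proposal: the legacy group keeps its historical /api prefix and layout,
// while every other group lives under /apis.
func resourcePath(group, version string) string {
	if group == "" {
		return "/api/" + version
	}
	return "/apis/" + group + "/" + version
}

func main() {
	for _, v := range []string{"v1", "experimental/v1alpha1"} {
		g, ver := splitGroupVersion(v)
		fmt.Printf("%q -> group=%q version=%q path=%s\n", v, g, ver, resourcePath(g, ver))
	}
}
```

Running this prints `/api/v1` for the legacy `"v1"` string and `/apis/experimental/v1alpha1` for `"experimental/v1alpha1"`, matching the URL examples in the proposal.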
+ + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/proposals/api-group.md?pixel)]() + diff --git a/docs/proposals/kubemark.md b/docs/proposals/kubemark.md new file mode 100644 index 00000000000..51ea4375e10 --- /dev/null +++ b/docs/proposals/kubemark.md @@ -0,0 +1,190 @@ + + + + +WARNING +WARNING +WARNING +WARNING +WARNING + +

PLEASE NOTE: This document applies to the HEAD of the source tree

+ +If you are using a released version of Kubernetes, you should +refer to the docs that go with that version. + +The latest 1.0.x release of this document can be found +[here](http://releases.k8s.io/release-1.0/docs/proposals/kubemark.md). + +Documentation for other releases can be found at +[releases.k8s.io](http://releases.k8s.io). + +-- + + + + + +# Kubemark proposal + +## Goal of this document + +This document describes a design of Kubemark - a system that allows performance testing of a Kubernetes cluster. It describes the +assumptions and high-level design, and discusses possible solutions for lower-level problems. It is supposed to be a starting point for more +detailed discussion. + +## Current state and objective + +Currently performance testing happens on ‘live’ clusters of up to 100 Nodes. It takes quite a while to start such a cluster or to push +updates to all Nodes, and it uses quite a lot of resources. At this scale the amount of wasted time and used resources is still acceptable. +In the next quarter or two we’re targeting a 1000 Node cluster, which will push it way beyond the ‘acceptable’ level. Additionally we want to +enable people without many resources to run scalability tests on bigger clusters than they can afford at a given time. Having the ability to +cheaply run scalability tests will enable us to run some set of them on "normal" test clusters, which in turn would mean the ability to run +them on every PR. + +This means that we need a system that will allow for realistic performance testing on a (much) smaller number of “real” machines. The first +assumption we make is that Nodes are independent, i.e. the number of existing Nodes does not impact the performance of a single Node. This is not +entirely true, as the number of Nodes can increase the latency of various components on the Master machine, which in turn may increase the latency of Node +operations, but we’re not interested in measuring this effect here. Instead we want to measure how the number of Nodes and the load imposed by +Node daemons affect the performance of Master components. + +## Kubemark architecture overview + +The high-level idea behind Kubemark is to write a library that allows running artificial "Hollow" Nodes able to simulate the +behavior of a real Kubelet and KubeProxy in a single, lightweight binary. Hollow components will need to correctly respond to Controllers +(via API server), and preferably, in the fullness of time, be able to ‘replay’ previously recorded real traffic (this is out of scope for the +initial version). To teach Hollow components to replay recorded traffic, they will need to store data specifying when a given Pod/Container +should die (e.g. its observed lifetime). Such data can be extracted e.g. from etcd Raft logs, or it can be reconstructed from Events. In the +initial version we only want them to be able to fool Master components and put some configurable (in what way TBD) load on them. + +When we have the Hollow Node ready, we’ll be able to test the performance of Master components by creating a real Master Node, with API server, +Controllers, etcd and whatnot, and creating a number of Hollow Nodes that will register to the running Master. + +To make Kubemark easier to maintain as the system evolves, Hollow components will reuse real "production" code for Kubelet and KubeProxy, but +will mock all the backends with no-op or very simple mocks. We believe that this approach is better in the long run than writing a special +"performance-test-aimed" version of them.
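A minimal sketch of this "reuse production code, inject no-op mocks" shape, for illustration only: the `ContainerRuntime` interface and every name below are invented stand-ins (the real Kubelet backends are much larger), but the dependency-injection pattern is the one a Hollow component would follow:

```go
package main

import "fmt"

// ContainerRuntime stands in for the kind of backend interface the shared
// "production" code is written against (invented for this illustration).
type ContainerRuntime interface {
	RunContainer(image string) error
}

// dockerRuntime would wrap a real Docker client in production.
type dockerRuntime struct{}

func (dockerRuntime) RunContainer(image string) error {
	// ...talk to the Docker daemon here...
	return fmt.Errorf("no docker daemon in this sketch")
}

// noopRuntime is the no-op mock: it accepts every request and does nothing,
// so the shared logic above it runs unmodified and only the Master-facing
// code paths generate load.
type noopRuntime struct{}

func (noopRuntime) RunContainer(image string) error { return nil }

// startKubelet stands in for the shared production code path; a Hollow
// component differs only in which backend gets injected.
func startKubelet(rt ContainerRuntime) {
	_ = rt.RunContainer("gcr.io/google_containers/pause")
}

func main() {
	startKubelet(noopRuntime{}) // hollow node: same logic, mocked backend
}
```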
Reusing production code in this way may take more time to create an initial version, but we think the maintenance cost will be noticeably smaller. + +### Option 1 + +For the initial version we will teach Master components to use the port number to identify Kubelet/KubeProxy. This will allow running those +components on non-default ports, and at the same time will allow running multiple Hollow Nodes on a single machine. During setup we will +generate credentials for cluster communication and pass them to HollowKubelet/HollowProxy to use. Master will treat all HollowNodes as +normal ones. + +![Kubemark architecture diagram for option 1](Kubemark_architecture.png?raw=true "Kubemark architecture overview") +*Kubemark architecture diagram for option 1* + +### Option 2 + +As a second (equivalent) option we will run Kubemark on top of a 'real' Kubernetes cluster, where both Master and Hollow Nodes will be Pods. +In this option we'll be able to use Kubernetes mechanisms to streamline setup, e.g. by using Kubernetes networking to ensure unique IPs for +Hollow Nodes, or using Secrets to distribute Kubelet credentials. The downside of this configuration is that it's likely that some noise +will appear in Kubemark results from either CPU/Memory pressure from other things running on Nodes (e.g. FluentD, or Kubelet) or from running the +cluster over an overlay network. We believe that it'll be possible to turn off cluster monitoring for Kubemark runs, so that the impact +of real Node daemons will be minimized, but we don't know what the impact of using a higher-level networking stack will be. Running a +comparison will be an interesting test in itself. + +### Discussion + +Before taking a closer look at the steps necessary to set up a minimal Hollow cluster it's hard to tell which approach will be simpler. It's +quite possible that the initial version will end up as a hybrid between running the Hollow cluster directly on top of VMs and running the +Hollow cluster on top of a Kubernetes cluster that is running on top of VMs. E.g. running Nodes as Pods in a Kubernetes cluster and the Master +directly on top of a VM. + +## Things to simulate + +In real Kubernetes on a single Node we run two daemons that communicate with the Master in some way: Kubelet and KubeProxy. + +### KubeProxy + +As a replacement for KubeProxy we'll use HollowProxy, which will be a real KubeProxy with injected no-op mocks everywhere it makes sense. + +### Kubelet + +As a replacement for Kubelet we'll use HollowKubelet, which will be a real Kubelet with injected no-op or simple mocks everywhere it makes +sense. + +Kubelet also exposes a cadvisor endpoint, which is scraped by Heapster, and a healthz endpoint, which is read by supervisord; we also have FluentD running as a +Pod on each Node that exports logs to Elasticsearch (or Google Cloud Logging). Both Heapster and Elasticsearch are running in Pods in the +cluster so they do not add any load on Master components by themselves. There can be other systems that scrape Heapster through the proxy running +on the Master, which adds additional load, but they're not part of the default setup, so in the first version we won't simulate this behavior. + +In the first version we’ll assume that all started Pods will run indefinitely if not explicitly deleted. In the future we can add a model +of short-running batch jobs, but in the initial version we’ll assume only serving-like Pods. + +### Heapster + +In addition to system components we run Heapster as part of the cluster monitoring setup. Heapster currently watches Events, Pods and Nodes +through the API server.
In the test setup we can use the real Heapster for watching the API server, with the piece that scrapes cAdvisor +data from Kubelets mocked out. + +### Elasticsearch and Fluentd + +Similarly to Heapster, Elasticsearch runs outside the Master machine but generates some traffic on it. The Fluentd “daemon” running on the Master +periodically sends the Docker logs it has gathered to the Elasticsearch instance running on one of the Nodes. In the initial version we omit Elasticsearch, +as it produces only a constant small load on the Master Node that does not change with the size of the cluster. + +## Necessary work + +There are three more or less independent things that need to be worked on: +- HollowNode implementation, creating a library/binary that will be able to listen to Watches and respond in a correct fashion with Status +updates. This also involves the creation of a CloudProvider that can produce such Hollow Nodes, or making sure that HollowNodes can correctly +self-register with a no-provider Master. +- Kubemark setup, including figuring out the networking model, the number of Hollow Nodes that will be allowed to run on a single “machine”, and writing +setup/run/teardown scripts (in [option 1](#option-1)), or figuring out how to run Master and Hollow Nodes on top of Kubernetes +(in [option 2](#option-2)) +- Creating a Player component that will send requests to the API server, putting a load on the cluster. This involves creating a way to +specify the desired workload. This task is +very well isolated from the rest, as it is about sending requests to the real API server. Because of that we can discuss requirements +separately. + +## Concerns + +Network performance most likely won't be a problem for the initial version if running directly on VMs rather than on top of a Kubernetes +cluster, as Kubemark will be running on a standard networking stack (no cloud-provider software routes or overlay network are needed, as we +don't need custom routing between Pods). Similarly we don't think that running Kubemark on Kubernetes' virtualized cluster networking will +cause a noticeable performance impact, but it requires testing. + +On the other hand, when adding additional features it may turn out that we need to simulate the Kubernetes Pod network. In such a case, when running +'pure' Kubemark we may try one of the following: + - running an overlay network like Flannel or OVS instead of using cloud provider routes, + - writing a simple network multiplexer to multiplex communications from the Hollow Kubelets/KubeProxies on the machine. + +In the case of Kubemark on Kubernetes it may turn out that we run into a problem with adding yet another layer of network virtualization, but we +don't need to solve this problem now. + +## Work plan + +- Teach/make sure that Master can talk to multiple Kubelets on the same machine [option 1](#option-1): + - make sure that Master can talk to a Kubelet on a non-default port, + - make sure that Master can talk to all Kubelets on different ports, +- Write HollowNode library: + - new HollowProxy, + - new HollowKubelet, + - new HollowNode combining the two, + - make sure that Master can talk to two HollowKubelets running on the same machine +- Make sure that we can run a Hollow cluster on top of Kubernetes [option 2](#option-2) +- Write a player that will automatically put some predefined load on Master, <- this is the moment when it’s possible to play with it and is useful by itself for +scalability tests.
+## Concerns
+
+Network performance most likely won't be a problem for the initial version if we run directly on VMs rather than on top of a Kubernetes
+cluster, as Kubemark will be running on the standard networking stack (no cloud-provider software routes or overlay network are needed,
+as we don't need custom routing between Pods). Similarly, we don't think that running Kubemark on virtualized Kubernetes cluster
+networking will cause a noticeable performance impact, but it requires testing.
+
+On the other hand, when adding additional features it may turn out that we need to simulate the Kubernetes Pod network. In such a case,
+when running 'pure' Kubemark we may try one of the following:
+ - running an overlay network like Flannel or OVS instead of using cloud provider routes,
+ - writing a simple network multiplexer to multiplex communication from the Hollow Kubelets/KubeProxies on the machine.
+
+In the case of Kubemark on Kubernetes it may turn out that we run into a problem with adding yet another layer of network virtualization,
+but we don't need to solve this problem now.
+
+## Work plan
+
+- Teach/make sure that the Master can talk to multiple Kubelets on the same machine [option 1](#option-1):
+  - make sure that the Master can talk to a Kubelet on a non-default port,
+  - make sure that the Master can talk to all Kubelets on different ports,
+- Write the HollowNode library:
+  - new HollowProxy,
+  - new HollowKubelet,
+  - new HollowNode combining the two,
+  - make sure that the Master can talk to two HollowKubelets running on the same machine,
+- Make sure that we can run a Hollow cluster on top of Kubernetes [option 2](#option-2),
+- Write a Player that will automatically put some predefined load on the Master <- this is the moment when it's possible to play with the
+system, and it is useful by itself for scalability tests. Alternatively we can just use the current density/load tests,
+- Benchmark our machines - see how many Watch clients we can have before everything explodes,
+- See how many HollowNodes we can run on a single machine by attaching them to the real Master <- this is the moment it starts to be
+useful,
+- Update the kube-up/kube-down scripts to enable creating "HollowClusters"/write new scripts/something, and integrate the HollowCluster
+with Elasticsearch/Heapster equivalents,
+- Allow passing custom configuration to the Player.
+
+## Future work
+
+In the future we want to add the following capabilities to the Kubemark system:
+- replaying real traffic reconstructed from a recorded Events stream,
+- simulating the scraping of things running on Nodes through the Master proxy.
+
+
+
+[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/proposals/kubemark.md?pixel)]()
+
diff --git a/docs/user-guide/downward-api.md b/docs/user-guide/downward-api.md index 9551ec0876f..8f625bd6c84 100644 --- a/docs/user-guide/downward-api.md +++ b/docs/user-guide/downward-api.md @@ -108,7 +108,7 @@ spec: restartPolicy: Never ``` -[Download example](downward-api/dapi-pod.yaml) +[Download example](downward-api/dapi-pod.yaml?raw=true) @@ -178,7 +178,7 @@ spec: fieldPath: metadata.annotations ``` -[Download example](downward-api/volume/dapi-volume.yaml) +[Download example](downward-api/volume/dapi-volume.yaml?raw=true) Some more thorough examples: diff --git a/docs/user-guide/kubectl/kubectl.md b/docs/user-guide/kubectl/kubectl.md index cf8c2085cfb..a9be3c5f030 100644 --- a/docs/user-guide/kubectl/kubectl.md +++ b/docs/user-guide/kubectl/kubectl.md @@ -100,7 +100,7 @@ kubectl * [kubectl stop](kubectl_stop.md) - Deprecated: Gracefully shut down a resource by name or filename. * [kubectl version](kubectl_version.md) - Print the client and server version information.
-###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.476725335 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.165115265 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_annotate.md b/docs/user-guide/kubectl/kubectl_annotate.md index 21909136b72..a6b7831c3c6 100644 --- a/docs/user-guide/kubectl/kubectl_annotate.md +++ b/docs/user-guide/kubectl/kubectl_annotate.md @@ -119,7 +119,7 @@ $ kubectl annotate pods foo description- * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-02 06:24:17.720533039 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.16095949 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_annotate.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_api-versions.md b/docs/user-guide/kubectl/kubectl_api-versions.md index eaeef7866f5..15051d3f917 100644 --- a/docs/user-guide/kubectl/kubectl_api-versions.md +++ b/docs/user-guide/kubectl/kubectl_api-versions.md @@ -76,7 +76,7 @@ kubectl api-versions * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.476265479 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.164255617 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_api-versions.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_attach.md b/docs/user-guide/kubectl/kubectl_attach.md index 6155446b7d0..c47cf2035ec 100644 --- a/docs/user-guide/kubectl/kubectl_attach.md +++ b/docs/user-guide/kubectl/kubectl_attach.md @@ -98,7 +98,7 @@ $ kubectl attach 123456-7890 -c ruby-container -i -t * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.471309711 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.155651469 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_attach.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_cluster-info.md b/docs/user-guide/kubectl/kubectl_cluster-info.md index f7728387eab..465984de41b 100644 --- a/docs/user-guide/kubectl/kubectl_cluster-info.md +++ b/docs/user-guide/kubectl/kubectl_cluster-info.md @@ -76,7 +76,7 @@ kubectl cluster-info * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.476078738 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.163962347 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_cluster-info.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config.md b/docs/user-guide/kubectl/kubectl_config.md index a16a4603b21..f0eb708b869 100644 --- a/docs/user-guide/kubectl/kubectl_config.md +++ b/docs/user-guide/kubectl/kubectl_config.md @@ -94,7 +94,7 @@ kubectl config SUBCOMMAND * [kubectl config use-context](kubectl_config_use-context.md) - Sets the current-context in a kubeconfig file * [kubectl config view](kubectl_config_view.md) - displays Merged kubeconfig settings or a specified kubeconfig file. 
-###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475888484 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.163685546 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_set-cluster.md b/docs/user-guide/kubectl/kubectl_config_set-cluster.md index a24a48be32e..414f7cc08a0 100644 --- a/docs/user-guide/kubectl/kubectl_config_set-cluster.md +++ b/docs/user-guide/kubectl/kubectl_config_set-cluster.md @@ -96,7 +96,7 @@ $ kubectl config set-cluster e2e --insecure-skip-tls-verify=true * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.474677631 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.161700827 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_set-cluster.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_set-context.md b/docs/user-guide/kubectl/kubectl_config_set-context.md index 116d2802511..15643a2d8c8 100644 --- a/docs/user-guide/kubectl/kubectl_config_set-context.md +++ b/docs/user-guide/kubectl/kubectl_config_set-context.md @@ -89,7 +89,7 @@ $ kubectl config set-context gce --user=cluster-admin * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475093212 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.162402642 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_set-context.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_set-credentials.md b/docs/user-guide/kubectl/kubectl_config_set-credentials.md index dc84b808ce6..c4663ae0e34 100644 --- a/docs/user-guide/kubectl/kubectl_config_set-credentials.md +++ b/docs/user-guide/kubectl/kubectl_config_set-credentials.md @@ -109,7 +109,7 @@ $ kubectl config set-credentials cluster-admin --client-certificate=~/.kube/admi * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.474882527 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.162045132 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_set-credentials.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_set.md b/docs/user-guide/kubectl/kubectl_config_set.md index ab0229e3a81..9f72013f061 100644 --- a/docs/user-guide/kubectl/kubectl_config_set.md +++ b/docs/user-guide/kubectl/kubectl_config_set.md @@ -78,7 +78,7 @@ kubectl config set PROPERTY_NAME PROPERTY_VALUE * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475281504 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.162716308 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_set.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_unset.md b/docs/user-guide/kubectl/kubectl_config_unset.md index 5f86a0f58d9..1f609d34faf 100644 --- a/docs/user-guide/kubectl/kubectl_config_unset.md +++ b/docs/user-guide/kubectl/kubectl_config_unset.md @@ -77,7 +77,7 @@ kubectl config unset PROPERTY_NAME * [kubectl config](kubectl_config.md) - config 
modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475473658 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.163015642 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_unset.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_use-context.md b/docs/user-guide/kubectl/kubectl_config_use-context.md index 6a62618aa4c..d2dbdc773fb 100644 --- a/docs/user-guide/kubectl/kubectl_config_use-context.md +++ b/docs/user-guide/kubectl/kubectl_config_use-context.md @@ -76,7 +76,7 @@ kubectl config use-context CONTEXT_NAME * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.475674294 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.163336177 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_use-context.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_config_view.md b/docs/user-guide/kubectl/kubectl_config_view.md index bbfadeb2d91..2153266a154 100644 --- a/docs/user-guide/kubectl/kubectl_config_view.md +++ b/docs/user-guide/kubectl/kubectl_config_view.md @@ -103,7 +103,7 @@ $ kubectl config view -o template --template='{{range .users}}{{ if eq .name "e2 * [kubectl config](kubectl_config.md) - config modifies kubeconfig files -###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.775349034 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.161359997 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_config_view.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_create.md b/docs/user-guide/kubectl/kubectl_create.md index f718dc10fee..aeaf523944c 100644 --- a/docs/user-guide/kubectl/kubectl_create.md +++ b/docs/user-guide/kubectl/kubectl_create.md @@ -96,7 +96,7 @@ $ cat pod.json | kubectl create -f - * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469492371 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.152429973 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_create.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_delete.md b/docs/user-guide/kubectl/kubectl_delete.md index 18fc8061ca9..fb8a25085e4 100644 --- a/docs/user-guide/kubectl/kubectl_delete.md +++ b/docs/user-guide/kubectl/kubectl_delete.md @@ -119,7 +119,7 @@ $ kubectl delete pods --all * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.470182255 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.153952299 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_delete.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_describe.md b/docs/user-guide/kubectl/kubectl_describe.md index 5fa18144d32..8337c2ddbd6 100644 --- a/docs/user-guide/kubectl/kubectl_describe.md +++ b/docs/user-guide/kubectl/kubectl_describe.md @@ -51,7 +51,7 @@ exists, it will output details for every resource that has a name prefixed with Possible resource types include (case insensitive): pods (po), services (svc), replicationcontrollers (rc), nodes (no), events (ev), limitranges (limits), persistentvolumes (pv), 
persistentvolumeclaims (pvc), resourcequotas (quota), -namespaces (ns) or secrets. +namespaces (ns), serviceaccounts or secrets. ``` kubectl describe (-f FILENAME | TYPE [NAME_PREFIX | -l label] | TYPE/NAME) @@ -119,7 +119,7 @@ $ kubectl describe pods frontend * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469291072 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.152057668 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_describe.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_exec.md b/docs/user-guide/kubectl/kubectl_exec.md index a1471e6f347..0324769af11 100644 --- a/docs/user-guide/kubectl/kubectl_exec.md +++ b/docs/user-guide/kubectl/kubectl_exec.md @@ -99,7 +99,7 @@ $ kubectl exec 123456-7890 -c ruby-container -i -t -- bash -il * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.471517301 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.156052759 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_exec.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_expose.md b/docs/user-guide/kubectl/kubectl_expose.md index 397de0db7a2..5d1e82a957c 100644 --- a/docs/user-guide/kubectl/kubectl_expose.md +++ b/docs/user-guide/kubectl/kubectl_expose.md @@ -121,7 +121,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 09:05:42.928698484 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.159044239 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_expose.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_get.md b/docs/user-guide/kubectl/kubectl_get.md index 659f6ce7b78..a62e562d6c6 100644 --- a/docs/user-guide/kubectl/kubectl_get.md +++ b/docs/user-guide/kubectl/kubectl_get.md @@ -43,7 +43,7 @@ Display one or many resources. Possible resource types include (case insensitive): pods (po), services (svc), replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs), limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), -resourcequotas (quota), namespaces (ns), endpoints (ep) or secrets. +resourcequotas (quota), namespaces (ns), endpoints (ep), serviceaccounts or secrets. By specifying the output as 'template' and providing a Go template as the value of the --template flag, you can filter the attributes of the fetched resource(s). 
@@ -132,7 +132,7 @@ $ kubectl get rc/web service/frontend pods/web-pod-13je7 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.761418557 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.151532564 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_get.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_label.md b/docs/user-guide/kubectl/kubectl_label.md index 1b0ee3119bc..1a0a3fbdc6c 100644 --- a/docs/user-guide/kubectl/kubectl_label.md +++ b/docs/user-guide/kubectl/kubectl_label.md @@ -120,7 +120,7 @@ $ kubectl label pods foo bar- * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.773776248 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.160594172 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_label.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_logs.md b/docs/user-guide/kubectl/kubectl_logs.md index 79ac2d3b0b6..0a28240154b 100644 --- a/docs/user-guide/kubectl/kubectl_logs.md +++ b/docs/user-guide/kubectl/kubectl_logs.md @@ -98,7 +98,7 @@ $ kubectl logs -f 123456-7890 ruby-container * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.470591683 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.154570214 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_logs.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_namespace.md b/docs/user-guide/kubectl/kubectl_namespace.md index 3e686d46f00..14dbd6aee2b 100644 --- a/docs/user-guide/kubectl/kubectl_namespace.md +++ b/docs/user-guide/kubectl/kubectl_namespace.md @@ -79,7 +79,7 @@ kubectl namespace [namespace] * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.470380367 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.154262869 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_namespace.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_patch.md b/docs/user-guide/kubectl/kubectl_patch.md index aee0b3a18be..7c6b57d6c46 100644 --- a/docs/user-guide/kubectl/kubectl_patch.md +++ b/docs/user-guide/kubectl/kubectl_patch.md @@ -102,7 +102,7 @@ kubectl patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469927571 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.153568922 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_patch.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_port-forward.md b/docs/user-guide/kubectl/kubectl_port-forward.md index ee8771f2a6a..189acda0fc5 100644 --- a/docs/user-guide/kubectl/kubectl_port-forward.md +++ b/docs/user-guide/kubectl/kubectl_port-forward.md @@ -99,7 +99,7 @@ $ kubectl port-forward mypod 0:5000 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.471732563 +0000 UTC +###### Auto generated by 
spf13/cobra at 2015-09-10 18:53:03.156433376 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_port-forward.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_proxy.md b/docs/user-guide/kubectl/kubectl_proxy.md index 131d1c3060a..2be1e5f938b 100644 --- a/docs/user-guide/kubectl/kubectl_proxy.md +++ b/docs/user-guide/kubectl/kubectl_proxy.md @@ -121,7 +121,7 @@ $ kubectl proxy --api-prefix=/k8s-api * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.472010935 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.156927042 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_proxy.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_replace.md b/docs/user-guide/kubectl/kubectl_replace.md index ad65549704d..96a15b17b9e 100644 --- a/docs/user-guide/kubectl/kubectl_replace.md +++ b/docs/user-guide/kubectl/kubectl_replace.md @@ -110,7 +110,7 @@ kubectl replace --force -f ./pod.json * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.469727962 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.153166598 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_replace.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_rolling-update.md b/docs/user-guide/kubectl/kubectl_rolling-update.md index 26aea54d971..751c939335f 100644 --- a/docs/user-guide/kubectl/kubectl_rolling-update.md +++ b/docs/user-guide/kubectl/kubectl_rolling-update.md @@ -118,7 +118,7 @@ $ kubectl rolling-update frontend --image=image:v2 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-08-29 13:01:26.768458355 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.154895732 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_rolling-update.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_run.md b/docs/user-guide/kubectl/kubectl_run.md index c664afd417c..7a8cf4da6a9 100644 --- a/docs/user-guide/kubectl/kubectl_run.md +++ b/docs/user-guide/kubectl/kubectl_run.md @@ -133,7 +133,7 @@ $ kubectl run nginx --image=nginx --command -- ... 
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-07 06:40:12.142439604 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.15783835 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_run.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_scale.md b/docs/user-guide/kubectl/kubectl_scale.md index 068cbed9df0..1589bcf8157 100644 --- a/docs/user-guide/kubectl/kubectl_scale.md +++ b/docs/user-guide/kubectl/kubectl_scale.md @@ -108,7 +108,7 @@ $ kubectl scale --replicas=5 rc/foo rc/bar * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.471116954 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.155304524 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_scale.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_stop.md b/docs/user-guide/kubectl/kubectl_stop.md index 33c5fe100c9..29bf099afa8 100644 --- a/docs/user-guide/kubectl/kubectl_stop.md +++ b/docs/user-guide/kubectl/kubectl_stop.md @@ -110,7 +110,7 @@ $ kubectl stop -f path/to/resources * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.47250815 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.158360787 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_stop.md?pixel)]() diff --git a/docs/user-guide/kubectl/kubectl_version.md b/docs/user-guide/kubectl/kubectl_version.md index b5743a53c9a..1b1d2595079 100644 --- a/docs/user-guide/kubectl/kubectl_version.md +++ b/docs/user-guide/kubectl/kubectl_version.md @@ -82,7 +82,7 @@ kubectl version * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager -###### Auto generated by spf13/cobra at 2015-09-03 21:06:22.476464324 +0000 UTC +###### Auto generated by spf13/cobra at 2015-09-10 18:53:03.164581808 +0000 UTC [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_version.md?pixel)]() diff --git a/docs/user-guide/logging.md b/docs/user-guide/logging.md index 49af4f527d6..c48f9ea7c5b 100644 --- a/docs/user-guide/logging.md +++ b/docs/user-guide/logging.md @@ -58,7 +58,7 @@ spec: 'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done'] ``` -[Download example](../../examples/blog-logging/counter-pod.yaml) +[Download example](../../examples/blog-logging/counter-pod.yaml?raw=true) we can run the pod: diff --git a/docs/user-guide/services.md b/docs/user-guide/services.md index 2badff776ae..53a328e3c47 100644 --- a/docs/user-guide/services.md +++ b/docs/user-guide/services.md @@ -219,7 +219,7 @@ appropriate backend without the clients knowing anything about Kubernetes or ![Services overview diagram](services-overview.png) -By default, the choice of backend is random. Client-IP based session affinity +By default, the choice of backend is round robin. Client-IP based session affinity can be selected by setting `service.spec.sessionAffinity` to `"ClientIP"` (the default is `"None"`). 
diff --git a/docs/user-guide/simple-yaml.md b/docs/user-guide/simple-yaml.md index cf5f0c4a1fa..aa1bf743421 100644 --- a/docs/user-guide/simple-yaml.md +++ b/docs/user-guide/simple-yaml.md @@ -64,7 +64,7 @@ spec: - containerPort: 80 ``` -[Download example](pod.yaml) +[Download example](pod.yaml?raw=true) You can see your cluster's pods: @@ -116,7 +116,7 @@ spec: - containerPort: 80 ``` -[Download example](replication.yaml) +[Download example](replication.yaml?raw=true) To delete the replication controller (and the pods it created): diff --git a/docs/user-guide/walkthrough/README.md b/docs/user-guide/walkthrough/README.md index 0740e1674f2..e5fe13d13cb 100644 --- a/docs/user-guide/walkthrough/README.md +++ b/docs/user-guide/walkthrough/README.md @@ -165,7 +165,7 @@ spec: emptyDir: {} ``` -[Download example](pod-redis.yaml) +[Download example](pod-redis.yaml?raw=true) Notes: diff --git a/docs/user-guide/walkthrough/k8s201.md b/docs/user-guide/walkthrough/k8s201.md index 24f9c1d8001..229eef033d8 100644 --- a/docs/user-guide/walkthrough/k8s201.md +++ b/docs/user-guide/walkthrough/k8s201.md @@ -86,7 +86,7 @@ spec: - containerPort: 80 ``` -[Download example](pod-nginx-with-label.yaml) +[Download example](pod-nginx-with-label.yaml?raw=true) Create the labeled pod ([pod-nginx-with-label.yaml](pod-nginx-with-label.yaml)): @@ -142,7 +142,7 @@ spec: - containerPort: 80 ``` -[Download example](replication-controller.yaml) +[Download example](replication-controller.yaml?raw=true) #### Replication Controller Management @@ -195,7 +195,7 @@ spec: app: nginx ``` -[Download example](service.yaml) +[Download example](service.yaml?raw=true) #### Service Management @@ -311,7 +311,7 @@ spec: - containerPort: 80 ``` -[Download example](pod-with-http-healthcheck.yaml) +[Download example](pod-with-http-healthcheck.yaml?raw=true) For more information about health checking, see [Container Probes](../pod-states.md#container-probes). diff --git a/examples/cassandra/README.md b/examples/cassandra/README.md index ff51ddd499f..7adb5c84aea 100644 --- a/examples/cassandra/README.md +++ b/examples/cassandra/README.md @@ -100,7 +100,7 @@ spec: emptyDir: {} ``` -[Download example](cassandra-controller.yaml) +[Download example](cassandra-controller.yaml?raw=true) There are a few things to note in this description. First is that we are running the ```kubernetes/cassandra``` image. This is a standard Cassandra installation on top of Debian. However it also adds a custom [```SeedProvider```](https://svn.apache.org/repos/asf/cassandra/trunk/src/java/org/apache/cassandra/locator/SeedProvider.java) to Cassandra. In Cassandra, a ```SeedProvider``` bootstraps the gossip protocol that Cassandra uses to find other nodes. The ```KubernetesSeedProvider``` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later) @@ -131,7 +131,7 @@ spec: name: cassandra ``` -[Download example](cassandra-service.yaml) +[Download example](cassandra-service.yaml?raw=true) The important thing to note here is the ```selector```. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is ```name=cassandra```. If you look back at the Pod specification above, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service. 
@@ -241,7 +241,7 @@ spec: emptyDir: {} ``` -[Download example](cassandra-controller.yaml) +[Download example](cassandra-controller.yaml?raw=true) Most of this replication controller definition is identical to the Cassandra pod definition above, it simply gives the replication controller a recipe to use when it creates new Cassandra pods. The other differentiating parts are the ```selector``` attribute which contains the controller's selector query, and the ```replicas``` attribute which specifies the desired number of replicas, in this case 1. diff --git a/examples/celery-rabbitmq/README.md b/examples/celery-rabbitmq/README.md index 04cc646a53f..cc2a4db85c5 100644 --- a/examples/celery-rabbitmq/README.md +++ b/examples/celery-rabbitmq/README.md @@ -81,7 +81,7 @@ spec: component: rabbitmq ``` -[Download example](rabbitmq-service.yaml) +[Download example](rabbitmq-service.yaml?raw=true) To start the service, run: @@ -126,7 +126,7 @@ spec: cpu: 100m ``` -[Download example](rabbitmq-controller.yaml) +[Download example](rabbitmq-controller.yaml?raw=true) Running `$ kubectl create -f examples/celery-rabbitmq/rabbitmq-controller.yaml` brings up a replication controller that ensures one pod exists which is running a RabbitMQ instance. @@ -167,7 +167,7 @@ spec: cpu: 100m ``` -[Download example](celery-controller.yaml) +[Download example](celery-controller.yaml?raw=true) There are several things to point out here... @@ -238,7 +238,7 @@ spec: type: LoadBalancer ``` -[Download example](flower-service.yaml) +[Download example](flower-service.yaml?raw=true) It is marked as external (LoadBalanced). However on many platforms you will have to add an explicit firewall rule to open port 5555. @@ -279,7 +279,7 @@ spec: cpu: 100m ``` -[Download example](flower-controller.yaml) +[Download example](flower-controller.yaml?raw=true) This will bring up a new pod with Flower installed and port 5555 (Flower's default port) exposed through the service endpoint. This image uses the following command to start Flower: diff --git a/examples/guestbook/README.md b/examples/guestbook/README.md index f6e71023705..c4df7774807 100644 --- a/examples/guestbook/README.md +++ b/examples/guestbook/README.md @@ -100,7 +100,7 @@ spec: - containerPort: 6379 ``` -[Download example](redis-master-controller.yaml) +[Download example](redis-master-controller.yaml?raw=true) Change to the `/examples/guestbook` directory if you're not already there. Create the redis master pod in your Kubernetes cluster by running: @@ -227,7 +227,7 @@ spec: name: redis-master ``` -[Download example](redis-master-service.yaml) +[Download example](redis-master-service.yaml?raw=true) Create the service by running: @@ -316,7 +316,7 @@ spec: - containerPort: 6379 ``` -[Download example](redis-slave-controller.yaml) +[Download example](redis-slave-controller.yaml?raw=true) and create the replication controller by running: @@ -328,7 +328,7 @@ replicationcontrollers/redis-slave $ kubectl get rc CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS redis-master master redis name=redis-master 1 -redis-slave slave kubernetes/redis-slave:v2 name=redis-slave 2 +redis-slave slave gcr.io/google_samples/gb-redisslave:v1 name=redis-slave 2 ``` Once the replication controller is up, you can list the pods in the cluster, to verify that the master and slaves are running. 
You should see a list that includes something like the following: @@ -367,7 +367,7 @@ spec: name: redis-slave ``` -[Download example](redis-slave-service.yaml) +[Download example](redis-slave-service.yaml?raw=true) This time the selector for the service is `name=redis-slave`, because that identifies the pods running redis slaves. It may also be helpful to set labels on your service itself as we've done here to make it easy to locate them with the `kubectl get services -l "label=value"` command. @@ -413,7 +413,7 @@ spec: spec: containers: - name: php-redis - image: gcr.io/google_samples/gb-frontend:v2 + image: gcr.io/google_samples/gb-frontend:v3 env: - name: GET_HOSTS_FROM value: dns @@ -426,7 +426,7 @@ spec: - containerPort: 80 ``` -[Download example](frontend-controller.yaml) +[Download example](frontend-controller.yaml?raw=true) Using this file, you can turn up your frontend with: @@ -441,9 +441,9 @@ Then, list all your replication controllers: ```console $ kubectl get rc CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS -frontend php-redis kubernetes/example-guestbook-php-redis:v2 name=frontend 3 +frontend php-redis kubernetes/example-guestbook-php-redis:v3 name=frontend 3 redis-master master redis name=redis-master 1 -redis-slave slave kubernetes/redis-slave:v2 name=redis-slave 2 +redis-slave slave gcr.io/google_samples/gb-redisslave:v1 name=redis-slave 2 ``` Once it's up (again, it may take up to thirty seconds to create the pods) you can list the pods in the cluster, to verify that the master, slaves and frontends are all running. You should see a list that includes something like the following: @@ -539,7 +539,7 @@ spec: name: frontend ``` -[Download example](frontend-service.yaml) +[Download example](frontend-service.yaml?raw=true) #### Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific) diff --git a/examples/guestbook/frontend-controller.yaml b/examples/guestbook/frontend-controller.yaml index ae8d24986bc..1a48f95b346 100644 --- a/examples/guestbook/frontend-controller.yaml +++ b/examples/guestbook/frontend-controller.yaml @@ -15,7 +15,7 @@ spec: spec: containers: - name: php-redis - image: gcr.io/google_samples/gb-frontend:v2 + image: gcr.io/google_samples/gb-frontend:v3 env: - name: GET_HOSTS_FROM value: dns diff --git a/examples/hazelcast/README.md b/examples/hazelcast/README.md index 5ae17f5c696..269755f45f2 100644 --- a/examples/hazelcast/README.md +++ b/examples/hazelcast/README.md @@ -83,7 +83,7 @@ spec: name: hazelcast ``` -[Download example](hazelcast-service.yaml) +[Download example](hazelcast-service.yaml?raw=true) The important thing to note here is the `selector`. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is `name: hazelcast`. If you look at the Replication Controller specification below, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service. @@ -138,7 +138,7 @@ spec: name: hazelcast ``` -[Download example](hazelcast-controller.yaml) +[Download example](hazelcast-controller.yaml?raw=true) There are a few things to note in this description. First is that we are running the `quay.io/pires/hazelcast-kubernetes` image, tag `0.5`. This is a `busybox` installation with JRE 8 Update 45. However it also adds a custom [`application`](https://github.com/pires/hazelcast-kubernetes-bootstrapper) that finds any Hazelcast nodes in the cluster and bootstraps an Hazelcast instance accordingly. 
The `HazelcastDiscoveryController` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later). diff --git a/examples/mysql-wordpress-pd/README.md b/examples/mysql-wordpress-pd/README.md index 7a496eeedc4..34d2fd1a3da 100644 --- a/examples/mysql-wordpress-pd/README.md +++ b/examples/mysql-wordpress-pd/README.md @@ -131,7 +131,7 @@ spec: fsType: ext4 ``` -[Download example](mysql.yaml) +[Download example](mysql.yaml?raw=true) Note that we've defined a volume mount for `/var/lib/mysql`, and specified a volume that uses the persistent disk (`mysql-disk`) that you created. @@ -186,7 +186,7 @@ spec: name: mysql ``` -[Download example](mysql-service.yaml) +[Download example](mysql-service.yaml?raw=true) Start the service like this: @@ -241,7 +241,7 @@ spec: fsType: ext4 ``` -[Download example](wordpress.yaml) +[Download example](wordpress.yaml?raw=true) Create the pod: @@ -282,7 +282,7 @@ spec: type: LoadBalancer ``` -[Download example](wordpress-service.yaml) +[Download example](wordpress-service.yaml?raw=true) Note the `type: LoadBalancer` setting. This will set up the wordpress service behind an external IP. diff --git a/examples/phabricator/README.md b/examples/phabricator/README.md index 4aa15260ab4..d9fca4bbe16 100644 --- a/examples/phabricator/README.md +++ b/examples/phabricator/README.md @@ -98,7 +98,7 @@ To start Phabricator server use the file [`examples/phabricator/phabricator-cont } ``` -[Download example](phabricator-controller.json) +[Download example](phabricator-controller.json?raw=true) Create the phabricator pod in your Kubernetes cluster by running: @@ -188,7 +188,7 @@ To automate this process and make sure that a proper host is authorized even if } ``` -[Download example](authenticator-controller.json) +[Download example](authenticator-controller.json?raw=true) To create the pod run: @@ -237,7 +237,7 @@ Use the file [`examples/phabricator/phabricator-service.json`](phabricator-servi } ``` -[Download example](phabricator-service.json) +[Download example](phabricator-service.json?raw=true) To create the service run: diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index 56f8b8e6728..bec74401388 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -138,6 +138,7 @@ GCE_PARALLEL_FLAKY_TESTS=( "Services.*identically\snamed" "Services.*release.*load\sbalancer" "Services.*endpoint" + "Services.*up\sand\sdown" ) # Tests that should not run on soak cluster. 
diff --git a/hack/local-up-cluster.sh b/hack/local-up-cluster.sh index 8065e692026..dda072a8360 100755 --- a/hack/local-up-cluster.sh +++ b/hack/local-up-cluster.sh @@ -212,7 +212,7 @@ function start_apiserver { fi runtime_config="" if [[ -n "${RUNTIME_CONFIG}" ]]; then - runtime_config="--runtime-config=\"${RUNTIME_CONFIG}\"" + runtime_config="--runtime-config=${RUNTIME_CONFIG}" fi APISERVER_LOG=/tmp/kube-apiserver.log diff --git a/hack/verify-flags/exceptions.txt b/hack/verify-flags/exceptions.txt index 5036ad26938..a8816eff7e5 100644 --- a/hack/verify-flags/exceptions.txt +++ b/hack/verify-flags/exceptions.txt @@ -69,7 +69,7 @@ examples/elasticsearch/production_cluster/README.md: "cluster_name" : "myesdb", examples/elasticsearch/production_cluster/README.md: "cluster_name" : "myesdb", hack/lib/logging.sh: local source_file=${BASH_SOURCE[$frame_no]} hack/lib/logging.sh: local source_file=${BASH_SOURCE[$stack_skip]} -hack/local-up-cluster.sh: runtime_config="--runtime-config=\"${RUNTIME_CONFIG}\"" +hack/local-up-cluster.sh: runtime_config="--runtime-config=${RUNTIME_CONFIG}" hack/local-up-cluster.sh: runtime_config="" pkg/cloudprovider/providers/vagrant/vagrant_test.go: testSaltMinionsResponse = []byte(`{ "return": [{"kubernetes-minion-1": {"kernel": "Linux", "domain": "", "zmqversion": "3.2.4", "kernelrelease": "3.11.10-301.fc20.x86_64", "pythonpath": ["/usr/bin", "/usr/lib64/python27.zip", "/usr/lib64/python2.7", "/usr/lib64/python2.7/plat-linux2", "/usr/lib64/python2.7/lib-tk", "/usr/lib64/python2.7/lib-old", "/usr/lib64/python2.7/lib-dynload", "/usr/lib64/python2.7/site-packages", "/usr/lib/python2.7/site-packages"], "etcd_servers": "10.245.1.2", "ip_interfaces": {"lo": ["127.0.0.1"], "docker0": ["172.17.42.1"], "enp0s8": ["10.245.2.2"], "p2p1": ["10.0.2.15"]}, "shell": "/bin/sh", "mem_total": 491, "saltversioninfo": [2014, 1, 7], "osmajorrelease": ["20"], "node_ip": "10.245.2.2", "id": "kubernetes-minion-1", "osrelease": "20", "ps": "ps -efH", "server_id": 1005530826, "num_cpus": 1, "hwaddr_interfaces": {"lo": "00:00:00:00:00:00", "docker0": "56:84:7a:fe:97:99", "enp0s8": "08:00:27:17:c5:0f", "p2p1": "08:00:27:96:96:e1"}, "virtual": "VirtualBox", "osfullname": "Fedora", "master": "kubernetes-master", "ipv4": ["10.0.2.15", "10.245.2.2", "127.0.0.1", "172.17.42.1"], "ipv6": ["::1", "fe80::a00:27ff:fe17:c50f", "fe80::a00:27ff:fe96:96e1"], "cpu_flags": ["fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "clflush", "mmx", "fxsr", "sse", "sse2", "syscall", "nx", "rdtscp", "lm", "constant_tsc", "rep_good", "nopl", "pni", "monitor", "ssse3", "lahf_lm"], "localhost": "kubernetes-minion-1", "lsb_distrib_id": "Fedora", "fqdn_ip4": ["127.0.0.1"], "fqdn_ip6": [], "nodename": "kubernetes-minion-1", "saltversion": "2014.1.7", "saltpath": "/usr/lib/python2.7/site-packages/salt", "pythonversion": [2, 7, 5, "final", 0], "host": "kubernetes-minion-1", "os_family": "RedHat", "oscodename": "Heisenbug", "defaultencoding": "UTF-8", "osfinger": "Fedora-20", "roles": ["kubernetes-pool"], "num_gpus": 1, "cpu_model": "Intel(R) Core(TM) i7-4600U CPU @ 2.10GHz", "fqdn": "kubernetes-minion-1", "osarch": "x86_64", "cpuarch": "x86_64", "gpus": [{"model": "VirtualBox Graphics Adapter", "vendor": "unknown"}], "path": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin", "os": "Fedora", "defaultlanguage": "en_US"}}]}`) pkg/kubelet/qos/memory_policy_test.go: t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOomScoreAdj, 
test.highOomScoreAdj, oomScoreAdj) diff --git a/hack/verify-flags/known-flags.txt b/hack/verify-flags/known-flags.txt index 32ac4143353..9fe4e91dc48 100644 --- a/hack/verify-flags/known-flags.txt +++ b/hack/verify-flags/known-flags.txt @@ -1,269 +1,274 @@ -accept-hosts -accept-paths -account-for-pod-resources -admission-control -admission-control-config-file -advertise-address -advertised-address -algorithm-provider -all-namespaces -allocate-node-cidrs -allow-privileged -api-prefix -api-servers -api-token -api-version -authorization-mode -authorization-policy-file -auth-path -basic-auth-file -bench-pods -bench-quiet -bench-tasks -bench-workers -bind-address -bind-pods-burst -bind-pods-qps -cadvisor-port -cert-dir -certificate-authority -cgroup-root -chaos-chance -cleanup-iptables -client-ca-file -client-certificate -client-key -cloud-config -cloud-provider -cluster-cidr -cluster-dns -cluster-domain -cluster-name -cluster-tag -concurrent-endpoint-syncs -configure-cbr0 -contain-pod-resources -container-port -container-runtime -cors-allowed-origins -create-external-load-balancer -current-release-pr -current-replicas -default-container-cpu-limit -default-container-mem-limit -delay-shutdown -deleting-pods-burst -deleting-pods-qps -deployment-label-key -dest-file -disable-filter -docker-endpoint -docker-exec-handler -dockercfg-path -driver-port -dry-run -duration-sec -e2e-output-dir -enable-debugging-handlers -enable-horizontal-pod-autoscaler -enable-server -etcd-config -etcd-prefix -etcd-server -etcd-servers -event-ttl -executor-bindall -executor-logv -executor-path -executor-suicide-timeout -experimental-keystone-url -experimental-prefix -external-hostname -external-ip -failover-timeout -file-check-frequency -file-suffix -forward-services -framework-name -framework-weburi -func-dest -fuzz-iters -gce-project -gce-zone -gke-cluster -google-json-key -grace-period -ha-domain -healthz-bind-address -healthz-port -horizontal-pod-autoscaler-sync-period -hostname-override -host-network-sources -http-check-frequency -http-port -ignore-not-found -image-gc-high-threshold -image-gc-low-threshold -insecure-bind-address -insecure-port -insecure-skip-tls-verify -iptables-sync-period -ir-data-source -ir-dbname -ir-influxdb-host -ir-password -ir-user -jenkins-host -jenkins-jobs -km-path -kubectl-path -kubelet-cadvisor-port -kubelet-certificate-authority -kubelet-client-certificate -kubelet-client-key -kubelet-docker-endpoint -kubelet-host-network-sources -kubelet-https -kubelet-network-plugin -kubelet-pod-infra-container-image -kubelet-port -kubelet-root-dir -kubelet-sync-frequency -kubelet-timeout -kube-master -label-columns -last-release-pr -legacy-userspace-proxy -log-flush-frequency -long-running-request-regexp -low-diskspace-threshold-mb -manifest-url -manifest-url-header -masquerade-all -master-service-namespace -max-concurrency -max-connection-bytes-per-sec -maximum-dead-containers -maximum-dead-containers-per-container -max-log-age -max-log-backups -max-log-size -max-outgoing-burst -max-outgoing-qps -max-pods -max-requests-inflight -mesos-authentication-principal -mesos-authentication-provider -mesos-authentication-secret-file -mesos-cgroup-prefix -mesos-executor-cpus -mesos-executor-mem -mesos-master -mesos-role -mesos-user -minimum-container-ttl-duration -minion-max-log-age -minion-max-log-backups -minion-max-log-size -minion-path-override -min-pr-number -min-request-timeout -namespace-sync-period -network-plugin -network-plugin-dir -node-instance-group -node-monitor-grace-period -node-monitor-period 
-node-startup-grace-period -node-status-update-frequency -node-sync-period -no-headers -num-nodes -oidc-ca-file -oidc-client-id -oidc-issuer-url -oidc-username-claim -oom-score-adj -output-version -out-version -path-override -pod-cidr -pod-eviction-timeout -pod-infra-container-image -policy-config-file -poll-interval -portal-net -private-mountns -prom-push-gateway -proxy-bindall -proxy-logv -proxy-port-range -public-address-override -pvclaimbinder-sync-period -read-only-port -really-crash-for-testing -reconcile-cooldown -reconcile-interval -register-node -register-retry-count -registry-burst -registry-qps -reject-methods -reject-paths -repo-root -report-dir -required-contexts -resolv-conf -resource-container -resource-quota-sync-period -resource-version -rkt-path -root-ca-file -root-dir -run-proxy -runtime-config -scheduler-config -secure-port -service-account-key-file -service-account-lookup -service-account-private-key-file -service-address -service-cluster-ip-range -service-node-port-range -service-node-ports -service-sync-period -session-affinity -show-all -shutdown-fd -shutdown-fifo -skip-munges -sort-by -source-file -ssh-keyfile -ssh-user -static-pods-config -stats-port -storage-version -streaming-connection-idle-timeout -suicide-timeout -sync-frequency -system-container -target-port -tcp-services -tls-cert-file -tls-private-key-file -token-auth-file -ttl-secs -type-src -unix-socket -update-period -upgrade-target -use-kubernetes-cluster-service -user-whitelist -watch-cache -watch-only -whitelist-override-label -www-prefix -retry_time -file_content_in_loop -cpu-cfs-quota +accept-hosts +accept-paths +account-for-pod-resources +admission-control +admission-control-config-file +advertise-address +advertised-address +algorithm-provider +all-namespaces +allocate-node-cidrs +allow-privileged +api-burst +api-prefix +api-rate +api-servers +api-token +api-version +authorization-mode +authorization-policy-file +auth-path +basic-auth-file +bench-pods +bench-quiet +bench-tasks +bench-workers +bind-address +bind-pods-burst +bind-pods-qps +cadvisor-port +cert-dir +certificate-authority +cgroup-root +chaos-chance +cleanup-iptables +client-ca-file +client-certificate +client-key +cloud-config +cloud-provider +cluster-cidr +cluster-dns +cluster-domain +cluster-name +cluster-tag +concurrent-endpoint-syncs +configure-cbr0 +contain-pod-resources +container-port +container-runtime +cors-allowed-origins +create-external-load-balancer +current-release-pr +current-replicas +default-container-cpu-limit +default-container-mem-limit +delay-shutdown +deleting-pods-burst +deleting-pods-qps +deployment-label-key +dest-file +disable-filter +docker-endpoint +docker-exec-handler +dockercfg-path +driver-port +dry-run +duration-sec +e2e-output-dir +enable-debugging-handlers +enable-horizontal-pod-autoscaler +enable-server +etcd-config +etcd-prefix +etcd-server +etcd-servers +event-burst +event-qps +event-ttl +executor-bindall +executor-logv +executor-path +executor-suicide-timeout +experimental-keystone-url +experimental-prefix +external-hostname +external-ip +failover-timeout +file-check-frequency +file-suffix +forward-services +framework-name +framework-weburi +func-dest +fuzz-iters +gce-project +gce-zone +gke-cluster +google-json-key +grace-period +ha-domain +healthz-bind-address +healthz-port +horizontal-pod-autoscaler-sync-period +hostname-override +host-network-sources +http-check-frequency +http-port +ignore-not-found +image-gc-high-threshold +image-gc-low-threshold +insecure-bind-address +insecure-port 
+insecure-skip-tls-verify +iptables-sync-period +ir-data-source +ir-dbname +ir-influxdb-host +ir-password +ir-user +jenkins-host +jenkins-jobs +km-path +kubectl-path +kubelet-cadvisor-port +kubelet-certificate-authority +kubelet-client-certificate +kubelet-client-key +kubelet-docker-endpoint +kubelet-host-network-sources +kubelet-https +kubelet-network-plugin +kubelet-pod-infra-container-image +kubelet-port +kubelet-root-dir +kubelet-sync-frequency +kubelet-timeout +kube-master +label-columns +last-release-pr +legacy-userspace-proxy +log-flush-frequency +long-running-request-regexp +low-diskspace-threshold-mb +manifest-url +manifest-url-header +masquerade-all +master-service-namespace +max-concurrency +max-connection-bytes-per-sec +maximum-dead-containers +maximum-dead-containers-per-container +max-log-age +max-log-backups +max-log-size +max-outgoing-burst +max-outgoing-qps +max-pods +max-requests-inflight +mesos-authentication-principal +mesos-authentication-provider +mesos-authentication-secret-file +mesos-cgroup-prefix +mesos-executor-cpus +mesos-executor-mem +mesos-master +mesos-role +mesos-user +minimum-container-ttl-duration +minion-max-log-age +minion-max-log-backups +minion-max-log-size +minion-path-override +min-pr-number +min-request-timeout +namespace-sync-period +network-plugin +network-plugin-dir +node-instance-group +node-monitor-grace-period +node-monitor-period +node-startup-grace-period +node-status-update-frequency +node-sync-period +no-headers +num-nodes +oidc-ca-file +oidc-client-id +oidc-issuer-url +oidc-username-claim +oom-score-adj +output-version +out-version +path-override +pod-cidr +pod-eviction-timeout +pod-infra-container-image +pod-running +policy-config-file +poll-interval +portal-net +private-mountns +prom-push-gateway +proxy-bindall +proxy-logv +proxy-port-range +public-address-override +pvclaimbinder-sync-period +read-only-port +really-crash-for-testing +reconcile-cooldown +reconcile-interval +register-node +register-retry-count +registry-burst +registry-qps +reject-methods +reject-paths +repo-root +report-dir +required-contexts +resolv-conf +resource-container +resource-quota-sync-period +resource-version +rkt-path +root-ca-file +root-dir +run-proxy +runtime-config +scheduler-config +secure-port +service-account-key-file +service-account-lookup +service-account-private-key-file +service-address +service-cluster-ip-range +service-node-port-range +service-node-ports +service-sync-period +session-affinity +show-all +shutdown-fd +shutdown-fifo +skip-munges +sort-by +source-file +ssh-keyfile +ssh-user +static-pods-config +stats-port +storage-version +streaming-connection-idle-timeout +suicide-timeout +sync-frequency +system-container +target-port +tcp-services +tls-cert-file +tls-private-key-file +token-auth-file +ttl-secs +type-src +unix-socket +update-period +upgrade-target +use-kubernetes-cluster-service +user-whitelist +watch-cache +watch-only +whitelist-override-label +www-prefix +retry_time +file_content_in_loop +cpu-cfs-quota diff --git a/pkg/admission/handler.go b/pkg/admission/handler.go index fe79285fd6a..a0d26c46971 100644 --- a/pkg/admission/handler.go +++ b/pkg/admission/handler.go @@ -17,13 +17,13 @@ limitations under the License. 
package admission import ( - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // Handler is a base for admission control handlers that // support a predefined set of operations type Handler struct { - operations util.StringSet + operations sets.String } // Handles returns true for methods that this handler supports @@ -34,7 +34,7 @@ func (h *Handler) Handles(operation Operation) bool { // NewHandler creates a new base handler that handles the passed // in operations func NewHandler(ops ...Operation) *Handler { - operations := util.NewStringSet() + operations := sets.NewString() for _, op := range ops { operations.Insert(string(op)) } diff --git a/pkg/admission/plugins.go b/pkg/admission/plugins.go index 7824396e619..3c9e9f1bc0d 100644 --- a/pkg/admission/plugins.go +++ b/pkg/admission/plugins.go @@ -37,7 +37,7 @@ var ( plugins = make(map[string]Factory) ) -// GetPlugins enumerates the +// GetPlugins enumerates the names of all registered plugins. func GetPlugins() []string { pluginsMutex.Lock() defer pluginsMutex.Unlock() @@ -48,7 +48,7 @@ func GetPlugins() []string { return keys } -// RegisterPlugin registers a plugin Factory by name. This +// RegisterPlugin registers a plugin Factory by name. This // is expected to happen during app startup. func RegisterPlugin(name string, plugin Factory) { pluginsMutex.Lock() @@ -61,11 +61,10 @@ func RegisterPlugin(name string, plugin Factory) { plugins[name] = plugin } -// GetPlugin creates an instance of the named plugin, or nil if -// the name is not known. The error return is only used if the named provider -// was known but failed to initialize. The config parameter specifies the -// io.Reader handler of the configuration file for the cloud provider, or nil -// for no configuration. +// GetPlugin creates an instance of the named plugin, or nil if the name is not +// known. The error is returned only when the named provider was known but failed +// to initialize. The config parameter specifies the io.Reader handler of the +// configuration file for the cloud provider, or nil for no configuration. func GetPlugin(name string, client client.Interface, config io.Reader) (Interface, error) { pluginsMutex.Lock() defer pluginsMutex.Unlock() diff --git a/pkg/api/helpers.go b/pkg/api/helpers.go index 56e9836e306..931ff3b84b1 100644 --- a/pkg/api/helpers.go +++ b/pkg/api/helpers.go @@ -28,6 +28,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "github.com/davecgh/go-spew/spew" ) @@ -77,7 +78,7 @@ var Semantic = conversion.EqualitiesOrDie( }, ) -var standardResources = util.NewStringSet( +var standardResources = sets.NewString( string(ResourceMemory), string(ResourceCPU), string(ResourcePods), @@ -111,7 +112,7 @@ func IsServiceIPRequested(service *Service) bool { return service.Spec.ClusterIP == "" } -var standardFinalizers = util.NewStringSet( +var standardFinalizers = sets.NewString( string(FinalizerKubernetes)) func IsStandardFinalizerName(str string) bool { diff --git a/pkg/api/latest/latest.go b/pkg/api/latest/latest.go index 39bd0a319a9..2e53b4f00f3 100644 --- a/pkg/api/latest/latest.go +++ b/pkg/api/latest/latest.go @@ -25,7 +25,7 @@ import ( "k8s.io/kubernetes/pkg/api/registered" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // Version is the string that represents the current external default version. 
@@ -79,7 +79,7 @@ func init() { // the list of kinds that are scoped at the root of the api hierarchy // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := util.NewStringSet( + rootScoped := sets.NewString( "Node", "Minion", "Namespace", @@ -87,7 +87,7 @@ func init() { ) // these kinds should be excluded from the list of resources - ignoredKinds := util.NewStringSet( + ignoredKinds := sets.NewString( "ListOptions", "DeleteOptions", "Status", diff --git a/pkg/api/mapper.go b/pkg/api/mapper.go index dba16d36af9..600973aa093 100644 --- a/pkg/api/mapper.go +++ b/pkg/api/mapper.go @@ -20,7 +20,7 @@ import ( "strings" "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) var RESTMapper meta.RESTMapper @@ -34,7 +34,7 @@ func RegisterRESTMapper(m meta.RESTMapper) { } func NewDefaultRESTMapper(group string, versions []string, interfacesFunc meta.VersionInterfacesFunc, - importPathPrefix string, ignoredKinds, rootScoped util.StringSet) *meta.DefaultRESTMapper { + importPathPrefix string, ignoredKinds, rootScoped sets.String) *meta.DefaultRESTMapper { mapper := meta.NewDefaultRESTMapper(group, versions, interfacesFunc) // enumerate all supported versions, get the kinds, and register with the mapper how to address diff --git a/pkg/api/resource/quantity.go b/pkg/api/resource/quantity.go index ef8eaef9e6b..577d5b6093c 100644 --- a/pkg/api/resource/quantity.go +++ b/pkg/api/resource/quantity.go @@ -301,6 +301,27 @@ func (q *Quantity) String() string { return number + string(suffix) } +// Cmp compares q and y and returns: +// +// -1 if q < y +// 0 if q == y +// +1 if q > y +// +func (q *Quantity) Cmp(y Quantity) int { + num1 := q.Value() + num2 := y.Value() + if num1 < MaxMilliValue && num2 < MaxMilliValue { + num1 = q.MilliValue() + num2 = y.MilliValue() + } + if num1 < num2 { + return -1 + } else if num1 > num2 { + return 1 + } + return 0 +} + func (q *Quantity) Add(y Quantity) error { if q.Format != y.Format { return fmt.Errorf("format mismatch: %v vs. %v", q.Format, y.Format) diff --git a/pkg/api/resource/quantity_test.go b/pkg/api/resource/quantity_test.go index 70f4836bb1d..da8858ea97d 100644 --- a/pkg/api/resource/quantity_test.go +++ b/pkg/api/resource/quantity_test.go @@ -77,6 +77,26 @@ func TestQuantityCanocicalizeZero(t *testing.T) { } } +func TestQuantityCmp(t *testing.T) { + table := []struct { + x string + y string + expect int + }{ + {"0", "0", 0}, + {"100m", "50m", 1}, + {"50m", "100m", -1}, + {"10000T", "100Gi", 1}, + } + for _, testCase := range table { + q1 := MustParse(testCase.x) + q2 := MustParse(testCase.y) + if result := q1.Cmp(q2); result != testCase.expect { + t.Errorf("X: %v, Y: %v, Expected: %v, Actual: %v", testCase.x, testCase.y, testCase.expect, result) + } + } +} + func TestQuantityParse(t *testing.T) { table := []struct { input string diff --git a/pkg/api/serialization_test.go b/pkg/api/serialization_test.go index 686a33f63d9..beccf9604b5 100644 --- a/pkg/api/serialization_test.go +++ b/pkg/api/serialization_test.go @@ -31,6 +31,7 @@ import ( apitesting "k8s.io/kubernetes/pkg/api/testing" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" _ "k8s.io/kubernetes/pkg/expapi" _ "k8s.io/kubernetes/pkg/expapi/v1" @@ -87,7 +88,7 @@ func roundTrip(t *testing.T, codec runtime.Codec, item runtime.Object) { // roundTripSame verifies the same source object is tested in all API versions. 
func roundTripSame(t *testing.T, item runtime.Object, except ...string) { - set := util.NewStringSet(except...) + set := sets.NewString(except...) seed := rand.Int63() fuzzInternalObject(t, "", item, seed) version := testapi.Default.Version() @@ -119,8 +120,8 @@ func TestList(t *testing.T) { roundTripSame(t, item) } -var nonRoundTrippableTypes = util.NewStringSet() -var nonInternalRoundTrippableTypes = util.NewStringSet("List", "ListOptions", "PodExecOptions", "PodAttachOptions") +var nonRoundTrippableTypes = sets.NewString() +var nonInternalRoundTrippableTypes = sets.NewString("List", "ListOptions", "PodExecOptions", "PodAttachOptions") var nonRoundTrippableTypesByVersion = map[string][]string{} func TestRoundTripTypes(t *testing.T) { diff --git a/pkg/api/validation/validation.go b/pkg/api/validation/validation.go index 43ea1e7e1be..1829e7a8dbf 100644 --- a/pkg/api/validation/validation.go +++ b/pkg/api/validation/validation.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util" errs "k8s.io/kubernetes/pkg/util/fielderrors" + "k8s.io/kubernetes/pkg/util/sets" "github.com/golang/glog" ) @@ -307,10 +308,10 @@ func ValidateObjectMetaUpdate(new, old *api.ObjectMeta) errs.ValidationErrorList return allErrs } -func validateVolumes(volumes []api.Volume) (util.StringSet, errs.ValidationErrorList) { +func validateVolumes(volumes []api.Volume) (sets.String, errs.ValidationErrorList) { allErrs := errs.ValidationErrorList{} - allNames := util.StringSet{} + allNames := sets.String{} for i, vol := range volumes { el := validateSource(&vol.VolumeSource).Prefix("source") if len(vol.Name) == 0 { @@ -497,7 +498,7 @@ func validateGlusterfs(glusterfs *api.GlusterfsVolumeSource) errs.ValidationErro return allErrs } -var validDownwardAPIFieldPathExpressions = util.NewStringSet("metadata.name", "metadata.namespace", "metadata.labels", "metadata.annotations") +var validDownwardAPIFieldPathExpressions = sets.NewString("metadata.name", "metadata.namespace", "metadata.labels", "metadata.annotations") func validateDownwardAPIVolumeSource(downwardAPIVolume *api.DownwardAPIVolumeSource) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} @@ -688,12 +689,12 @@ func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *api.PersistentVol return allErrs } -var supportedPortProtocols = util.NewStringSet(string(api.ProtocolTCP), string(api.ProtocolUDP)) +var supportedPortProtocols = sets.NewString(string(api.ProtocolTCP), string(api.ProtocolUDP)) func validatePorts(ports []api.ContainerPort) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} - allNames := util.StringSet{} + allNames := sets.String{} for i, port := range ports { pErrs := errs.ValidationErrorList{} if len(port.Name) > 0 { @@ -739,7 +740,7 @@ func validateEnv(vars []api.EnvVar) errs.ValidationErrorList { return allErrs } -var validFieldPathExpressionsEnv = util.NewStringSet("metadata.name", "metadata.namespace", "status.podIP") +var validFieldPathExpressionsEnv = sets.NewString("metadata.name", "metadata.namespace", "status.podIP") func validateEnvVarValueFrom(ev api.EnvVar) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} @@ -763,7 +764,7 @@ func validateEnvVarValueFrom(ev api.EnvVar) errs.ValidationErrorList { return allErrs } -func validateObjectFieldSelector(fs *api.ObjectFieldSelector, expressions *util.StringSet) errs.ValidationErrorList { +func validateObjectFieldSelector(fs *api.ObjectFieldSelector, expressions *sets.String) errs.ValidationErrorList { allErrs := 
errs.ValidationErrorList{} if fs.APIVersion == "" { @@ -782,7 +783,7 @@ func validateObjectFieldSelector(fs *api.ObjectFieldSelector, expressions *util. return allErrs } -func validateVolumeMounts(mounts []api.VolumeMount, volumes util.StringSet) errs.ValidationErrorList { +func validateVolumeMounts(mounts []api.VolumeMount, volumes sets.String) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} for i, mnt := range mounts { @@ -818,7 +819,7 @@ func validateProbe(probe *api.Probe) errs.ValidationErrorList { // AccumulateUniqueHostPorts extracts each HostPort of each Container, // accumulating the results and returning an error if any ports conflict. -func AccumulateUniqueHostPorts(containers []api.Container, accumulator *util.StringSet) errs.ValidationErrorList { +func AccumulateUniqueHostPorts(containers []api.Container, accumulator *sets.String) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} for ci, ctr := range containers { @@ -843,7 +844,7 @@ func AccumulateUniqueHostPorts(containers []api.Container, accumulator *util.Str // checkHostPortConflicts checks for colliding Port.HostPort values across // a slice of containers. func checkHostPortConflicts(containers []api.Container) errs.ValidationErrorList { - allPorts := util.StringSet{} + allPorts := sets.String{} return AccumulateUniqueHostPorts(containers, &allPorts) } @@ -865,7 +866,7 @@ func validateHTTPGetAction(http *api.HTTPGetAction) errs.ValidationErrorList { } else if http.Port.Kind == util.IntstrString && !util.IsValidPortName(http.Port.StrVal) { allErrors = append(allErrors, errs.NewFieldInvalid("port", http.Port.StrVal, portNameErrorMsg)) } - supportedSchemes := util.NewStringSet(string(api.URISchemeHTTP), string(api.URISchemeHTTPS)) + supportedSchemes := sets.NewString(string(api.URISchemeHTTP), string(api.URISchemeHTTPS)) if !supportedSchemes.Has(string(http.Scheme)) { allErrors = append(allErrors, errs.NewFieldInvalid("scheme", http.Scheme, fmt.Sprintf("must be one of %v", supportedSchemes.List()))) } @@ -930,14 +931,14 @@ func validatePullPolicy(ctr *api.Container) errs.ValidationErrorList { return allErrors } -func validateContainers(containers []api.Container, volumes util.StringSet) errs.ValidationErrorList { +func validateContainers(containers []api.Container, volumes sets.String) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} if len(containers) == 0 { return append(allErrs, errs.NewFieldRequired("")) } - allNames := util.StringSet{} + allNames := sets.String{} for i, ctr := range containers { cErrs := errs.ValidationErrorList{} if len(ctr.Name) == 0 { @@ -1130,8 +1131,8 @@ func ValidatePodTemplateUpdate(newPod, oldPod *api.PodTemplate) errs.ValidationE return allErrs } -var supportedSessionAffinityType = util.NewStringSet(string(api.ServiceAffinityClientIP), string(api.ServiceAffinityNone)) -var supportedServiceType = util.NewStringSet(string(api.ServiceTypeClusterIP), string(api.ServiceTypeNodePort), +var supportedSessionAffinityType = sets.NewString(string(api.ServiceAffinityClientIP), string(api.ServiceAffinityNone)) +var supportedServiceType = sets.NewString(string(api.ServiceTypeClusterIP), string(api.ServiceTypeNodePort), string(api.ServiceTypeLoadBalancer)) // ValidateService tests if required fields in the service are set. 
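The LimitRange validation hunks further below stop comparing limit.Min/Max/Default through Quantity.Value() and use the new Quantity.Cmp added earlier in this diff. The motivation: Value() collapses sub-unit quantities to whole units, so milli-scale resources such as "50m" and "100m" become indistinguishable to an int64 comparison, while Cmp falls back to MilliValue() whenever both operands are below MaxMilliValue. A small illustrative sketch of that behavior (not part of the change itself):

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/api/resource"
    )

    func main() {
        small := resource.MustParse("50m")
        big := resource.MustParse("100m")

        // Value() reduces both quantities to the same whole-unit integer,
        // so a comparison of Value() results would call them equal.
        fmt.Println(small.Value() == big.Value()) // true

        // Cmp switches to MilliValue() while both operands are below
        // MaxMilliValue, preserving sub-unit precision: 100m > 50m.
        fmt.Println(big.Cmp(small)) // 1
    }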
@@ -1150,7 +1151,7 @@ func ValidateService(service *api.Service) errs.ValidationErrorList { } } } - allPortNames := util.StringSet{} + allPortNames := sets.String{} for i := range service.Spec.Ports { allErrs = append(allErrs, validateServicePort(&service.Spec.Ports[i], len(service.Spec.Ports) > 1, &allPortNames).PrefixIndex(i).Prefix("spec.ports")...) } @@ -1220,7 +1221,7 @@ func ValidateService(service *api.Service) errs.ValidationErrorList { return allErrs } -func validateServicePort(sp *api.ServicePort, requireName bool, allNames *util.StringSet) errs.ValidationErrorList { +func validateServicePort(sp *api.ServicePort, requireName bool, allNames *sets.String) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} if requireName && sp.Name == "" { @@ -1441,80 +1442,76 @@ func ValidateLimitRange(limitRange *api.LimitRange) errs.ValidationErrorList { } limitTypeSet[limit.Type] = true - keys := util.StringSet{} - min := map[string]int64{} - max := map[string]int64{} - defaults := map[string]int64{} - defaultRequests := map[string]int64{} + keys := sets.String{} + min := map[string]resource.Quantity{} + max := map[string]resource.Quantity{} + defaults := map[string]resource.Quantity{} + defaultRequests := map[string]resource.Quantity{} - for k := range limit.Max { + for k, q := range limit.Max { allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].max[%s]", i, k))...) keys.Insert(string(k)) - q := limit.Max[k] - max[string(k)] = q.Value() + max[string(k)] = q } - for k := range limit.Min { + for k, q := range limit.Min { allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].min[%s]", i, k))...) keys.Insert(string(k)) - q := limit.Min[k] - min[string(k)] = q.Value() + min[string(k)] = q } - for k := range limit.Default { + for k, q := range limit.Default { allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].default[%s]", i, k))...) keys.Insert(string(k)) - q := limit.Default[k] - defaults[string(k)] = q.Value() + defaults[string(k)] = q } - for k := range limit.DefaultRequest { + for k, q := range limit.DefaultRequest { allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k))...) keys.Insert(string(k)) - q := limit.DefaultRequest[k] - defaultRequests[string(k)] = q.Value() + defaultRequests[string(k)] = q } for k := range limit.MaxLimitRequestRatio { allErrs = append(allErrs, validateResourceName(string(k), fmt.Sprintf("spec.limits[%d].maxLimitRequestRatio[%s]", i, k))...) 
} for k := range keys { - minValue, minValueFound := min[k] - maxValue, maxValueFound := max[k] - defaultValue, defaultValueFound := defaults[k] - defaultRequestValue, defaultRequestValueFound := defaultRequests[k] + minQuantity, minQuantityFound := min[k] + maxQuantity, maxQuantityFound := max[k] + defaultQuantity, defaultQuantityFound := defaults[k] + defaultRequestQuantity, defaultRequestQuantityFound := defaultRequests[k] - if minValueFound && maxValueFound && minValue > maxValue { + if minQuantityFound && maxQuantityFound && minQuantity.Cmp(maxQuantity) > 0 { minQuantity := limit.Min[api.ResourceName(k)] maxQuantity := limit.Max[api.ResourceName(k)] - allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].min[%s]", i, k), minValue, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String()))) + allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].min[%s]", i, k), minQuantity, fmt.Sprintf("min value %s is greater than max value %s", minQuantity.String(), maxQuantity.String()))) } - if defaultRequestValueFound && minValueFound && minValue > defaultRequestValue { + if defaultRequestQuantityFound && minQuantityFound && minQuantity.Cmp(defaultRequestQuantity) > 0 { minQuantity := limit.Min[api.ResourceName(k)] defaultRequestQuantity := limit.DefaultRequest[api.ResourceName(k)] - allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestValue, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String()))) + allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("min value %s is greater than default request value %s", minQuantity.String(), defaultRequestQuantity.String()))) } - if defaultRequestValueFound && maxValueFound && defaultRequestValue > maxValue { + if defaultRequestQuantityFound && maxQuantityFound && defaultRequestQuantity.Cmp(maxQuantity) > 0 { maxQuantity := limit.Max[api.ResourceName(k)] defaultRequestQuantity := limit.DefaultRequest[api.ResourceName(k)] - allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestValue, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String()))) + allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than max value %s", defaultRequestQuantity.String(), maxQuantity.String()))) } - if defaultRequestValueFound && defaultValueFound && defaultRequestValue > defaultValue { + if defaultRequestQuantityFound && defaultQuantityFound && defaultRequestQuantity.Cmp(defaultQuantity) > 0 { defaultQuantity := limit.Default[api.ResourceName(k)] defaultRequestQuantity := limit.DefaultRequest[api.ResourceName(k)] - allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestValue, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), defaultQuantity.String()))) + allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].defaultRequest[%s]", i, k), defaultRequestQuantity, fmt.Sprintf("default request value %s is greater than default limit value %s", defaultRequestQuantity.String(), 
defaultQuantity.String()))) } - if defaultValueFound && minValueFound && minValue > defaultValue { + if defaultQuantityFound && minQuantityFound && minQuantity.Cmp(defaultQuantity) > 0 { minQuantity := limit.Min[api.ResourceName(k)] defaultQuantity := limit.Default[api.ResourceName(k)] - allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].default[%s]", i, k), minValue, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String()))) + allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].default[%s]", i, k), minQuantity, fmt.Sprintf("min value %s is greater than default value %s", minQuantity.String(), defaultQuantity.String()))) } - if defaultValueFound && maxValueFound && defaultValue > maxValue { + if defaultQuantityFound && maxQuantityFound && defaultQuantity.Cmp(maxQuantity) > 0 { maxQuantity := limit.Max[api.ResourceName(k)] defaultQuantity := limit.Default[api.ResourceName(k)] - allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].default[%s]", i, k), maxValue, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String()))) + allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("spec.limits[%d].default[%s]", i, k), maxQuantity, fmt.Sprintf("default value %s is greater than max value %s", defaultQuantity.String(), maxQuantity.String()))) } } } @@ -1888,7 +1885,7 @@ func ValidateThirdPartyResource(obj *api.ThirdPartyResource) errs.ValidationErro if len(obj.Name) == 0 { allErrs = append(allErrs, errs.NewFieldInvalid("name", obj.Name, "name must be non-empty")) } - versions := util.StringSet{} + versions := sets.String{} for ix := range obj.Versions { version := &obj.Versions[ix] if len(version.Name) == 0 { diff --git a/pkg/api/validation/validation_test.go b/pkg/api/validation/validation_test.go index 3f71b703950..8e320114e23 100644 --- a/pkg/api/validation/validation_test.go +++ b/pkg/api/validation/validation_test.go @@ -30,6 +30,7 @@ import ( utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/fielderrors" errors "k8s.io/kubernetes/pkg/util/fielderrors" + "k8s.io/kubernetes/pkg/util/sets" ) func expectPrefix(t *testing.T, prefix string, errs fielderrors.ValidationErrorList) { @@ -769,7 +770,7 @@ func TestValidateEnv(t *testing.T) { } func TestValidateVolumeMounts(t *testing.T) { - volumes := util.NewStringSet("abc", "123", "abc-123") + volumes := sets.NewString("abc", "123", "abc-123") successCase := []api.VolumeMount{ {Name: "abc", MountPath: "/foo"}, @@ -896,7 +897,7 @@ func getResourceLimits(cpu, memory string) api.ResourceList { } func TestValidateContainers(t *testing.T) { - volumes := util.StringSet{} + volumes := sets.String{} capabilities.SetForTests(capabilities.Capabilities{ AllowPrivileged: true, }) @@ -2886,138 +2887,58 @@ func TestValidateResourceNames(t *testing.T) { } } +func getResourceList(cpu, memory string) api.ResourceList { + res := api.ResourceList{} + if cpu != "" { + res[api.ResourceCPU] = resource.MustParse(cpu) + } + if memory != "" { + res[api.ResourceMemory] = resource.MustParse(memory) + } + return res +} + func TestValidateLimitRange(t *testing.T) { - spec := api.LimitRangeSpec{ - Limits: []api.LimitRangeItem{ - { - Type: api.LimitTypePod, - Max: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100"), - api.ResourceMemory: resource.MustParse("10000"), - }, - Min: api.ResourceList{ - api.ResourceCPU: resource.MustParse("5"), - api.ResourceMemory: 
resource.MustParse("100"), - }, - Default: api.ResourceList{ - api.ResourceCPU: resource.MustParse("50"), - api.ResourceMemory: resource.MustParse("500"), - }, - DefaultRequest: api.ResourceList{ - api.ResourceCPU: resource.MustParse("10"), - api.ResourceMemory: resource.MustParse("200"), - }, - MaxLimitRequestRatio: api.ResourceList{ - api.ResourceCPU: resource.MustParse("20"), - }, - }, - }, - } - - invalidSpecDuplicateType := api.LimitRangeSpec{ - Limits: []api.LimitRangeItem{ - { - Type: api.LimitTypePod, - Max: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100"), - api.ResourceMemory: resource.MustParse("10000"), - }, - Min: api.ResourceList{ - api.ResourceCPU: resource.MustParse("0"), - api.ResourceMemory: resource.MustParse("100"), - }, - }, - { - Type: api.LimitTypePod, - Min: api.ResourceList{ - api.ResourceCPU: resource.MustParse("0"), - api.ResourceMemory: resource.MustParse("100"), - }, - }, - }, - } - - invalidSpecRangeMaxLessThanMin := api.LimitRangeSpec{ - Limits: []api.LimitRangeItem{ - { - Type: api.LimitTypePod, - Max: api.ResourceList{ - api.ResourceCPU: resource.MustParse("10"), - }, - Min: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1000"), - }, - }, - }, - } - - invalidSpecRangeDefaultOutsideRange := api.LimitRangeSpec{ - Limits: []api.LimitRangeItem{ - { - Type: api.LimitTypePod, - Max: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1000"), - }, - Min: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100"), - }, - Default: api.ResourceList{ - api.ResourceCPU: resource.MustParse("2000"), - }, - }, - }, - } - - invalidSpecRangeDefaultRequestOutsideRange := api.LimitRangeSpec{ - Limits: []api.LimitRangeItem{ - { - Type: api.LimitTypePod, - Max: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1000"), - }, - Min: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100"), - }, - DefaultRequest: api.ResourceList{ - api.ResourceCPU: resource.MustParse("2000"), - }, - }, - }, - } - - invalidSpecRangeRequestMoreThanDefaultRange := api.LimitRangeSpec{ - Limits: []api.LimitRangeItem{ - { - Type: api.LimitTypePod, - Max: api.ResourceList{ - api.ResourceCPU: resource.MustParse("1000"), - }, - Min: api.ResourceList{ - api.ResourceCPU: resource.MustParse("100"), - }, - Default: api.ResourceList{ - api.ResourceCPU: resource.MustParse("500"), - }, - DefaultRequest: api.ResourceList{ - api.ResourceCPU: resource.MustParse("800"), - }, - }, - }, - } - - successCases := []api.LimitRange{ + successCases := []struct { + name string + spec api.LimitRangeSpec + }{ { - ObjectMeta: api.ObjectMeta{ - Name: "abc", - Namespace: "foo", + name: "all-fields-valid", + spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: getResourceList("100m", "10000Mi"), + Min: getResourceList("5m", "100Mi"), + Default: getResourceList("50m", "500Mi"), + DefaultRequest: getResourceList("10m", "200Mi"), + MaxLimitRequestRatio: getResourceList("10", ""), + }, + }, + }, + }, + { + name: "all-fields-valid-big-numbers", + spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: getResourceList("100m", "10000T"), + Min: getResourceList("5m", "100Mi"), + Default: getResourceList("50m", "500Mi"), + DefaultRequest: getResourceList("10m", "200Mi"), + MaxLimitRequestRatio: getResourceList("10", ""), + }, + }, }, - Spec: spec, }, } for _, successCase := range successCases { - if errs := ValidateLimitRange(&successCase); len(errs) != 0 { - t.Errorf("expected success: %v", errs) + limitRange := 
&api.LimitRange{ObjectMeta: api.ObjectMeta{Name: successCase.name, Namespace: "foo"}, Spec: successCase.spec} + if errs := ValidateLimitRange(limitRange); len(errs) != 0 { + t.Errorf("Case %v, unexpected error: %v", successCase.name, errs) } } @@ -3025,43 +2946,92 @@ func TestValidateLimitRange(t *testing.T) { R api.LimitRange D string }{ - "zero-length Name": { - api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "", Namespace: "foo"}, Spec: spec}, + "zero-length-name": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "", Namespace: "foo"}, Spec: api.LimitRangeSpec{}}, "name or generateName is required", }, "zero-length-namespace": { - api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: ""}, Spec: spec}, + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: ""}, Spec: api.LimitRangeSpec{}}, "", }, - "invalid Name": { - api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "^Invalid", Namespace: "foo"}, Spec: spec}, + "invalid-name": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "^Invalid", Namespace: "foo"}, Spec: api.LimitRangeSpec{}}, DNSSubdomainErrorMsg, }, - "invalid Namespace": { - api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "^Invalid"}, Spec: spec}, + "invalid-namespace": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "^Invalid"}, Spec: api.LimitRangeSpec{}}, DNS1123LabelErrorMsg, }, - "duplicate limit type": { - api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: invalidSpecDuplicateType}, + "duplicate-limit-type": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: getResourceList("100m", "10000m"), + Min: getResourceList("0m", "100m"), + }, + { + Type: api.LimitTypePod, + Min: getResourceList("0m", "100m"), + }, + }, + }}, "", }, - "min value 1k is greater than max value 10": { - api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: invalidSpecRangeMaxLessThanMin}, - "min value 1k is greater than max value 10", + "min value 100m is greater than max value 10m": { + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: getResourceList("10m", ""), + Min: getResourceList("100m", ""), + }, + }, + }}, + "min value 100m is greater than max value 10m", }, "invalid spec default outside range": { - api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: invalidSpecRangeDefaultOutsideRange}, - "default value 2k is greater than max value 1k", + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: getResourceList("1", ""), + Min: getResourceList("100m", ""), + Default: getResourceList("2000m", ""), + }, + }, + }}, + "default value 2 is greater than max value 1", }, "invalid spec defaultrequest outside range": { - api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: invalidSpecRangeDefaultRequestOutsideRange}, - "default request value 2k is greater than max value 1k", + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypePod, + Max: getResourceList("1", ""), + Min: getResourceList("100m", ""), + DefaultRequest: getResourceList("2000m", ""), + }, + }, + }}, + "default 
request value 2 is greater than max value 1", }, "invalid spec defaultrequest more than default": { - api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: invalidSpecRangeRequestMoreThanDefaultRange}, - "default request value 800 is greater than default limit value 500", + api.LimitRange{ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: "foo"}, Spec: api.LimitRangeSpec{ + Limits: []api.LimitRangeItem{ + { + Type: api.LimitTypeContainer, + Max: getResourceList("2", ""), + Min: getResourceList("100m", ""), + Default: getResourceList("500m", ""), + DefaultRequest: getResourceList("800m", ""), + }, + }, + }}, + "default request value 800m is greater than default limit value 500m", }, } + for k, v := range errorCases { errs := ValidateLimitRange(&v.R) if len(errs) == 0 { @@ -3074,6 +3044,7 @@ func TestValidateLimitRange(t *testing.T) { } } } + } func TestValidateResourceQuota(t *testing.T) { diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 2f2f11d6921..a15e626d09f 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -42,6 +42,7 @@ import ( "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/flushwriter" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/version" "github.com/emicklei/go-restful" @@ -115,7 +116,7 @@ const ( // It is expected that the provided path root prefix will serve all operations. Root MUST NOT end // in a slash. A restful WebService is created for the group and version. func (g *APIGroupVersion) InstallREST(container *restful.Container) error { - info := &APIRequestInfoResolver{util.NewStringSet(strings.TrimPrefix(g.Root, "/")), g.Mapper} + info := &APIRequestInfoResolver{sets.NewString(strings.TrimPrefix(g.Root, "/")), g.Mapper} prefix := path.Join(g.Root, g.Version) installer := &APIInstaller{ @@ -345,8 +346,7 @@ func parseTimeout(str string) time.Duration { } glog.Errorf("Failed to parse %q: %v", str, err) } - // TODO: change back to 30s once #5180 is fixed - return 2 * time.Minute + return 30 * time.Second } func readBody(req *http.Request) ([]byte, error) { diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index 80b9dc218e1..85c51a1508a 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -2145,10 +2145,10 @@ func TestUpdateChecksDecode(t *testing.T) { } func TestParseTimeout(t *testing.T) { - if d := parseTimeout(""); d != 2*time.Minute { + if d := parseTimeout(""); d != 30*time.Second { t.Errorf("blank timeout produces %v", d) } - if d := parseTimeout("not a timeout"); d != 2*time.Minute { + if d := parseTimeout("not a timeout"); d != 30*time.Second { t.Errorf("bad timeout produces %v", d) } if d := parseTimeout("10s"); d != 10*time.Second { diff --git a/pkg/apiserver/handlers.go b/pkg/apiserver/handlers.go index 706e42854f4..6dfe6cb8afc 100644 --- a/pkg/apiserver/handlers.go +++ b/pkg/apiserver/handlers.go @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/pkg/auth/authorizer" "k8s.io/kubernetes/pkg/httplog" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // specialVerbs contains just strings which are used in REST paths for special actions that don't fall under the normal @@ -351,7 +352,7 @@ type requestAttributeGetter struct { // NewAttributeGetter returns an object which implements the RequestAttributeGetter interface. 
func NewRequestAttributeGetter(requestContextMapper api.RequestContextMapper, restMapper meta.RESTMapper, apiRoots ...string) RequestAttributeGetter { - return &requestAttributeGetter{requestContextMapper, &APIRequestInfoResolver{util.NewStringSet(apiRoots...), restMapper}} + return &requestAttributeGetter{requestContextMapper, &APIRequestInfoResolver{sets.NewString(apiRoots...), restMapper}} } func (r *requestAttributeGetter) GetAttribs(req *http.Request) authorizer.Attributes { @@ -417,7 +418,7 @@ type APIRequestInfo struct { } type APIRequestInfoResolver struct { - APIPrefixes util.StringSet + APIPrefixes sets.String RestMapper meta.RESTMapper } diff --git a/pkg/apiserver/handlers_test.go b/pkg/apiserver/handlers_test.go index b9d948d73ba..c296a0cdb65 100644 --- a/pkg/apiserver/handlers_test.go +++ b/pkg/apiserver/handlers_test.go @@ -31,7 +31,7 @@ import ( "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/latest" "k8s.io/kubernetes/pkg/api/testapi" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) type fakeRL bool @@ -246,7 +246,7 @@ func TestGetAPIRequestInfo(t *testing.T) { {"PUT", "/namespaces/other/finalize", "update", "", "other", "finalize", "", "", "", []string{"finalize"}}, } - apiRequestInfoResolver := &APIRequestInfoResolver{util.NewStringSet("api"), latest.RESTMapper} + apiRequestInfoResolver := &APIRequestInfoResolver{sets.NewString("api"), latest.RESTMapper} for _, successCase := range successCases { req, _ := http.NewRequest(successCase.method, successCase.url, nil) diff --git a/pkg/client/unversioned/cache/delta_fifo.go b/pkg/client/unversioned/cache/delta_fifo.go index 4b432208ba4..808a854556e 100644 --- a/pkg/client/unversioned/cache/delta_fifo.go +++ b/pkg/client/unversioned/cache/delta_fifo.go @@ -21,7 +21,7 @@ import ( "fmt" "sync" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "github.com/golang/glog" ) @@ -319,7 +319,7 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error { return nil } - keySet := make(util.StringSet, len(list)) + keySet := make(sets.String, len(list)) for _, item := range list { key, err := f.KeyOf(item) if err != nil { diff --git a/pkg/client/unversioned/cache/expiration_cache_fakes.go b/pkg/client/unversioned/cache/expiration_cache_fakes.go index 5fc380abc42..2e9a25d121a 100644 --- a/pkg/client/unversioned/cache/expiration_cache_fakes.go +++ b/pkg/client/unversioned/cache/expiration_cache_fakes.go @@ -18,6 +18,7 @@ package cache import ( "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) type fakeThreadSafeMap struct { @@ -32,7 +33,7 @@ func (c *fakeThreadSafeMap) Delete(key string) { } type FakeExpirationPolicy struct { - NeverExpire util.StringSet + NeverExpire sets.String RetrieveKeyFunc KeyFunc } diff --git a/pkg/client/unversioned/cache/expiration_cache_test.go b/pkg/client/unversioned/cache/expiration_cache_test.go index 4ecccc54c2b..375ffcceaa3 100644 --- a/pkg/client/unversioned/cache/expiration_cache_test.go +++ b/pkg/client/unversioned/cache/expiration_cache_test.go @@ -22,6 +22,7 @@ import ( "time" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) func TestTTLExpirationBasic(t *testing.T) { @@ -30,7 +31,7 @@ func TestTTLExpirationBasic(t *testing.T) { ttlStore := NewFakeExpirationStore( testStoreKeyFunc, deleteChan, &FakeExpirationPolicy{ - NeverExpire: util.NewStringSet(), + NeverExpire: sets.NewString(), RetrieveKeyFunc: func(obj interface{}) (string, error) { return obj.(*timestampedEntry).obj.(testStoreObject).id, nil 
}, @@ -66,14 +67,14 @@ func TestTTLList(t *testing.T) { {id: "foo1", val: "bar1"}, {id: "foo2", val: "bar2"}, } - expireKeys := util.NewStringSet(testObjs[0].id, testObjs[2].id) + expireKeys := sets.NewString(testObjs[0].id, testObjs[2].id) deleteChan := make(chan string) defer close(deleteChan) ttlStore := NewFakeExpirationStore( testStoreKeyFunc, deleteChan, &FakeExpirationPolicy{ - NeverExpire: util.NewStringSet(testObjs[1].id), + NeverExpire: sets.NewString(testObjs[1].id), RetrieveKeyFunc: func(obj interface{}) (string, error) { return obj.(*timestampedEntry).obj.(testStoreObject).id, nil }, diff --git a/pkg/client/unversioned/cache/index.go b/pkg/client/unversioned/cache/index.go index 6e189c2bf60..0730ca459e4 100644 --- a/pkg/client/unversioned/cache/index.go +++ b/pkg/client/unversioned/cache/index.go @@ -20,7 +20,7 @@ import ( "fmt" "k8s.io/kubernetes/pkg/api/meta" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // Indexer is a storage interface that lets you list objects using multiple indexing functions @@ -63,7 +63,7 @@ func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) { } // Index maps the indexed value to a set of keys in the store that match on that value -type Index map[string]util.StringSet +type Index map[string]sets.String // Indexers maps a name to a IndexFunc type Indexers map[string]IndexFunc diff --git a/pkg/client/unversioned/cache/listers.go b/pkg/client/unversioned/cache/listers.go index 0f92daf59c0..cba3e5b3003 100644 --- a/pkg/client/unversioned/cache/listers.go +++ b/pkg/client/unversioned/cache/listers.go @@ -221,59 +221,59 @@ func (s *StoreToReplicationControllerLister) GetPodControllers(pod *api.Pod) (co controllers = append(controllers, rc) } if len(controllers) == 0 { err = fmt.Errorf("Could not find controllers for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) } return } -// StoreToDaemonLister gives a store List and Exists methods. The store must contain only Daemons. -type StoreToDaemonLister struct { +// StoreToDaemonSetLister gives a store List and Exists methods. The store must contain only DaemonSets. +type StoreToDaemonSetLister struct { Store } -// Exists checks if the given dc exists in the store. -func (s *StoreToDaemonLister) Exists(daemon *expapi.Daemon) (bool, error) { - _, exists, err := s.Store.Get(daemon) +// Exists checks if the given daemon set exists in the store. +func (s *StoreToDaemonSetLister) Exists(ds *expapi.DaemonSet) (bool, error) { + _, exists, err := s.Store.Get(ds) if err != nil { return false, err } return exists, nil } -// StoreToDaemonLister lists all daemons in the store. +// List lists all daemon sets in the store. // TODO: converge on the interface in pkg/client -func (s *StoreToDaemonLister) List() (daemons []expapi.Daemon, err error) { +func (s *StoreToDaemonSetLister) List() (dss []expapi.DaemonSet, err error) { for _, c := range s.Store.List() { - daemons = append(daemons, *(c.(*expapi.Daemon))) + dss = append(dss, *(c.(*expapi.DaemonSet))) } - return daemons, nil + return dss, nil } -// GetPodDaemons returns a list of daemons managing a pod. Returns an error iff no matching daemons are found. -func (s *StoreToDaemonLister) GetPodDaemons(pod *api.Pod) (daemons []expapi.Daemon, err error) { +// GetPodDaemonSets returns a list of daemon sets managing a pod. Returns an error iff no matching daemon sets are found.
+func (s *StoreToDaemonSetLister) GetPodDaemonSets(pod *api.Pod) (daemonSets []expapi.DaemonSet, err error) { var selector labels.Selector - var daemonController expapi.Daemon + var daemonSet expapi.DaemonSet if len(pod.Labels) == 0 { - err = fmt.Errorf("No daemons found for pod %v because it has no labels", pod.Name) + err = fmt.Errorf("No daemon sets found for pod %v because it has no labels", pod.Name) return } for _, m := range s.Store.List() { - daemonController = *m.(*expapi.Daemon) - if daemonController.Namespace != pod.Namespace { + daemonSet = *m.(*expapi.DaemonSet) + if daemonSet.Namespace != pod.Namespace { continue } - selector = labels.Set(daemonController.Spec.Selector).AsSelector() + selector = labels.Set(daemonSet.Spec.Selector).AsSelector() - // If a daemonController with a nil or empty selector creeps in, it should match nothing, not everything. + // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything. if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { continue } - daemons = append(daemons, daemonController) + daemonSets = append(daemonSets, daemonSet) } - if len(daemons) == 0 { - err = fmt.Errorf("Could not find daemons for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + if len(daemonSets) == 0 { + err = fmt.Errorf("Could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) } return } diff --git a/pkg/client/unversioned/cache/listers_test.go b/pkg/client/unversioned/cache/listers_test.go index 112b10416d2..f9505d26136 100644 --- a/pkg/client/unversioned/cache/listers_test.go +++ b/pkg/client/unversioned/cache/listers_test.go @@ -22,12 +22,12 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) func TestStoreToMinionLister(t *testing.T) { store := NewStore(MetaNamespaceKeyFunc) - ids := util.NewStringSet("foo", "bar", "baz") + ids := sets.NewString("foo", "bar", "baz") for id := range ids { store.Add(&api.Node{ObjectMeta: api.ObjectMeta{Name: id}}) } @@ -52,7 +52,7 @@ func TestStoreToReplicationControllerLister(t *testing.T) { testCases := []struct { inRCs []*api.ReplicationController list func() ([]api.ReplicationController, error) - outRCNames util.StringSet + outRCNames sets.String expectErr bool }{ // Basic listing with all labels and no selectors @@ -63,7 +63,7 @@ func TestStoreToReplicationControllerLister(t *testing.T) { list: func() ([]api.ReplicationController, error) { return lister.List() }, - outRCNames: util.NewStringSet("basic"), + outRCNames: sets.NewString("basic"), }, // No pod labels { @@ -81,7 +81,7 @@ func TestStoreToReplicationControllerLister(t *testing.T) { } return lister.GetPodControllers(pod) }, - outRCNames: util.NewStringSet(), + outRCNames: sets.NewString(), expectErr: true, }, // No RC selectors @@ -101,7 +101,7 @@ func TestStoreToReplicationControllerLister(t *testing.T) { } return lister.GetPodControllers(pod) }, - outRCNames: util.NewStringSet(), + outRCNames: sets.NewString(), expectErr: true, }, // Matching labels to selectors and namespace @@ -130,7 +130,7 @@ func TestStoreToReplicationControllerLister(t *testing.T) { } return lister.GetPodControllers(pod) }, - outRCNames: util.NewStringSet("bar"), + outRCNames: sets.NewString("bar"), }, } for _, c := range testCases { @@ -156,64 +156,64 @@ func TestStoreToReplicationControllerLister(t *testing.T) { } } -func 
TestStoreToDaemonLister(t *testing.T) { +func TestStoreToDaemonSetLister(t *testing.T) { store := NewStore(MetaNamespaceKeyFunc) - lister := StoreToDaemonLister{store} + lister := StoreToDaemonSetLister{store} testCases := []struct { - inDCs []*expapi.Daemon - list func() ([]expapi.Daemon, error) - outDCNames util.StringSet - expectErr bool + inDSs []*expapi.DaemonSet + list func() ([]expapi.DaemonSet, error) + outDaemonSetNames sets.String + expectErr bool }{ // Basic listing { - inDCs: []*expapi.Daemon{ + inDSs: []*expapi.DaemonSet{ {ObjectMeta: api.ObjectMeta{Name: "basic"}}, }, - list: func() ([]expapi.Daemon, error) { + list: func() ([]expapi.DaemonSet, error) { return lister.List() }, - outDCNames: util.NewStringSet("basic"), + outDaemonSetNames: sets.NewString("basic"), }, - // Listing multiple controllers + // Listing multiple daemon sets { - inDCs: []*expapi.Daemon{ + inDSs: []*expapi.DaemonSet{ {ObjectMeta: api.ObjectMeta{Name: "basic"}}, {ObjectMeta: api.ObjectMeta{Name: "complex"}}, {ObjectMeta: api.ObjectMeta{Name: "complex2"}}, }, - list: func() ([]expapi.Daemon, error) { + list: func() ([]expapi.DaemonSet, error) { return lister.List() }, - outDCNames: util.NewStringSet("basic", "complex", "complex2"), + outDaemonSetNames: sets.NewString("basic", "complex", "complex2"), }, // No pod labels { - inDCs: []*expapi.Daemon{ + inDSs: []*expapi.DaemonSet{ { ObjectMeta: api.ObjectMeta{Name: "basic", Namespace: "ns"}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: map[string]string{"foo": "baz"}, }, }, }, - list: func() ([]expapi.Daemon, error) { + list: func() ([]expapi.DaemonSet, error) { pod := &api.Pod{ ObjectMeta: api.ObjectMeta{Name: "pod1", Namespace: "ns"}, } - return lister.GetPodDaemons(pod) + return lister.GetPodDaemonSets(pod) }, - outDCNames: util.NewStringSet(), - expectErr: true, + outDaemonSetNames: sets.NewString(), + expectErr: true, }, - // No RC selectors + // No DS selectors { - inDCs: []*expapi.Daemon{ + inDSs: []*expapi.DaemonSet{ { ObjectMeta: api.ObjectMeta{Name: "basic", Namespace: "ns"}, }, }, - list: func() ([]expapi.Daemon, error) { + list: func() ([]expapi.DaemonSet, error) { pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod1", @@ -221,28 +221,28 @@ func TestStoreToDaemonLister(t *testing.T) { Labels: map[string]string{"foo": "bar"}, }, } - return lister.GetPodDaemons(pod) + return lister.GetPodDaemonSets(pod) }, - outDCNames: util.NewStringSet(), - expectErr: true, + outDaemonSetNames: sets.NewString(), + expectErr: true, }, // Matching labels to selectors and namespace { - inDCs: []*expapi.Daemon{ + inDSs: []*expapi.DaemonSet{ { ObjectMeta: api.ObjectMeta{Name: "foo"}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: map[string]string{"foo": "bar"}, }, }, { ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: map[string]string{"foo": "bar"}, }, }, }, - list: func() ([]expapi.Daemon, error) { + list: func() ([]expapi.DaemonSet, error) { pod := &api.Pod{ ObjectMeta: api.ObjectMeta{ Name: "pod1", @@ -250,17 +250,17 @@ func TestStoreToDaemonLister(t *testing.T) { Namespace: "ns", }, } - return lister.GetPodDaemons(pod) + return lister.GetPodDaemonSets(pod) }, - outDCNames: util.NewStringSet("bar"), + outDaemonSetNames: sets.NewString("bar"), }, } for _, c := range testCases { - for _, r := range c.inDCs { + for _, r := range c.inDSs { store.Add(r) } - gotControllers, err := c.list() + daemonSets, err := c.list() if err != nil && c.expectErr { 
continue } else if c.expectErr { @@ -268,12 +268,12 @@ func TestStoreToDaemonLister(t *testing.T) { } else if err != nil { t.Fatalf("Unexpected error %#v", err) } - gotNames := make([]string, len(gotControllers)) - for ix := range gotControllers { - gotNames[ix] = gotControllers[ix].Name + daemonSetNames := make([]string, len(daemonSets)) + for ix := range daemonSets { + daemonSetNames[ix] = daemonSets[ix].Name } - if !c.outDCNames.HasAll(gotNames...) || len(gotNames) != len(c.outDCNames) { - t.Errorf("Unexpected got controllers %+v expected %+v", gotNames, c.outDCNames) + if !c.outDaemonSetNames.HasAll(daemonSetNames...) || len(daemonSetNames) != len(c.outDaemonSetNames) { + t.Errorf("Unexpected daemon sets %+v, expected %+v", daemonSetNames, c.outDaemonSetNames) } } } diff --git a/pkg/client/unversioned/cache/store_test.go b/pkg/client/unversioned/cache/store_test.go index 2d3b153af7a..07275f493de 100644 --- a/pkg/client/unversioned/cache/store_test.go +++ b/pkg/client/unversioned/cache/store_test.go @@ -19,7 +19,7 @@ package cache import ( "testing" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // Test public interface @@ -54,7 +54,7 @@ func doTestStore(t *testing.T, store Store) { store.Add(mkObj("c", "d")) store.Add(mkObj("e", "e")) { - found := util.StringSet{} + found := sets.String{} for _, item := range store.List() { found.Insert(item.(testStoreObject).val) } @@ -73,7 +73,7 @@ func doTestStore(t *testing.T, store Store) { }, "0") { - found := util.StringSet{} + found := sets.String{} for _, item := range store.List() { found.Insert(item.(testStoreObject).val) } @@ -93,17 +93,17 @@ func doTestIndex(t *testing.T, indexer Indexer) { } // Test Index - expected := map[string]util.StringSet{} - expected["b"] = util.NewStringSet("a", "c") - expected["f"] = util.NewStringSet("e") - expected["h"] = util.NewStringSet("g") + expected := map[string]sets.String{} + expected["b"] = sets.NewString("a", "c") + expected["f"] = sets.NewString("e") + expected["h"] = sets.NewString("g") indexer.Add(mkObj("a", "b")) indexer.Add(mkObj("c", "b")) indexer.Add(mkObj("e", "f")) indexer.Add(mkObj("g", "h")) { for k, v := range expected { - found := util.StringSet{} + found := sets.String{} indexResults, err := indexer.Index("by_val", mkObj("", k)) if err != nil { t.Errorf("Unexpected error %v", err) diff --git a/pkg/client/unversioned/cache/thread_safe_store.go b/pkg/client/unversioned/cache/thread_safe_store.go index 20113937890..653b9f297b5 100644 --- a/pkg/client/unversioned/cache/thread_safe_store.go +++ b/pkg/client/unversioned/cache/thread_safe_store.go @@ -20,7 +20,7 @@ import ( "fmt" "sync" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // ThreadSafeStore is an interface that allows concurrent access to a storage backend. @@ -142,7 +142,7 @@ func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, index := c.indices[indexName] // need to de-dupe the return list. Since multiple keys are allowed, this can happen.
- returnKeySet := util.StringSet{} + returnKeySet := sets.String{} for _, indexKey := range indexKeys { set := index[indexKey] for _, key := range set.List() { @@ -208,7 +208,7 @@ func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, ke for _, indexValue := range indexValues { set := index[indexValue] if set == nil { - set = util.StringSet{} + set = sets.String{} index[indexValue] = set } set.Insert(key) diff --git a/pkg/client/unversioned/daemon.go b/pkg/client/unversioned/daemon.go deleted file mode 100644 index 3b99fcdd3b0..00000000000 --- a/pkg/client/unversioned/daemon.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package unversioned - -import ( - "k8s.io/kubernetes/pkg/expapi" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/watch" -) - -// DaemonsNamespacer has methods to work with Daemon resources in a namespace -type DaemonsNamespacer interface { - Daemons(namespace string) DaemonInterface -} - -type DaemonInterface interface { - List(selector labels.Selector) (*expapi.DaemonList, error) - Get(name string) (*expapi.Daemon, error) - Create(ctrl *expapi.Daemon) (*expapi.Daemon, error) - Update(ctrl *expapi.Daemon) (*expapi.Daemon, error) - Delete(name string) error - Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) -} - -// daemons implements DaemonsNamespacer interface -type daemons struct { - r *ExperimentalClient - ns string -} - -func newDaemons(c *ExperimentalClient, namespace string) *daemons { - return &daemons{c, namespace} -} - -// Ensure statically that daemons implements DaemonInterface. -var _ DaemonInterface = &daemons{} - -func (c *daemons) List(selector labels.Selector) (result *expapi.DaemonList, err error) { - result = &expapi.DaemonList{} - err = c.r.Get().Namespace(c.ns).Resource("daemons").LabelsSelectorParam(selector).Do().Into(result) - return -} - -// Get returns information about a particular daemon. -func (c *daemons) Get(name string) (result *expapi.Daemon, err error) { - result = &expapi.Daemon{} - err = c.r.Get().Namespace(c.ns).Resource("daemons").Name(name).Do().Into(result) - return -} - -// Create creates a new daemon. -func (c *daemons) Create(daemon *expapi.Daemon) (result *expapi.Daemon, err error) { - result = &expapi.Daemon{} - err = c.r.Post().Namespace(c.ns).Resource("daemons").Body(daemon).Do().Into(result) - return -} - -// Update updates an existing daemon. -func (c *daemons) Update(daemon *expapi.Daemon) (result *expapi.Daemon, err error) { - result = &expapi.Daemon{} - err = c.r.Put().Namespace(c.ns).Resource("daemons").Name(daemon.Name).Body(daemon).Do().Into(result) - return -} - -// Delete deletes an existing daemon. -func (c *daemons) Delete(name string) error { - return c.r.Delete().Namespace(c.ns).Resource("daemons").Name(name).Do().Error() -} - -// Watch returns a watch.Interface that watches the requested daemons. 
-func (c *daemons) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - return c.r.Get(). - Prefix("watch"). - Namespace(c.ns). - Resource("daemons"). - Param("resourceVersion", resourceVersion). - LabelsSelectorParam(label). - FieldsSelectorParam(field). - Watch() -} diff --git a/pkg/client/unversioned/daemon_sets.go b/pkg/client/unversioned/daemon_sets.go new file mode 100644 index 00000000000..321b34cc465 --- /dev/null +++ b/pkg/client/unversioned/daemon_sets.go @@ -0,0 +1,95 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unversioned + +import ( + "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/watch" +) + +// DaemonsSetsNamespacer has methods to work with DaemonSet resources in a namespace +type DaemonSetsNamespacer interface { + DaemonSets(namespace string) DaemonSetInterface +} + +type DaemonSetInterface interface { + List(selector labels.Selector) (*expapi.DaemonSetList, error) + Get(name string) (*expapi.DaemonSet, error) + Create(ctrl *expapi.DaemonSet) (*expapi.DaemonSet, error) + Update(ctrl *expapi.DaemonSet) (*expapi.DaemonSet, error) + Delete(name string) error + Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) +} + +// daemonSets implements DaemonsSetsNamespacer interface +type daemonSets struct { + r *ExperimentalClient + ns string +} + +func newDaemonSets(c *ExperimentalClient, namespace string) *daemonSets { + return &daemonSets{c, namespace} +} + +// Ensure statically that daemonSets implements DaemonSetsInterface. +var _ DaemonSetInterface = &daemonSets{} + +func (c *daemonSets) List(selector labels.Selector) (result *expapi.DaemonSetList, err error) { + result = &expapi.DaemonSetList{} + err = c.r.Get().Namespace(c.ns).Resource("daemonsets").LabelsSelectorParam(selector).Do().Into(result) + return +} + +// Get returns information about a particular daemon set. +func (c *daemonSets) Get(name string) (result *expapi.DaemonSet, err error) { + result = &expapi.DaemonSet{} + err = c.r.Get().Namespace(c.ns).Resource("daemonsets").Name(name).Do().Into(result) + return +} + +// Create creates a new daemon set. +func (c *daemonSets) Create(daemon *expapi.DaemonSet) (result *expapi.DaemonSet, err error) { + result = &expapi.DaemonSet{} + err = c.r.Post().Namespace(c.ns).Resource("daemonsets").Body(daemon).Do().Into(result) + return +} + +// Update updates an existing daemon set. +func (c *daemonSets) Update(daemon *expapi.DaemonSet) (result *expapi.DaemonSet, err error) { + result = &expapi.DaemonSet{} + err = c.r.Put().Namespace(c.ns).Resource("daemonsets").Name(daemon.Name).Body(daemon).Do().Into(result) + return +} + +// Delete deletes an existing daemon set. 
+func (c *daemonSets) Delete(name string) error { + return c.r.Delete().Namespace(c.ns).Resource("daemonsets").Name(name).Do().Error() +} + +// Watch returns a watch.Interface that watches the requested daemon sets. +func (c *daemonSets) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { + return c.r.Get(). + Prefix("watch"). + Namespace(c.ns). + Resource("daemonsets"). + Param("resourceVersion", resourceVersion). + LabelsSelectorParam(label). + FieldsSelectorParam(field). + Watch() +} diff --git a/pkg/client/unversioned/daemon_test.go b/pkg/client/unversioned/daemon_sets_test.go similarity index 63% rename from pkg/client/unversioned/daemon_test.go rename to pkg/client/unversioned/daemon_sets_test.go index 0f1f96b1cda..6df19940149 100644 --- a/pkg/client/unversioned/daemon_test.go +++ b/pkg/client/unversioned/daemon_sets_test.go @@ -25,20 +25,20 @@ import ( "k8s.io/kubernetes/pkg/labels" ) -func getDCResourceName() string { - return "daemons" +func getDSResourceName() string { + return "daemonsets" } -func TestListDaemons(t *testing.T) { +func TestListDaemonSets(t *testing.T) { ns := api.NamespaceAll c := &testClient{ Request: testRequest{ Method: "GET", - Path: testapi.Experimental.ResourcePath(getDCResourceName(), ns, ""), + Path: testapi.Experimental.ResourcePath(getDSResourceName(), ns, ""), }, Response: Response{StatusCode: 200, - Body: &expapi.DaemonList{ - Items: []expapi.Daemon{ + Body: &expapi.DaemonSetList{ + Items: []expapi.DaemonSet{ { ObjectMeta: api.ObjectMeta{ Name: "foo", @@ -47,7 +47,7 @@ func TestListDaemons(t *testing.T) { "name": "baz", }, }, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Template: &api.PodTemplateSpec{}, }, }, @@ -55,18 +55,18 @@ func TestListDaemons(t *testing.T) { }, }, } - receivedControllerList, err := c.Setup(t).Experimental().Daemons(ns).List(labels.Everything()) - c.Validate(t, receivedControllerList, err) + receivedDSs, err := c.Setup(t).Experimental().DaemonSets(ns).List(labels.Everything()) + c.Validate(t, receivedDSs, err) } -func TestGetDaemon(t *testing.T) { +func TestGetDaemonSet(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "GET", Path: testapi.Experimental.ResourcePath(getDCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "GET", Path: testapi.Experimental.ResourcePath(getDSResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{ StatusCode: 200, - Body: &expapi.Daemon{ + Body: &expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{ Name: "foo", Labels: map[string]string{ @@ -74,20 +74,20 @@ func TestGetDaemon(t *testing.T) { "name": "baz", }, }, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Template: &api.PodTemplateSpec{}, }, }, }, } - receivedController, err := c.Setup(t).Experimental().Daemons(ns).Get("foo") - c.Validate(t, receivedController, err) + receivedDaemonSet, err := c.Setup(t).Experimental().DaemonSets(ns).Get("foo") + c.Validate(t, receivedDaemonSet, err) } -func TestGetDaemonWithNoName(t *testing.T) { +func TestGetDaemonSetWithNoName(t *testing.T) { ns := api.NamespaceDefault c := &testClient{Error: true} - receivedPod, err := c.Setup(t).Experimental().Daemons(ns).Get("") + receivedPod, err := c.Setup(t).Experimental().DaemonSets(ns).Get("") if (err != nil) && (err.Error() != nameRequiredError) { t.Errorf("Expected error: %v, but got %v", nameRequiredError, err) } @@ -95,16 +95,16 @@ func TestGetDaemonWithNoName(t *testing.T) { c.Validate(t, 
receivedPod, err) } -func TestUpdateDaemon(t *testing.T) { +func TestUpdateDaemonSet(t *testing.T) { ns := api.NamespaceDefault - requestController := &expapi.Daemon{ + requestDaemonSet := &expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "foo", ResourceVersion: "1"}, } c := &testClient{ - Request: testRequest{Method: "PUT", Path: testapi.Experimental.ResourcePath(getDCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "PUT", Path: testapi.Experimental.ResourcePath(getDSResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{ StatusCode: 200, - Body: &expapi.Daemon{ + Body: &expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{ Name: "foo", Labels: map[string]string{ @@ -112,36 +112,36 @@ func TestUpdateDaemon(t *testing.T) { "name": "baz", }, }, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Template: &api.PodTemplateSpec{}, }, }, }, } - receivedController, err := c.Setup(t).Experimental().Daemons(ns).Update(requestController) - c.Validate(t, receivedController, err) + receivedDaemonSet, err := c.Setup(t).Experimental().DaemonSets(ns).Update(requestDaemonSet) + c.Validate(t, receivedDaemonSet, err) } func TestDeleteDaemon(t *testing.T) { ns := api.NamespaceDefault c := &testClient{ - Request: testRequest{Method: "DELETE", Path: testapi.Experimental.ResourcePath(getDCResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, + Request: testRequest{Method: "DELETE", Path: testapi.Experimental.ResourcePath(getDSResourceName(), ns, "foo"), Query: buildQueryValues(nil)}, Response: Response{StatusCode: 200}, } - err := c.Setup(t).Experimental().Daemons(ns).Delete("foo") + err := c.Setup(t).Experimental().DaemonSets(ns).Delete("foo") c.Validate(t, nil, err) } -func TestCreateDaemon(t *testing.T) { +func TestCreateDaemonSet(t *testing.T) { ns := api.NamespaceDefault - requestController := &expapi.Daemon{ + requestDaemonSet := &expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "foo"}, } c := &testClient{ - Request: testRequest{Method: "POST", Path: testapi.Experimental.ResourcePath(getDCResourceName(), ns, ""), Body: requestController, Query: buildQueryValues(nil)}, + Request: testRequest{Method: "POST", Path: testapi.Experimental.ResourcePath(getDSResourceName(), ns, ""), Body: requestDaemonSet, Query: buildQueryValues(nil)}, Response: Response{ StatusCode: 200, - Body: &expapi.Daemon{ + Body: &expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{ Name: "foo", Labels: map[string]string{ @@ -149,12 +149,12 @@ func TestCreateDaemon(t *testing.T) { "name": "baz", }, }, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Template: &api.PodTemplateSpec{}, }, }, }, } - receivedController, err := c.Setup(t).Experimental().Daemons(ns).Create(requestController) - c.Validate(t, receivedController, err) + receivedDaemonSet, err := c.Setup(t).Experimental().DaemonSets(ns).Create(requestDaemonSet) + c.Validate(t, receivedDaemonSet, err) } diff --git a/pkg/client/unversioned/debugging.go b/pkg/client/unversioned/debugging.go index ae68ed43bed..df43e8984d0 100644 --- a/pkg/client/unversioned/debugging.go +++ b/pkg/client/unversioned/debugging.go @@ -23,7 +23,7 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // RequestInfo keeps track of information about a request/response combination @@ -75,7 +75,7 @@ func (r RequestInfo) ToCurl() string { type DebuggingRoundTripper struct { delegatedRoundTripper http.RoundTripper - Levels util.StringSet + Levels sets.String } const ( @@ -88,7 +88,7 @@ const 
( ) func NewDebuggingRoundTripper(rt http.RoundTripper, levels ...string) *DebuggingRoundTripper { - return &DebuggingRoundTripper{rt, util.NewStringSet(levels...)} + return &DebuggingRoundTripper{rt, sets.NewString(levels...)} } func (rt *DebuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { diff --git a/pkg/client/unversioned/experimental.go b/pkg/client/unversioned/experimental.go index bd30ace7d8b..f23083bd571 100644 --- a/pkg/client/unversioned/experimental.go +++ b/pkg/client/unversioned/experimental.go @@ -34,7 +34,7 @@ type ExperimentalInterface interface { VersionInterface HorizontalPodAutoscalersNamespacer ScaleNamespacer - DaemonsNamespacer + DaemonSetsNamespacer DeploymentsNamespacer } @@ -82,8 +82,8 @@ func (c *ExperimentalClient) Scales(namespace string) ScaleInterface { return newScales(c, namespace) } -func (c *ExperimentalClient) Daemons(namespace string) DaemonInterface { - return newDaemons(c, namespace) +func (c *ExperimentalClient) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) } func (c *ExperimentalClient) Deployments(namespace string) DeploymentInterface { diff --git a/pkg/client/unversioned/flags_test.go b/pkg/client/unversioned/flags_test.go index ea8da3546db..ab0f94d0412 100644 --- a/pkg/client/unversioned/flags_test.go +++ b/pkg/client/unversioned/flags_test.go @@ -20,12 +20,12 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) type fakeFlagSet struct { t *testing.T - set util.StringSet + set sets.String } func (f *fakeFlagSet) StringVar(p *string, name, value, usage string) { diff --git a/pkg/client/unversioned/helper.go b/pkg/client/unversioned/helper.go index cae4fafc38e..081e5f9214a 100644 --- a/pkg/client/unversioned/helper.go +++ b/pkg/client/unversioned/helper.go @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/pkg/api/latest" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/version" ) @@ -187,7 +188,7 @@ func NegotiateVersion(client *Client, c *Config, version string, clientRegistere return "", err } } - clientVersions := util.StringSet{} + clientVersions := sets.String{} for _, v := range clientRegisteredVersions { clientVersions.Insert(v) } @@ -195,7 +196,7 @@ func NegotiateVersion(client *Client, c *Config, version string, clientRegistere if err != nil { return "", fmt.Errorf("couldn't read version from server: %v", err) } - serverVersions := util.StringSet{} + serverVersions := sets.String{} for _, v := range apiVersions.Versions { serverVersions.Insert(v) } diff --git a/pkg/client/unversioned/request.go b/pkg/client/unversioned/request.go index b48448bc1a0..00c3afdfe6f 100644 --- a/pkg/client/unversioned/request.go +++ b/pkg/client/unversioned/request.go @@ -39,13 +39,14 @@ import ( "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/httpstream" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" watchjson "k8s.io/kubernetes/pkg/watch/json" ) // specialParams lists parameters that are handled specially and which users of Request // are therefore not allowed to set manually. -var specialParams = util.NewStringSet("timeout") +var specialParams = sets.NewString("timeout") // HTTPClient is an interface for testing a request object. 
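Since the util.StringSet to sets.String switch above is the pattern repeated through the rest of this patch, here is one hedged before/after sketch for reviewers; it uses only constructors and methods that appear at call sites in this diff:

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/util/sets"
    )

    func main() {
    	// Before this patch: s := util.NewStringSet("a", "b")
    	s := sets.NewString("a", "b")
    	s.Insert("c")
    	fmt.Println(s.HasAll("a", "c")) // true: every argument is a member
    	fmt.Println(s.List())           // sorted members: [a b c]
    }

The rename is purely mechanical; behavior is unchanged, only the package moved from pkg/util to pkg/util/sets.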
type HTTPClient interface {
diff --git a/pkg/client/unversioned/testclient/fake_daemon_sets.go b/pkg/client/unversioned/testclient/fake_daemon_sets.go
new file mode 100644
index 00000000000..f50ec708488
--- /dev/null
+++ b/pkg/client/unversioned/testclient/fake_daemon_sets.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testclient
+
+import (
+ kClientLib "k8s.io/kubernetes/pkg/client/unversioned"
+ "k8s.io/kubernetes/pkg/expapi"
+ "k8s.io/kubernetes/pkg/fields"
+ "k8s.io/kubernetes/pkg/labels"
+ "k8s.io/kubernetes/pkg/watch"
+)
+
+// FakeDaemonSets implements DaemonSetInterface. Meant to be embedded into a struct to get a default
+// implementation. This makes faking out just the methods you want to test easier.
+type FakeDaemonSets struct {
+ Fake *FakeExperimental
+ Namespace string
+}
+
+// Ensure statically that FakeDaemonSets implements DaemonSetInterface.
+var _ kClientLib.DaemonSetInterface = &FakeDaemonSets{}
+
+func (c *FakeDaemonSets) Get(name string) (*expapi.DaemonSet, error) {
+ obj, err := c.Fake.Invokes(NewGetAction("daemonsets", c.Namespace, name), &expapi.DaemonSet{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*expapi.DaemonSet), err
+}
+
+func (c *FakeDaemonSets) List(label labels.Selector) (*expapi.DaemonSetList, error) {
+ obj, err := c.Fake.Invokes(NewListAction("daemonsets", c.Namespace, label, nil), &expapi.DaemonSetList{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*expapi.DaemonSetList), err
+}
+
+func (c *FakeDaemonSets) Create(daemon *expapi.DaemonSet) (*expapi.DaemonSet, error) {
+ obj, err := c.Fake.Invokes(NewCreateAction("daemonsets", c.Namespace, daemon), &expapi.DaemonSet{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*expapi.DaemonSet), err
+}
+
+func (c *FakeDaemonSets) Update(daemon *expapi.DaemonSet) (*expapi.DaemonSet, error) {
+ obj, err := c.Fake.Invokes(NewUpdateAction("daemonsets", c.Namespace, daemon), &expapi.DaemonSet{})
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*expapi.DaemonSet), err
+}
+
+func (c *FakeDaemonSets) Delete(name string) error {
+ _, err := c.Fake.Invokes(NewDeleteAction("daemonsets", c.Namespace, name), &expapi.DaemonSet{})
+ return err
+}
+
+func (c *FakeDaemonSets) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
+ return c.Fake.InvokesWatch(NewWatchAction("daemonsets", c.Namespace, label, field, resourceVersion))
+}
diff --git a/pkg/client/unversioned/testclient/fake_daemons.go b/pkg/client/unversioned/testclient/fake_daemons.go
deleted file mode 100644
index 8b0b3bc8014..00000000000
--- a/pkg/client/unversioned/testclient/fake_daemons.go
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package testclient - -import ( - kClientLib "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/expapi" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/watch" -) - -// FakeDaemons implements DaemonInterface. Meant to be embedded into a struct to get a default -// implementation. This makes faking out just the method you want to test easier. -type FakeDaemons struct { - Fake *FakeExperimental - Namespace string -} - -// Ensure statically that FakeDaemons implements DaemonInterface. -var _ kClientLib.DaemonInterface = &FakeDaemons{} - -func (c *FakeDaemons) Get(name string) (*expapi.Daemon, error) { - obj, err := c.Fake.Invokes(NewGetAction("daemons", c.Namespace, name), &expapi.Daemon{}) - if obj == nil { - return nil, err - } - return obj.(*expapi.Daemon), err -} - -func (c *FakeDaemons) List(label labels.Selector) (*expapi.DaemonList, error) { - obj, err := c.Fake.Invokes(NewListAction("daemons", c.Namespace, label, nil), &expapi.DaemonList{}) - if obj == nil { - return nil, err - } - return obj.(*expapi.DaemonList), err -} - -func (c *FakeDaemons) Create(daemon *expapi.Daemon) (*expapi.Daemon, error) { - obj, err := c.Fake.Invokes(NewCreateAction("daemons", c.Namespace, daemon), &expapi.Daemon{}) - if obj == nil { - return nil, err - } - return obj.(*expapi.Daemon), err -} - -func (c *FakeDaemons) Update(daemon *expapi.Daemon) (*expapi.Daemon, error) { - obj, err := c.Fake.Invokes(NewUpdateAction("daemons", c.Namespace, daemon), &expapi.Daemon{}) - if obj == nil { - return nil, err - } - return obj.(*expapi.Daemon), err -} - -func (c *FakeDaemons) Delete(name string) error { - _, err := c.Fake.Invokes(NewDeleteAction("daemons", c.Namespace, name), &expapi.Daemon{}) - return err -} - -func (c *FakeDaemons) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - return c.Fake.InvokesWatch(NewWatchAction("daemons", c.Namespace, label, field, resourceVersion)) -} diff --git a/pkg/client/unversioned/testclient/fake_deployments.go b/pkg/client/unversioned/testclient/fake_deployments.go new file mode 100644 index 00000000000..38942476ffd --- /dev/null +++ b/pkg/client/unversioned/testclient/fake_deployments.go @@ -0,0 +1,82 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testclient + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/watch" +) + +// FakeDeployments implements DeploymentsInterface. 
Meant to be embedded into a struct to get a default +// implementation. This makes faking out just the methods you want to test easier. +type FakeDeployments struct { + Fake *FakeExperimental + Namespace string +} + +func (c *FakeDeployments) Get(name string) (*expapi.Deployment, error) { + obj, err := c.Fake.Invokes(NewGetAction("deployments", c.Namespace, name), &expapi.Deployment{}) + if obj == nil { + return nil, err + } + + return obj.(*expapi.Deployment), err +} + +func (c *FakeDeployments) List(label labels.Selector, field fields.Selector) (*expapi.DeploymentList, error) { + obj, err := c.Fake.Invokes(NewListAction("deployments", c.Namespace, label, field), &expapi.DeploymentList{}) + if obj == nil { + return nil, err + } + list := &expapi.DeploymentList{} + for _, deployment := range obj.(*expapi.DeploymentList).Items { + if label.Matches(labels.Set(deployment.Labels)) { + list.Items = append(list.Items, deployment) + } + } + return list, err +} + +func (c *FakeDeployments) Create(deployment *expapi.Deployment) (*expapi.Deployment, error) { + obj, err := c.Fake.Invokes(NewCreateAction("deployments", c.Namespace, deployment), deployment) + if obj == nil { + return nil, err + } + + return obj.(*expapi.Deployment), err +} + +func (c *FakeDeployments) Update(deployment *expapi.Deployment) (*expapi.Deployment, error) { + obj, err := c.Fake.Invokes(NewUpdateAction("deployments", c.Namespace, deployment), deployment) + if obj == nil { + return nil, err + } + + return obj.(*expapi.Deployment), err +} + +func (c *FakeDeployments) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake.Invokes(NewDeleteAction("deployments", c.Namespace, name), &expapi.Deployment{}) + return err +} + +func (c *FakeDeployments) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { + return c.Fake.InvokesWatch(NewWatchAction("deployments", c.Namespace, label, field, resourceVersion)) +} diff --git a/pkg/client/unversioned/testclient/fake_horizontal_pod_autoscalers.go b/pkg/client/unversioned/testclient/fake_horizontal_pod_autoscalers.go new file mode 100644 index 00000000000..4c3b7369d9d --- /dev/null +++ b/pkg/client/unversioned/testclient/fake_horizontal_pod_autoscalers.go @@ -0,0 +1,82 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testclient + +import ( + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/expapi" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/watch" +) + +// FakeHorizontalPodAutoscalers implements HorizontalPodAutoscalerInterface. Meant to be embedded into a struct to get a default +// implementation. This makes faking out just the methods you want to test easier. 
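All of the new fakes in this patch follow the same contract: each call funnels through Fake.Invokes, which records the action and returns either a registered reaction's object or the default object passed in. A hypothetical test against FakeDeployments could look like the sketch below; the FakeExperimental literal is an assumption based on its definition in testclient.go later in this patch (a struct embedding *Fake), and a zero-value Fake with no reactions is assumed to fall back to the default return object:

    package testclient_test

    import (
    	"testing"

    	"k8s.io/kubernetes/pkg/client/unversioned/testclient"
    )

    // Hypothetical test, not part of this change.
    func TestFakeDeploymentsRecordsGet(t *testing.T) {
    	fake := &testclient.FakeExperimental{Fake: &testclient.Fake{}}
    	// The action is recorded even when no reaction is registered.
    	fake.Deployments("default").Get("my-deployment")
    	actions := fake.Actions()
    	if len(actions) != 1 || actions[0].GetVerb() != "get" || actions[0].GetResource() != "deployments" {
    		t.Errorf("unexpected recorded actions: %v", actions)
    	}
    }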
+type FakeHorizontalPodAutoscalers struct { + Fake *FakeExperimental + Namespace string +} + +func (c *FakeHorizontalPodAutoscalers) Get(name string) (*expapi.HorizontalPodAutoscaler, error) { + obj, err := c.Fake.Invokes(NewGetAction("horizontalpodautoscalers", c.Namespace, name), &expapi.HorizontalPodAutoscaler{}) + if obj == nil { + return nil, err + } + + return obj.(*expapi.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) List(label labels.Selector, field fields.Selector) (*expapi.HorizontalPodAutoscalerList, error) { + obj, err := c.Fake.Invokes(NewListAction("horizontalpodautoscalers", c.Namespace, label, field), &expapi.HorizontalPodAutoscalerList{}) + if obj == nil { + return nil, err + } + list := &expapi.HorizontalPodAutoscalerList{} + for _, a := range obj.(*expapi.HorizontalPodAutoscalerList).Items { + if label.Matches(labels.Set(a.Labels)) { + list.Items = append(list.Items, a) + } + } + return list, err +} + +func (c *FakeHorizontalPodAutoscalers) Create(a *expapi.HorizontalPodAutoscaler) (*expapi.HorizontalPodAutoscaler, error) { + obj, err := c.Fake.Invokes(NewCreateAction("horizontalpodautoscalers", c.Namespace, a), a) + if obj == nil { + return nil, err + } + + return obj.(*expapi.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) Update(a *expapi.HorizontalPodAutoscaler) (*expapi.HorizontalPodAutoscaler, error) { + obj, err := c.Fake.Invokes(NewUpdateAction("horizontalpodautoscalers", c.Namespace, a), a) + if obj == nil { + return nil, err + } + + return obj.(*expapi.HorizontalPodAutoscaler), err +} + +func (c *FakeHorizontalPodAutoscalers) Delete(name string, options *api.DeleteOptions) error { + _, err := c.Fake.Invokes(NewDeleteAction("horizontalpodautoscalers", c.Namespace, name), &expapi.HorizontalPodAutoscaler{}) + return err +} + +func (c *FakeHorizontalPodAutoscalers) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { + return c.Fake.InvokesWatch(NewWatchAction("horizontalpodautoscalers", c.Namespace, label, field, resourceVersion)) +} diff --git a/pkg/client/unversioned/testclient/fake_scales.go b/pkg/client/unversioned/testclient/fake_scales.go new file mode 100644 index 00000000000..95d7220791f --- /dev/null +++ b/pkg/client/unversioned/testclient/fake_scales.go @@ -0,0 +1,52 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testclient + +import ( + "k8s.io/kubernetes/pkg/expapi" +) + +// FakeScales implements ScaleInterface. Meant to be embedded into a struct to get a default +// implementation. This makes faking out just the methods you want to test easier. 
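FakeScales below is the odd one out: it targets the scale subresource of some other resource, so it fills in GetActionImpl and UpdateActionImpl by hand instead of using the NewGetAction-style helpers, which have no subresource parameter. A test asserting on the recorded subresource might look roughly like this hypothetical fragment, reusing the fake setup from the previous sketch:

    // Hypothetical fragment, inside a test function.
    fake := &testclient.FakeExperimental{Fake: &testclient.Fake{}}
    fake.Scales("default").Get("replicationcontrollers", "foo")
    if a := fake.Actions()[0]; a.GetResource() != "replicationcontrollers" || a.GetSubresource() != "scale" {
    	t.Errorf("expected a get recorded against the scale subresource, got %v", a)
    }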
+type FakeScales struct {
+ Fake *FakeExperimental
+ Namespace string
+}
+
+func (c *FakeScales) Get(kind string, name string) (result *expapi.Scale, err error) {
+ action := GetActionImpl{}
+ action.Verb = "get"
+ action.Namespace = c.Namespace
+ action.Resource = kind
+ action.Subresource = "scale"
+ action.Name = name
+ obj, err := c.Fake.Invokes(action, &expapi.Scale{})
+ if obj == nil {
+ return nil, err
+ }
+ result = obj.(*expapi.Scale)
+ return
+}
+
+func (c *FakeScales) Update(kind string, scale *expapi.Scale) (result *expapi.Scale, err error) {
+ action := UpdateActionImpl{}
+ action.Verb = "update"
+ action.Namespace = c.Namespace
+ action.Resource = kind
+ action.Subresource = "scale"
+ action.Object = scale
+ obj, err := c.Fake.Invokes(action, scale)
+ if obj == nil {
+ return nil, err
+ }
+ result = obj.(*expapi.Scale)
+ return
+}
diff --git a/pkg/client/unversioned/testclient/testclient.go b/pkg/client/unversioned/testclient/testclient.go
index 2773890482b..6bb59bfc9e1 100644
--- a/pkg/client/unversioned/testclient/testclient.go
+++ b/pkg/client/unversioned/testclient/testclient.go
@@ -246,18 +246,18 @@ type FakeExperimental struct {
*Fake
}
-func (c *FakeExperimental) Daemons(namespace string) client.DaemonInterface {
- return &FakeDaemons{Fake: c, Namespace: namespace}
+func (c *FakeExperimental) DaemonSets(namespace string) client.DaemonSetInterface {
+ return &FakeDaemonSets{Fake: c, Namespace: namespace}
}
func (c *FakeExperimental) HorizontalPodAutoscalers(namespace string) client.HorizontalPodAutoscalerInterface {
- panic("unimplemented")
-}
-
-func (c *FakeExperimental) Scales(namespace string) client.ScaleInterface {
- panic("unimplemented")
+ return &FakeHorizontalPodAutoscalers{Fake: c, Namespace: namespace}
}
func (c *FakeExperimental) Deployments(namespace string) client.DeploymentInterface {
- panic("unimplemented")
+ return &FakeDeployments{Fake: c, Namespace: namespace}
+}
+
+func (c *FakeExperimental) Scales(namespace string) client.ScaleInterface {
+ return &FakeScales{Fake: c, Namespace: namespace}
}
diff --git a/pkg/cloudprovider/plugins.go b/pkg/cloudprovider/plugins.go
index 86e5f8dbd32..ad39e34051c 100644
--- a/pkg/cloudprovider/plugins.go
+++ b/pkg/cloudprovider/plugins.go
@@ -65,15 +65,16 @@ func GetCloudProvider(name string, config io.Reader) (Interface, error) {
// InitCloudProvider creates an instance of the named cloud provider.
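The plugins.go hunk below is a pure shadowing fix: the old config, err := os.Open(configFilePath) declared a fresh err scoped to the if block, so the outer err consulted later stayed nil. In isolation the pitfall looks like this minimal sketch (openConfig is a hypothetical helper, not code from this patch):

    package main

    import "os"

    func openConfig(path string) error {
    	var err error
    	if path != "" {
    		config, err := os.Open(path) // BUG: ':=' declares a new err, shadowing the outer one
    		_ = config
    		_ = err
    	}
    	return err // always nil, even when os.Open failed above
    }

    func main() {
    	_ = openConfig("/nonexistent")
    }

Predeclaring the variables, as the hunk does with var err error and var config *os.File, makes the inner assignment use = instead of :=, so the outer err carries the failure.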
func InitCloudProvider(name string, configFilePath string) (Interface, error) { var cloud Interface + var err error if name == "" { glog.Info("No cloud provider specified.") return nil, nil } - var err error if configFilePath != "" { - config, err := os.Open(configFilePath) + var config *os.File + config, err = os.Open(configFilePath) if err != nil { glog.Fatalf("Couldn't open cloud provider configuration %s: %#v", configFilePath, err) diff --git a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go index eef723be8cf..c3469bcddb2 100644 --- a/pkg/cloudprovider/providers/aws/aws_loadbalancer.go +++ b/pkg/cloudprovider/providers/aws/aws_loadbalancer.go @@ -24,7 +24,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elb" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) func (s *AWSCloud) ensureLoadBalancer(region, name string, listeners []*elb.Listener, subnetIDs []string, securityGroupIDs []string) (*elb.LoadBalancerDescription, error) { @@ -61,7 +61,7 @@ func (s *AWSCloud) ensureLoadBalancer(region, name string, listeners []*elb.List } else { { // Sync subnets - expected := util.NewStringSet(subnetIDs...) + expected := sets.NewString(subnetIDs...) actual := stringSetFromPointers(loadBalancer.Subnets) additions := expected.Difference(actual) @@ -94,7 +94,7 @@ func (s *AWSCloud) ensureLoadBalancer(region, name string, listeners []*elb.List { // Sync security groups - expected := util.NewStringSet(securityGroupIDs...) + expected := sets.NewString(securityGroupIDs...) actual := stringSetFromPointers(loadBalancer.SecurityGroups) if !expected.Equal(actual) { @@ -255,12 +255,12 @@ func (s *AWSCloud) ensureLoadBalancerHealthCheck(region string, loadBalancer *el // Makes sure that exactly the specified hosts are registered as instances with the load balancer func (s *AWSCloud) ensureLoadBalancerInstances(elbClient ELB, loadBalancerName string, lbInstances []*elb.Instance, instances []*ec2.Instance) error { - expected := util.NewStringSet() + expected := sets.NewString() for _, instance := range instances { expected.Insert(orEmpty(instance.InstanceID)) } - actual := util.NewStringSet() + actual := sets.NewString() for _, lbInstance := range lbInstances { actual.Insert(orEmpty(lbInstance.InstanceID)) } diff --git a/pkg/cloudprovider/providers/aws/aws_utils.go b/pkg/cloudprovider/providers/aws/aws_utils.go index 99baeeac3ef..1704d2988da 100644 --- a/pkg/cloudprovider/providers/aws/aws_utils.go +++ b/pkg/cloudprovider/providers/aws/aws_utils.go @@ -18,10 +18,10 @@ package aws_cloud import ( "github.com/aws/aws-sdk-go/aws" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) -func stringSetToPointers(in util.StringSet) []*string { +func stringSetToPointers(in sets.String) []*string { if in == nil { return nil } @@ -32,11 +32,11 @@ func stringSetToPointers(in util.StringSet) []*string { return out } -func stringSetFromPointers(in []*string) util.StringSet { +func stringSetFromPointers(in []*string) sets.String { if in == nil { return nil } - out := util.NewStringSet() + out := sets.NewString() for i := range in { out.Insert(orEmpty(in[i])) } diff --git a/pkg/cloudprovider/providers/gce/gce.go b/pkg/cloudprovider/providers/gce/gce.go index 3829956b98e..1926b2683ba 100644 --- a/pkg/cloudprovider/providers/gce/gce.go +++ b/pkg/cloudprovider/providers/gce/gce.go @@ -28,7 +28,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/cloudprovider" - 
"k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" "github.com/golang/glog" @@ -464,7 +464,7 @@ func (gce *GCECloud) UpdateTCPLoadBalancer(name, region string, hosts []string) if err != nil { return err } - existing := util.NewStringSet() + existing := sets.NewString() for _, instance := range pool.Instances { existing.Insert(hostURLToComparablePath(instance)) } diff --git a/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go b/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go index abdb061e99d..11e98ccadca 100644 --- a/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go +++ b/pkg/controller/autoscaler/horizontalpodautoscaler_controller.go @@ -42,17 +42,15 @@ const ( type HorizontalPodAutoscalerController struct { client client.Interface - expClient client.ExperimentalInterface metricsClient metrics.MetricsClient } var downscaleForbiddenWindow, _ = time.ParseDuration("20m") var upscaleForbiddenWindow, _ = time.ParseDuration("3m") -func New(client client.Interface, expClient client.ExperimentalInterface, metricsClient metrics.MetricsClient) *HorizontalPodAutoscalerController { +func New(client client.Interface, metricsClient metrics.MetricsClient) *HorizontalPodAutoscalerController { return &HorizontalPodAutoscalerController{ client: client, - expClient: expClient, metricsClient: metricsClient, } } @@ -67,14 +65,14 @@ func (a *HorizontalPodAutoscalerController) Run(syncPeriod time.Duration) { func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error { ns := api.NamespaceAll - list, err := a.expClient.HorizontalPodAutoscalers(ns).List(labels.Everything(), fields.Everything()) + list, err := a.client.Experimental().HorizontalPodAutoscalers(ns).List(labels.Everything(), fields.Everything()) if err != nil { return fmt.Errorf("error listing nodes: %v", err) } for _, hpa := range list.Items { reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Namespace, hpa.Spec.ScaleRef.Name) - scale, err := a.expClient.Scales(hpa.Spec.ScaleRef.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name) + scale, err := a.client.Experimental().Scales(hpa.Spec.ScaleRef.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name) if err != nil { glog.Warningf("Failed to query scale subresource for %s: %v", reference, err) continue @@ -127,7 +125,7 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error { if rescale { scale.Spec.Replicas = desiredReplicas - _, err = a.expClient.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale) + _, err = a.client.Experimental().Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale) if err != nil { glog.Warningf("Failed to rescale %s: %v", reference, err) continue @@ -147,7 +145,7 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error { hpa.Status.LastScaleTimestamp = &now } - _, err = a.expClient.HorizontalPodAutoscalers(hpa.Namespace).Update(&hpa) + _, err = a.client.Experimental().HorizontalPodAutoscalers(hpa.Namespace).Update(&hpa) if err != nil { glog.Warningf("Failed to update HorizontalPodAutoscaler %s: %v", hpa.Name, err) continue diff --git a/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go b/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go index 1147cc3087c..bdffbbb18ba 100644 --- a/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go +++ b/pkg/controller/autoscaler/horizontalpodautoscaler_controller_test.go @@ -177,14 +177,12 @@ 
func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
defer testServer.Close()
kubeClient := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Experimental.Version()})
- expClient := client.NewExperimentalOrDie(&client.Config{Host: testServer.URL, Version: testapi.Experimental.Version()})
-
fakeRC := fakeResourceConsumptionClient{metrics: map[api.ResourceName]expapi.ResourceConsumption{
api.ResourceCPU: {Resource: api.ResourceCPU, Quantity: resource.MustParse("650m")},
}}
fake := fakeMetricsClient{consumption: &fakeRC}
- hpaController := New(kubeClient, expClient, &fake)
+ hpaController := New(kubeClient, &fake)
err := hpaController.reconcileAutoscalers()
if err != nil {
@@ -193,7 +191,7 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
for _, h := range handlers {
h.ValidateRequestCount(t, 1)
}
- obj, err := expClient.Codec.Decode([]byte(handlers[updateHpaHandler].RequestBody))
+ obj, err := kubeClient.Codec.Decode([]byte(handlers[updateHpaHandler].RequestBody))
if err != nil {
t.Fatalf("Failed to decode: %v", err)
}
diff --git a/pkg/controller/controller_utils_test.go b/pkg/controller/controller_utils_test.go
index ef6d69036df..215c95b10bd 100644
--- a/pkg/controller/controller_utils_test.go
+++ b/pkg/controller/controller_utils_test.go
@@ -34,6 +34,7 @@ import (
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/kubernetes/pkg/util"
+ "k8s.io/kubernetes/pkg/util/sets"
)
// NewFakeControllerExpectationsLookup creates a fake store for PodExpectations.
@@ -224,13 +225,13 @@ func TestActivePodFiltering(t *testing.T) {
podList := newPodList(nil, 5, api.PodRunning, rc)
podList.Items[0].Status.Phase = api.PodSucceeded
podList.Items[1].Status.Phase = api.PodFailed
- expectedNames := util.NewStringSet()
+ expectedNames := sets.NewString()
for _, pod := range podList.Items[2:] {
expectedNames.Insert(pod.Name)
}
got := FilterActivePods(podList.Items)
- gotNames := util.NewStringSet()
+ gotNames := sets.NewString()
for _, pod := range got {
gotNames.Insert(pod.Name)
}
diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go
index b6db5593049..49a439508f6 100644
--- a/pkg/controller/endpoint/endpoints_controller.go
+++ b/pkg/controller/endpoint/endpoints_controller.go
@@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util"
+ "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/pkg/watch"

@@ -141,8 +142,8 @@ func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
e.queue.ShutDown()
}
-func (e *EndpointController) getPodServiceMemberships(pod *api.Pod) (util.StringSet, error) {
- set := util.StringSet{}
+func (e *EndpointController) getPodServiceMemberships(pod *api.Pod) (sets.String, error) {
+ set := sets.String{}
services, err := e.serviceStore.GetPodServices(pod)
if err != nil {
// don't log this error because this function makes pointless
diff --git a/pkg/controller/framework/controller_test.go b/pkg/controller/framework/controller_test.go
index 619b994accf..836790b3c83 100644
--- a/pkg/controller/framework/controller_test.go
+++ b/pkg/controller/framework/controller_test.go
@@ -27,7 +27,7 @@ import (
"k8s.io/kubernetes/pkg/client/unversioned/cache"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/runtime"
- "k8s.io/kubernetes/pkg/util"
+ "k8s.io/kubernetes/pkg/util/sets"
"github.com/google/gofuzz"
)
@@ -104,7 +104,7 @@ func Example() {
} // Let's wait for the controller to process the things we just added. - outputSet := util.StringSet{} + outputSet := sets.String{} for i := 0; i < len(testIDs); i++ { outputSet.Insert(<-deletionCounter) } @@ -161,7 +161,7 @@ func ExampleInformer() { } // Let's wait for the controller to process the things we just added. - outputSet := util.StringSet{} + outputSet := sets.String{} for i := 0; i < len(testIDs); i++ { outputSet.Insert(<-deletionCounter) } @@ -235,7 +235,7 @@ func TestHammerController(t *testing.T) { go func() { defer wg.Done() // Let's add a few objects to the source. - currentNames := util.StringSet{} + currentNames := sets.String{} rs := rand.NewSource(rand.Int63()) f := fuzz.New().NilChance(.5).NumElements(0, 2).RandSource(rs) r := rand.New(rs) // Mustn't use r and f concurrently! diff --git a/pkg/controller/namespace/namespace_controller.go b/pkg/controller/namespace/namespace_controller.go index 425cf830c34..4b95ba32131 100644 --- a/pkg/controller/namespace/namespace_controller.go +++ b/pkg/controller/namespace/namespace_controller.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" "github.com/golang/glog" @@ -41,7 +42,7 @@ type NamespaceController struct { } // NewNamespaceController creates a new NamespaceController -func NewNamespaceController(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceController { +func NewNamespaceController(kubeClient client.Interface, experimentalMode bool, resyncPeriod time.Duration) *NamespaceController { var controller *framework.Controller _, controller = framework.NewInformer( &cache.ListWatch{ @@ -57,7 +58,7 @@ func NewNamespaceController(kubeClient client.Interface, resyncPeriod time.Durat framework.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { namespace := obj.(*api.Namespace) - if err := syncNamespace(kubeClient, *namespace); err != nil { + if err := syncNamespace(kubeClient, experimentalMode, *namespace); err != nil { if estimate, ok := err.(*contentRemainingError); ok { go func() { // Estimate is the aggregate total of TerminationGracePeriodSeconds, which defaults to 30s @@ -79,7 +80,7 @@ func NewNamespaceController(kubeClient client.Interface, resyncPeriod time.Durat }, UpdateFunc: func(oldObj, newObj interface{}) { namespace := newObj.(*api.Namespace) - if err := syncNamespace(kubeClient, *namespace); err != nil { + if err := syncNamespace(kubeClient, experimentalMode, *namespace); err != nil { if estimate, ok := err.(*contentRemainingError); ok { go func() { t := estimate.Estimate/2 + 1 @@ -128,7 +129,7 @@ func finalize(kubeClient client.Interface, namespace api.Namespace) (*api.Namesp namespaceFinalize := api.Namespace{} namespaceFinalize.ObjectMeta = namespace.ObjectMeta namespaceFinalize.Spec = namespace.Spec - finalizerSet := util.NewStringSet() + finalizerSet := sets.NewString() for i := range namespace.Spec.Finalizers { if namespace.Spec.Finalizers[i] != api.FinalizerKubernetes { finalizerSet.Insert(string(namespace.Spec.Finalizers[i])) @@ -152,7 +153,7 @@ func (e *contentRemainingError) Error() string { // deleteAllContent will delete all content known to the system in a namespace. It returns an estimate // of the time remaining before the remaining resources are deleted. If estimate > 0 not all resources // are guaranteed to be gone. 
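The experimentalMode flag introduced in this file is threaded from the constructor above, through syncNamespace, into deleteAllContent below; when it is false the namespace controller never touches the experimental API group, so clusters that do not serve it see no failing list calls during namespace teardown. Wiring it up looks roughly like this (constructor signature taken from this diff; the client value and resync period are illustrative):

    // kubeClient is an existing client.Interface; passing true enables deletion
    // of horizontalpodautoscalers, daemon sets and deployments during teardown.
    nsController := NewNamespaceController(kubeClient, true, 5*time.Minute)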
-func deleteAllContent(kubeClient client.Interface, namespace string, before util.Time) (estimate int64, err error) {
+func deleteAllContent(kubeClient client.Interface, experimentalMode bool, namespace string, before util.Time) (estimate int64, err error) {
err = deleteServiceAccounts(kubeClient, namespace)
if err != nil {
return estimate, err
}
@@ -189,12 +190,26 @@
if err != nil {
return estimate, err
}
-
+ // In experimental mode, delete all experimental resources for the namespace.
+ if experimentalMode {
+ err = deleteHorizontalPodAutoscalers(kubeClient.Experimental(), namespace)
+ if err != nil {
+ return estimate, err
+ }
+ err = deleteDaemonSets(kubeClient.Experimental(), namespace)
+ if err != nil {
+ return estimate, err
+ }
+ err = deleteDeployments(kubeClient.Experimental(), namespace)
+ if err != nil {
+ return estimate, err
+ }
+ }
return estimate, nil
}
// syncNamespace makes namespace life-cycle decisions
-func syncNamespace(kubeClient client.Interface, namespace api.Namespace) (err error) {
+func syncNamespace(kubeClient client.Interface, experimentalMode bool, namespace api.Namespace) (err error) {
if namespace.DeletionTimestamp == nil {
return nil
}
@@ -224,7 +239,7 @@ func syncNamespace(kubeClient client.Interface, namespace api.Namespace) (err er
}
// there may still be content for us to remove
- estimate, err := deleteAllContent(kubeClient, namespace.Name, *namespace.DeletionTimestamp)
+ estimate, err := deleteAllContent(kubeClient, experimentalMode, namespace.Name, *namespace.DeletionTimestamp)
if err != nil {
return err
}
@@ -389,3 +404,45 @@ func deletePersistentVolumeClaims(kubeClient client.Interface, ns string) error
}
return nil
}
+
+func deleteHorizontalPodAutoscalers(expClient client.ExperimentalInterface, ns string) error {
+ items, err := expClient.HorizontalPodAutoscalers(ns).List(labels.Everything(), fields.Everything())
+ if err != nil {
+ return err
+ }
+ for i := range items.Items {
+ err := expClient.HorizontalPodAutoscalers(ns).Delete(items.Items[i].Name, nil)
+ if err != nil && !errors.IsNotFound(err) {
+ return err
+ }
+ }
+ return nil
+}
+
+func deleteDaemonSets(expClient client.ExperimentalInterface, ns string) error {
+ items, err := expClient.DaemonSets(ns).List(labels.Everything())
+ if err != nil {
+ return err
+ }
+ for i := range items.Items {
+ err := expClient.DaemonSets(ns).Delete(items.Items[i].Name)
+ if err != nil && !errors.IsNotFound(err) {
+ return err
+ }
+ }
+ return nil
+}
+
+func deleteDeployments(expClient client.ExperimentalInterface, ns string) error {
+ items, err := expClient.Deployments(ns).List(labels.Everything(), fields.Everything())
+ if err != nil {
+ return err
+ }
+ for i := range items.Items {
+ err := expClient.Deployments(ns).Delete(items.Items[i].Name, nil)
+ if err != nil && !errors.IsNotFound(err) {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/pkg/controller/namespace/namespace_controller_test.go b/pkg/controller/namespace/namespace_controller_test.go
index b98e038c5b3..ccdd96b7b85 100644
--- a/pkg/controller/namespace/namespace_controller_test.go
+++ b/pkg/controller/namespace/namespace_controller_test.go
@@ -24,6 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
"k8s.io/kubernetes/pkg/util"
+ "k8s.io/kubernetes/pkg/util/sets"
)
func TestFinalized(t *testing.T) {
@@ -69,7 +70,7 @@ func TestFinalize(t *testing.T) {
}
}
-func TestSyncNamespaceThatIsTerminating(t *testing.T) {
+func testSyncNamespaceThatIsTerminating(t *testing.T, experimentalMode bool) {
mockClient := &testclient.Fake{}
now := util.Now()
testNamespace := api.Namespace{
@@ -85,12 +86,12 @@ func TestSyncNamespaceThatIsTerminating(t *testing.T) {
Phase: api.NamespaceTerminating,
},
}
- err := syncNamespace(mockClient, testNamespace)
+ err := syncNamespace(mockClient, experimentalMode, testNamespace)
if err != nil {
t.Errorf("Unexpected error when syncing namespace: %v", err)
}
// TODO: Reuse the constants for all these strings from testclient
- expectedActionSet := util.NewStringSet(
+ expectedActionSet := sets.NewString(
strings.Join([]string{"list", "replicationcontrollers", ""}, "-"),
strings.Join([]string{"list", "services", ""}, "-"),
strings.Join([]string{"list", "pods", ""}, "-"),
@@ -98,16 +99,38 @@ func TestSyncNamespaceThatIsTerminating(t *testing.T) {
strings.Join([]string{"list", "secrets", ""}, "-"),
strings.Join([]string{"list", "limitranges", ""}, "-"),
strings.Join([]string{"list", "events", ""}, "-"),
+ strings.Join([]string{"list", "serviceaccounts", ""}, "-"),
+ strings.Join([]string{"list", "persistentvolumeclaims", ""}, "-"),
strings.Join([]string{"create", "namespaces", "finalize"}, "-"),
strings.Join([]string{"delete", "namespaces", ""}, "-"),
)
- actionSet := util.NewStringSet()
+
+ if experimentalMode {
+ expectedActionSet.Insert(
+ strings.Join([]string{"list", "horizontalpodautoscalers", ""}, "-"),
+ strings.Join([]string{"list", "daemonsets", ""}, "-"),
+ strings.Join([]string{"list", "deployments", ""}, "-"),
+ )
+ }
+
+ actionSet := sets.NewString()
for _, action := range mockClient.Actions() {
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource(), action.GetSubresource()}, "-"))
}
if !actionSet.HasAll(expectedActionSet.List()...) {
t.Errorf("Expected actions: %v, but got: %v", expectedActionSet, actionSet)
}
+ if !expectedActionSet.HasAll(actionSet.List()...) {
+ t.Errorf("Got unexpected actions: %v, expected only: %v", actionSet, expectedActionSet)
+ }
+}
+
+func TestSyncNamespaceThatIsTerminatingNonExperimental(t *testing.T) {
+ testSyncNamespaceThatIsTerminating(t, false)
+}
+
+func TestSyncNamespaceThatIsTerminatingExperimental(t *testing.T) {
+ testSyncNamespaceThatIsTerminating(t, true)
}
func TestSyncNamespaceThatIsActive(t *testing.T) {
@@ -124,7 +147,7 @@ func TestSyncNamespaceThatIsActive(t *testing.T) {
Phase: api.NamespaceActive,
},
}
- err := syncNamespace(mockClient, testNamespace)
+ err := syncNamespace(mockClient, false, testNamespace)
if err != nil {
t.Errorf("Unexpected error when syncing namespace: %v", err)
}
@@ -135,7 +158,7 @@ func TestSyncNamespaceThatIsActive(t *testing.T) {
func TestRunStop(t *testing.T) {
mockClient := &testclient.Fake{}
- nsController := NewNamespaceController(mockClient, 1*time.Second)
+ nsController := NewNamespaceController(mockClient, false, 1*time.Second)
if nsController.StopEverything != nil {
t.Errorf("Non-running manager should not have a stop channel. Got %v", nsController.StopEverything)
diff --git a/pkg/controller/node/doc.go b/pkg/controller/node/doc.go
index 0cc00b6fff8..3174bef7c09 100644
--- a/pkg/controller/node/doc.go
+++ b/pkg/controller/node/doc.go
@@ -15,5 +15,5 @@ limitations under the License.
*/ // Package nodecontroller contains code for syncing cloud instances with -// minion registry +// node registry package nodecontroller diff --git a/pkg/controller/node/nodecontroller.go b/pkg/controller/node/nodecontroller.go index 2da83b9ab3d..18541f5ea82 100644 --- a/pkg/controller/node/nodecontroller.go +++ b/pkg/controller/node/nodecontroller.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) var ( @@ -58,7 +59,7 @@ type NodeController struct { cloud cloudprovider.Interface clusterCIDR *net.IPNet deletingPodsRateLimiter util.RateLimiter - knownNodeSet util.StringSet + knownNodeSet sets.String kubeClient client.Interface // Method for easy mocking in unittest. lookupIP func(host string) ([]net.IP, error) @@ -126,7 +127,7 @@ func NewNodeController( evictorLock := sync.Mutex{} return &NodeController{ cloud: cloud, - knownNodeSet: make(util.StringSet), + knownNodeSet: make(sets.String), kubeClient: kubeClient, recorder: recorder, podEvictionTimeout: podEvictionTimeout, @@ -211,8 +212,8 @@ func (nc *NodeController) Run(period time.Duration) { } // Generates num pod CIDRs that could be assigned to nodes. -func generateCIDRs(clusterCIDR *net.IPNet, num int) util.StringSet { - res := util.NewStringSet() +func generateCIDRs(clusterCIDR *net.IPNet, num int) sets.String { + res := sets.NewString() cidrIP := clusterCIDR.IP.To4() for i := 0; i < num; i++ { // TODO: Make the CIDRs configurable. @@ -256,7 +257,7 @@ func (nc *NodeController) monitorNodeStatus() error { // If there's a difference between lengths of known Nodes and observed nodes // we must have removed some Node. if len(nc.knownNodeSet) != len(nodes.Items) { - observedSet := make(util.StringSet) + observedSet := make(sets.String) for _, node := range nodes.Items { observedSet.Insert(node.Name) } diff --git a/pkg/controller/node/rate_limited_queue.go b/pkg/controller/node/rate_limited_queue.go index 2fcd0963d1b..71950f54a47 100644 --- a/pkg/controller/node/rate_limited_queue.go +++ b/pkg/controller/node/rate_limited_queue.go @@ -22,6 +22,7 @@ import ( "time" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // TimedValue is a value that should be processed at a designated time. @@ -58,7 +59,7 @@ func (h *TimedQueue) Pop() interface{} { type UniqueQueue struct { lock sync.Mutex queue TimedQueue - set util.StringSet + set sets.String } // Adds a new value to the queue if it wasn't added before, or was explicitly removed by the @@ -143,7 +144,7 @@ func NewRateLimitedTimedQueue(limiter util.RateLimiter) *RateLimitedTimedQueue { return &RateLimitedTimedQueue{ queue: UniqueQueue{ queue: TimedQueue{}, - set: util.NewStringSet(), + set: sets.NewString(), }, limiter: limiter, } diff --git a/pkg/controller/node/rate_limited_queue_test.go b/pkg/controller/node/rate_limited_queue_test.go index 5dc8a4d81a4..762e8263822 100644 --- a/pkg/controller/node/rate_limited_queue_test.go +++ b/pkg/controller/node/rate_limited_queue_test.go @@ -22,6 +22,7 @@ import ( "time" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) func CheckQueueEq(lhs []string, rhs TimedQueue) bool { @@ -33,7 +34,7 @@ func CheckQueueEq(lhs []string, rhs TimedQueue) bool { return true } -func CheckSetEq(lhs, rhs util.StringSet) bool { +func CheckSetEq(lhs, rhs sets.String) bool { return lhs.HasAll(rhs.List()...) && rhs.HasAll(lhs.List()...) } @@ -51,7 +52,7 @@ func TestAddNode(t *testing.T) { t.Errorf("Invalid queue. 
Got %v, expected %v", evictor.queue.queue, queuePattern)
}
- setPattern := util.NewStringSet("first", "second", "third")
+ setPattern := sets.NewString("first", "second", "third")
if len(evictor.queue.set) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
@@ -75,7 +76,7 @@ func TestDelNode(t *testing.T) {
t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
}
- setPattern := util.NewStringSet("second", "third")
+ setPattern := sets.NewString("second", "third")
if len(evictor.queue.set) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
@@ -97,7 +98,7 @@ func TestDelNode(t *testing.T) {
t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
}
- setPattern = util.NewStringSet("first", "third")
+ setPattern = sets.NewString("first", "third")
if len(evictor.queue.set) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
@@ -119,7 +120,7 @@ func TestDelNode(t *testing.T) {
t.Errorf("Invalid queue. Got %v, expected %v", evictor.queue.queue, queuePattern)
}
- setPattern = util.NewStringSet("first", "second")
+ setPattern = sets.NewString("first", "second")
if len(evictor.queue.set) != len(setPattern) {
t.Fatalf("Map %v should have length %d", evictor.queue.set, len(setPattern))
}
@@ -135,13 +136,13 @@ func TestTry(t *testing.T) {
evictor.Add("third")
evictor.Remove("second")
- deletedMap := util.NewStringSet()
+ deletedMap := sets.NewString()
evictor.Try(func(value TimedValue) (bool, time.Duration) {
deletedMap.Insert(value.Value)
return true, 0
})
- setPattern := util.NewStringSet("first", "third")
+ setPattern := sets.NewString("first", "third")
if len(deletedMap) != len(setPattern) {
t.Fatalf("Map %v should have length %d", deletedMap, len(setPattern))
}
diff --git a/pkg/controller/replication/replication_controller_test.go b/pkg/controller/replication/replication_controller_test.go
index 5376a69bf09..cd58f9e68c2 100644
--- a/pkg/controller/replication/replication_controller_test.go
+++ b/pkg/controller/replication/replication_controller_test.go
@@ -35,6 +35,7 @@ import (
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/kubernetes/pkg/util"
+ "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/watch"
)
@@ -626,7 +627,7 @@ func TestUpdatePods(t *testing.T) {
// both controllers
manager.updatePod(&pod1, &pod2)
- expected := util.NewStringSet(testControllerSpec1.Name, testControllerSpec2.Name)
+ expected := sets.NewString(testControllerSpec1.Name, testControllerSpec2.Name)
for _, name := range expected.List() {
t.Logf("Expecting update for %+v", name)
select {
diff --git a/pkg/controller/resourcequota/resource_quota_controller_test.go b/pkg/controller/resourcequota/resource_quota_controller_test.go
index 346d6bdf438..4cb0b1f4f73 100644
--- a/pkg/controller/resourcequota/resource_quota_controller_test.go
+++ b/pkg/controller/resourcequota/resource_quota_controller_test.go
@@ -23,7 +23,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/client/unversioned/testclient"
- "k8s.io/kubernetes/pkg/util"
+ "k8s.io/kubernetes/pkg/util/sets"
)
func getResourceList(cpu, memory string) api.ResourceList {
@@ -103,11 +103,11 @@ func TestFilterQuotaPods(t *testing.T) {
Status: api.PodStatus{Phase: api.PodFailed},
},
}
- expectedResults := util.NewStringSet("pod-running",
+ expectedResults
:= sets.NewString("pod-running", "pod-pending", "pod-unknown", "pod-failed-with-restart-always", "pod-failed-with-restart-on-failure") - actualResults := util.StringSet{} + actualResults := sets.String{} result := FilterQuotaPods(pods) for i := range result { actualResults.Insert(result[i].Name) diff --git a/pkg/controller/serviceaccount/serviceaccounts_controller.go b/pkg/controller/serviceaccount/serviceaccounts_controller.go index 7da8f89e495..90c2702bd56 100644 --- a/pkg/controller/serviceaccount/serviceaccounts_controller.go +++ b/pkg/controller/serviceaccount/serviceaccounts_controller.go @@ -29,7 +29,7 @@ import ( "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" ) @@ -45,7 +45,7 @@ func nameIndexFunc(obj interface{}) ([]string, error) { // ServiceAccountsControllerOptions contains options for running a ServiceAccountsController type ServiceAccountsControllerOptions struct { // Names is the set of service account names to ensure exist in every namespace - Names util.StringSet + Names sets.String // ServiceAccountResync is the interval between full resyncs of ServiceAccounts. // If non-zero, all service accounts will be re-listed this often. @@ -59,7 +59,7 @@ type ServiceAccountsControllerOptions struct { } func DefaultServiceAccountsControllerOptions() ServiceAccountsControllerOptions { - return ServiceAccountsControllerOptions{Names: util.NewStringSet("default")} + return ServiceAccountsControllerOptions{Names: sets.NewString("default")} } // NewServiceAccountsController returns a new *ServiceAccountsController. @@ -117,7 +117,7 @@ type ServiceAccountsController struct { stopChan chan struct{} client client.Interface - names util.StringSet + names sets.String serviceAccounts cache.Indexer namespaces cache.Indexer diff --git a/pkg/controller/serviceaccount/serviceaccounts_controller_test.go b/pkg/controller/serviceaccount/serviceaccounts_controller_test.go index e1525bd8aa7..f04892b9869 100644 --- a/pkg/controller/serviceaccount/serviceaccounts_controller_test.go +++ b/pkg/controller/serviceaccount/serviceaccounts_controller_test.go @@ -26,6 +26,7 @@ import ( "k8s.io/kubernetes/pkg/client/unversioned/testclient" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) type serverResponse struct { @@ -101,7 +102,7 @@ func TestServiceAccountCreation(t *testing.T) { "new active namespace missing serviceaccounts": { ExistingServiceAccounts: []*api.ServiceAccount{}, AddedNamespace: activeNS, - ExpectCreatedServiceAccounts: util.NewStringSet(defaultName, managedName).List(), + ExpectCreatedServiceAccounts: sets.NewString(defaultName, managedName).List(), }, "new active namespace missing serviceaccount": { ExistingServiceAccounts: []*api.ServiceAccount{managedServiceAccount}, @@ -123,7 +124,7 @@ func TestServiceAccountCreation(t *testing.T) { "updated active namespace missing serviceaccounts": { ExistingServiceAccounts: []*api.ServiceAccount{}, UpdatedNamespace: activeNS, - ExpectCreatedServiceAccounts: util.NewStringSet(defaultName, managedName).List(), + ExpectCreatedServiceAccounts: sets.NewString(defaultName, managedName).List(), }, "updated active namespace missing serviceaccount": { ExistingServiceAccounts: []*api.ServiceAccount{defaultServiceAccount}, @@ -170,7 +171,7 @@ func TestServiceAccountCreation(t *testing.T) { for k, tc := range testcases { client := 
testclient.NewSimpleFake(defaultServiceAccount, managedServiceAccount) options := DefaultServiceAccountsControllerOptions() - options.Names = util.NewStringSet(defaultName, managedName) + options.Names = sets.NewString(defaultName, managedName) controller := NewServiceAccountsController(client, options) if tc.ExistingNamespace != nil { diff --git a/pkg/controller/serviceaccount/tokens_controller.go b/pkg/controller/serviceaccount/tokens_controller.go index 7fd568a0629..7de46edef14 100644 --- a/pkg/controller/serviceaccount/tokens_controller.go +++ b/pkg/controller/serviceaccount/tokens_controller.go @@ -31,7 +31,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/secret" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" ) @@ -495,8 +495,8 @@ func serviceAccountNameAndUID(secret *api.Secret) (string, string) { return secret.Annotations[api.ServiceAccountNameKey], secret.Annotations[api.ServiceAccountUIDKey] } -func getSecretReferences(serviceAccount *api.ServiceAccount) util.StringSet { - references := util.NewStringSet() +func getSecretReferences(serviceAccount *api.ServiceAccount) sets.String { + references := sets.NewString() for _, secret := range serviceAccount.Secrets { references.Insert(secret.Name) } diff --git a/pkg/credentialprovider/keyring.go b/pkg/credentialprovider/keyring.go index 87a8c862e79..404e8ed4aae 100644 --- a/pkg/credentialprovider/keyring.go +++ b/pkg/credentialprovider/keyring.go @@ -28,7 +28,7 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // DockerKeyring tracks a set of docker registry credentials, maintaining a @@ -90,7 +90,7 @@ func (dk *BasicDockerKeyring) Add(cfg DockerConfig) { } } - eliminateDupes := util.NewStringSet(dk.index...) + eliminateDupes := sets.NewString(dk.index...) 
dk.index = eliminateDupes.List() // Update the index used to identify which credentials to use for a given diff --git a/pkg/expapi/deep_copy_generated.go b/pkg/expapi/deep_copy_generated.go index 648d3ae414c..1879cc8e3ac 100644 --- a/pkg/expapi/deep_copy_generated.go +++ b/pkg/expapi/deep_copy_generated.go @@ -763,23 +763,23 @@ func deepCopy_expapi_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cl return nil } -func deepCopy_expapi_Daemon(in Daemon, out *Daemon, c *conversion.Cloner) error { +func deepCopy_expapi_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := deepCopy_api_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_expapi_DaemonSpec(in.Spec, &out.Spec, c); err != nil { + if err := deepCopy_expapi_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_expapi_DaemonStatus(in.Status, &out.Status, c); err != nil { + if err := deepCopy_expapi_DaemonSetStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_expapi_DaemonList(in DaemonList, out *DaemonList, c *conversion.Cloner) error { +func deepCopy_expapi_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error { if err := deepCopy_api_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } @@ -787,9 +787,9 @@ func deepCopy_expapi_DaemonList(in DaemonList, out *DaemonList, c *conversion.Cl return err } if in.Items != nil { - out.Items = make([]Daemon, len(in.Items)) + out.Items = make([]DaemonSet, len(in.Items)) for i := range in.Items { - if err := deepCopy_expapi_Daemon(in.Items[i], &out.Items[i], c); err != nil { + if err := deepCopy_expapi_DaemonSet(in.Items[i], &out.Items[i], c); err != nil { return err } } @@ -799,7 +799,7 @@ func deepCopy_expapi_DaemonList(in DaemonList, out *DaemonList, c *conversion.Cl return nil } -func deepCopy_expapi_DaemonSpec(in DaemonSpec, out *DaemonSpec, c *conversion.Cloner) error { +func deepCopy_expapi_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error { if in.Selector != nil { out.Selector = make(map[string]string) for key, val := range in.Selector { @@ -819,7 +819,7 @@ func deepCopy_expapi_DaemonSpec(in DaemonSpec, out *DaemonSpec, c *conversion.Cl return nil } -func deepCopy_expapi_DaemonStatus(in DaemonStatus, out *DaemonStatus, c *conversion.Cloner) error { +func deepCopy_expapi_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -1193,10 +1193,10 @@ func init() { deepCopy_api_VolumeSource, deepCopy_resource_Quantity, deepCopy_expapi_APIVersion, - deepCopy_expapi_Daemon, - deepCopy_expapi_DaemonList, - deepCopy_expapi_DaemonSpec, - deepCopy_expapi_DaemonStatus, + deepCopy_expapi_DaemonSet, + deepCopy_expapi_DaemonSetList, + deepCopy_expapi_DaemonSetSpec, + deepCopy_expapi_DaemonSetStatus, deepCopy_expapi_Deployment, deepCopy_expapi_DeploymentList, deepCopy_expapi_DeploymentSpec, diff --git a/pkg/expapi/latest/latest.go b/pkg/expapi/latest/latest.go index 11b4ff2536c..13dbc2c70d8 100644 --- a/pkg/expapi/latest/latest.go +++ b/pkg/expapi/latest/latest.go @@ -26,7 +26,7 @@ import ( _ "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/expapi/v1" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" 
+ "k8s.io/kubernetes/pkg/util/sets" ) var ( @@ -51,9 +51,9 @@ func init() { // the list of kinds that are scoped at the root of the api hierarchy // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := util.NewStringSet() + rootScoped := sets.NewString() - ignoredKinds := util.NewStringSet() + ignoredKinds := sets.NewString() RESTMapper = api.NewDefaultRESTMapper("experimental", Versions, InterfacesFor, importPrefix, ignoredKinds, rootScoped) api.RegisterRESTMapper(RESTMapper) diff --git a/pkg/expapi/register.go b/pkg/expapi/register.go index 35d9246e43e..10e656da2cb 100644 --- a/pkg/expapi/register.go +++ b/pkg/expapi/register.go @@ -36,8 +36,8 @@ func addKnownTypes() { &Scale{}, &ThirdPartyResource{}, &ThirdPartyResourceList{}, - &DaemonList{}, - &Daemon{}, + &DaemonSetList{}, + &DaemonSet{}, &ThirdPartyResourceData{}, &ThirdPartyResourceDataList{}, ) @@ -51,7 +51,7 @@ func (*ReplicationControllerDummy) IsAnAPIObject() {} func (*Scale) IsAnAPIObject() {} func (*ThirdPartyResource) IsAnAPIObject() {} func (*ThirdPartyResourceList) IsAnAPIObject() {} -func (*Daemon) IsAnAPIObject() {} -func (*DaemonList) IsAnAPIObject() {} +func (*DaemonSet) IsAnAPIObject() {} +func (*DaemonSetList) IsAnAPIObject() {} func (*ThirdPartyResourceData) IsAnAPIObject() {} func (*ThirdPartyResourceDataList) IsAnAPIObject() {} diff --git a/pkg/expapi/types.go b/pkg/expapi/types.go index 1612f2e2768..e136b93a890 100644 --- a/pkg/expapi/types.go +++ b/pkg/expapi/types.go @@ -293,65 +293,65 @@ type DeploymentList struct { Items []Deployment `json:"items"` } -// DaemonSpec is the specification of a daemon. -type DaemonSpec struct { - // Selector is a label query over pods that are managed by the daemon. +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // Selector is a label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template. // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors Selector map[string]string `json:"selector,omitempty"` // Template is the object that describes the pod that will be created. - // The Daemon will create exactly one copy of this pod on every node + // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template Template *api.PodTemplateSpec `json:"template,omitempty"` } -// DaemonStatus represents the current status of a daemon. -type DaemonStatus struct { - // CurrentNumberScheduled is the number of nodes that are running exactly 1 copy of the - // daemon and are supposed to run the daemon. +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // CurrentNumberScheduled is the number of nodes that are running exactly 1 + // daemon pod and are supposed to run the daemon pod. CurrentNumberScheduled int `json:"currentNumberScheduled"` - // NumberMisscheduled is the number of nodes that are running the daemon, but are - // not supposed to run the daemon. + // NumberMisscheduled is the number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. 
NumberMisscheduled int `json:"numberMisscheduled"` // DesiredNumberScheduled is the total number of nodes that should be running the daemon - // (including nodes correctly running the daemon). + // pod (including nodes correctly running the daemon pod). DesiredNumberScheduled int `json:"desiredNumberScheduled"` } -// Daemon represents the configuration of a daemon. -type Daemon struct { +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { api.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata api.ObjectMeta `json:"metadata,omitempty"` - // Spec defines the desired behavior of this daemon. + // Spec defines the desired behavior of this daemon set. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - Spec DaemonSpec `json:"spec,omitempty"` + Spec DaemonSetSpec `json:"spec,omitempty"` - // Status is the current status of this daemon. This data may be + // Status is the current status of this daemon set. This data may be // out of date by some window of time. // Populated by the system. // Read-only. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - Status DaemonStatus `json:"status,omitempty"` + Status DaemonSetStatus `json:"status,omitempty"` } -// DaemonList is a collection of daemon. -type DaemonList struct { +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { api.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata api.ListMeta `json:"metadata,omitempty"` - // Items is a list of daemons. - Items []Daemon `json:"items"` + // Items is a list of daemon sets. 
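The rename itself is mechanical, but the new names are easier to follow when composed the way the validation tests later in this patch compose them. A minimal sketch, assuming the post-rename expapi package; the object name, labels, and image here are hypothetical:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/expapi"
)

func main() {
	// A DaemonSet runs one copy of the template's pod on every matching node;
	// Spec.Selector must agree with the labels on the embedded pod template.
	ds := expapi.DaemonSet{
		ObjectMeta: api.ObjectMeta{Name: "log-agent", Namespace: api.NamespaceDefault},
		Spec: expapi.DaemonSetSpec{
			Selector: map[string]string{"app": "log-agent"},
			Template: &api.PodTemplateSpec{
				ObjectMeta: api.ObjectMeta{Labels: map[string]string{"app": "log-agent"}},
				Spec: api.PodSpec{
					Containers: []api.Container{{Name: "agent", Image: "example.com/agent:1.0"}},
				},
			},
		},
	}
	fmt.Printf("%s selects %v\n", ds.Name, ds.Spec.Selector)
}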
+ Items []DaemonSet `json:"items"` } type ThirdPartyResourceDataList struct { diff --git a/pkg/expapi/v1/conversion_generated.go b/pkg/expapi/v1/conversion_generated.go index a05cdeb760c..12289ef2363 100644 --- a/pkg/expapi/v1/conversion_generated.go +++ b/pkg/expapi/v1/conversion_generated.go @@ -1571,9 +1571,9 @@ func convert_expapi_APIVersion_To_v1_APIVersion(in *expapi.APIVersion, out *APIV return nil } -func convert_expapi_Daemon_To_v1_Daemon(in *expapi.Daemon, out *Daemon, s conversion.Scope) error { +func convert_expapi_DaemonSet_To_v1_DaemonSet(in *expapi.DaemonSet, out *DaemonSet, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.Daemon))(in) + defaulting.(func(*expapi.DaemonSet))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1581,18 +1581,18 @@ func convert_expapi_Daemon_To_v1_Daemon(in *expapi.Daemon, out *Daemon, s conver if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if err := convert_expapi_DaemonSpec_To_v1_DaemonSpec(&in.Spec, &out.Spec, s); err != nil { + if err := convert_expapi_DaemonSetSpec_To_v1_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := convert_expapi_DaemonStatus_To_v1_DaemonStatus(&in.Status, &out.Status, s); err != nil { + if err := convert_expapi_DaemonSetStatus_To_v1_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func convert_expapi_DaemonList_To_v1_DaemonList(in *expapi.DaemonList, out *DaemonList, s conversion.Scope) error { +func convert_expapi_DaemonSetList_To_v1_DaemonSetList(in *expapi.DaemonSetList, out *DaemonSetList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.DaemonList))(in) + defaulting.(func(*expapi.DaemonSetList))(in) } if err := convert_api_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1601,9 +1601,9 @@ func convert_expapi_DaemonList_To_v1_DaemonList(in *expapi.DaemonList, out *Daem return err } if in.Items != nil { - out.Items = make([]Daemon, len(in.Items)) + out.Items = make([]DaemonSet, len(in.Items)) for i := range in.Items { - if err := convert_expapi_Daemon_To_v1_Daemon(&in.Items[i], &out.Items[i], s); err != nil { + if err := convert_expapi_DaemonSet_To_v1_DaemonSet(&in.Items[i], &out.Items[i], s); err != nil { return err } } @@ -1613,9 +1613,9 @@ func convert_expapi_DaemonList_To_v1_DaemonList(in *expapi.DaemonList, out *Daem return nil } -func convert_expapi_DaemonSpec_To_v1_DaemonSpec(in *expapi.DaemonSpec, out *DaemonSpec, s conversion.Scope) error { +func convert_expapi_DaemonSetSpec_To_v1_DaemonSetSpec(in *expapi.DaemonSetSpec, out *DaemonSetSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.DaemonSpec))(in) + defaulting.(func(*expapi.DaemonSetSpec))(in) } if in.Selector != nil { out.Selector = make(map[string]string) @@ -1636,9 +1636,9 @@ func convert_expapi_DaemonSpec_To_v1_DaemonSpec(in *expapi.DaemonSpec, out *Daem return nil } -func convert_expapi_DaemonStatus_To_v1_DaemonStatus(in *expapi.DaemonStatus, out *DaemonStatus, s conversion.Scope) error { +func convert_expapi_DaemonSetStatus_To_v1_DaemonSetStatus(in *expapi.DaemonSetStatus, out *DaemonSetStatus, s conversion.Scope) error { if defaulting, found := 
s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*expapi.DaemonStatus))(in) + defaulting.(func(*expapi.DaemonSetStatus))(in) } out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled @@ -1959,9 +1959,9 @@ func convert_v1_APIVersion_To_expapi_APIVersion(in *APIVersion, out *expapi.APIV return nil } -func convert_v1_Daemon_To_expapi_Daemon(in *Daemon, out *expapi.Daemon, s conversion.Scope) error { +func convert_v1_DaemonSet_To_expapi_DaemonSet(in *DaemonSet, out *expapi.DaemonSet, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*Daemon))(in) + defaulting.(func(*DaemonSet))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1969,18 +1969,18 @@ func convert_v1_Daemon_To_expapi_Daemon(in *Daemon, out *expapi.Daemon, s conver if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil { return err } - if err := convert_v1_DaemonSpec_To_expapi_DaemonSpec(&in.Spec, &out.Spec, s); err != nil { + if err := convert_v1_DaemonSetSpec_To_expapi_DaemonSetSpec(&in.Spec, &out.Spec, s); err != nil { return err } - if err := convert_v1_DaemonStatus_To_expapi_DaemonStatus(&in.Status, &out.Status, s); err != nil { + if err := convert_v1_DaemonSetStatus_To_expapi_DaemonSetStatus(&in.Status, &out.Status, s); err != nil { return err } return nil } -func convert_v1_DaemonList_To_expapi_DaemonList(in *DaemonList, out *expapi.DaemonList, s conversion.Scope) error { +func convert_v1_DaemonSetList_To_expapi_DaemonSetList(in *DaemonSetList, out *expapi.DaemonSetList, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*DaemonList))(in) + defaulting.(func(*DaemonSetList))(in) } if err := convert_v1_TypeMeta_To_api_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil { return err @@ -1989,9 +1989,9 @@ func convert_v1_DaemonList_To_expapi_DaemonList(in *DaemonList, out *expapi.Daem return err } if in.Items != nil { - out.Items = make([]expapi.Daemon, len(in.Items)) + out.Items = make([]expapi.DaemonSet, len(in.Items)) for i := range in.Items { - if err := convert_v1_Daemon_To_expapi_Daemon(&in.Items[i], &out.Items[i], s); err != nil { + if err := convert_v1_DaemonSet_To_expapi_DaemonSet(&in.Items[i], &out.Items[i], s); err != nil { return err } } @@ -2001,9 +2001,9 @@ func convert_v1_DaemonList_To_expapi_DaemonList(in *DaemonList, out *expapi.Daem return nil } -func convert_v1_DaemonSpec_To_expapi_DaemonSpec(in *DaemonSpec, out *expapi.DaemonSpec, s conversion.Scope) error { +func convert_v1_DaemonSetSpec_To_expapi_DaemonSetSpec(in *DaemonSetSpec, out *expapi.DaemonSetSpec, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*DaemonSpec))(in) + defaulting.(func(*DaemonSetSpec))(in) } if in.Selector != nil { out.Selector = make(map[string]string) @@ -2024,9 +2024,9 @@ func convert_v1_DaemonSpec_To_expapi_DaemonSpec(in *DaemonSpec, out *expapi.Daem return nil } -func convert_v1_DaemonStatus_To_expapi_DaemonStatus(in *DaemonStatus, out *expapi.DaemonStatus, s conversion.Scope) error { +func convert_v1_DaemonSetStatus_To_expapi_DaemonSetStatus(in *DaemonSetStatus, out *expapi.DaemonSetStatus, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { - defaulting.(func(*DaemonStatus))(in) + 
defaulting.(func(*DaemonSetStatus))(in) } out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled @@ -2379,10 +2379,10 @@ func init() { convert_api_VolumeSource_To_v1_VolumeSource, convert_api_Volume_To_v1_Volume, convert_expapi_APIVersion_To_v1_APIVersion, - convert_expapi_DaemonList_To_v1_DaemonList, - convert_expapi_DaemonSpec_To_v1_DaemonSpec, - convert_expapi_DaemonStatus_To_v1_DaemonStatus, - convert_expapi_Daemon_To_v1_Daemon, + convert_expapi_DaemonSetList_To_v1_DaemonSetList, + convert_expapi_DaemonSetSpec_To_v1_DaemonSetSpec, + convert_expapi_DaemonSetStatus_To_v1_DaemonSetStatus, + convert_expapi_DaemonSet_To_v1_DaemonSet, convert_expapi_DeploymentList_To_v1_DeploymentList, convert_expapi_DeploymentStatus_To_v1_DeploymentStatus, convert_expapi_Deployment_To_v1_Deployment, @@ -2407,10 +2407,10 @@ func init() { convert_v1_CinderVolumeSource_To_api_CinderVolumeSource, convert_v1_ContainerPort_To_api_ContainerPort, convert_v1_Container_To_api_Container, - convert_v1_DaemonList_To_expapi_DaemonList, - convert_v1_DaemonSpec_To_expapi_DaemonSpec, - convert_v1_DaemonStatus_To_expapi_DaemonStatus, - convert_v1_Daemon_To_expapi_Daemon, + convert_v1_DaemonSetList_To_expapi_DaemonSetList, + convert_v1_DaemonSetSpec_To_expapi_DaemonSetSpec, + convert_v1_DaemonSetStatus_To_expapi_DaemonSetStatus, + convert_v1_DaemonSet_To_expapi_DaemonSet, convert_v1_DeploymentList_To_expapi_DeploymentList, convert_v1_DeploymentStatus_To_expapi_DeploymentStatus, convert_v1_Deployment_To_expapi_Deployment, diff --git a/pkg/expapi/v1/deep_copy_generated.go b/pkg/expapi/v1/deep_copy_generated.go index a61f1a17114..0e5ba1cb976 100644 --- a/pkg/expapi/v1/deep_copy_generated.go +++ b/pkg/expapi/v1/deep_copy_generated.go @@ -765,23 +765,23 @@ func deepCopy_v1_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner return nil } -func deepCopy_v1_Daemon(in Daemon, out *Daemon, c *conversion.Cloner) error { +func deepCopy_v1_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error { if err := deepCopy_v1_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { return err } - if err := deepCopy_v1_DaemonSpec(in.Spec, &out.Spec, c); err != nil { + if err := deepCopy_v1_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil { return err } - if err := deepCopy_v1_DaemonStatus(in.Status, &out.Status, c); err != nil { + if err := deepCopy_v1_DaemonSetStatus(in.Status, &out.Status, c); err != nil { return err } return nil } -func deepCopy_v1_DaemonList(in DaemonList, out *DaemonList, c *conversion.Cloner) error { +func deepCopy_v1_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error { if err := deepCopy_v1_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { return err } @@ -789,9 +789,9 @@ func deepCopy_v1_DaemonList(in DaemonList, out *DaemonList, c *conversion.Cloner return err } if in.Items != nil { - out.Items = make([]Daemon, len(in.Items)) + out.Items = make([]DaemonSet, len(in.Items)) for i := range in.Items { - if err := deepCopy_v1_Daemon(in.Items[i], &out.Items[i], c); err != nil { + if err := deepCopy_v1_DaemonSet(in.Items[i], &out.Items[i], c); err != nil { return err } } @@ -801,7 +801,7 @@ func deepCopy_v1_DaemonList(in DaemonList, out *DaemonList, c *conversion.Cloner return nil } -func deepCopy_v1_DaemonSpec(in DaemonSpec, out *DaemonSpec, c *conversion.Cloner) error { +func deepCopy_v1_DaemonSetSpec(in DaemonSetSpec, out 
*DaemonSetSpec, c *conversion.Cloner) error { if in.Selector != nil { out.Selector = make(map[string]string) for key, val := range in.Selector { @@ -821,7 +821,7 @@ func deepCopy_v1_DaemonSpec(in DaemonSpec, out *DaemonSpec, c *conversion.Cloner return nil } -func deepCopy_v1_DaemonStatus(in DaemonStatus, out *DaemonStatus, c *conversion.Cloner) error { +func deepCopy_v1_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error { out.CurrentNumberScheduled = in.CurrentNumberScheduled out.NumberMisscheduled = in.NumberMisscheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled @@ -1215,10 +1215,10 @@ func init() { deepCopy_v1_VolumeMount, deepCopy_v1_VolumeSource, deepCopy_v1_APIVersion, - deepCopy_v1_Daemon, - deepCopy_v1_DaemonList, - deepCopy_v1_DaemonSpec, - deepCopy_v1_DaemonStatus, + deepCopy_v1_DaemonSet, + deepCopy_v1_DaemonSetList, + deepCopy_v1_DaemonSetSpec, + deepCopy_v1_DaemonSetStatus, deepCopy_v1_Deployment, deepCopy_v1_DeploymentList, deepCopy_v1_DeploymentSpec, diff --git a/pkg/expapi/v1/defaults.go b/pkg/expapi/v1/defaults.go index 105a49ee3de..cc141513c25 100644 --- a/pkg/expapi/v1/defaults.go +++ b/pkg/expapi/v1/defaults.go @@ -28,7 +28,7 @@ func addDefaultingFuncs() { obj.APIGroup = "experimental" } }, - func(obj *Daemon) { + func(obj *DaemonSet) { var labels map[string]string if obj.Spec.Template != nil { labels = obj.Spec.Template.Labels diff --git a/pkg/expapi/v1/defaults_test.go b/pkg/expapi/v1/defaults_test.go index 3463534fc9d..afc00939fa2 100644 --- a/pkg/expapi/v1/defaults_test.go +++ b/pkg/expapi/v1/defaults_test.go @@ -26,14 +26,14 @@ import ( "k8s.io/kubernetes/pkg/util" ) -func TestSetDefaultDaemon(t *testing.T) { +func TestSetDefaultDaemonSet(t *testing.T) { tests := []struct { - dc *Daemon + ds *DaemonSet expectLabelsChange bool }{ { - dc: &Daemon{ - Spec: DaemonSpec{ + ds: &DaemonSet{ + Spec: DaemonSetSpec{ Template: &v1.PodTemplateSpec{ ObjectMeta: v1.ObjectMeta{ Labels: map[string]string{ @@ -46,13 +46,13 @@ func TestSetDefaultDaemon(t *testing.T) { expectLabelsChange: true, }, { - dc: &Daemon{ + ds: &DaemonSet{ ObjectMeta: v1.ObjectMeta{ Labels: map[string]string{ "bar": "foo", }, }, - Spec: DaemonSpec{ + Spec: DaemonSetSpec{ Template: &v1.PodTemplateSpec{ ObjectMeta: v1.ObjectMeta{ Labels: map[string]string{ @@ -67,18 +67,18 @@ func TestSetDefaultDaemon(t *testing.T) { } for _, test := range tests { - dc := test.dc - obj2 := roundTrip(t, runtime.Object(dc)) - dc2, ok := obj2.(*Daemon) + ds := test.ds + obj2 := roundTrip(t, runtime.Object(ds)) + ds2, ok := obj2.(*DaemonSet) if !ok { - t.Errorf("unexpected object: %v", dc2) + t.Errorf("unexpected object: %v", ds2) t.FailNow() } - if test.expectLabelsChange != reflect.DeepEqual(dc2.Labels, dc2.Spec.Template.Labels) { + if test.expectLabelsChange != reflect.DeepEqual(ds2.Labels, ds2.Spec.Template.Labels) { if test.expectLabelsChange { - t.Errorf("expected: %v, got: %v", dc2.Spec.Template.Labels, dc2.Labels) + t.Errorf("expected: %v, got: %v", ds2.Spec.Template.Labels, ds2.Labels) } else { - t.Errorf("unexpected equality: %v", dc.Labels) + t.Errorf("unexpected equality: %v", ds.Labels) } } } diff --git a/pkg/expapi/v1/register.go b/pkg/expapi/v1/register.go index e6fab8c212e..30bb3d923d4 100644 --- a/pkg/expapi/v1/register.go +++ b/pkg/expapi/v1/register.go @@ -40,8 +40,8 @@ func addKnownTypes() { &Scale{}, &ThirdPartyResource{}, &ThirdPartyResourceList{}, - &DaemonList{}, - &Daemon{}, + &DaemonSetList{}, + &DaemonSet{}, &ThirdPartyResourceData{}, 
&ThirdPartyResourceDataList{}, ) @@ -55,7 +55,7 @@ func (*ReplicationControllerDummy) IsAnAPIObject() {} func (*Scale) IsAnAPIObject() {} func (*ThirdPartyResource) IsAnAPIObject() {} func (*ThirdPartyResourceList) IsAnAPIObject() {} -func (*Daemon) IsAnAPIObject() {} -func (*DaemonList) IsAnAPIObject() {} +func (*DaemonSet) IsAnAPIObject() {} +func (*DaemonSetList) IsAnAPIObject() {} func (*ThirdPartyResourceData) IsAnAPIObject() {} func (*ThirdPartyResourceDataList) IsAnAPIObject() {} diff --git a/pkg/expapi/v1/types.go b/pkg/expapi/v1/types.go index 19fb02b15a1..e3d120f9e12 100644 --- a/pkg/expapi/v1/types.go +++ b/pkg/expapi/v1/types.go @@ -292,65 +292,65 @@ type DeploymentList struct { Items []Deployment `json:"items"` } -// DaemonSpec is the specification of a daemon. -type DaemonSpec struct { - // Selector is a label query over pods that are managed by the daemon. +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // Selector is a label query over pods that are managed by the daemon set. // Must match in order to be controlled. // If empty, defaulted to labels on Pod template. // More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors Selector map[string]string `json:"selector,omitempty"` // Template is the object that describes the pod that will be created. - // The Daemon will create exactly one copy of this pod on every node + // The DaemonSet will create exactly one copy of this pod on every node // that matches the template's node selector (or on every node if no node // selector is specified). // More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template Template *v1.PodTemplateSpec `json:"template,omitempty"` } -// DaemonStatus represents the current status of a daemon. -type DaemonStatus struct { - // CurrentNumberScheduled is the number of nodes that are running exactly 1 copy of the - // daemon and are supposed to run the daemon. +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // CurrentNumberScheduled is the number of nodes that are running exactly 1 + // daemon pod and are supposed to run the daemon pod. CurrentNumberScheduled int `json:"currentNumberScheduled"` - // NumberMisscheduled is the number of nodes that are running the daemon, but are - // not supposed to run the daemon. + // NumberMisscheduled is the number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. NumberMisscheduled int `json:"numberMisscheduled"` // DesiredNumberScheduled is the total number of nodes that should be running the daemon - // (including nodes correctly running the daemon). + // pod (including nodes correctly running the daemon pod). DesiredNumberScheduled int `json:"desiredNumberScheduled"` } -// Daemon represents the configuration of a daemon. -type Daemon struct { +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { v1.TypeMeta `json:",inline"` // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata v1.ObjectMeta `json:"metadata,omitempty"` - // Spec defines the desired behavior of this daemon. + // Spec defines the desired behavior of this daemon set. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - Spec DaemonSpec `json:"spec,omitempty"` + Spec DaemonSetSpec `json:"spec,omitempty"` - // Status is the current status of this daemon. 
This data may be + // Status is the current status of this daemon set. This data may be // out of date by some window of time. // Populated by the system. // Read-only. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status - Status DaemonStatus `json:"status,omitempty"` + Status DaemonSetStatus `json:"status,omitempty"` } -// DaemonList is a list of Daemons. -type DaemonList struct { +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { v1.TypeMeta `json:",inline"` // Standard list metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata v1.ListMeta `json:"metadata,omitempty"` - // Items is the list of Daemons. - Items []Daemon `json:"items"` + // Items is a list of daemon sets. + Items []DaemonSet `json:"items"` } // ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. diff --git a/pkg/expapi/v1/types_swagger_doc_generated.go b/pkg/expapi/v1/types_swagger_doc_generated.go index 584d693f198..ced86254859 100644 --- a/pkg/expapi/v1/types_swagger_doc_generated.go +++ b/pkg/expapi/v1/types_swagger_doc_generated.go @@ -37,46 +37,46 @@ func (APIVersion) SwaggerDoc() map[string]string { return map_APIVersion } -var map_Daemon = map[string]string{ - "": "Daemon represents the configuration of a daemon.", +var map_DaemonSet = map[string]string{ + "": "DaemonSet represents the configuration of a daemon set.", "metadata": "Standard object's metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired behavior of this daemon. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", - "status": "Status is the current status of this daemon. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "spec": "Spec defines the desired behavior of this daemon set. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", + "status": "Status is the current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status", } -func (Daemon) SwaggerDoc() map[string]string { - return map_Daemon +func (DaemonSet) SwaggerDoc() map[string]string { + return map_DaemonSet } -var map_DaemonList = map[string]string{ - "": "DaemonList is a list of Daemons.", +var map_DaemonSetList = map[string]string{ + "": "DaemonSetList is a collection of daemon sets.", "metadata": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata", - "items": "Items is the list of Daemons.", + "items": "Items is a list of daemon sets.", } -func (DaemonList) SwaggerDoc() map[string]string { - return map_DaemonList +func (DaemonSetList) SwaggerDoc() map[string]string { + return map_DaemonSetList } -var map_DaemonSpec = map[string]string{ - "": "DaemonSpec is the specification of a daemon.", - "selector": "Selector is a label query over pods that are managed by the daemon. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", - "template": "Template is the object that describes the pod that will be created. 
The Daemon will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template", +var map_DaemonSetSpec = map[string]string{ + "": "DaemonSetSpec is the specification of a daemon set.", + "selector": "Selector is a label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: http://releases.k8s.io/HEAD/docs/user-guide/labels.md#label-selectors", + "template": "Template is the object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: http://releases.k8s.io/HEAD/docs/user-guide/replication-controller.md#pod-template", } -func (DaemonSpec) SwaggerDoc() map[string]string { - return map_DaemonSpec +func (DaemonSetSpec) SwaggerDoc() map[string]string { + return map_DaemonSetSpec } -var map_DaemonStatus = map[string]string{ - "": "DaemonStatus represents the current status of a daemon.", - "currentNumberScheduled": "CurrentNumberScheduled is the number of nodes that are running exactly 1 copy of the daemon and are supposed to run the daemon.", - "numberMisscheduled": "NumberMisscheduled is the number of nodes that are running the daemon, but are not supposed to run the daemon.", - "desiredNumberScheduled": "DesiredNumberScheduled is the total number of nodes that should be running the daemon (including nodes correctly running the daemon).", +var map_DaemonSetStatus = map[string]string{ + "": "DaemonSetStatus represents the current status of a daemon set.", + "currentNumberScheduled": "CurrentNumberScheduled is the number of nodes that are running exactly 1 daemon pod and are supposed to run the daemon pod.", + "numberMisscheduled": "NumberMisscheduled is the number of nodes that are running the daemon pod, but are not supposed to run the daemon pod.", + "desiredNumberScheduled": "DesiredNumberScheduled is the total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod).", } -func (DaemonStatus) SwaggerDoc() map[string]string { - return map_DaemonStatus +func (DaemonSetStatus) SwaggerDoc() map[string]string { + return map_DaemonSetStatus } var map_Deployment = map[string]string{ diff --git a/pkg/expapi/validation/validation.go b/pkg/expapi/validation/validation.go index 7cc986691c9..2e1c653808d 100644 --- a/pkg/expapi/validation/validation.go +++ b/pkg/expapi/validation/validation.go @@ -25,6 +25,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util" errs "k8s.io/kubernetes/pkg/util/fielderrors" + "k8s.io/kubernetes/pkg/util/sets" ) // ValidateHorizontalPodAutoscaler can be used to check whether the given autoscaler name is valid. @@ -79,7 +80,7 @@ func ValidateThirdPartyResource(obj *expapi.ThirdPartyResource) errs.ValidationE if len(obj.Name) == 0 { allErrs = append(allErrs, errs.NewFieldInvalid("name", obj.Name, "name must be non-empty")) } - versions := util.StringSet{} + versions := sets.String{} for ix := range obj.Versions { version := &obj.Versions[ix] if len(version.Name) == 0 { @@ -93,25 +94,25 @@ func ValidateThirdPartyResource(obj *expapi.ThirdPartyResource) errs.ValidationE return allErrs } -// ValidateDaemon tests if required fields in the daemon are set. 
-func ValidateDaemon(controller *expapi.Daemon) errs.ValidationErrorList { +// ValidateDaemonSet tests if required fields in the DaemonSet are set. +func ValidateDaemonSet(controller *expapi.DaemonSet) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&controller.ObjectMeta, true, apivalidation.ValidateReplicationControllerName).Prefix("metadata")...) - allErrs = append(allErrs, ValidateDaemonSpec(&controller.Spec).Prefix("spec")...) + allErrs = append(allErrs, ValidateDaemonSetSpec(&controller.Spec).Prefix("spec")...) return allErrs } -// ValidateDaemonUpdate tests if required fields in the daemon are set. -func ValidateDaemonUpdate(oldController, controller *expapi.Daemon) errs.ValidationErrorList { +// ValidateDaemonSetUpdate tests if required fields in the DaemonSet are set. +func ValidateDaemonSetUpdate(oldController, controller *expapi.DaemonSet) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta).Prefix("metadata")...) - allErrs = append(allErrs, ValidateDaemonSpec(&controller.Spec).Prefix("spec")...) - allErrs = append(allErrs, ValidateDaemonTemplateUpdate(oldController.Spec.Template, controller.Spec.Template).Prefix("spec.template")...) + allErrs = append(allErrs, ValidateDaemonSetSpec(&controller.Spec).Prefix("spec")...) + allErrs = append(allErrs, ValidateDaemonSetTemplateUpdate(oldController.Spec.Template, controller.Spec.Template).Prefix("spec.template")...) return allErrs } -// ValidateDaemonTemplateUpdate tests that certain fields in the daemon's pod template are not updated. -func ValidateDaemonTemplateUpdate(oldPodTemplate, podTemplate *api.PodTemplateSpec) errs.ValidationErrorList { +// ValidateDaemonSetTemplateUpdate tests that certain fields in the daemon set's pod template are not updated. +func ValidateDaemonSetTemplateUpdate(oldPodTemplate, podTemplate *api.PodTemplateSpec) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} podSpec := podTemplate.Spec // podTemplate.Spec is not a pointer, so we can modify NodeSelector and NodeName directly. @@ -125,8 +126,8 @@ func ValidateDaemonTemplateUpdate(oldPodTemplate, podTemplate *api.PodTemplateSp return allErrs } -// ValidateDaemonSpec tests if required fields in the daemon spec are set. -func ValidateDaemonSpec(spec *expapi.DaemonSpec) errs.ValidationErrorList { +// ValidateDaemonSetSpec tests if required fields in the DaemonSetSpec are set. +func ValidateDaemonSetSpec(spec *expapi.DaemonSetSpec) errs.ValidationErrorList { allErrs := errs.ValidationErrorList{} selector := labels.Set(spec.Selector).AsSelector() @@ -152,10 +153,10 @@ func ValidateDaemonSpec(spec *expapi.DaemonSpec) errs.ValidationErrorList { return allErrs } -// ValidateDaemonName can be used to check whether the given daemon name is valid. +// ValidateDaemonSetName can be used to check whether the given daemon set name is valid. // Prefix indicates this name will be used as part of generation, in which case // trailing dashes are allowed. 
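Reduced to its core, the spec validation renamed here enforces three things: a selector is present, the selector actually matches the template's labels, and a template is supplied. A standalone sketch of that logic, illustrative only and not the real ValidateDaemonSetSpec:

package main

import "fmt"

// validateDaemonSetSpec distills the checks above: require a selector,
// require it to match the pod template's labels, require a template.
func validateDaemonSetSpec(selector, templateLabels map[string]string, hasTemplate bool) []string {
	var errs []string
	if len(selector) == 0 {
		errs = append(errs, "spec.selector: required value")
	}
	for k, v := range selector {
		if templateLabels[k] != v {
			errs = append(errs, "spec.template.metadata.labels: selector does not match template")
			break
		}
	}
	if !hasTemplate {
		errs = append(errs, "spec.template: required value")
	}
	return errs
}

func main() {
	// Mirrors the "selector_doesnt_match" error case in the tests below.
	errs := validateDaemonSetSpec(map[string]string{"foo": "bar"}, map[string]string{"a": "b"}, true)
	fmt.Println(errs)
}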
-func ValidateDaemonName(name string, prefix bool) (bool, string) { +func ValidateDaemonSetName(name string, prefix bool) (bool, string) { return apivalidation.NameIsDNSSubdomain(name, prefix) } diff --git a/pkg/expapi/validation/validation_test.go b/pkg/expapi/validation/validation_test.go index 14ca8974201..91c562feb71 100644 --- a/pkg/expapi/validation/validation_test.go +++ b/pkg/expapi/validation/validation_test.go @@ -130,7 +130,7 @@ func TestValidateHorizontalPodAutoscaler(t *testing.T) { } } -func TestValidateDaemonUpdate(t *testing.T) { +func TestValidateDaemonSetUpdate(t *testing.T) { validSelector := map[string]string{"a": "b"} validSelector2 := map[string]string{"c": "d"} invalidSelector := map[string]string{"NoUppercaseOrSpecialCharsLike=Equals": "b"} @@ -211,54 +211,54 @@ func TestValidateDaemonUpdate(t *testing.T) { }, } - type dcUpdateTest struct { - old expapi.Daemon - update expapi.Daemon + type dsUpdateTest struct { + old expapi.DaemonSet + update expapi.DaemonSet } - successCases := []dcUpdateTest{ + successCases := []dsUpdateTest{ { - old: expapi.Daemon{ + old: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.Daemon{ + update: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, }, { - old: expapi.Daemon{ + old: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.Daemon{ + update: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector2, Template: &validPodTemplateAbc2.Template, }, }, }, { - old: expapi.Daemon{ + old: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.Daemon{ + update: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateNodeSelector.Template, }, @@ -268,86 +268,86 @@ func TestValidateDaemonUpdate(t *testing.T) { for _, successCase := range successCases { successCase.old.ObjectMeta.ResourceVersion = "1" successCase.update.ObjectMeta.ResourceVersion = "1" - if errs := ValidateDaemonUpdate(&successCase.old, &successCase.update); len(errs) != 0 { + if errs := ValidateDaemonSetUpdate(&successCase.old, &successCase.update); len(errs) != 0 { t.Errorf("expected success: %v", errs) } } - errorCases := map[string]dcUpdateTest{ + errorCases := map[string]dsUpdateTest{ "change daemon name": { - old: expapi.Daemon{ + old: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.Daemon{ + update: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + 
Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, }, "invalid selector": { - old: expapi.Daemon{ + old: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.Daemon{ + update: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: invalidSelector, Template: &validPodTemplateAbc.Template, }, }, }, "invalid pod": { - old: expapi.Daemon{ + old: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.Daemon{ + update: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &invalidPodTemplate.Template, }, }, }, "change container image": { - old: expapi.Daemon{ + old: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.Daemon{ + update: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateDef.Template, }, }, }, "read-write volume": { - old: expapi.Daemon{ + old: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplateAbc.Template, }, }, - update: expapi.Daemon{ + update: expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &readWriteVolumePodTemplate.Template, }, @@ -355,13 +355,13 @@ func TestValidateDaemonUpdate(t *testing.T) { }, } for testName, errorCase := range errorCases { - if errs := ValidateDaemonUpdate(&errorCase.old, &errorCase.update); len(errs) == 0 { + if errs := ValidateDaemonSetUpdate(&errorCase.old, &errorCase.update); len(errs) == 0 { t.Errorf("expected failure: %s", testName) } } } -func TestValidateDaemon(t *testing.T) { +func TestValidateDaemonSet(t *testing.T) { validSelector := map[string]string{"a": "b"} validPodTemplate := api.PodTemplate{ Template: api.PodTemplateSpec{ @@ -387,59 +387,59 @@ func TestValidateDaemon(t *testing.T) { }, }, } - successCases := []expapi.Daemon{ + successCases := []expapi.DaemonSet{ { ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, { ObjectMeta: api.ObjectMeta{Name: "abc-123", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, } for _, successCase := range successCases { - if errs := ValidateDaemon(&successCase); len(errs) != 0 { + if errs := ValidateDaemonSet(&successCase); len(errs) != 0 { t.Errorf("expected success: %v", errs) } } - errorCases := 
map[string]expapi.Daemon{ + errorCases := map[string]expapi.DaemonSet{ "zero-length ID": { ObjectMeta: api.ObjectMeta{Name: "", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, "missing-namespace": { ObjectMeta: api.ObjectMeta{Name: "abc-123"}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, }, "empty selector": { ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Template: &validPodTemplate.Template, }, }, "selector_doesnt_match": { ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: map[string]string{"foo": "bar"}, Template: &validPodTemplate.Template, }, }, "invalid manifest": { ObjectMeta: api.ObjectMeta{Name: "abc", Namespace: api.NamespaceDefault}, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, }, }, @@ -451,7 +451,7 @@ func TestValidateDaemon(t *testing.T) { "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, }, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, @@ -464,7 +464,7 @@ func TestValidateDaemon(t *testing.T) { "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, }, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Template: &invalidPodTemplate.Template, }, }, @@ -476,7 +476,7 @@ func TestValidateDaemon(t *testing.T) { "NoUppercaseOrSpecialCharsLike=Equals": "bar", }, }, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &validPodTemplate.Template, }, @@ -486,7 +486,7 @@ func TestValidateDaemon(t *testing.T) { Name: "abc-123", Namespace: api.NamespaceDefault, }, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &api.PodTemplateSpec{ Spec: api.PodSpec{ @@ -505,7 +505,7 @@ func TestValidateDaemon(t *testing.T) { Name: "abc-123", Namespace: api.NamespaceDefault, }, - Spec: expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: validSelector, Template: &api.PodTemplateSpec{ Spec: api.PodSpec{ @@ -521,7 +521,7 @@ func TestValidateDaemon(t *testing.T) { }, } for k, v := range errorCases { - errs := ValidateDaemon(&v) + errs := ValidateDaemonSet(&v) if len(errs) == 0 { t.Errorf("expected failure for %s", k) } diff --git a/pkg/kubectl/cmd/config/navigation_step_parser.go b/pkg/kubectl/cmd/config/navigation_step_parser.go index 835f1998645..1d4272a813d 100644 --- a/pkg/kubectl/cmd/config/navigation_step_parser.go +++ b/pkg/kubectl/cmd/config/navigation_step_parser.go @@ -22,7 +22,7 @@ import ( "strings" clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) type navigationSteps struct { @@ -55,7 +55,7 @@ func newNavigationSteps(path string) (*navigationSteps, error) { if err != nil { return nil, err } - nextPart := findNameStep(individualParts[currPartIndex:], util.KeySet(reflect.ValueOf(mapValueOptions))) + nextPart := findNameStep(individualParts[currPartIndex:], sets.KeySet(reflect.ValueOf(mapValueOptions))) steps = append(steps, navigationStep{nextPart, mapValueType}) currPartIndex += len(strings.Split(nextPart, ".")) @@ -103,7 +103,7 @@ func (s *navigationSteps) moreStepsRemaining() bool { // findNameStep takes the list of parts and a set of valid tags 
that can be used after the name. It then walks the list of parts // until it finds a valid "next" tag or until it reaches the end of the parts and then builds the name back up out of the individual parts -func findNameStep(parts []string, typeOptions util.StringSet) string { +func findNameStep(parts []string, typeOptions sets.String) string { if len(parts) == 0 { return "" } @@ -141,7 +141,7 @@ func getPotentialTypeValues(typeValue reflect.Type) (map[string]reflect.Type, er return ret, nil } -func findKnownValue(parts []string, valueOptions util.StringSet) int { +func findKnownValue(parts []string, valueOptions sets.String) int { for i := range parts { if valueOptions.Has(parts[i]) { return i diff --git a/pkg/kubectl/cmd/describe.go b/pkg/kubectl/cmd/describe.go index c5b6fc4668a..4a026f883e5 100644 --- a/pkg/kubectl/cmd/describe.go +++ b/pkg/kubectl/cmd/describe.go @@ -52,7 +52,7 @@ exists, it will output details for every resource that has a name prefixed with Possible resource types include (case insensitive): pods (po), services (svc), replicationcontrollers (rc), nodes (no), events (ev), limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), resourcequotas (quota), -namespaces (ns) or secrets.` +namespaces (ns), serviceaccounts or secrets.` describe_example = `# Describe a node $ kubectl describe nodes kubernetes-minion-emt8.c.myproject.internal diff --git a/pkg/kubectl/cmd/get.go b/pkg/kubectl/cmd/get.go index e0780a5b2bd..fea289d95dd 100644 --- a/pkg/kubectl/cmd/get.go +++ b/pkg/kubectl/cmd/get.go @@ -39,7 +39,7 @@ const ( Possible resource types include (case insensitive): pods (po), services (svc), replicationcontrollers (rc), nodes (no), events (ev), componentstatuses (cs), limitranges (limits), persistentvolumes (pv), persistentvolumeclaims (pvc), -resourcequotas (quota), namespaces (ns), endpoints (ep) or secrets. +resourcequotas (quota), namespaces (ns), endpoints (ep), serviceaccounts or secrets. By specifying the output as 'template' and providing a Go template as the value of the --template flag, you can filter the attributes of the fetched resource(s).` @@ -130,6 +130,7 @@ func RunGet(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string SingleResourceType(). Latest(). Do() + err := r.Err() if err != nil { return err } diff --git a/pkg/kubectl/cmd/log.go b/pkg/kubectl/cmd/log.go index bb58e5d9b40..d5c601dd24b 100644 --- a/pkg/kubectl/cmd/log.go +++ b/pkg/kubectl/cmd/log.go @@ -26,7 +26,7 @@ import ( "github.com/spf13/cobra" "k8s.io/kubernetes/pkg/api" cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" - libutil "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) const ( @@ -42,7 +42,7 @@ $ kubectl logs -f 123456-7890 ruby-container` func selectContainer(pod *api.Pod, in io.Reader, out io.Writer) string { fmt.Fprintf(out, "Please select a container:\n") - options := libutil.StringSet{} + options := sets.String{} for ix := range pod.Spec.Containers { fmt.Fprintf(out, "[%d] %s\n", ix+1, pod.Spec.Containers[ix].Name) options.Insert(pod.Spec.Containers[ix].Name) diff --git a/pkg/kubectl/cmd/util/helpers.go b/pkg/kubectl/cmd/util/helpers.go index 2adb8113e44..4b39f616f3b 100644 --- a/pkg/kubectl/cmd/util/helpers.go +++ b/pkg/kubectl/cmd/util/helpers.go @@ -62,13 +62,37 @@ func AddSourceToErr(verb string, source string, err error) error { return err } +var fatalErrHandler = fatal + +// BehaviorOnFatal allows you to override the default behavior when a fatal +// error occurs, which is to call os.Exit(1).
You can pass 'panic' as a function +// here if you prefer the panic() over os.Exit(1). +func BehaviorOnFatal(f func(string)) { + fatalErrHandler = f +} + +// fatal prints the message and then exits. If V(2) or greater, glog.Fatal +// is invoked for extended information. +func fatal(msg string) { + // add newline if needed + if !strings.HasSuffix(msg, "\n") { + msg += "\n" + } + + if glog.V(2) { + glog.FatalDepth(2, msg) + } + fmt.Fprint(os.Stderr, msg) + os.Exit(1) +} + // CheckErr prints a user friendly error to STDERR and exits with a non-zero // exit code. Unrecognized errors will be printed with an "error: " prefix. // // This method is generic to the command in use and may be used by non-Kubectl // commands. func CheckErr(err error) { - checkErr(err, fatal) + checkErr(err, fatalErrHandler) } func checkErr(err error, handleErr func(string)) { @@ -180,21 +204,6 @@ func messageForError(err error) string { return msg } -// fatal prints the message and then exits. If V(2) or greater, glog.Fatal -// is invoked for extended information. -func fatal(msg string) { - // add newline if needed - if !strings.HasSuffix(msg, "\n") { - msg += "\n" - } - - if glog.V(2) { - glog.FatalDepth(2, msg) - } - fmt.Fprint(os.Stderr, msg) - os.Exit(1) -} - func UsageError(cmd *cobra.Command, format string, args ...interface{}) error { msg := fmt.Sprintf(format, args...) return fmt.Errorf("%s\nsee '%s -h' for help.", msg, cmd.CommandPath()) diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index 4c872542ed5..63a078fbf47 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -33,7 +33,7 @@ import ( "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/types" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // Describer generates output for the named resource or an error @@ -933,7 +933,7 @@ func describeService(service *api.Service, endpoints *api.Endpoints, events *api if sp.NodePort != 0 { fmt.Fprintf(out, "NodePort:\t%s\t%d/%s\n", name, sp.NodePort, sp.Protocol) } - fmt.Fprintf(out, "Endpoints:\t%s\n", formatEndpoints(endpoints, util.NewStringSet(sp.Name))) + fmt.Fprintf(out, "Endpoints:\t%s\n", formatEndpoints(endpoints, sets.NewString(sp.Name))) } fmt.Fprintf(out, "Session Affinity:\t%s\n", service.Spec.SessionAffinity) if events != nil { diff --git a/pkg/kubectl/resource/builder.go b/pkg/kubectl/resource/builder.go index 4386ed8d723..9da9c106666 100644 --- a/pkg/kubectl/resource/builder.go +++ b/pkg/kubectl/resource/builder.go @@ -28,8 +28,8 @@ import ( "k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/sets" ) var FileExtensions = []string{".json", ".stdin", ".yaml", ".yml"} @@ -685,7 +685,7 @@ func (b *Builder) Do() *Result { // strings in the original order. 
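The fatalErrHandler indirection introduced above exists so that callers, typically tests, can keep CheckErr from terminating the process. A self-contained sketch of the pattern; it borrows the names from the hunk but is not the full kubectl implementation:

package main

import (
	"fmt"
	"os"
)

// fatalErrHandler defaults to print-and-exit, the behavior CheckErr always had.
var fatalErrHandler = func(msg string) {
	fmt.Fprint(os.Stderr, msg)
	os.Exit(1)
}

// BehaviorOnFatal swaps in a different handler, e.g. panic for tests.
func BehaviorOnFatal(f func(string)) { fatalErrHandler = f }

func CheckErr(err error) {
	if err != nil {
		fatalErrHandler("error: " + err.Error() + "\n")
	}
}

func main() {
	// A test can trade os.Exit(1) for a recoverable panic.
	BehaviorOnFatal(func(msg string) { panic(msg) })
	defer func() { fmt.Println("recovered:", recover()) }()
	CheckErr(fmt.Errorf("boom"))
}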
func SplitResourceArgument(arg string) []string { out := []string{} - set := util.NewStringSet() + set := sets.NewString() for _, s := range strings.Split(arg, ",") { if set.Has(s) { continue diff --git a/pkg/kubectl/resource/result.go b/pkg/kubectl/resource/result.go index f8a8c688e19..3ac6d720157 100644 --- a/pkg/kubectl/resource/result.go +++ b/pkg/kubectl/resource/result.go @@ -24,8 +24,8 @@ import ( "k8s.io/kubernetes/pkg/api/latest" "k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/errors" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" ) @@ -123,7 +123,7 @@ func (r *Result) Object() (runtime.Object, error) { return nil, err } - versions := util.StringSet{} + versions := sets.String{} objects := []runtime.Object{} for _, info := range infos { if info.Object != nil { diff --git a/pkg/kubectl/resource_printer.go b/pkg/kubectl/resource_printer.go index fc9ea6b58dd..c847d7e4ed8 100644 --- a/pkg/kubectl/resource_printer.go +++ b/pkg/kubectl/resource_printer.go @@ -41,6 +41,7 @@ import ( "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/jsonpath" + "k8s.io/kubernetes/pkg/util/sets" ) // GetPrinter takes a format type, an optional format argument. It will return true @@ -435,7 +436,7 @@ func (h *HumanReadablePrinter) printHeader(columnNames []string, w io.Writer) er } // Pass ports=nil for all ports. -func formatEndpoints(endpoints *api.Endpoints, ports util.StringSet) string { +func formatEndpoints(endpoints *api.Endpoints, ports sets.String) string { if len(endpoints.Subsets) == 0 { return "" } diff --git a/pkg/kubectl/resource_printer_test.go b/pkg/kubectl/resource_printer_test.go index 4197ea98e84..bb76beb692d 100644 --- a/pkg/kubectl/resource_printer_test.go +++ b/pkg/kubectl/resource_printer_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "github.com/ghodss/yaml" ) @@ -484,9 +485,9 @@ func TestPrinters(t *testing.T) { }}}, } // map of printer name to set of objects it should fail on. 
- expectedErrors := map[string]util.StringSet{ - "template2": util.NewStringSet("pod", "emptyPodList", "endpoints"), - "jsonpath": util.NewStringSet("emptyPodList", "nonEmptyPodList", "endpoints"), + expectedErrors := map[string]sets.String{ + "template2": sets.NewString("pod", "emptyPodList", "endpoints"), + "jsonpath": sets.NewString("emptyPodList", "nonEmptyPodList", "endpoints"), } for pName, p := range printers { @@ -545,21 +546,21 @@ func TestPrintEventsResultSorted(t *testing.T) { VerifyDatesInOrder(out, "\n" /* rowDelimiter */, " " /* columnDelimiter */, t) } -func TestPrintMinionStatus(t *testing.T) { +func TestPrintNodeStatus(t *testing.T) { printer := NewHumanReadablePrinter(false, false, false, false, []string{}) table := []struct { - minion api.Node + node api.Node status string }{ { - minion: api.Node{ + node: api.Node{ ObjectMeta: api.ObjectMeta{Name: "foo1"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}}}, }, status: "Ready", }, { - minion: api.Node{ + node: api.Node{ ObjectMeta: api.ObjectMeta{Name: "foo2"}, Spec: api.NodeSpec{Unschedulable: true}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}}}, @@ -567,7 +568,7 @@ func TestPrintMinionStatus(t *testing.T) { status: "Ready,SchedulingDisabled", }, { - minion: api.Node{ + node: api.Node{ ObjectMeta: api.ObjectMeta{Name: "foo3"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{ {Type: api.NodeReady, Status: api.ConditionTrue}, @@ -576,14 +577,14 @@ func TestPrintMinionStatus(t *testing.T) { status: "Ready", }, { - minion: api.Node{ + node: api.Node{ ObjectMeta: api.ObjectMeta{Name: "foo4"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionFalse}}}, }, status: "NotReady", }, { - minion: api.Node{ + node: api.Node{ ObjectMeta: api.ObjectMeta{Name: "foo5"}, Spec: api.NodeSpec{Unschedulable: true}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionFalse}}}, @@ -591,21 +592,21 @@ func TestPrintMinionStatus(t *testing.T) { status: "NotReady,SchedulingDisabled", }, { - minion: api.Node{ + node: api.Node{ ObjectMeta: api.ObjectMeta{Name: "foo6"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: "InvalidValue", Status: api.ConditionTrue}}}, }, status: "Unknown", }, { - minion: api.Node{ + node: api.Node{ ObjectMeta: api.ObjectMeta{Name: "foo7"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{}}}, }, status: "Unknown", }, { - minion: api.Node{ + node: api.Node{ ObjectMeta: api.ObjectMeta{Name: "foo8"}, Spec: api.NodeSpec{Unschedulable: true}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: "InvalidValue", Status: api.ConditionTrue}}}, @@ -613,7 +614,7 @@ func TestPrintMinionStatus(t *testing.T) { status: "Unknown,SchedulingDisabled", }, { - minion: api.Node{ + node: api.Node{ ObjectMeta: api.ObjectMeta{Name: "foo9"}, Spec: api.NodeSpec{Unschedulable: true}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{}}}, @@ -624,12 +625,12 @@ func TestPrintMinionStatus(t *testing.T) { for _, test := range table { buffer := &bytes.Buffer{} - err := printer.PrintObj(&test.minion, buffer) + err := printer.PrintObj(&test.node, buffer) if err != nil { - t.Fatalf("An error occurred printing Minion: %#v", err) + t.Fatalf("An error occurred printing Node: %#v", err) } if !contains(strings.Fields(buffer.String()), test.status) { - t.Fatalf("Expect printing minion %s with status %#v, got: %#v", 
test.minion.Name, test.status, buffer.String()) + t.Fatalf("Expect printing node %s with status %#v, got: %#v", test.node.Name, test.status, buffer.String()) } } } diff --git a/pkg/kubectl/rolling_updater_test.go b/pkg/kubectl/rolling_updater_test.go index 4b6b6406e93..2ca9ab6ef58 100644 --- a/pkg/kubectl/rolling_updater_test.go +++ b/pkg/kubectl/rolling_updater_test.go @@ -32,6 +32,7 @@ import ( "k8s.io/kubernetes/pkg/client/unversioned/testclient" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) func oldRc(replicas int, original int) *api.ReplicationController { @@ -1140,7 +1141,7 @@ func TestAddDeploymentHash(t *testing.T) { }, } - seen := util.StringSet{} + seen := sets.String{} updatedRc := false fakeClient := &client.FakeRESTClient{ Codec: codec, diff --git a/pkg/kubelet/config/config.go b/pkg/kubelet/config/config.go index 0de727354bd..5d0e5bc4722 100644 --- a/pkg/kubelet/config/config.go +++ b/pkg/kubelet/config/config.go @@ -29,10 +29,10 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubeletTypes "k8s.io/kubernetes/pkg/kubelet/types" kubeletUtil "k8s.io/kubernetes/pkg/kubelet/util" - "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/config" utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/fielderrors" + "k8s.io/kubernetes/pkg/util/sets" ) // PodConfigNotificationMode describes how changes are sent to the update channel. @@ -61,7 +61,7 @@ type PodConfig struct { // contains the list of all configured sources sourcesLock sync.Mutex - sources util.StringSet + sources sets.String } // NewPodConfig creates an object that can merge many configuration sources into a stream @@ -73,7 +73,7 @@ func NewPodConfig(mode PodConfigNotificationMode, recorder record.EventRecorder) pods: storage, mux: config.NewMux(storage), updates: updates, - sources: util.StringSet{}, + sources: sets.String{}, } return podConfig } @@ -124,7 +124,7 @@ type podStorage struct { // contains the set of all sources that have sent at least one SET sourcesSeenLock sync.Mutex - sourcesSeen util.StringSet + sourcesSeen sets.String // the EventRecorder to use recorder record.EventRecorder @@ -138,7 +138,7 @@ func newPodStorage(updates chan<- kubelet.PodUpdate, mode PodConfigNotificationM pods: make(map[string]map[string]*api.Pod), mode: mode, updates: updates, - sourcesSeen: util.StringSet{}, + sourcesSeen: sets.String{}, recorder: recorder, } } @@ -306,7 +306,7 @@ func (s *podStorage) seenSources(sources ...string) bool { } func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) { - names := util.StringSet{} + names := sets.String{} for i, pod := range pods { var errlist []error if errs := validation.ValidatePod(pod); len(errs) != 0 { diff --git a/pkg/kubelet/dockertools/fake_docker_client.go b/pkg/kubelet/dockertools/fake_docker_client.go index ca93a0feb2a..d9de133117d 100644 --- a/pkg/kubelet/dockertools/fake_docker_client.go +++ b/pkg/kubelet/dockertools/fake_docker_client.go @@ -27,7 +27,7 @@ import ( docker "github.com/fsouza/go-dockerclient" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // FakeDockerClient is a simple fake docker client, so that kubelet can be run for testing without requiring a real docker setup. 
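Every util.StringSet → sets.String change in this patch is a pure package move; call sites compile unchanged because the method set is the same. A stand-in type showing the operations these files depend on (NewString, Insert, Has, sorted List); this mimics pkg/util/sets for illustration and is not the real package:

package main

import (
	"fmt"
	"sort"
)

// stringSet mirrors the subset of the sets.String API used in these hunks.
type stringSet map[string]struct{}

func newStringSet(items ...string) stringSet {
	s := stringSet{}
	s.Insert(items...)
	return s
}

func (s stringSet) Insert(items ...string) {
	for _, item := range items {
		s[item] = struct{}{}
	}
}

func (s stringSet) Has(item string) bool {
	_, ok := s[item]
	return ok
}

// List returns the members sorted, as sets.String.List does.
func (s stringSet) List() []string {
	out := make([]string, 0, len(s))
	for item := range s {
		out = append(out, item)
	}
	sort.Strings(out)
	return out
}

func main() {
	seen := newStringSet("2222", "1111")
	seen.Insert("3333", "1111") // duplicate inserts are no-ops
	fmt.Println(seen.Has("2222"), seen.List()) // true [1111 2222 3333]
}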
@@ -45,7 +45,7 @@ type FakeDockerClient struct { pulled []string Created []string Removed []string - RemovedImages util.StringSet + RemovedImages sets.String VersionInfo docker.Env Information docker.Env ExecInspect *docker.ExecInspect diff --git a/pkg/kubelet/dockertools/manager.go b/pkg/kubelet/dockertools/manager.go index f1b8a6da4d0..10c72fb81fc 100644 --- a/pkg/kubelet/dockertools/manager.go +++ b/pkg/kubelet/dockertools/manager.go @@ -51,6 +51,7 @@ import ( "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/util/procfs" + "k8s.io/kubernetes/pkg/util/sets" ) const ( @@ -402,7 +403,7 @@ func (dm *DockerManager) GetPodStatus(pod *api.Pod) (*api.PodStatus, error) { return nil, err } - containerDone := util.NewStringSet() + containerDone := sets.NewString() // Loop through list of running and exited docker containers to construct // the statuses. We assume docker returns a list of containers sorted in // reverse by time. diff --git a/pkg/kubelet/dockertools/manager_test.go b/pkg/kubelet/dockertools/manager_test.go index ef289f4e309..800233b4710 100644 --- a/pkg/kubelet/dockertools/manager_test.go +++ b/pkg/kubelet/dockertools/manager_test.go @@ -42,6 +42,7 @@ import ( "k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/util" uexec "k8s.io/kubernetes/pkg/util/exec" + "k8s.io/kubernetes/pkg/util/sets" ) type fakeHTTP struct { @@ -74,7 +75,7 @@ func (*fakeOptionGenerator) GenerateRunContainerOptions(pod *api.Pod, container } func newTestDockerManagerWithHTTPClient(fakeHTTPClient *fakeHTTP) (*DockerManager, *FakeDockerClient) { - fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.3", "ApiVersion=1.15"}, Errors: make(map[string]error), RemovedImages: util.StringSet{}} + fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.3", "ApiVersion=1.15"}, Errors: make(map[string]error), RemovedImages: sets.String{}} fakeRecorder := &record.FakeRecorder{} readinessManager := kubecontainer.NewReadinessManager() containerRefManager := kubecontainer.NewRefManager() @@ -324,14 +325,14 @@ func TestGetPods(t *testing.T) { func TestListImages(t *testing.T) { manager, fakeDocker := newTestDockerManager() dockerImages := []docker.APIImages{{ID: "1111"}, {ID: "2222"}, {ID: "3333"}} - expected := util.NewStringSet([]string{"1111", "2222", "3333"}...) + expected := sets.NewString([]string{"1111", "2222", "3333"}...) fakeDocker.Images = dockerImages actualImages, err := manager.ListImages() if err != nil { t.Fatalf("unexpected error %v", err) } - actual := util.NewStringSet() + actual := sets.NewString() for _, i := range actualImages { actual.Insert(i.ID) } diff --git a/pkg/kubelet/image_manager.go b/pkg/kubelet/image_manager.go index 42a00d9bfec..e17c647328b 100644 --- a/pkg/kubelet/image_manager.go +++ b/pkg/kubelet/image_manager.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/dockertools" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // Manages lifecycle of all images. @@ -141,14 +142,14 @@ func (im *realImageManager) detectImages(detected time.Time) error { } // Make a set of images in use by containers. - imagesInUse := util.NewStringSet() + imagesInUse := sets.NewString() for _, container := range containers { imagesInUse.Insert(container.Image) } // Add new images and record those being used. 
now := time.Now() - currentImages := util.NewStringSet() + currentImages := sets.NewString() im.imageRecordsLock.Lock() defer im.imageRecordsLock.Unlock() for _, image := range images { @@ -286,7 +287,7 @@ func (ev byLastUsedAndDetected) Less(i, j int) bool { } } -func isImageUsed(image *docker.APIImages, imagesInUse util.StringSet) bool { +func isImageUsed(image *docker.APIImages, imagesInUse sets.String) bool { // Check the image ID and all the RepoTags. if _, ok := imagesInUse[image.ID]; ok { return true diff --git a/pkg/kubelet/image_manager_test.go b/pkg/kubelet/image_manager_test.go index aa0f97e3d15..1b96807d7a1 100644 --- a/pkg/kubelet/image_manager_test.go +++ b/pkg/kubelet/image_manager_test.go @@ -28,14 +28,14 @@ import ( "k8s.io/kubernetes/pkg/client/unversioned/record" "k8s.io/kubernetes/pkg/kubelet/cadvisor" "k8s.io/kubernetes/pkg/kubelet/dockertools" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) var zero time.Time func newRealImageManager(policy ImageGCPolicy) (*realImageManager, *dockertools.FakeDockerClient, *cadvisor.Mock) { fakeDocker := &dockertools.FakeDockerClient{ - RemovedImages: util.NewStringSet(), + RemovedImages: sets.NewString(), } mockCadvisor := new(cadvisor.Mock) return &realImageManager{ diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 243ccc8b446..f93edf0e80e 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -64,6 +64,7 @@ import ( nodeutil "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/oom" "k8s.io/kubernetes/pkg/util/procfs" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/watch" @@ -139,6 +140,8 @@ func NewMainKubelet( resyncInterval time.Duration, pullQPS float32, pullBurst int, + eventQPS float32, + eventBurst int, containerGCPolicy ContainerGCPolicy, sourcesReady SourcesReadyFn, registerNode bool, @@ -214,8 +217,8 @@ func NewMainKubelet( } nodeLister := &cache.StoreToNodeLister{Store: nodeStore} - // TODO: get the real minion object of ourself, - // and use the real minion name and UID. + // TODO: get the real node object for this kubelet, + // and use the real node name and UID. // TODO: what is namespace for node? nodeRef := &api.ObjectReference{ Kind: "Node", @@ -981,7 +984,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(pod *api.Pod, container *api.Cont return opts, nil } -var masterServices = util.NewStringSet("kubernetes") +var masterServices = sets.NewString("kubernetes") // getServiceEnvVarMap makes a map[string]string of env vars for services a pod in namespace ns should see func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) { @@ -1406,7 +1409,7 @@ func getDesiredVolumes(pods []*api.Pod) map[string]api.Volume { // cleanupOrphanedPodDirs removes a pod directory if the pod is not in the // desired set of pods and there is no running containers in the pod.
func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*api.Pod, runningPods []*kubecontainer.Pod) error { - active := util.NewStringSet() + active := sets.NewString() for _, pod := range pods { active.Insert(string(pod.UID)) } @@ -1444,7 +1447,7 @@ func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error { if err != nil { return err } - possibleCIDRs := util.StringSet{} + possibleCIDRs := sets.String{} for ix := range allPods { pod := allPods[ix] ingress, egress, err := extractBandwidthResources(pod) @@ -1484,7 +1487,7 @@ func (kl *Kubelet) cleanupOrphanedVolumes(pods []*api.Pod, runningPods []*kubeco desiredVolumes := getDesiredVolumes(pods) currentVolumes := kl.getPodVolumesFromDisk() - runningSet := util.StringSet{} + runningSet := sets.String{} for _, pod := range runningPods { runningSet.Insert(string(pod.ID)) } @@ -1722,7 +1725,7 @@ func (kl *Kubelet) HandlePodCleanups() error { // podKiller launches a goroutine to kill a pod received from the channel if // another goroutine isn't already in action. func (kl *Kubelet) podKiller() { - killing := util.NewStringSet() + killing := sets.NewString() resultCh := make(chan types.UID) defer close(resultCh) for { @@ -1769,7 +1772,7 @@ func (s podsByCreationTime) Less(i, j int) bool { // checkHostPortConflicts detects pods with conflicted host ports. func hasHostPortConflicts(pods []*api.Pod) bool { - ports := util.StringSet{} + ports := sets.String{} for _, pod := range pods { if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports); len(errs) > 0 { glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", kubecontainer.GetPodFullName(pod), errs) diff --git a/pkg/kubelet/mirror_client_test.go b/pkg/kubelet/mirror_client_test.go index 2ae9c845b28..9c372d5b33d 100644 --- a/pkg/kubelet/mirror_client_test.go +++ b/pkg/kubelet/mirror_client_test.go @@ -22,14 +22,14 @@ import ( "k8s.io/kubernetes/pkg/api" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) type fakeMirrorClient struct { mirrorPodLock sync.RWMutex // Note that a real mirror manager does not store the mirror pods in // itself. This fake manager does this to track calls. - mirrorPods util.StringSet + mirrorPods sets.String createCounts map[string]int deleteCounts map[string]int } @@ -53,7 +53,7 @@ func (fmc *fakeMirrorClient) DeleteMirrorPod(podFullName string) error { func newFakeMirrorClient() *fakeMirrorClient { m := fakeMirrorClient{} - m.mirrorPods = util.NewStringSet() + m.mirrorPods = sets.NewString() m.createCounts = make(map[string]int) m.deleteCounts = make(map[string]int) return &m diff --git a/pkg/labels/selector.go b/pkg/labels/selector.go index 28bc6d838af..7d811cc4830 100644 --- a/pkg/labels/selector.go +++ b/pkg/labels/selector.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/fielderrors" + "k8s.io/kubernetes/pkg/util/sets" ) // Selector represents a label selector. @@ -79,7 +80,7 @@ func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } type Requirement struct { key string operator Operator - strValues util.StringSet + strValues sets.String } // NewRequirement is the constructor for a Requirement. @@ -91,7 +92,7 @@ type Requirement struct { // of characters. See validateLabelKey for more details. // // The empty string is a valid value in the input values set. 
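With the signature change below, callers construct label requirements by handing NewRequirement a sets.String of values instead of a util.StringSet. A hypothetical call site (key, operator, and values are invented; Matches is assumed to behave as the matching tests later in this patch suggest):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	// Values travel as a set rather than a slice, hence sets.NewString.
	req, err := labels.NewRequirement("env", labels.InOperator, sets.NewString("prod", "staging"))
	if err != nil {
		fmt.Println("invalid requirement:", err)
		return
	}
	// A label set with env=prod satisfies "env in (prod,staging)".
	fmt.Println(req.Matches(labels.Set{"env": "prod"})) // true
}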
-func NewRequirement(key string, op Operator, vals util.StringSet) (*Requirement, error) { +func NewRequirement(key string, op Operator, vals sets.String) (*Requirement, error) { if err := validateLabelKey(key); err != nil { return nil, err } @@ -198,7 +199,7 @@ func (lsel LabelSelector) Add(key string, operator Operator, values []string) Se for _, item := range lsel { reqs = append(reqs, item) } - if r, err := NewRequirement(key, operator, util.NewStringSet(values...)); err == nil { + if r, err := NewRequirement(key, operator, sets.NewString(values...)); err == nil { reqs = append(reqs, *r) } return LabelSelector(reqs) @@ -480,7 +481,7 @@ func (p *Parser) parseRequirement() (*Requirement, error) { if err != nil { return nil, err } - var values util.StringSet + var values sets.String switch operator { case InOperator, NotInOperator: values, err = p.parseValues() @@ -535,7 +536,7 @@ func (p *Parser) parseOperator() (op Operator, err error) { } // parseValues parses the values for set based matching (x,y,z) -func (p *Parser) parseValues() (util.StringSet, error) { +func (p *Parser) parseValues() (sets.String, error) { tok, lit := p.consume(Values) if tok != OpenParToken { return nil, fmt.Errorf("found '%s' expected: '('", lit) @@ -553,7 +554,7 @@ func (p *Parser) parseValues() (util.StringSet, error) { return s, nil case ClosedParToken: // handles "()" p.consume(Values) - return util.NewStringSet(""), nil + return sets.NewString(""), nil default: return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit) } @@ -561,8 +562,8 @@ func (p *Parser) parseValues() (util.StringSet, error) { // parseIdentifiersList parses a (possibly empty) list of // of comma separated (possibly empty) identifiers -func (p *Parser) parseIdentifiersList() (util.StringSet, error) { - s := util.NewStringSet() +func (p *Parser) parseIdentifiersList() (sets.String, error) { + s := sets.NewString() for { tok, lit := p.consume(Values) switch tok { @@ -597,8 +598,8 @@ func (p *Parser) parseIdentifiersList() (util.StringSet, error) { } // parseExactValue parses the only value for exact match style -func (p *Parser) parseExactValue() (util.StringSet, error) { - s := util.NewStringSet() +func (p *Parser) parseExactValue() (sets.String, error) { + s := sets.NewString() tok, lit := p.consume(Values) if tok == IdentifierToken { s.Insert(lit) @@ -670,7 +671,7 @@ func SelectorFromSet(ls Set) Selector { } var requirements []Requirement for label, value := range ls { - if r, err := NewRequirement(label, EqualsOperator, util.NewStringSet(value)); err != nil { + if r, err := NewRequirement(label, EqualsOperator, sets.NewString(value)); err != nil { //TODO: double check errors when input comes from serialization? 
return LabelSelector{} } else { diff --git a/pkg/labels/selector_test.go b/pkg/labels/selector_test.go index 987bafc116f..181cb26890a 100644 --- a/pkg/labels/selector_test.go +++ b/pkg/labels/selector_test.go @@ -21,7 +21,7 @@ import ( "strings" "testing" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) func TestSelectorParse(t *testing.T) { @@ -273,16 +273,16 @@ func TestRequirementConstructor(t *testing.T) { requirementConstructorTests := []struct { Key string Op Operator - Vals util.StringSet + Vals sets.String Success bool }{ {"x", InOperator, nil, false}, - {"x", NotInOperator, util.NewStringSet(), false}, - {"x", InOperator, util.NewStringSet("foo"), true}, - {"x", NotInOperator, util.NewStringSet("foo"), true}, + {"x", NotInOperator, sets.NewString(), false}, + {"x", InOperator, sets.NewString("foo"), true}, + {"x", NotInOperator, sets.NewString("foo"), true}, {"x", ExistsOperator, nil, true}, - {"1foo", InOperator, util.NewStringSet("bar"), true}, - {"1234", InOperator, util.NewStringSet("bar"), true}, + {"1foo", InOperator, sets.NewString("bar"), true}, + {"1234", InOperator, sets.NewString("bar"), true}, {strings.Repeat("a", 254), ExistsOperator, nil, false}, //breaks DNS rule that len(key) <= 253 } for _, rc := range requirementConstructorTests { @@ -302,23 +302,23 @@ func TestToString(t *testing.T) { Valid bool }{ {&LabelSelector{ - getRequirement("x", InOperator, util.NewStringSet("abc", "def"), t), - getRequirement("y", NotInOperator, util.NewStringSet("jkl"), t), + getRequirement("x", InOperator, sets.NewString("abc", "def"), t), + getRequirement("y", NotInOperator, sets.NewString("jkl"), t), getRequirement("z", ExistsOperator, nil, t)}, "x in (abc,def),y notin (jkl),z", true}, {&LabelSelector{ - getRequirement("x", InOperator, util.NewStringSet("abc", "def"), t), + getRequirement("x", InOperator, sets.NewString("abc", "def"), t), req}, // adding empty req for the trailing ',' "x in (abc,def),", false}, {&LabelSelector{ - getRequirement("x", NotInOperator, util.NewStringSet("abc"), t), - getRequirement("y", InOperator, util.NewStringSet("jkl", "mno"), t), - getRequirement("z", NotInOperator, util.NewStringSet(""), t)}, + getRequirement("x", NotInOperator, sets.NewString("abc"), t), + getRequirement("y", InOperator, sets.NewString("jkl", "mno"), t), + getRequirement("z", NotInOperator, sets.NewString(""), t)}, "x notin (abc),y in (jkl,mno),z notin ()", true}, {&LabelSelector{ - getRequirement("x", EqualsOperator, util.NewStringSet("abc"), t), - getRequirement("y", DoubleEqualsOperator, util.NewStringSet("jkl"), t), - getRequirement("z", NotEqualsOperator, util.NewStringSet("a"), t)}, + getRequirement("x", EqualsOperator, sets.NewString("abc"), t), + getRequirement("y", DoubleEqualsOperator, sets.NewString("jkl"), t), + getRequirement("z", NotEqualsOperator, sets.NewString("a"), t)}, "x=abc,y==jkl,z!=a", true}, } for _, ts := range toStringTests { @@ -341,19 +341,19 @@ func TestRequirementLabelSelectorMatching(t *testing.T) { req, }, false}, {Set{"x": "foo", "y": "baz"}, &LabelSelector{ - getRequirement("x", InOperator, util.NewStringSet("foo"), t), - getRequirement("y", NotInOperator, util.NewStringSet("alpha"), t), + getRequirement("x", InOperator, sets.NewString("foo"), t), + getRequirement("y", NotInOperator, sets.NewString("alpha"), t), }, true}, {Set{"x": "foo", "y": "baz"}, &LabelSelector{ - getRequirement("x", InOperator, util.NewStringSet("foo"), t), - getRequirement("y", InOperator, util.NewStringSet("alpha"), t), + getRequirement("x", InOperator, 
sets.NewString("foo"), t), + getRequirement("y", InOperator, sets.NewString("alpha"), t), }, false}, {Set{"y": ""}, &LabelSelector{ - getRequirement("x", NotInOperator, util.NewStringSet(""), t), + getRequirement("x", NotInOperator, sets.NewString(""), t), getRequirement("y", ExistsOperator, nil, t), }, true}, {Set{"y": "baz"}, &LabelSelector{ - getRequirement("x", InOperator, util.NewStringSet(""), t), + getRequirement("x", InOperator, sets.NewString(""), t), }, false}, } for _, lsm := range labelSelectorMatchingTests { @@ -378,60 +378,60 @@ func TestSetSelectorParser(t *testing.T) { getRequirement("this-is-a-dns.domain.com/key-with-dash", ExistsOperator, nil, t), }, true, true}, {"this-is-another-dns.domain.com/key-with-dash in (so,what)", LabelSelector{ - getRequirement("this-is-another-dns.domain.com/key-with-dash", InOperator, util.NewStringSet("so", "what"), t), + getRequirement("this-is-another-dns.domain.com/key-with-dash", InOperator, sets.NewString("so", "what"), t), }, true, true}, {"0.1.2.domain/99 notin (10.10.100.1, tick.tack.clock)", LabelSelector{ - getRequirement("0.1.2.domain/99", NotInOperator, util.NewStringSet("10.10.100.1", "tick.tack.clock"), t), + getRequirement("0.1.2.domain/99", NotInOperator, sets.NewString("10.10.100.1", "tick.tack.clock"), t), }, true, true}, {"foo in (abc)", LabelSelector{ - getRequirement("foo", InOperator, util.NewStringSet("abc"), t), + getRequirement("foo", InOperator, sets.NewString("abc"), t), }, true, true}, {"x notin\n (abc)", LabelSelector{ - getRequirement("x", NotInOperator, util.NewStringSet("abc"), t), + getRequirement("x", NotInOperator, sets.NewString("abc"), t), }, true, true}, {"x notin \t (abc,def)", LabelSelector{ - getRequirement("x", NotInOperator, util.NewStringSet("abc", "def"), t), + getRequirement("x", NotInOperator, sets.NewString("abc", "def"), t), }, true, true}, {"x in (abc,def)", LabelSelector{ - getRequirement("x", InOperator, util.NewStringSet("abc", "def"), t), + getRequirement("x", InOperator, sets.NewString("abc", "def"), t), }, true, true}, {"x in (abc,)", LabelSelector{ - getRequirement("x", InOperator, util.NewStringSet("abc", ""), t), + getRequirement("x", InOperator, sets.NewString("abc", ""), t), }, true, true}, {"x in ()", LabelSelector{ - getRequirement("x", InOperator, util.NewStringSet(""), t), + getRequirement("x", InOperator, sets.NewString(""), t), }, true, true}, {"x notin (abc,,def),bar,z in (),w", LabelSelector{ getRequirement("bar", ExistsOperator, nil, t), getRequirement("w", ExistsOperator, nil, t), - getRequirement("x", NotInOperator, util.NewStringSet("abc", "", "def"), t), - getRequirement("z", InOperator, util.NewStringSet(""), t), + getRequirement("x", NotInOperator, sets.NewString("abc", "", "def"), t), + getRequirement("z", InOperator, sets.NewString(""), t), }, true, true}, {"x,y in (a)", LabelSelector{ - getRequirement("y", InOperator, util.NewStringSet("a"), t), + getRequirement("y", InOperator, sets.NewString("a"), t), getRequirement("x", ExistsOperator, nil, t), }, false, true}, {"x=a", LabelSelector{ - getRequirement("x", EqualsOperator, util.NewStringSet("a"), t), + getRequirement("x", EqualsOperator, sets.NewString("a"), t), }, true, true}, {"x=a,y!=b", LabelSelector{ - getRequirement("x", EqualsOperator, util.NewStringSet("a"), t), - getRequirement("y", NotEqualsOperator, util.NewStringSet("b"), t), + getRequirement("x", EqualsOperator, sets.NewString("a"), t), + getRequirement("y", NotEqualsOperator, sets.NewString("b"), t), }, true, true}, {"x=a,y!=b,z in (h,i,j)", 
LabelSelector{ - getRequirement("x", EqualsOperator, util.NewStringSet("a"), t), - getRequirement("y", NotEqualsOperator, util.NewStringSet("b"), t), - getRequirement("z", InOperator, util.NewStringSet("h", "i", "j"), t), + getRequirement("x", EqualsOperator, sets.NewString("a"), t), + getRequirement("y", NotEqualsOperator, sets.NewString("b"), t), + getRequirement("z", InOperator, sets.NewString("h", "i", "j"), t), }, true, true}, {"x=a||y=b", LabelSelector{}, false, false}, {"x,,y", nil, true, false}, {",x,y", nil, true, false}, {"x nott in (y)", nil, true, false}, {"x notin ( )", LabelSelector{ - getRequirement("x", NotInOperator, util.NewStringSet(""), t), + getRequirement("x", NotInOperator, sets.NewString(""), t), }, true, true}, {"x notin (, a)", LabelSelector{ - getRequirement("x", NotInOperator, util.NewStringSet("", "a"), t), + getRequirement("x", NotInOperator, sets.NewString("", "a"), t), }, true, true}, {"a in (xyz),", nil, true, false}, {"a in (xyz)b notin ()", nil, true, false}, @@ -439,7 +439,7 @@ func TestSetSelectorParser(t *testing.T) { getRequirement("a", ExistsOperator, nil, t), }, true, true}, {"a in (x,y,notin, z,in)", LabelSelector{ - getRequirement("a", InOperator, util.NewStringSet("in", "notin", "x", "y", "z"), t), + getRequirement("a", InOperator, sets.NewString("in", "notin", "x", "y", "z"), t), }, true, true}, // operator 'in' inside list of identifiers {"a in (xyz abc)", nil, false, false}, // no comma {"a notin(", nil, true, false}, // bad formed @@ -458,7 +458,7 @@ func TestSetSelectorParser(t *testing.T) { } } -func getRequirement(key string, op Operator, vals util.StringSet, t *testing.T) Requirement { +func getRequirement(key string, op Operator, vals sets.String, t *testing.T) Requirement { req, err := NewRequirement(key, op, vals) if err != nil { t.Errorf("NewRequirement(%v, %v, %v) resulted in error:%v", key, op, vals, err) @@ -480,16 +480,16 @@ func TestAdd(t *testing.T) { "key", InOperator, []string{"value"}, - LabelSelector{Requirement{"key", InOperator, util.NewStringSet("value")}}, + LabelSelector{Requirement{"key", InOperator, sets.NewString("value")}}, }, { - LabelSelector{Requirement{"key", InOperator, util.NewStringSet("value")}}, + LabelSelector{Requirement{"key", InOperator, sets.NewString("value")}}, "key2", EqualsOperator, []string{"value2"}, LabelSelector{ - Requirement{"key", InOperator, util.NewStringSet("value")}, - Requirement{"key2", EqualsOperator, util.NewStringSet("value2")}, + Requirement{"key", InOperator, sets.NewString("value")}, + Requirement{"key2", EqualsOperator, sets.NewString("value2")}, }, }, } diff --git a/pkg/master/master.go b/pkg/master/master.go index 0c6569a7e3a..e3344954838 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -56,10 +56,10 @@ import ( eventetcd "k8s.io/kubernetes/pkg/registry/event/etcd" expcontrolleretcd "k8s.io/kubernetes/pkg/registry/experimental/controller/etcd" limitrangeetcd "k8s.io/kubernetes/pkg/registry/limitrange/etcd" - "k8s.io/kubernetes/pkg/registry/minion" - nodeetcd "k8s.io/kubernetes/pkg/registry/minion/etcd" "k8s.io/kubernetes/pkg/registry/namespace" namespaceetcd "k8s.io/kubernetes/pkg/registry/namespace/etcd" + "k8s.io/kubernetes/pkg/registry/node" + nodeetcd "k8s.io/kubernetes/pkg/registry/node/etcd" pvetcd "k8s.io/kubernetes/pkg/registry/persistentvolume/etcd" pvcetcd "k8s.io/kubernetes/pkg/registry/persistentvolumeclaim/etcd" podetcd "k8s.io/kubernetes/pkg/registry/pod/etcd" @@ -79,8 +79,9 @@ import ( "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/ui" 
"k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" - daemonetcd "k8s.io/kubernetes/pkg/registry/daemon/etcd" + daemonetcd "k8s.io/kubernetes/pkg/registry/daemonset/etcd" horizontalpodautoscaleretcd "k8s.io/kubernetes/pkg/registry/horizontalpodautoscaler/etcd" "github.com/emicklei/go-restful" @@ -100,7 +101,7 @@ type Config struct { DatabaseStorage storage.Interface ExpDatabaseStorage storage.Interface EventTTL time.Duration - MinionRegexp string + NodeRegexp string KubeletClient client.KubeletClient // allow downstream consumers to disable the core controller loops EnableCoreControllers bool @@ -219,7 +220,7 @@ type Master struct { // registries are internal client APIs for accessing the storage layer // TODO: define the internal typed interface in a way that clients can // also be replaced - nodeRegistry minion.Registry + nodeRegistry node.Registry namespaceRegistry namespace.Registry serviceRegistry service.Registry endpointRegistry endpoint.Registry @@ -446,7 +447,7 @@ func (m *Master) init(c *Config) { m.endpointRegistry = endpoint.NewRegistry(endpointsStorage) nodeStorage, nodeStatusStorage := nodeetcd.NewREST(c.DatabaseStorage, c.EnableWatchCache, c.KubeletClient) - m.nodeRegistry = minion.NewRegistry(nodeStorage) + m.nodeRegistry = node.NewRegistry(nodeStorage) serviceStorage := serviceetcd.NewREST(c.DatabaseStorage) m.serviceRegistry = service.NewRegistry(serviceStorage) @@ -566,7 +567,7 @@ func (m *Master) init(c *Config) { apiserver.InstallSupport(m.muxHelper, m.rootWebService, c.EnableProfiling, healthzChecks...) apiserver.AddApiWebService(m.handlerContainer, c.APIPrefix, apiVersions) defaultVersion := m.defaultAPIGroupVersion() - requestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: util.NewStringSet(strings.TrimPrefix(defaultVersion.Root, "/")), RestMapper: defaultVersion.Mapper} + requestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: sets.NewString(strings.TrimPrefix(defaultVersion.Root, "/")), RestMapper: defaultVersion.Mapper} apiserver.InstallServiceErrorHandler(m.handlerContainer, requestInfoResolver, apiVersions) if m.exp { @@ -575,7 +576,7 @@ func (m *Master) init(c *Config) { glog.Fatalf("Unable to setup experimental api: %v", err) } apiserver.AddApiWebService(m.handlerContainer, c.ExpAPIPrefix, []string{expVersion.Version}) - expRequestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: util.NewStringSet(strings.TrimPrefix(expVersion.Root, "/")), RestMapper: expVersion.Mapper} + expRequestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: sets.NewString(strings.TrimPrefix(expVersion.Root, "/")), RestMapper: expVersion.Mapper} apiserver.InstallServiceErrorHandler(m.handlerContainer, expRequestInfoResolver, []string{expVersion.Version}) } @@ -784,7 +785,7 @@ func (m *Master) InstallThirdPartyAPI(rsrc *expapi.ThirdPartyResource) error { } thirdPartyPrefix := "/thirdparty/" + group + "/" apiserver.AddApiWebService(m.handlerContainer, thirdPartyPrefix, []string{rsrc.Versions[0].Name}) - thirdPartyRequestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: util.NewStringSet(strings.TrimPrefix(group, "/")), RestMapper: thirdparty.Mapper} + thirdPartyRequestInfoResolver := &apiserver.APIRequestInfoResolver{APIPrefixes: sets.NewString(strings.TrimPrefix(group, "/")), RestMapper: thirdparty.Mapper} apiserver.InstallServiceErrorHandler(m.handlerContainer, thirdPartyRequestInfoResolver, []string{thirdparty.Version}) return nil } @@ -824,7 +825,7 @@ func (m *Master) expapi(c *Config) 
*apiserver.APIGroupVersion { controllerStorage := expcontrolleretcd.NewStorage(c.ExpDatabaseStorage) autoscalerStorage := horizontalpodautoscaleretcd.NewREST(c.ExpDatabaseStorage) thirdPartyResourceStorage := thirdpartyresourceetcd.NewREST(c.ExpDatabaseStorage) - daemonStorage := daemonetcd.NewREST(c.ExpDatabaseStorage) + daemonSetStorage := daemonetcd.NewREST(c.ExpDatabaseStorage) deploymentStorage := deploymentetcd.NewREST(c.ExpDatabaseStorage) storage := map[string]rest.Storage{ @@ -832,7 +833,7 @@ func (m *Master) expapi(c *Config) *apiserver.APIGroupVersion { strings.ToLower("replicationControllers/scale"): controllerStorage.Scale, strings.ToLower("horizontalpodautoscalers"): autoscalerStorage, strings.ToLower("thirdpartyresources"): thirdPartyResourceStorage, - strings.ToLower("daemons"): daemonStorage, + strings.ToLower("daemonsets"): daemonSetStorage, strings.ToLower("deployments"): deploymentStorage, } @@ -911,7 +912,7 @@ func (m *Master) needToReplaceTunnels(addrs []string) bool { } func (m *Master) getNodeAddresses() ([]string, error) { - nodes, err := m.nodeRegistry.ListMinions(api.NewDefaultContext(), labels.Everything(), fields.Everything()) + nodes, err := m.nodeRegistry.ListNodes(api.NewDefaultContext(), labels.Everything(), fields.Everything()) if err != nil { return nil, err } diff --git a/pkg/master/master_test.go b/pkg/master/master_test.go index f7ac4ebc381..898d92385ab 100644 --- a/pkg/master/master_test.go +++ b/pkg/master/master_test.go @@ -45,7 +45,7 @@ func TestGetServersToValidate(t *testing.T) { config.DatabaseStorage = etcdstorage.NewEtcdStorage(fakeClient, latest.Codec, etcdtest.PathPrefix()) config.ExpDatabaseStorage = etcdstorage.NewEtcdStorage(fakeClient, explatest.Codec, etcdtest.PathPrefix()) - master.nodeRegistry = registrytest.NewMinionRegistry([]string{"node1", "node2"}, api.NodeResources{}) + master.nodeRegistry = registrytest.NewNodeRegistry([]string{"node1", "node2"}, api.NodeResources{}) servers := master.getServersToValidate(&config) diff --git a/pkg/registry/daemon/doc.go b/pkg/registry/daemonset/doc.go similarity index 80% rename from pkg/registry/daemon/doc.go rename to pkg/registry/daemonset/doc.go index a2452004386..435b0fe615b 100644 --- a/pkg/registry/daemon/doc.go +++ b/pkg/registry/daemonset/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package daemon provides Registry interface and its RESTStorage -// implementation for storing Daemon api objects. -package daemon +// Package daemonset provides Registry interface and its RESTStorage +// implementation for storing DaemonSet api objects. 
+package daemonset diff --git a/pkg/registry/daemon/etcd/etcd.go b/pkg/registry/daemonset/etcd/etcd.go similarity index 72% rename from pkg/registry/daemon/etcd/etcd.go rename to pkg/registry/daemonset/etcd/etcd.go index 1248480b498..f68b20345a3 100644 --- a/pkg/registry/daemon/etcd/etcd.go +++ b/pkg/registry/daemonset/etcd/etcd.go @@ -21,29 +21,28 @@ import ( "k8s.io/kubernetes/pkg/expapi" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/registry/daemon" + "k8s.io/kubernetes/pkg/registry/daemonset" "k8s.io/kubernetes/pkg/registry/generic" etcdgeneric "k8s.io/kubernetes/pkg/registry/generic/etcd" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/storage" ) -// rest implements a RESTStorage for daemons against etcd +// rest implements a RESTStorage for DaemonSets against etcd type REST struct { *etcdgeneric.Etcd } -// daemonPrefix is the location for daemons in etcd, only exposed -// for testing -var daemonPrefix = "/daemons" +// daemonPrefix is the location for daemon sets in etcd +var daemonPrefix = "/daemonsets" -// NewREST returns a RESTStorage object that will work against daemons. +// NewREST returns a RESTStorage object that will work against DaemonSets. func NewREST(s storage.Interface) *REST { store := &etcdgeneric.Etcd{ - NewFunc: func() runtime.Object { return &expapi.Daemon{} }, + NewFunc: func() runtime.Object { return &expapi.DaemonSet{} }, // NewListFunc returns an object capable of storing results of an etcd list. - NewListFunc: func() runtime.Object { return &expapi.DaemonList{} }, + NewListFunc: func() runtime.Object { return &expapi.DaemonSetList{} }, // Produces a path that etcd understands, to the root of the resource // by combining the namespace in the context with the given prefix KeyRootFunc: func(ctx api.Context) string { @@ -54,21 +53,21 @@ func NewREST(s storage.Interface) *REST { KeyFunc: func(ctx api.Context, name string) (string, error) { return etcdgeneric.NamespaceKeyFunc(ctx, daemonPrefix, name) }, - // Retrieve the name field of a daemon + // Retrieve the name field of a daemon set ObjectNameFunc: func(obj runtime.Object) (string, error) { - return obj.(*expapi.Daemon).Name, nil + return obj.(*expapi.DaemonSet).Name, nil }, // Used to match objects based on labels/fields for list and watch PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher { - return daemon.MatchDaemon(label, field) + return daemonset.MatchDaemonSet(label, field) }, - EndpointName: "daemons", + EndpointName: "daemonsets", - // Used to validate daemon creation - CreateStrategy: daemon.Strategy, + // Used to validate daemon set creation + CreateStrategy: daemonset.Strategy, - // Used to validate daemon updates - UpdateStrategy: daemon.Strategy, + // Used to validate daemon set updates + UpdateStrategy: daemonset.Strategy, Storage: s, } diff --git a/pkg/registry/daemon/etcd/etcd_test.go b/pkg/registry/daemonset/etcd/etcd_test.go similarity index 84% rename from pkg/registry/daemon/etcd/etcd_test.go rename to pkg/registry/daemonset/etcd/etcd_test.go index 5d2510a7bf6..341ddd01830 100755 --- a/pkg/registry/daemon/etcd/etcd_test.go +++ b/pkg/registry/daemonset/etcd/etcd_test.go @@ -33,13 +33,13 @@ func newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) { return NewREST(etcdStorage), fakeClient } -func validNewDaemon() *expapi.Daemon { - return &expapi.Daemon{ +func newValidDaemonSet() *expapi.DaemonSet { + return &expapi.DaemonSet{ ObjectMeta: api.ObjectMeta{ Name: "foo", Namespace: api.NamespaceDefault, }, - Spec: 
expapi.DaemonSpec{ + Spec: expapi.DaemonSetSpec{ Selector: map[string]string{"a": "b"}, Template: &api.PodTemplateSpec{ ObjectMeta: api.ObjectMeta{ @@ -61,21 +61,21 @@ func validNewDaemon() *expapi.Daemon { } } -var validDaemon = validNewDaemon() +var validDaemonSet = newValidDaemonSet() func TestCreate(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) - daemon := validNewDaemon() - daemon.ObjectMeta = api.ObjectMeta{} + ds := newValidDaemonSet() + ds.ObjectMeta = api.ObjectMeta{} test.TestCreate( // valid - daemon, + ds, // invalid (invalid selector) - &expapi.Daemon{ - Spec: expapi.DaemonSpec{ + &expapi.DaemonSet{ + Spec: expapi.DaemonSetSpec{ Selector: map[string]string{}, - Template: validDaemon.Spec.Template, + Template: validDaemonSet.Spec.Template, }, }, ) @@ -86,31 +86,31 @@ func TestUpdate(t *testing.T) { test := registrytest.New(t, fakeClient, storage.Etcd) test.TestUpdate( // valid - validNewDaemon(), + newValidDaemonSet(), // updateFunc func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.Daemon) + object := obj.(*expapi.DaemonSet) object.Spec.Template.Spec.NodeSelector = map[string]string{"c": "d"} return object }, // invalid updateFunc func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.Daemon) + object := obj.(*expapi.DaemonSet) object.UID = "newUID" return object }, func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.Daemon) + object := obj.(*expapi.DaemonSet) object.Name = "" return object }, func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.Daemon) + object := obj.(*expapi.DaemonSet) object.Spec.Template.Spec.RestartPolicy = api.RestartPolicyOnFailure return object }, func(obj runtime.Object) runtime.Object { - object := obj.(*expapi.Daemon) + object := obj.(*expapi.DaemonSet) object.Spec.Selector = map[string]string{} return object }, @@ -120,26 +120,26 @@ func TestUpdate(t *testing.T) { func TestDelete(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) - test.TestDelete(validNewDaemon()) + test.TestDelete(newValidDaemonSet()) } func TestGet(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) - test.TestGet(validNewDaemon()) + test.TestGet(newValidDaemonSet()) } func TestList(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) - test.TestList(validNewDaemon()) + test.TestList(newValidDaemonSet()) } func TestWatch(t *testing.T) { storage, fakeClient := newStorage(t) test := registrytest.New(t, fakeClient, storage.Etcd) test.TestWatch( - validDaemon, + validDaemonSet, // matching labels []labels.Set{ {"a": "b"}, diff --git a/pkg/registry/daemon/strategy.go b/pkg/registry/daemonset/strategy.go similarity index 53% rename from pkg/registry/daemon/strategy.go rename to pkg/registry/daemonset/strategy.go index 2a9e7cb3796..9861ea464c1 100644 --- a/pkg/registry/daemon/strategy.go +++ b/pkg/registry/daemonset/strategy.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package daemon +package daemonset import ( "fmt" @@ -30,32 +30,32 @@ import ( "k8s.io/kubernetes/pkg/util/fielderrors" ) -// daemonStrategy implements verification logic for daemons. -type daemonStrategy struct { +// daemonSetStrategy implements verification logic for daemon sets. 
+type daemonSetStrategy struct { runtime.ObjectTyper api.NameGenerator } -// Strategy is the default logic that applies when creating and updating Daemon objects. -var Strategy = daemonStrategy{api.Scheme, api.SimpleNameGenerator} +// Strategy is the default logic that applies when creating and updating DaemonSet objects. +var Strategy = daemonSetStrategy{api.Scheme, api.SimpleNameGenerator} -// NamespaceScoped returns true because all Daemons need to be within a namespace. -func (daemonStrategy) NamespaceScoped() bool { +// NamespaceScoped returns true because all DaemonSets need to be within a namespace. +func (daemonSetStrategy) NamespaceScoped() bool { return true } -// PrepareForCreate clears the status of a daemon before creation. -func (daemonStrategy) PrepareForCreate(obj runtime.Object) { - daemon := obj.(*expapi.Daemon) - daemon.Status = expapi.DaemonStatus{} +// PrepareForCreate clears the status of a daemon set before creation. +func (daemonSetStrategy) PrepareForCreate(obj runtime.Object) { + daemonSet := obj.(*expapi.DaemonSet) + daemonSet.Status = expapi.DaemonSetStatus{} - daemon.Generation = 1 + daemonSet.Generation = 1 } // PrepareForUpdate clears fields that are not allowed to be set by end users on update. -func (daemonStrategy) PrepareForUpdate(obj, old runtime.Object) { - newDaemon := obj.(*expapi.Daemon) - oldDaemon := old.(*expapi.Daemon) +func (daemonSetStrategy) PrepareForUpdate(obj, old runtime.Object) { + newDaemonSet := obj.(*expapi.DaemonSet) + oldDaemonSet := old.(*expapi.DaemonSet) // Any changes to the spec increment the generation number, any changes to the // status should reflect the generation number of the corresponding object. We push @@ -64,59 +64,59 @@ func (daemonStrategy) PrepareForUpdate(obj, old runtime.Object) { // we can at first -- since obj contains spec -- but in the future we will probably make // status its own object, and even if we don't, writes may be the result of a // read-update-write loop, so the contents of spec may not actually be the spec that - // the controller has *seen*. + // the manager has *seen*. // // TODO: Any changes to a part of the object that represents desired state (labels, // annotations etc) should also increment the generation. - if !reflect.DeepEqual(oldDaemon.Spec, newDaemon.Spec) { - newDaemon.Generation = oldDaemon.Generation + 1 + if !reflect.DeepEqual(oldDaemonSet.Spec, newDaemonSet.Spec) { + newDaemonSet.Generation = oldDaemonSet.Generation + 1 } } -// Validate validates a new daemon. -func (daemonStrategy) Validate(ctx api.Context, obj runtime.Object) fielderrors.ValidationErrorList { - daemon := obj.(*expapi.Daemon) - return validation.ValidateDaemon(daemon) +// Validate validates a new daemon set. +func (daemonSetStrategy) Validate(ctx api.Context, obj runtime.Object) fielderrors.ValidationErrorList { + daemonSet := obj.(*expapi.DaemonSet) + return validation.ValidateDaemonSet(daemonSet) } -// AllowCreateOnUpdate is false for daemon; this means a POST is +// AllowCreateOnUpdate is false for daemon set; this means a POST is // needed to create one -func (daemonStrategy) AllowCreateOnUpdate() bool { +func (daemonSetStrategy) AllowCreateOnUpdate() bool { return false } // ValidateUpdate is the default update validation for an end user. 
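The PrepareForUpdate comment above encodes a simple rule: only spec changes bump Generation, while status-only writes inherit the old one. A toy illustration of that rule, with the expapi structs reduced to stand-in types (a sketch, not the real API objects):

package main

import (
	"fmt"
	"reflect"
)

// Stand-ins for the spec/status halves of a DaemonSet; the real types
// live in pkg/expapi.
type daemonSet struct {
	Generation int64
	Spec       map[string]string
	Status     string
}

func prepareForUpdate(obj, old *daemonSet) {
	// Same rule as daemonSetStrategy.PrepareForUpdate: status-only
	// writes keep the old generation, spec changes increment it.
	if !reflect.DeepEqual(old.Spec, obj.Spec) {
		obj.Generation = old.Generation + 1
	}
}

func main() {
	old := &daemonSet{Generation: 1, Spec: map[string]string{"a": "b"}}

	statusOnly := &daemonSet{Generation: 1, Spec: map[string]string{"a": "b"}, Status: "updated"}
	prepareForUpdate(statusOnly, old)
	fmt.Println(statusOnly.Generation) // 1: generation unchanged

	specChange := &daemonSet{Generation: 1, Spec: map[string]string{"a": "c"}}
	prepareForUpdate(specChange, old)
	fmt.Println(specChange.Generation) // 2: spec changed, generation bumped
}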
-func (daemonStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) fielderrors.ValidationErrorList { - validationErrorList := validation.ValidateDaemon(obj.(*expapi.Daemon)) - updateErrorList := validation.ValidateDaemonUpdate(old.(*expapi.Daemon), obj.(*expapi.Daemon)) +func (daemonSetStrategy) ValidateUpdate(ctx api.Context, obj, old runtime.Object) fielderrors.ValidationErrorList { + validationErrorList := validation.ValidateDaemonSet(obj.(*expapi.DaemonSet)) + updateErrorList := validation.ValidateDaemonSetUpdate(old.(*expapi.DaemonSet), obj.(*expapi.DaemonSet)) return append(validationErrorList, updateErrorList...) } -// AllowUnconditionalUpdate is the default update policy for daemon objects. -func (daemonStrategy) AllowUnconditionalUpdate() bool { +// AllowUnconditionalUpdate is the default update policy for daemon set objects. +func (daemonSetStrategy) AllowUnconditionalUpdate() bool { return true } -// DaemonToSelectableFields returns a field set that represents the object. -func DaemonToSelectableFields(daemon *expapi.Daemon) fields.Set { +// DaemonSetToSelectableFields returns a field set that represents the object. +func DaemonSetToSelectableFields(daemon *expapi.DaemonSet) fields.Set { return fields.Set{ "metadata.name": daemon.Name, } } -// MatchDaemon is the filter used by the generic etcd backend to route +// MatchDaemonSet is the filter used by the generic etcd backend to route // watch events from etcd to clients of the apiserver only interested in specific // labels/fields. -func MatchDaemon(label labels.Selector, field fields.Selector) generic.Matcher { +func MatchDaemonSet(label labels.Selector, field fields.Selector) generic.Matcher { return &generic.SelectionPredicate{ Label: label, Field: field, GetAttrs: func(obj runtime.Object) (labels.Set, fields.Set, error) { - daemon, ok := obj.(*expapi.Daemon) + ds, ok := obj.(*expapi.DaemonSet) if !ok { - return nil, nil, fmt.Errorf("given object is not a daemon.") + return nil, nil, fmt.Errorf("given object is not a daemon set.") } - return labels.Set(daemon.ObjectMeta.Labels), DaemonToSelectableFields(daemon), nil + return labels.Set(ds.ObjectMeta.Labels), DaemonSetToSelectableFields(ds), nil }, } } diff --git a/pkg/registry/generic/etcd/etcd_test.go b/pkg/registry/generic/etcd/etcd_test.go index cd389b7ef8c..8569ccc84df 100644 --- a/pkg/registry/generic/etcd/etcd_test.go +++ b/pkg/registry/generic/etcd/etcd_test.go @@ -31,6 +31,7 @@ import ( "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/tools/etcdtest" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/fielderrors" + "k8s.io/kubernetes/pkg/util/sets" "github.com/coreos/go-etcd/etcd" ) @@ -93,7 +94,7 @@ func NewTestGenericEtcdRegistry(t *testing.T) (*tools.FakeEtcdClient, *Etcd) { // setMatcher is a matcher that matches any pod with id in the set. // Makes testing simpler. 
type setMatcher struct { - util.StringSet + sets.String } func (sm setMatcher) Matches(obj runtime.Object) (bool, error) { @@ -189,7 +190,7 @@ func TestEtcdList(t *testing.T) { R: singleElemListResp, E: nil, }, - m: setMatcher{util.NewStringSet("foo")}, + m: setMatcher{sets.NewString("foo")}, out: &api.PodList{Items: []api.Pod{*podA}}, succeed: true, }, @@ -198,7 +199,7 @@ func TestEtcdList(t *testing.T) { R: normalListResp, E: nil, }, - m: setMatcher{util.NewStringSet("foo", "makeMatchSingleReturnFalse")}, + m: setMatcher{sets.NewString("foo", "makeMatchSingleReturnFalse")}, out: &api.PodList{Items: []api.Pod{*podA}}, succeed: true, }, @@ -560,8 +561,8 @@ func TestEtcdDelete(t *testing.T) { func TestEtcdWatch(t *testing.T) { table := map[string]generic.Matcher{ - "single": setMatcher{util.NewStringSet("foo")}, - "multi": setMatcher{util.NewStringSet("foo", "bar")}, + "single": setMatcher{sets.NewString("foo")}, + "multi": setMatcher{sets.NewString("foo", "bar")}, } for name, m := range table { diff --git a/pkg/registry/minion/doc.go b/pkg/registry/node/doc.go similarity index 85% rename from pkg/registry/minion/doc.go rename to pkg/registry/node/doc.go index b67a96d2b13..cd604b4ab41 100644 --- a/pkg/registry/minion/doc.go +++ b/pkg/registry/node/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package minion provides Registry interface and implementation for storing Minions. -package minion +// Package node provides Registry interface and implementation for storing Nodes. +package node diff --git a/pkg/registry/minion/etcd/etcd.go b/pkg/registry/node/etcd/etcd.go similarity index 90% rename from pkg/registry/minion/etcd/etcd.go rename to pkg/registry/node/etcd/etcd.go index 46f321513f0..ec66b5cf46f 100644 --- a/pkg/registry/minion/etcd/etcd.go +++ b/pkg/registry/node/etcd/etcd.go @@ -24,7 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api/rest" client "k8s.io/kubernetes/pkg/client/unversioned" etcdgeneric "k8s.io/kubernetes/pkg/registry/generic/etcd" - "k8s.io/kubernetes/pkg/registry/minion" + "k8s.io/kubernetes/pkg/registry/node" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/storage" ) @@ -79,17 +79,17 @@ func NewREST(s storage.Interface, useCacher bool, connection client.ConnectionIn ObjectNameFunc: func(obj runtime.Object) (string, error) { return obj.(*api.Node).Name, nil }, - PredicateFunc: minion.MatchNode, + PredicateFunc: node.MatchNode, EndpointName: "node", - CreateStrategy: minion.Strategy, - UpdateStrategy: minion.Strategy, + CreateStrategy: node.Strategy, + UpdateStrategy: node.Strategy, Storage: storageInterface, } statusStore := *store - statusStore.UpdateStrategy = minion.StatusStrategy + statusStore.UpdateStrategy = node.StatusStrategy return &REST{store, connection}, &StatusREST{store: &statusStore} } @@ -97,7 +97,7 @@ func NewREST(s storage.Interface, useCacher bool, connection client.ConnectionIn // Implement Redirector. var _ = rest.Redirector(&REST{}) -// ResourceLocation returns a URL to which one can send traffic for the specified minion. +// ResourceLocation returns a URL to which one can send traffic for the specified node. 
func (r *REST) ResourceLocation(ctx api.Context, id string) (*url.URL, http.RoundTripper, error) { - return minion.ResourceLocation(r, r.connection, ctx, id) + return node.ResourceLocation(r, r.connection, ctx, id) } diff --git a/pkg/registry/minion/etcd/etcd_test.go b/pkg/registry/node/etcd/etcd_test.go similarity index 100% rename from pkg/registry/minion/etcd/etcd_test.go rename to pkg/registry/node/etcd/etcd_test.go diff --git a/pkg/registry/minion/registry.go b/pkg/registry/node/registry.go similarity index 60% rename from pkg/registry/minion/registry.go rename to pkg/registry/node/registry.go index d67f983e286..e587b6c0f14 100644 --- a/pkg/registry/minion/registry.go +++ b/pkg/registry/node/registry.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package minion +package node import ( "k8s.io/kubernetes/pkg/api" @@ -26,12 +26,12 @@ import ( // Registry is an interface for things that know how to store node. type Registry interface { - ListMinions(ctx api.Context, label labels.Selector, field fields.Selector) (*api.NodeList, error) - CreateMinion(ctx api.Context, minion *api.Node) error - UpdateMinion(ctx api.Context, minion *api.Node) error - GetMinion(ctx api.Context, minionID string) (*api.Node, error) - DeleteMinion(ctx api.Context, minionID string) error - WatchMinions(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) + ListNodes(ctx api.Context, label labels.Selector, field fields.Selector) (*api.NodeList, error) + CreateNode(ctx api.Context, node *api.Node) error + UpdateNode(ctx api.Context, node *api.Node) error + GetNode(ctx api.Context, nodeID string) (*api.Node, error) + DeleteNode(ctx api.Context, nodeID string) error + WatchNodes(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) } // storage puts strong typing around storage calls @@ -45,7 +45,7 @@ func NewRegistry(s rest.StandardStorage) Registry { return &storage{s} } -func (s *storage) ListMinions(ctx api.Context, label labels.Selector, field fields.Selector) (*api.NodeList, error) { +func (s *storage) ListNodes(ctx api.Context, label labels.Selector, field fields.Selector) (*api.NodeList, error) { obj, err := s.List(ctx, label, field) if err != nil { return nil, err @@ -54,21 +54,21 @@ func (s *storage) ListMinions(ctx api.Context, label labels.Selector, field fiel return obj.(*api.NodeList), nil } -func (s *storage) CreateMinion(ctx api.Context, node *api.Node) error { +func (s *storage) CreateNode(ctx api.Context, node *api.Node) error { _, err := s.Create(ctx, node) return err } -func (s *storage) UpdateMinion(ctx api.Context, node *api.Node) error { +func (s *storage) UpdateNode(ctx api.Context, node *api.Node) error { _, _, err := s.Update(ctx, node) return err } -func (s *storage) WatchMinions(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { +func (s *storage) WatchNodes(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { return s.Watch(ctx, label, field, resourceVersion) } -func (s *storage) GetMinion(ctx api.Context, name string) (*api.Node, error) { +func (s *storage) GetNode(ctx api.Context, name string) (*api.Node, error) { obj, err := s.Get(ctx, name) if err != nil { return nil, err @@ -76,7 +76,7 @@ func (s *storage) GetMinion(ctx api.Context, name string) (*api.Node, error) { return 
obj.(*api.Node), nil } -func (s *storage) DeleteMinion(ctx api.Context, name string) error { +func (s *storage) DeleteNode(ctx api.Context, name string) error { _, err := s.Delete(ctx, name, nil) return err } diff --git a/pkg/registry/minion/strategy.go b/pkg/registry/node/strategy.go similarity index 99% rename from pkg/registry/minion/strategy.go rename to pkg/registry/node/strategy.go index 0c775ee66ee..939fc106dca 100644 --- a/pkg/registry/minion/strategy.go +++ b/pkg/registry/node/strategy.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package minion +package node import ( "fmt" diff --git a/pkg/registry/minion/strategy_test.go b/pkg/registry/node/strategy_test.go similarity index 98% rename from pkg/registry/minion/strategy_test.go rename to pkg/registry/node/strategy_test.go index 7708a5abadd..4b0f998ea51 100644 --- a/pkg/registry/minion/strategy_test.go +++ b/pkg/registry/node/strategy_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package minion +package node import ( "testing" diff --git a/pkg/registry/registrytest/minion.go b/pkg/registry/registrytest/minion.go deleted file mode 100644 index 7c0ba439e81..00000000000 --- a/pkg/registry/registrytest/minion.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package registrytest - -import ( - "sync" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/errors" - "k8s.io/kubernetes/pkg/fields" - "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/watch" -) - -// MinionRegistry implements minion.Registry interface. -type MinionRegistry struct { - Err error - Minion string - Minions api.NodeList - - sync.Mutex -} - -// MakeMinionList constructs api.MinionList from list of minion names and a NodeResource. 
-func MakeMinionList(minions []string, nodeResources api.NodeResources) *api.NodeList { - list := api.NodeList{ - Items: make([]api.Node, len(minions)), - } - for i := range minions { - list.Items[i].Name = minions[i] - list.Items[i].Status.Capacity = nodeResources.Capacity - } - return &list -} - -func NewMinionRegistry(minions []string, nodeResources api.NodeResources) *MinionRegistry { - return &MinionRegistry{ - Minions: *MakeMinionList(minions, nodeResources), - } -} - -func (r *MinionRegistry) SetError(err error) { - r.Lock() - defer r.Unlock() - r.Err = err -} - -func (r *MinionRegistry) ListMinions(ctx api.Context, label labels.Selector, field fields.Selector) (*api.NodeList, error) { - r.Lock() - defer r.Unlock() - return &r.Minions, r.Err -} - -func (r *MinionRegistry) CreateMinion(ctx api.Context, minion *api.Node) error { - r.Lock() - defer r.Unlock() - r.Minion = minion.Name - r.Minions.Items = append(r.Minions.Items, *minion) - return r.Err -} - -func (r *MinionRegistry) UpdateMinion(ctx api.Context, minion *api.Node) error { - r.Lock() - defer r.Unlock() - for i, node := range r.Minions.Items { - if node.Name == minion.Name { - r.Minions.Items[i] = *minion - return r.Err - } - } - return r.Err -} - -func (r *MinionRegistry) GetMinion(ctx api.Context, minionID string) (*api.Node, error) { - r.Lock() - defer r.Unlock() - if r.Err != nil { - return nil, r.Err - } - for _, node := range r.Minions.Items { - if node.Name == minionID { - return &node, nil - } - } - return nil, errors.NewNotFound("node", minionID) -} - -func (r *MinionRegistry) DeleteMinion(ctx api.Context, minionID string) error { - r.Lock() - defer r.Unlock() - var newList []api.Node - for _, node := range r.Minions.Items { - if node.Name != minionID { - newList = append(newList, api.Node{ObjectMeta: api.ObjectMeta{Name: node.Name}}) - } - } - r.Minions.Items = newList - return r.Err -} - -func (r *MinionRegistry) WatchMinions(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - return nil, r.Err -} diff --git a/pkg/registry/registrytest/node.go b/pkg/registry/registrytest/node.go new file mode 100644 index 00000000000..c2d2476f568 --- /dev/null +++ b/pkg/registry/registrytest/node.go @@ -0,0 +1,117 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registrytest + +import ( + "sync" + + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/fields" + "k8s.io/kubernetes/pkg/labels" + "k8s.io/kubernetes/pkg/watch" +) + +// NodeRegistry implements node.Registry interface. +type NodeRegistry struct { + Err error + Node string + Nodes api.NodeList + + sync.Mutex +} + +// MakeNodeList constructs api.NodeList from list of node names and a NodeResource. 
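The fake registry replacing MinionRegistry here is wired into tests the same way its predecessor was, as the master_test.go hunk earlier shows. A hypothetical test using the renamed constructor and interface methods (node names are arbitrary):

package registrytest_test

import (
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/registry/registrytest"
)

func TestListNodes(t *testing.T) {
	// Two fake nodes with empty capacity.
	registry := registrytest.NewNodeRegistry([]string{"node1", "node2"}, api.NodeResources{})
	nodes, err := registry.ListNodes(api.NewDefaultContext(), labels.Everything(), fields.Everything())
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(nodes.Items) != 2 {
		t.Errorf("expected 2 nodes, got %d", len(nodes.Items))
	}
}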
+func MakeNodeList(nodes []string, nodeResources api.NodeResources) *api.NodeList { + list := api.NodeList{ + Items: make([]api.Node, len(nodes)), + } + for i := range nodes { + list.Items[i].Name = nodes[i] + list.Items[i].Status.Capacity = nodeResources.Capacity + } + return &list +} + +func NewNodeRegistry(nodes []string, nodeResources api.NodeResources) *NodeRegistry { + return &NodeRegistry{ + Nodes: *MakeNodeList(nodes, nodeResources), + } +} + +func (r *NodeRegistry) SetError(err error) { + r.Lock() + defer r.Unlock() + r.Err = err +} + +func (r *NodeRegistry) ListNodes(ctx api.Context, label labels.Selector, field fields.Selector) (*api.NodeList, error) { + r.Lock() + defer r.Unlock() + return &r.Nodes, r.Err +} + +func (r *NodeRegistry) CreateNode(ctx api.Context, node *api.Node) error { + r.Lock() + defer r.Unlock() + r.Node = node.Name + r.Nodes.Items = append(r.Nodes.Items, *node) + return r.Err +} + +func (r *NodeRegistry) UpdateNode(ctx api.Context, node *api.Node) error { + r.Lock() + defer r.Unlock() + for i, item := range r.Nodes.Items { + if item.Name == node.Name { + r.Nodes.Items[i] = *node + return r.Err + } + } + return r.Err +} + +func (r *NodeRegistry) GetNode(ctx api.Context, nodeID string) (*api.Node, error) { + r.Lock() + defer r.Unlock() + if r.Err != nil { + return nil, r.Err + } + for _, node := range r.Nodes.Items { + if node.Name == nodeID { + return &node, nil + } + } + return nil, errors.NewNotFound("node", nodeID) +} + +func (r *NodeRegistry) DeleteNode(ctx api.Context, nodeID string) error { + r.Lock() + defer r.Unlock() + var newList []api.Node + for _, node := range r.Nodes.Items { + if node.Name != nodeID { + newList = append(newList, api.Node{ObjectMeta: api.ObjectMeta{Name: node.Name}}) + } + } + r.Nodes.Items = newList + return r.Err +} + +func (r *NodeRegistry) WatchNodes(ctx api.Context, label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { + return nil, r.Err +} diff --git a/pkg/registry/service/ipallocator/allocator_test.go b/pkg/registry/service/ipallocator/allocator_test.go index 3d84053449d..c853ebab2e6 100644 --- a/pkg/registry/service/ipallocator/allocator_test.go +++ b/pkg/registry/service/ipallocator/allocator_test.go @@ -21,7 +21,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) func TestAllocate(t *testing.T) { @@ -34,7 +34,7 @@ func TestAllocate(t *testing.T) { if f := r.Free(); f != 254 { t.Errorf("unexpected free %d", f) } - found := util.NewStringSet() + found := sets.NewString() count := 0 for r.Free() > 0 { ip, err := r.AllocateNext() @@ -118,7 +118,7 @@ func TestAllocateSmall(t *testing.T) { if f := r.Free(); f != 2 { t.Errorf("free: %d", f) } - found := util.NewStringSet() + found := sets.NewString() for i := 0; i < 2; i++ { ip, err := r.AllocateNext() if err != nil { diff --git a/pkg/registry/service/portallocator/allocator_test.go b/pkg/registry/service/portallocator/allocator_test.go index b7b108cefb0..dd6d5dd1701 100644 --- a/pkg/registry/service/portallocator/allocator_test.go +++ b/pkg/registry/service/portallocator/allocator_test.go @@ -23,6 +23,7 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) func TestAllocate(t *testing.T) { @@ -34,7 +35,7 @@ func TestAllocate(t *testing.T) { if f := r.Free(); f != 201 { t.Errorf("unexpected free %d", f) } - found := util.NewStringSet() + found := sets.NewString() count := 0 for r.Free() > 0 { p, err := 
r.AllocateNext() diff --git a/pkg/runtime/conversion_generator.go b/pkg/runtime/conversion_generator.go index 485d13d612e..979313ee53b 100644 --- a/pkg/runtime/conversion_generator.go +++ b/pkg/runtime/conversion_generator.go @@ -25,7 +25,7 @@ import ( "strings" "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) type ConversionGenerator interface { @@ -33,7 +33,7 @@ type ConversionGenerator interface { WriteConversionFunctions(w io.Writer) error RegisterConversionFunctions(w io.Writer, pkg string) error AddImport(pkg string) string - RepackImports(exclude util.StringSet) + RepackImports(exclude sets.String) WriteImports(w io.Writer) error OverwritePackage(pkg, overwrite string) AssumePrivateConversions() @@ -279,7 +279,7 @@ func (g *conversionGenerator) targetPackage(pkg string) { g.shortImports[""] = pkg } -func (g *conversionGenerator) RepackImports(exclude util.StringSet) { +func (g *conversionGenerator) RepackImports(exclude sets.String) { var packages []string for key := range g.imports { packages = append(packages, key) diff --git a/pkg/runtime/deep_copy_generator.go b/pkg/runtime/deep_copy_generator.go index 45fe3a01926..5dee4e263ab 100644 --- a/pkg/runtime/deep_copy_generator.go +++ b/pkg/runtime/deep_copy_generator.go @@ -25,7 +25,7 @@ import ( "strings" "k8s.io/kubernetes/pkg/conversion" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // TODO(wojtek-t): As suggested in #8320, we should consider the strategy @@ -69,7 +69,7 @@ type DeepCopyGenerator interface { OverwritePackage(pkg, overwrite string) } -func NewDeepCopyGenerator(scheme *conversion.Scheme, targetPkg string, include util.StringSet) DeepCopyGenerator { +func NewDeepCopyGenerator(scheme *conversion.Scheme, targetPkg string, include sets.String) DeepCopyGenerator { g := &deepCopyGenerator{ scheme: scheme, targetPkg: targetPkg, @@ -100,7 +100,7 @@ type deepCopyGenerator struct { shortImports map[string]string pkgOverwrites map[string]string replace map[pkgPathNamePair]reflect.Type - include util.StringSet + include sets.String } func (g *deepCopyGenerator) addImportByPath(pkg string) string { diff --git a/pkg/storage/cacher_test.go b/pkg/storage/cacher_test.go index b9b7895efac..b0c61e86e14 100644 --- a/pkg/storage/cacher_test.go +++ b/pkg/storage/cacher_test.go @@ -34,6 +34,7 @@ import ( "k8s.io/kubernetes/pkg/tools" "k8s.io/kubernetes/pkg/tools/etcdtest" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/watch" ) @@ -160,7 +161,7 @@ func TestListFromMemory(t *testing.T) { if len(result.Items) != 2 { t.Errorf("unexpected list result: %d", len(result.Items)) } - keys := util.StringSet{} + keys := sets.String{} for _, item := range result.Items { keys.Insert(item.ObjectMeta.Name) } diff --git a/pkg/storage/watch_cache_test.go b/pkg/storage/watch_cache_test.go index f94e35d94f4..6ab467e8ba1 100644 --- a/pkg/storage/watch_cache_test.go +++ b/pkg/storage/watch_cache_test.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/pkg/client/unversioned/cache" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" ) @@ -76,7 +77,7 @@ func TestWatchCacheBasic(t *testing.T) { store.Add(makeTestPod("pod2", 5)) store.Add(makeTestPod("pod3", 6)) { - podNames := util.StringSet{} + podNames := sets.String{} for _, item := range store.List() { podNames.Insert(item.(*api.Pod).ObjectMeta.Name) } @@ -94,7 +95,7 @@ func 
TestWatchCacheBasic(t *testing.T) { makeTestPod("pod5", 8), }, "8") { - podNames := util.StringSet{} + podNames := sets.String{} for _, item := range store.List() { podNames.Insert(item.(*api.Pod).ObjectMeta.Name) } diff --git a/pkg/util/bandwidth/linux.go b/pkg/util/bandwidth/linux.go index e1ebd687b1a..7989394ec94 100644 --- a/pkg/util/bandwidth/linux.go +++ b/pkg/util/bandwidth/linux.go @@ -25,8 +25,8 @@ import ( "strings" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/exec" + "k8s.io/kubernetes/pkg/util/sets" "github.com/golang/glog" ) @@ -65,7 +65,7 @@ func (t *tcShaper) nextClassID() (int, error) { } scanner := bufio.NewScanner(bytes.NewBuffer(data)) - classes := util.StringSet{} + classes := sets.String{} for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) // skip empty lines diff --git a/pkg/util/iptables/iptables.go b/pkg/util/iptables/iptables.go index 5de379a9fe5..ab0f0e71ff5 100644 --- a/pkg/util/iptables/iptables.go +++ b/pkg/util/iptables/iptables.go @@ -26,8 +26,8 @@ import ( "github.com/coreos/go-semver/semver" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util" utilexec "k8s.io/kubernetes/pkg/util/exec" + "k8s.io/kubernetes/pkg/util/sets" ) type RulePosition string @@ -352,7 +352,7 @@ func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...st tmpField := strings.Trim(args[i], "\"") argsCopy = append(argsCopy, strings.Fields(tmpField)...) } - argset := util.NewStringSet(argsCopy...) + argset := sets.NewString(argsCopy...) for _, line := range strings.Split(string(out), "\n") { var fields = strings.Fields(line) @@ -370,7 +370,7 @@ func (runner *runner) checkRuleWithoutCheck(table Table, chain Chain, args ...st } // TODO: This misses reorderings e.g. "-x foo ! -y bar" will match "! -x foo -y bar" - if util.NewStringSet(fields...).IsSuperset(argset) { + if sets.NewString(fields...).IsSuperset(argset) { return true, nil } glog.V(5).Infof("DBG: fields is not a superset of args: fields=%v args=%v", fields, args) diff --git a/pkg/util/iptables/iptables_test.go b/pkg/util/iptables/iptables_test.go index b515df2614d..c8f7c1a100e 100644 --- a/pkg/util/iptables/iptables_test.go +++ b/pkg/util/iptables/iptables_test.go @@ -20,8 +20,8 @@ import ( "strings" "testing" - "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/exec" + "k8s.io/kubernetes/pkg/util/sets" ) func getIptablesCommand(protocol Protocol) string { @@ -68,7 +68,7 @@ func testEnsureChain(t *testing.T, protocol Protocol) { t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) } cmd := getIptablesCommand(protocol) - if !util.NewStringSet(fcmd.CombinedOutputLog[1]...).HasAll(cmd, "-t", "nat", "-N", "FOOBAR") { + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll(cmd, "-t", "nat", "-N", "FOOBAR") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) } // Exists. @@ -121,7 +121,7 @@ func TestFlushChain(t *testing.T) { if fcmd.CombinedOutputCalls != 2 { t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) } - if !util.NewStringSet(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-F", "FOOBAR") { + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-F", "FOOBAR") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) } // Failure. 
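The assertions in this test file all reduce to one pattern: wrap the recorded argv in a set and test membership, order-insensitively. Below is a minimal sketch of that pattern, assuming only the sets package introduced by this change; the recorded command line is illustrative, not taken from the tests themselves.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	// Hypothetical recorded invocation, standing in for fcmd.CombinedOutputLog[1].
	recorded := []string{"iptables", "-t", "nat", "-N", "FOOBAR"}

	// HasAll: every expected token must be present, in any order.
	if !sets.NewString(recorded...).HasAll("iptables", "-t", "nat", "-N", "FOOBAR") {
		fmt.Println("wrong command line")
	}

	// IsSuperset: the same check expressed set-against-set, as in
	// checkRuleWithoutCheck above. Like that code, it cannot detect
	// reorderings such as "! -x foo -y bar" vs "-x foo ! -y bar".
	argset := sets.NewString("-t", "nat")
	fmt.Println(sets.NewString(recorded...).IsSuperset(argset)) // true
}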
@@ -158,7 +158,7 @@ func TestDeleteChain(t *testing.T) { if fcmd.CombinedOutputCalls != 2 { t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) } - if !util.NewStringSet(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-X", "FOOBAR") { + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-X", "FOOBAR") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) } // Failure. @@ -196,7 +196,7 @@ func TestEnsureRuleAlreadyExists(t *testing.T) { if fcmd.CombinedOutputCalls != 2 { t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) } - if !util.NewStringSet(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-C", "OUTPUT", "abc", "123") { + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-C", "OUTPUT", "abc", "123") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) } } @@ -232,7 +232,7 @@ func TestEnsureRuleNew(t *testing.T) { if fcmd.CombinedOutputCalls != 3 { t.Errorf("expected 3 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) } - if !util.NewStringSet(fcmd.CombinedOutputLog[2]...).HasAll("iptables", "-t", "nat", "-A", "OUTPUT", "abc", "123") { + if !sets.NewString(fcmd.CombinedOutputLog[2]...).HasAll("iptables", "-t", "nat", "-A", "OUTPUT", "abc", "123") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[2]) } } @@ -319,7 +319,7 @@ func TestDeleteRuleAlreadyExists(t *testing.T) { if fcmd.CombinedOutputCalls != 2 { t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) } - if !util.NewStringSet(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-C", "OUTPUT", "abc", "123") { + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-t", "nat", "-C", "OUTPUT", "abc", "123") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) } } @@ -352,7 +352,7 @@ func TestDeleteRuleNew(t *testing.T) { if fcmd.CombinedOutputCalls != 3 { t.Errorf("expected 3 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) } - if !util.NewStringSet(fcmd.CombinedOutputLog[2]...).HasAll("iptables", "-t", "nat", "-D", "OUTPUT", "abc", "123") { + if !sets.NewString(fcmd.CombinedOutputLog[2]...).HasAll("iptables", "-t", "nat", "-D", "OUTPUT", "abc", "123") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[2]) } } @@ -484,7 +484,7 @@ COMMIT if fcmd.CombinedOutputCalls != 1 { t.Errorf("expected 1 CombinedOutput() call, got %d", fcmd.CombinedOutputCalls) } - if !util.NewStringSet(fcmd.CombinedOutputLog[0]...).HasAll("iptables-save", "-t", "nat") { + if !sets.NewString(fcmd.CombinedOutputLog[0]...).HasAll("iptables-save", "-t", "nat") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[0]) } } @@ -522,7 +522,7 @@ COMMIT if fcmd.CombinedOutputCalls != 1 { t.Errorf("expected 1 CombinedOutput() call, got %d", fcmd.CombinedOutputCalls) } - if !util.NewStringSet(fcmd.CombinedOutputLog[0]...).HasAll("iptables-save", "-t", "nat") { + if !sets.NewString(fcmd.CombinedOutputLog[0]...).HasAll("iptables-save", "-t", "nat") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[0]) } } @@ -573,7 +573,7 @@ func TestWaitFlagUnavailable(t *testing.T) { if fcmd.CombinedOutputCalls != 2 { t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) } - if util.NewStringSet(fcmd.CombinedOutputLog[1]...).HasAny("-w", "-w2") { + if 
sets.NewString(fcmd.CombinedOutputLog[1]...).HasAny("-w", "-w2") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) } } @@ -601,10 +601,10 @@ func TestWaitFlagOld(t *testing.T) { if fcmd.CombinedOutputCalls != 2 { t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) } - if !util.NewStringSet(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-w") { + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-w") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) } - if util.NewStringSet(fcmd.CombinedOutputLog[1]...).HasAny("-w2") { + if sets.NewString(fcmd.CombinedOutputLog[1]...).HasAny("-w2") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) } } @@ -632,10 +632,10 @@ func TestWaitFlagNew(t *testing.T) { if fcmd.CombinedOutputCalls != 2 { t.Errorf("expected 2 CombinedOutput() calls, got %d", fcmd.CombinedOutputCalls) } - if !util.NewStringSet(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-w2") { + if !sets.NewString(fcmd.CombinedOutputLog[1]...).HasAll("iptables", "-w2") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) } - if util.NewStringSet(fcmd.CombinedOutputLog[1]...).HasAny("-w") { + if sets.NewString(fcmd.CombinedOutputLog[1]...).HasAny("-w") { t.Errorf("wrong CombinedOutput() log, got %s", fcmd.CombinedOutputLog[1]) } } diff --git a/pkg/util/proxy/transport.go b/pkg/util/proxy/transport.go index b4f5b6b49d2..ca88147752d 100644 --- a/pkg/util/proxy/transport.go +++ b/pkg/util/proxy/transport.go @@ -31,38 +31,38 @@ import ( "golang.org/x/net/html" "golang.org/x/net/html/atom" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) // atomsToAttrs states which attributes of which tags require URL substitution. 
// Sources: http://www.w3.org/TR/REC-html40/index/attributes.html // http://www.w3.org/html/wg/drafts/html/master/index.html#attributes-1 -var atomsToAttrs = map[atom.Atom]util.StringSet{ - atom.A: util.NewStringSet("href"), - atom.Applet: util.NewStringSet("codebase"), - atom.Area: util.NewStringSet("href"), - atom.Audio: util.NewStringSet("src"), - atom.Base: util.NewStringSet("href"), - atom.Blockquote: util.NewStringSet("cite"), - atom.Body: util.NewStringSet("background"), - atom.Button: util.NewStringSet("formaction"), - atom.Command: util.NewStringSet("icon"), - atom.Del: util.NewStringSet("cite"), - atom.Embed: util.NewStringSet("src"), - atom.Form: util.NewStringSet("action"), - atom.Frame: util.NewStringSet("longdesc", "src"), - atom.Head: util.NewStringSet("profile"), - atom.Html: util.NewStringSet("manifest"), - atom.Iframe: util.NewStringSet("longdesc", "src"), - atom.Img: util.NewStringSet("longdesc", "src", "usemap"), - atom.Input: util.NewStringSet("src", "usemap", "formaction"), - atom.Ins: util.NewStringSet("cite"), - atom.Link: util.NewStringSet("href"), - atom.Object: util.NewStringSet("classid", "codebase", "data", "usemap"), - atom.Q: util.NewStringSet("cite"), - atom.Script: util.NewStringSet("src"), - atom.Source: util.NewStringSet("src"), - atom.Video: util.NewStringSet("poster", "src"), +var atomsToAttrs = map[atom.Atom]sets.String{ + atom.A: sets.NewString("href"), + atom.Applet: sets.NewString("codebase"), + atom.Area: sets.NewString("href"), + atom.Audio: sets.NewString("src"), + atom.Base: sets.NewString("href"), + atom.Blockquote: sets.NewString("cite"), + atom.Body: sets.NewString("background"), + atom.Button: sets.NewString("formaction"), + atom.Command: sets.NewString("icon"), + atom.Del: sets.NewString("cite"), + atom.Embed: sets.NewString("src"), + atom.Form: sets.NewString("action"), + atom.Frame: sets.NewString("longdesc", "src"), + atom.Head: sets.NewString("profile"), + atom.Html: sets.NewString("manifest"), + atom.Iframe: sets.NewString("longdesc", "src"), + atom.Img: sets.NewString("longdesc", "src", "usemap"), + atom.Input: sets.NewString("src", "usemap", "formaction"), + atom.Ins: sets.NewString("cite"), + atom.Link: sets.NewString("href"), + atom.Object: sets.NewString("classid", "codebase", "data", "usemap"), + atom.Q: sets.NewString("cite"), + atom.Script: sets.NewString("src"), + atom.Source: sets.NewString("src"), + atom.Video: sets.NewString("poster", "src"), // TODO: css URLs hidden in style elements. } diff --git a/pkg/util/set.go b/pkg/util/sets/set.go similarity index 79% rename from pkg/util/set.go rename to pkg/util/sets/set.go index 431312c6d51..a0b8900b361 100644 --- a/pkg/util/set.go +++ b/pkg/util/sets/set.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package sets import ( "reflect" @@ -26,19 +26,19 @@ import ( type Empty struct{} // StringSet is a set of strings, implemented via map[string]struct{} for minimal memory consumption. -type StringSet map[string]Empty +type String map[string]Empty -// NewStringSet creates a StringSet from a list of values. -func NewStringSet(items ...string) StringSet { - ss := StringSet{} +// New creates a StringSet from a list of values. +func NewString(items ...string) String { + ss := String{} ss.Insert(items...) return ss } // KeySet creates a StringSet from a keys of a map[string](? extends interface{}). Since you can't describe that map type in the Go type system // the reflected value is required. 
-func KeySet(theMap reflect.Value) StringSet { - ret := StringSet{} +func KeySet(theMap reflect.Value) String { + ret := String{} for _, keyValue := range theMap.MapKeys() { ret.Insert(keyValue.String()) @@ -48,27 +48,27 @@ func KeySet(theMap reflect.Value) StringSet { } // Insert adds items to the set. -func (s StringSet) Insert(items ...string) { +func (s String) Insert(items ...string) { for _, item := range items { s[item] = Empty{} } } // Delete removes all items from the set. -func (s StringSet) Delete(items ...string) { +func (s String) Delete(items ...string) { for _, item := range items { delete(s, item) } } // Has returns true iff item is contained in the set. -func (s StringSet) Has(item string) bool { +func (s String) Has(item string) bool { _, contained := s[item] return contained } // HasAll returns true iff all items are contained in the set. -func (s StringSet) HasAll(items ...string) bool { +func (s String) HasAll(items ...string) bool { for _, item := range items { if !s.Has(item) { return false @@ -78,7 +78,7 @@ func (s StringSet) HasAll(items ...string) bool { } // HasAny returns true if any items are contained in the set. -func (s StringSet) HasAny(items ...string) bool { +func (s String) HasAny(items ...string) bool { for _, item := range items { if s.Has(item) { return true @@ -93,8 +93,8 @@ func (s StringSet) HasAny(items ...string) bool { // s2 = {1, 2, 4, 5} // s1.Difference(s2) = {3} // s2.Difference(s1) = {4, 5} -func (s StringSet) Difference(s2 StringSet) StringSet { - result := NewStringSet() +func (s String) Difference(s2 String) String { + result := NewString() for key := range s { if !s2.Has(key) { result.Insert(key) @@ -110,8 +110,8 @@ func (s StringSet) Difference(s2 StringSet) StringSet { // s2 = {3, 4} // s1.Union(s2) = {1, 2, 3, 4} // s2.Union(s1) = {1, 2, 3, 4} -func (s1 StringSet) Union(s2 StringSet) StringSet { - result := NewStringSet() +func (s1 String) Union(s2 String) String { + result := NewString() for key := range s1 { result.Insert(key) } @@ -122,7 +122,7 @@ func (s1 StringSet) Union(s2 StringSet) StringSet { } // IsSuperset returns true iff s1 is a superset of s2. -func (s1 StringSet) IsSuperset(s2 StringSet) bool { +func (s1 String) IsSuperset(s2 String) bool { for item := range s2 { if !s1.Has(item) { return false @@ -134,7 +134,7 @@ func (s1 StringSet) IsSuperset(s2 StringSet) bool { // Equal returns true iff s1 is equal (as a set) to s2. // Two sets are equal if their membership is identical. // (In practice, this means same elements, order doesn't matter) -func (s1 StringSet) Equal(s2 StringSet) bool { +func (s1 String) Equal(s2 String) bool { if len(s1) != len(s2) { return false } @@ -147,7 +147,7 @@ func (s1 StringSet) Equal(s2 StringSet) bool { } // List returns the contents as a sorted string slice. -func (s StringSet) List() []string { +func (s String) List() []string { res := make([]string, 0, len(s)) for key := range s { res = append(res, key) @@ -157,7 +157,7 @@ func (s StringSet) List() []string { } // Returns a single element from the set. -func (s StringSet) PopAny() (string, bool) { +func (s String) PopAny() (string, bool) { for key := range s { s.Delete(key) return key, true @@ -166,6 +166,6 @@ func (s StringSet) PopAny() (string, bool) { } // Len returns the size of the set. 
-func (s StringSet) Len() int { +func (s String) Len() int { return len(s) } diff --git a/pkg/util/set_test.go b/pkg/util/sets/set_test.go similarity index 88% rename from pkg/util/set_test.go rename to pkg/util/sets/set_test.go index 936891feebf..dd401cfd197 100644 --- a/pkg/util/set_test.go +++ b/pkg/util/sets/set_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package util +package sets import ( "reflect" @@ -22,8 +22,8 @@ import ( ) func TestStringSet(t *testing.T) { - s := StringSet{} - s2 := StringSet{} + s := String{} + s2 := String{} if len(s) != 0 { t.Errorf("Expected len=0: %d", len(s)) } @@ -60,7 +60,7 @@ func TestStringSet(t *testing.T) { } func TestStringSetDeleteMultiples(t *testing.T) { - s := StringSet{} + s := String{} s.Insert("a", "b", "c") if len(s) != 3 { t.Errorf("Expected len=3: %d", len(s)) @@ -83,7 +83,7 @@ func TestStringSetDeleteMultiples(t *testing.T) { } func TestNewStringSet(t *testing.T) { - s := NewStringSet("a", "b", "c") + s := NewString("a", "b", "c") if len(s) != 3 { t.Errorf("Expected len=3: %d", len(s)) } @@ -93,15 +93,15 @@ func TestNewStringSet(t *testing.T) { } func TestStringSetList(t *testing.T) { - s := NewStringSet("z", "y", "x", "a") + s := NewString("z", "y", "x", "a") if !reflect.DeepEqual(s.List(), []string{"a", "x", "y", "z"}) { t.Errorf("List gave unexpected result: %#v", s.List()) } } func TestStringSetDifference(t *testing.T) { - a := NewStringSet("1", "2", "3") - b := NewStringSet("1", "2", "4", "5") + a := NewString("1", "2", "3") + b := NewString("1", "2", "4", "5") c := a.Difference(b) d := b.Difference(a) if len(c) != 1 { @@ -119,7 +119,7 @@ func TestStringSetDifference(t *testing.T) { } func TestStringSetHasAny(t *testing.T) { - a := NewStringSet("1", "2", "3") + a := NewString("1", "2", "3") if !a.HasAny("1", "4") { t.Errorf("expected true, got false") @@ -132,37 +132,37 @@ func TestStringSetHasAny(t *testing.T) { func TestStringSetEquals(t *testing.T) { // Simple case (order doesn't matter) - a := NewStringSet("1", "2") - b := NewStringSet("2", "1") + a := NewString("1", "2") + b := NewString("2", "1") if !a.Equal(b) { t.Errorf("Expected to be equal: %v vs %v", a, b) } // It is a set; duplicates are ignored - b = NewStringSet("2", "2", "1") + b = NewString("2", "2", "1") if !a.Equal(b) { t.Errorf("Expected to be equal: %v vs %v", a, b) } // Edge cases around empty sets / empty strings - a = NewStringSet() - b = NewStringSet() + a = NewString() + b = NewString() if !a.Equal(b) { t.Errorf("Expected to be equal: %v vs %v", a, b) } - b = NewStringSet("1", "2", "3") + b = NewString("1", "2", "3") if a.Equal(b) { t.Errorf("Expected to be not-equal: %v vs %v", a, b) } - b = NewStringSet("1", "2", "") + b = NewString("1", "2", "") if a.Equal(b) { t.Errorf("Expected to be not-equal: %v vs %v", a, b) } // Check for equality after mutation - a = NewStringSet() + a = NewString() a.Insert("1") if a.Equal(b) { t.Errorf("Expected to be not-equal: %v vs %v", a, b) diff --git a/pkg/volume/gce_pd/gce_util.go b/pkg/volume/gce_pd/gce_util.go index 261e1a04ca9..47ddeb5f518 100644 --- a/pkg/volume/gce_pd/gce_util.go +++ b/pkg/volume/gce_pd/gce_util.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/operationmanager" + "k8s.io/kubernetes/pkg/util/sets" ) const ( @@ -62,7 +63,7 @@ func (diskUtil *GCEDiskUtil) AttachAndMountDisk(b *gcePersistentDiskBuilder, glo if err != nil { glog.Errorf("Error 
filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err) } - sdBeforeSet := util.NewStringSet(sdBefore...) + sdBeforeSet := sets.NewString(sdBefore...) devicePath, err := attachDiskAndVerify(b, sdBeforeSet) if err != nil { @@ -120,7 +121,7 @@ func (util *GCEDiskUtil) DetachDisk(c *gcePersistentDiskCleaner) error { } // Attaches the specified persistent disk device to node, verifies that it is attached, and retries if it fails. -func attachDiskAndVerify(b *gcePersistentDiskBuilder, sdBeforeSet util.StringSet) (string, error) { +func attachDiskAndVerify(b *gcePersistentDiskBuilder, sdBeforeSet sets.String) (string, error) { devicePaths := getDiskByIdPaths(b.gcePersistentDisk) var gce cloudprovider.Interface for numRetries := 0; numRetries < maxRetries; numRetries++ { @@ -287,7 +288,7 @@ func pathExists(path string) (bool, error) { // Calls "udevadm trigger --action=change" for newly created "/dev/sd*" drives (exist only in after set). // This is workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed. -func udevadmChangeToNewDrives(sdBeforeSet util.StringSet) error { +func udevadmChangeToNewDrives(sdBeforeSet sets.String) error { sdAfter, err := filepath.Glob(diskSDPattern) if err != nil { return fmt.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err) diff --git a/pkg/volume/host_path/host_path_test.go b/pkg/volume/host_path/host_path_test.go index 2189f1e1cbb..9e7fe52c642 100644 --- a/pkg/volume/host_path/host_path_test.go +++ b/pkg/volume/host_path/host_path_test.go @@ -63,7 +63,8 @@ func TestGetAccessModes(t *testing.T) { func TestRecycler(t *testing.T) { plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, newMockRecycler}}, volume.NewFakeVolumeHost("/tmp/fake", nil, nil)) + volumeHost := volume.NewFakeVolumeHost("/tmp/fake", nil, nil) + plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, volume.NewFakeRecycler}}, volumeHost) spec := &volume.Spec{PersistentVolume: &api.PersistentVolume{Spec: api.PersistentVolumeSpec{PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/foo"}}}}} plug, err := plugMgr.FindRecyclablePluginBySpec(spec) @@ -82,26 +83,6 @@ func TestRecycler(t *testing.T) { } } -func newMockRecycler(spec *volume.Spec, host volume.VolumeHost) (volume.Recycler, error) { - return &mockRecycler{ - path: spec.PersistentVolume.Spec.HostPath.Path, - }, nil -} - -type mockRecycler struct { - path string - host volume.VolumeHost -} - -func (r *mockRecycler) GetPath() string { - return r.path -} - -func (r *mockRecycler) Recycle() error { - // return nil means recycle passed - return nil -} - func TestPlugin(t *testing.T) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volume.NewFakeVolumeHost("fake", nil, nil)) diff --git a/pkg/volume/testing.go b/pkg/volume/testing.go index 7ceb94d4f25..b347f5461f3 100644 --- a/pkg/volume/testing.go +++ b/pkg/volume/testing.go @@ -17,6 +17,7 @@ limitations under the License. 
package volume import ( + "fmt" "os" "path" @@ -125,7 +126,7 @@ func (plugin *FakeVolumePlugin) NewCleaner(volName string, podUID types.UID, mou } func (plugin *FakeVolumePlugin) NewRecycler(spec *Spec) (Recycler, error) { - return &FakeRecycler{"/attributesTransferredFromSpec"}, nil + return &fakeRecycler{"/attributesTransferredFromSpec"}, nil } func (plugin *FakeVolumePlugin) GetAccessModes() []api.PersistentVolumeAccessMode { @@ -162,15 +163,24 @@ func (fv *FakeVolume) TearDownAt(dir string) error { return os.RemoveAll(dir) } -type FakeRecycler struct { +type fakeRecycler struct { path string } -func (fr *FakeRecycler) Recycle() error { +func (fr *fakeRecycler) Recycle() error { // nil is success, else error return nil } -func (fr *FakeRecycler) GetPath() string { +func (fr *fakeRecycler) GetPath() string { return fr.path } + +func NewFakeRecycler(spec *Spec, host VolumeHost) (Recycler, error) { + if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.HostPath == nil { + return nil, fmt.Errorf("fakeRecycler only supports spec.PersistentVolume.Spec.HostPath") + } + return &fakeRecycler{ + path: spec.PersistentVolume.Spec.HostPath.Path, + }, nil +} diff --git a/plugin/pkg/admission/exec/denyprivileged/admission.go b/plugin/pkg/admission/exec/denyprivileged/admission.go index 481eb557baa..9fa8ffb202a 100644 --- a/plugin/pkg/admission/exec/denyprivileged/admission.go +++ b/plugin/pkg/admission/exec/denyprivileged/admission.go @@ -45,8 +45,8 @@ func (d *denyExecOnPrivileged) Admit(a admission.Attributes) (err error) { if !ok { return errors.NewBadRequest("a connect request was received, but could not convert the request object.") } - // Only handle exec requests on pods - if connectRequest.ResourcePath != "pods/exec" { + // Only handle exec or attach requests on pods + if connectRequest.ResourcePath != "pods/exec" && connectRequest.ResourcePath != "pods/attach" { return nil } pod, err := d.client.Pods(a.GetNamespace()).Get(connectRequest.Name) @@ -54,7 +54,7 @@ func (d *denyExecOnPrivileged) Admit(a admission.Attributes) (err error) { return admission.NewForbidden(a, err) } if isPrivileged(pod) { - return admission.NewForbidden(a, fmt.Errorf("Cannot exec into a privileged container")) + return admission.NewForbidden(a, fmt.Errorf("Cannot exec into or attach to a privileged container")) } return nil } diff --git a/plugin/pkg/admission/exec/denyprivileged/admission_test.go b/plugin/pkg/admission/exec/denyprivileged/admission_test.go index 22d5b97613d..4c0bc7115ec 100644 --- a/plugin/pkg/admission/exec/denyprivileged/admission_test.go +++ b/plugin/pkg/admission/exec/denyprivileged/admission_test.go @@ -47,15 +47,30 @@ func testAdmission(t *testing.T, pod *api.Pod, shouldAccept bool) { handler := &denyExecOnPrivileged{ client: mockClient, } - req := &rest.ConnectRequest{Name: pod.Name, ResourcePath: "pods/exec"} - err := handler.Admit(admission.NewAttributesRecord(req, "Pod", "test", "name", "pods", "exec", admission.Connect, nil)) - if shouldAccept && err != nil { - t.Errorf("Unexpected error returned from admission handler: %v", err) - } - if !shouldAccept && err == nil { - t.Errorf("An error was expected from the admission handler. 
Received nil") + + // pods/exec + { + req := &rest.ConnectRequest{Name: pod.Name, ResourcePath: "pods/exec"} + err := handler.Admit(admission.NewAttributesRecord(req, "Pod", "test", "name", "pods", "exec", admission.Connect, nil)) + if shouldAccept && err != nil { + t.Errorf("Unexpected error returned from admission handler: %v", err) + } + if !shouldAccept && err == nil { + t.Errorf("An error was expected from the admission handler. Received nil") + } } + // pods/attach + { + req := &rest.ConnectRequest{Name: pod.Name, ResourcePath: "pods/attach"} + err := handler.Admit(admission.NewAttributesRecord(req, "Pod", "test", "name", "pods", "attach", admission.Connect, nil)) + if shouldAccept && err != nil { + t.Errorf("Unexpected error returned from admission handler: %v", err) + } + if !shouldAccept && err == nil { + t.Errorf("An error was expected from the admission handler. Received nil") + } + } } func acceptPod(name string) *api.Pod { diff --git a/plugin/pkg/admission/limitranger/admission.go b/plugin/pkg/admission/limitranger/admission.go index bc544c3863d..ba98913d254 100644 --- a/plugin/pkg/admission/limitranger/admission.go +++ b/plugin/pkg/admission/limitranger/admission.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/watch" ) @@ -300,30 +301,31 @@ func sum(inputs []api.ResourceList) api.ResourceList { // the specified LimitRange. The pod may be modified to apply default resource // requirements if not specified, and enumerated on the LimitRange func PodLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error { + var errs []error + defaultResources := defaultContainerResourceRequirements(limitRange) mergePodResourceRequirements(pod, &defaultResources) for i := range limitRange.Spec.Limits { limit := limitRange.Spec.Limits[i] limitType := limit.Type - // enforce container limits if limitType == api.LimitTypeContainer { for j := range pod.Spec.Containers { container := &pod.Spec.Containers[j] for k, v := range limit.Min { if err := minConstraint(limitType, k, v, container.Resources.Requests, container.Resources.Limits); err != nil { - return err + errs = append(errs, err) } } for k, v := range limit.Max { if err := maxConstraint(limitType, k, v, container.Resources.Requests, container.Resources.Limits); err != nil { - return err + errs = append(errs, err) } } for k, v := range limit.MaxLimitRequestRatio { if err := limitRequestRatioConstraint(limitType, k, v, container.Resources.Requests, container.Resources.Limits); err != nil { - return err + errs = append(errs, err) } } } @@ -341,20 +343,20 @@ func PodLimitFunc(limitRange *api.LimitRange, pod *api.Pod) error { podLimits := sum(containerLimits) for k, v := range limit.Min { if err := minConstraint(limitType, k, v, podRequests, podLimits); err != nil { - return err + errs = append(errs, err) } } for k, v := range limit.Max { if err := maxConstraint(limitType, k, v, podRequests, podLimits); err != nil { - return err + errs = append(errs, err) } } for k, v := range limit.MaxLimitRequestRatio { if err := limitRequestRatioConstraint(limitType, k, v, podRequests, podLimits); err != nil { - return err + errs = append(errs, err) } } } } - return nil + return errors.NewAggregate(errs) } diff --git a/plugin/pkg/admission/namespace/lifecycle/admission.go b/plugin/pkg/admission/namespace/lifecycle/admission.go index df9eae8eedd..ac644cf1a9a 100644 --- a/plugin/pkg/admission/namespace/lifecycle/admission.go +++ 
b/plugin/pkg/admission/namespace/lifecycle/admission.go @@ -30,7 +30,7 @@ import ( "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" ) @@ -46,7 +46,7 @@ type lifecycle struct { *admission.Handler client client.Interface store cache.Store - immortalNamespaces util.StringSet + immortalNamespaces sets.String } func (l *lifecycle) Admit(a admission.Attributes) (err error) { @@ -120,6 +120,6 @@ func NewLifecycle(c client.Interface) admission.Interface { Handler: admission.NewHandler(admission.Create, admission.Update, admission.Delete), client: c, store: store, - immortalNamespaces: util.NewStringSet(api.NamespaceDefault), + immortalNamespaces: sets.NewString(api.NamespaceDefault), } } diff --git a/plugin/pkg/admission/serviceaccount/admission.go b/plugin/pkg/admission/serviceaccount/admission.go index 6b5f8aaa643..8590b33c328 100644 --- a/plugin/pkg/admission/serviceaccount/admission.go +++ b/plugin/pkg/admission/serviceaccount/admission.go @@ -31,7 +31,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" ) @@ -249,7 +249,7 @@ func (s *serviceAccount) getReferencedServiceAccountToken(serviceAccount *api.Se return "", err } - references := util.NewStringSet() + references := sets.NewString() for _, secret := range serviceAccount.Secrets { references.Insert(secret.Name) } @@ -293,7 +293,7 @@ func (s *serviceAccount) getServiceAccountTokens(serviceAccount *api.ServiceAcco func (s *serviceAccount) limitSecretReferences(serviceAccount *api.ServiceAccount, pod *api.Pod) error { // Ensure all secrets the pod references are allowed by the service account - mountableSecrets := util.NewStringSet() + mountableSecrets := sets.NewString() for _, s := range serviceAccount.Secrets { mountableSecrets.Insert(s.Name) } @@ -309,7 +309,7 @@ func (s *serviceAccount) limitSecretReferences(serviceAccount *api.ServiceAccoun } // limit pull secret references as well - pullSecrets := util.NewStringSet() + pullSecrets := sets.NewString() for _, s := range serviceAccount.ImagePullSecrets { pullSecrets.Insert(s.Name) } @@ -340,7 +340,7 @@ func (s *serviceAccount) mountServiceAccountToken(serviceAccount *api.ServiceAcc // Find the volume and volume name for the ServiceAccountTokenSecret if it already exists tokenVolumeName := "" hasTokenVolume := false - allVolumeNames := util.NewStringSet() + allVolumeNames := sets.NewString() for _, volume := range pod.Spec.Volumes { allVolumeNames.Insert(volume.Name) if volume.Secret != nil && volume.Secret.SecretName == serviceAccountToken { diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go index 753827f6f58..c8b801fd52f 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -18,7 +18,7 @@ limitations under the License. 
package defaults import ( - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/plugin/pkg/scheduler" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" @@ -48,8 +48,8 @@ func init() { ) } -func defaultPredicates() util.StringSet { - return util.NewStringSet( +func defaultPredicates() sets.String { + return sets.NewString( // Fit is defined based on the absence of port conflicts. factory.RegisterFitPredicate("PodFitsPorts", predicates.PodFitsPorts), // Fit is determined by resource availability. @@ -73,8 +73,8 @@ func defaultPredicates() util.StringSet { ) } -func defaultPriorities() util.StringSet { - return util.NewStringSet( +func defaultPriorities() sets.String { + return sets.NewString( // Prioritize nodes by least requested utilization. factory.RegisterPriorityFunction("LeastRequestedPriority", priorities.LeastRequestedPriority, 1), // Prioritizes nodes to help achieve balanced resource usage diff --git a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go index fbe4467fc59..2b7b79293eb 100644 --- a/plugin/pkg/scheduler/factory/factory.go +++ b/plugin/pkg/scheduler/factory/factory.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/controller/framework" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/plugin/pkg/scheduler" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" @@ -137,13 +138,13 @@ func (f *ConfigFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler return nil, err } - predicateKeys := util.NewStringSet() + predicateKeys := sets.NewString() for _, predicate := range policy.Predicates { glog.V(2).Infof("Registering predicate: %s", predicate.Name) predicateKeys.Insert(RegisterCustomFitPredicate(predicate)) } - priorityKeys := util.NewStringSet() + priorityKeys := sets.NewString() for _, priority := range policy.Priorities { glog.V(2).Infof("Registering priority: %s", priority.Name) priorityKeys.Insert(RegisterCustomPriorityFunction(priority)) @@ -153,7 +154,7 @@ func (f *ConfigFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler } // Creates a scheduler from a set of registered fit predicate keys and priority keys. 
-func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSet) (*scheduler.Config, error) { +func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String) (*scheduler.Config, error) { glog.V(2).Infof("creating scheduler with fit predicates '%v' and priority functions '%v", predicateKeys, priorityKeys) pluginArgs := PluginFactoryArgs{ PodLister: f.PodLister, diff --git a/plugin/pkg/scheduler/factory/plugins.go b/plugin/pkg/scheduler/factory/plugins.go index 99953f7e50d..6251c86098c 100644 --- a/plugin/pkg/scheduler/factory/plugins.go +++ b/plugin/pkg/scheduler/factory/plugins.go @@ -22,7 +22,7 @@ import ( "strings" "sync" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities" @@ -66,8 +66,8 @@ const ( ) type AlgorithmProviderConfig struct { - FitPredicateKeys util.StringSet - PriorityFunctionKeys util.StringSet + FitPredicateKeys sets.String + PriorityFunctionKeys sets.String } // RegisterFitPredicate registers a fit predicate with the algorithm @@ -209,7 +209,7 @@ func IsPriorityFunctionRegistered(name string) bool { // Registers a new algorithm provider with the algorithm registry. This should // be called from the init function in a provider plugin. -func RegisterAlgorithmProvider(name string, predicateKeys, priorityKeys util.StringSet) string { +func RegisterAlgorithmProvider(name string, predicateKeys, priorityKeys sets.String) string { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() validateAlgorithmNameOrDie(name) @@ -234,7 +234,7 @@ func GetAlgorithmProvider(name string) (*AlgorithmProviderConfig, error) { return &provider, nil } -func getFitPredicateFunctions(names util.StringSet, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) { +func getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() @@ -249,7 +249,7 @@ func getFitPredicateFunctions(names util.StringSet, args PluginFactoryArgs) (map return predicates, nil } -func getPriorityFunctionConfigs(names util.StringSet, args PluginFactoryArgs) ([]algorithm.PriorityConfig, error) { +func getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]algorithm.PriorityConfig, error) { schedulerFactoryMutex.Lock() defer schedulerFactoryMutex.Unlock() diff --git a/plugin/pkg/scheduler/generic_scheduler.go b/plugin/pkg/scheduler/generic_scheduler.go index 07a2e855162..e0e7b06a15c 100644 --- a/plugin/pkg/scheduler/generic_scheduler.go +++ b/plugin/pkg/scheduler/generic_scheduler.go @@ -25,12 +25,12 @@ import ( "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" ) -type FailedPredicateMap map[string]util.StringSet +type FailedPredicateMap map[string]sets.String type FitError struct { Pod *api.Pod @@ -124,7 +124,7 @@ func findNodesThatFit(pod *api.Pod, podLister algorithm.PodLister, predicateFunc if !fit { fits = false if _, found := failedPredicateMap[node.Name]; !found { - failedPredicateMap[node.Name] = util.StringSet{} + failedPredicateMap[node.Name] = sets.String{} } if predicates.FailedResourceType != "" { 
failedPredicateMap[node.Name].Insert(predicates.FailedResourceType) diff --git a/plugin/pkg/scheduler/generic_scheduler_test.go b/plugin/pkg/scheduler/generic_scheduler_test.go index 37b95483454..4f1a1b416c4 100644 --- a/plugin/pkg/scheduler/generic_scheduler_test.go +++ b/plugin/pkg/scheduler/generic_scheduler_test.go @@ -24,7 +24,7 @@ import ( "testing" "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" ) @@ -101,7 +101,7 @@ func TestSelectHost(t *testing.T) { scheduler := genericScheduler{random: rand.New(rand.NewSource(0))} tests := []struct { list algorithm.HostPriorityList - possibleHosts util.StringSet + possibleHosts sets.String expectsErr bool }{ { @@ -109,7 +109,7 @@ func TestSelectHost(t *testing.T) { {Host: "machine1.1", Score: 1}, {Host: "machine2.1", Score: 2}, }, - possibleHosts: util.NewStringSet("machine2.1"), + possibleHosts: sets.NewString("machine2.1"), expectsErr: false, }, // equal scores @@ -120,7 +120,7 @@ func TestSelectHost(t *testing.T) { {Host: "machine1.3", Score: 2}, {Host: "machine2.1", Score: 2}, }, - possibleHosts: util.NewStringSet("machine1.2", "machine1.3", "machine2.1"), + possibleHosts: sets.NewString("machine1.2", "machine1.3", "machine2.1"), expectsErr: false, }, // out of order scores @@ -132,13 +132,13 @@ func TestSelectHost(t *testing.T) { {Host: "machine3.1", Score: 1}, {Host: "machine1.3", Score: 3}, }, - possibleHosts: util.NewStringSet("machine1.1", "machine1.2", "machine1.3"), + possibleHosts: sets.NewString("machine1.1", "machine1.2", "machine1.3"), expectsErr: false, }, // empty priorityList { list: []algorithm.HostPriority{}, - possibleHosts: util.NewStringSet(), + possibleHosts: sets.NewString(), expectsErr: true, }, } diff --git a/test/e2e/daemon_restart.go b/test/e2e/daemon_restart.go index 57f8b2d0fa0..4d3e971b8cf 100644 --- a/test/e2e/daemon_restart.go +++ b/test/e2e/daemon_restart.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/watch" @@ -140,7 +141,7 @@ func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Sele pods, err := c.Pods(ns).List(labelSelector, fields.Everything()) expectNoError(err) failedContainers := 0 - containerRestartNodes := util.NewStringSet() + containerRestartNodes := sets.NewString() for _, p := range pods.Items { for _, v := range FailedContainers(&p) { failedContainers = failedContainers + v.restarts @@ -224,8 +225,8 @@ var _ = Describe("DaemonRestart", func() { // Only check the keys, the pods can be different if the kubelet updated it. // TODO: Can it really? - existingKeys := util.NewStringSet() - newKeys := util.NewStringSet() + existingKeys := sets.NewString() + newKeys := sets.NewString() for _, k := range existingPods.ListKeys() { existingKeys.Insert(k) } diff --git a/test/e2e/density.go b/test/e2e/density.go index 00c7ad44ad2..cb310aaa117 100644 --- a/test/e2e/density.go +++ b/test/e2e/density.go @@ -34,6 +34,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" . 
"github.com/onsi/ginkgo" @@ -148,7 +149,7 @@ var _ = Describe("Density", func() { expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after")) // Verify latency metrics - highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second, util.NewStringSet("events")) + highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second, sets.NewString("events")) expectNoError(err) Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests") }) diff --git a/test/e2e/downwardapi_volume.go b/test/e2e/downwardapi_volume.go index 7bb05b75016..5bc26907eee 100644 --- a/test/e2e/downwardapi_volume.go +++ b/test/e2e/downwardapi_volume.go @@ -25,7 +25,7 @@ import ( . "github.com/onsi/ginkgo" ) -var _ = Describe("Downwar dAPI volume", func() { +var _ = Describe("Downward API volume", func() { f := NewFramework("downward-api") It("should provide labels and annotations files", func() { diff --git a/test/e2e/kubelet.go b/test/e2e/kubelet.go index 0cee3811392..6741e7bf63a 100644 --- a/test/e2e/kubelet.go +++ b/test/e2e/kubelet.go @@ -25,6 +25,7 @@ import ( "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" . "github.com/onsi/ginkgo" @@ -41,8 +42,8 @@ const ( // getPodMatches returns a set of pod names on the given node that matches the // podNamePrefix and namespace. -func getPodMatches(c *client.Client, nodeName string, podNamePrefix string, namespace string) util.StringSet { - matches := util.NewStringSet() +func getPodMatches(c *client.Client, nodeName string, podNamePrefix string, namespace string) sets.String { + matches := sets.NewString() Logf("Checking pods on node %v via /runningpods endpoint", nodeName) runningPods, err := GetKubeletPods(c, nodeName) if err != nil { @@ -65,9 +66,9 @@ func getPodMatches(c *client.Client, nodeName string, podNamePrefix string, name // information; they are reconstructed by examining the container runtime. In // the scope of this test, we do not expect pod naming conflicts so // podNamePrefix should be sufficient to identify the pods. -func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames util.StringSet, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error { +func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error { return wait.Poll(pollInterval, timeout, func() (bool, error) { - matchCh := make(chan util.StringSet, len(nodeNames)) + matchCh := make(chan sets.String, len(nodeNames)) for _, item := range nodeNames.List() { // Launch a goroutine per node to check the pods running on the nodes. 
nodeName := item @@ -76,7 +77,7 @@ func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames util.StringSet, pod }() } - seen := util.NewStringSet() + seen := sets.NewString() for i := 0; i < len(nodeNames.List()); i++ { seen = seen.Union(<-matchCh) } @@ -90,7 +91,7 @@ func waitTillNPodsRunningOnNodes(c *client.Client, nodeNames util.StringSet, pod var _ = Describe("kubelet", func() { var numNodes int - var nodeNames util.StringSet + var nodeNames sets.String framework := NewFramework("kubelet") var resourceMonitor *resourceMonitor @@ -98,7 +99,7 @@ var _ = Describe("kubelet", func() { nodes, err := framework.Client.Nodes().List(labels.Everything(), fields.Everything()) expectNoError(err) numNodes = len(nodes.Items) - nodeNames = util.NewStringSet() + nodeNames = sets.NewString() for _, node := range nodes.Items { nodeNames.Insert(node.Name) } diff --git a/test/e2e/kubelet_stats.go b/test/e2e/kubelet_stats.go index 5a543cef505..98fca776661 100644 --- a/test/e2e/kubelet_stats.go +++ b/test/e2e/kubelet_stats.go @@ -37,6 +37,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "github.com/prometheus/client_golang/extraction" "github.com/prometheus/client_golang/model" @@ -66,7 +67,7 @@ func (a KubeletMetricByLatency) Less(i, j int) bool { return a[i].Latency > a[j] type kubeletMetricIngester []KubeletMetric func (k *kubeletMetricIngester) Ingest(samples model.Samples) error { - acceptedMethods := util.NewStringSet( + acceptedMethods := sets.NewString( metrics.PodWorkerLatencyKey, metrics.PodWorkerStartLatencyKey, metrics.SyncPodsLatencyKey, diff --git a/test/e2e/load.go b/test/e2e/load.go index 41dddbc51c8..f7137376724 100644 --- a/test/e2e/load.go +++ b/test/e2e/load.go @@ -26,7 +26,7 @@ import ( client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -86,7 +86,7 @@ var _ = Describe("Load capacity", func() { } // Verify latency metrics - highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second, util.NewStringSet("events")) + highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second, sets.NewString("events")) expectNoError(err, "Too many instances metrics above the threshold") Expect(highLatencyRequests).NotTo(BeNumerically(">", 0)) }) diff --git a/test/e2e/rc.go b/test/e2e/rc.go index 9d25f37c100..409b91505de 100644 --- a/test/e2e/rc.go +++ b/test/e2e/rc.go @@ -41,7 +41,7 @@ var _ = Describe("ReplicationController", func() { // requires private images SkipUnlessProviderIs("gce", "gke") - ServeImageOrFail(framework, "private", "gcr.io/_b_k8s_authenticated_test/serve_hostname:1.1") + ServeImageOrFail(framework, "private", "b.gcr.io/k8s_authenticated_test/serve_hostname:1.1") }) }) diff --git a/test/e2e/service_latency.go b/test/e2e/service_latency.go index bcbb24096e5..91171378f5d 100644 --- a/test/e2e/service_latency.go +++ b/test/e2e/service_latency.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/watch" . 
"github.com/onsi/ginkgo" @@ -70,7 +71,7 @@ var _ = Describe("Service endpoints latency", func() { f.Client.RESTClient.Throttle = util.NewFakeRateLimiter() defer func() { f.Client.RESTClient.Throttle = oldThrottle }() - failing := util.NewStringSet() + failing := sets.NewString() d, err := runServiceLatencies(f, parallelTrials, totalTrials) if err != nil { failing.Insert(fmt.Sprintf("Not all RC/pod/service trials succeeded: %v", err)) diff --git a/test/e2e/util.go b/test/e2e/util.go index d148d2d97a8..63233a25e5b 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -45,6 +45,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/watch" @@ -1096,7 +1097,7 @@ type podInfo struct { type PodDiff map[string]*podInfo // Print formats and prints the give PodDiff. -func (p PodDiff) Print(ignorePhases util.StringSet) { +func (p PodDiff) Print(ignorePhases sets.String) { for name, info := range p { if ignorePhases.Has(info.phase) { continue @@ -1258,7 +1259,7 @@ func RunRC(config RCConfig) error { unknown := 0 inactive := 0 failedContainers := 0 - containerRestartNodes := util.NewStringSet() + containerRestartNodes := sets.NewString() pods := podStore.List() created := []*api.Pod{} @@ -1312,7 +1313,7 @@ func RunRC(config RCConfig) error { // - diagnose by comparing the previous "2 Pod states" lines for inactive pods errorStr := fmt.Sprintf("Number of reported pods changed: %d vs %d", len(pods), len(oldPods)) Logf("%v, pods that changed since the last iteration:", errorStr) - Diff(oldPods, pods).Print(util.NewStringSet()) + Diff(oldPods, pods).Print(sets.NewString()) return fmt.Errorf(errorStr) } @@ -1342,7 +1343,7 @@ func RunRC(config RCConfig) error { } func dumpPodDebugInfo(c *client.Client, pods []*api.Pod) { - badNodes := util.NewStringSet() + badNodes := sets.NewString() for _, p := range pods { if p.Status.Phase != api.PodRunning { if p.Spec.NodeName != "" { @@ -1851,8 +1852,8 @@ func ReadLatencyMetrics(c *client.Client) ([]LatencyMetric, error) { // Prints summary metrics for request types with latency above threshold // and returns number of such request types. 
-func HighLatencyRequests(c *client.Client, threshold time.Duration, ignoredResources util.StringSet) (int, error) { - ignoredVerbs := util.NewStringSet("WATCHLIST", "PROXY") +func HighLatencyRequests(c *client.Client, threshold time.Duration, ignoredResources sets.String) (int, error) { + ignoredVerbs := sets.NewString("WATCHLIST", "PROXY") metrics, err := ReadLatencyMetrics(c) if err != nil { diff --git a/test/images/network-tester/webserver.go b/test/images/network-tester/webserver.go index 7ffba00f80f..5b31a72a884 100644 --- a/test/images/network-tester/webserver.go +++ b/test/images/network-tester/webserver.go @@ -45,7 +45,7 @@ import ( "time" client "k8s.io/kubernetes/pkg/client/unversioned" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" ) var ( @@ -235,7 +235,7 @@ func contactOthers(state *State) { time.Sleep(time.Duration(1+rand.Intn(10)) * time.Second) } - eps := util.StringSet{} + eps := sets.String{} for _, ss := range endpoints.Subsets { for _, a := range ss.Addresses { for _, p := range ss.Ports { diff --git a/test/images/resource-consumer/resource_consumer_handler.go b/test/images/resource-consumer/resource_consumer_handler.go index 7bc7ee4e640..928064aed5d 100644 --- a/test/images/resource-consumer/resource_consumer_handler.go +++ b/test/images/resource-consumer/resource_consumer_handler.go @@ -31,7 +31,7 @@ const ( consumeCPUAddress = "/ConsumeCPU" consumeMemAddress = "/ConsumeMem" getCurrentStatusAddress = "/GetCurrentStatus" - milicoresQuery = "milicores" + millicoresQuery = "millicores" megabytesQuery = "megabytes" durationSecQuery = "durationSec" ) @@ -68,21 +68,21 @@ func (handler ResourceConsumerHandler) ServeHTTP(w http.ResponseWriter, req *htt func (handler ResourceConsumerHandler) handleConsumeCPU(w http.ResponseWriter, query url.Values) { // geting string data for consumeCPU durationSecString := query.Get(durationSecQuery) - milicoresString := query.Get(milicoresQuery) - if durationSecString == "" || milicoresString == "" { + millicoresString := query.Get(millicoresQuery) + if durationSecString == "" || millicoresString == "" { http.Error(w, notGivenFunctionArgument, http.StatusBadRequest) return } else { // convert data (strings to ints) for consumeCPU durationSec, durationSecError := strconv.Atoi(durationSecString) - milicores, milicoresError := strconv.Atoi(milicoresString) - if durationSecError != nil || milicoresError != nil { + millicores, millicoresError := strconv.Atoi(millicoresString) + if durationSecError != nil || millicoresError != nil { http.Error(w, incorrectFunctionArgument, http.StatusBadRequest) return } - go ConsumeCPU(milicores, durationSec) + go ConsumeCPU(millicores, durationSec) fmt.Fprintln(w, consumeCPUAddress[1:]) - fmt.Fprintln(w, milicores, milicoresQuery) + fmt.Fprintln(w, millicores, millicoresQuery) fmt.Fprintln(w, durationSec, durationSecQuery) } @@ -104,8 +104,7 @@ func (handler ResourceConsumerHandler) handleConsumeMem(w http.ResponseWriter, q http.Error(w, incorrectFunctionArgument, http.StatusBadRequest) return } - ConsumeMem(megabytes, durationSec) - fmt.Fprintln(w, "Warning: not implemented!") + go ConsumeMem(megabytes, durationSec) fmt.Fprintln(w, consumeMemAddress[1:]) fmt.Fprintln(w, megabytes, megabytesQuery) fmt.Fprintln(w, durationSec, durationSecQuery) diff --git a/test/images/resource-consumer/utils.go b/test/images/resource-consumer/utils.go index 9557e49c952..3b3b646eb2a 100644 --- a/test/images/resource-consumer/utils.go +++ b/test/images/resource-consumer/utils.go @@ -20,9 +20,13 @@ import ( 
"fmt" "log" "os/exec" + "strconv" ) -const consumeCPUBinary = "./consume-cpu/consume-cpu" +const ( + consumeCPUBinary = "./consume-cpu/consume-cpu" + consumeMemBinary = "stress" +) func ConsumeCPU(millicores int, durationSec int) { log.Printf("ConsumeCPU millicores: %v, durationSec: %v", millicores, durationSec) @@ -35,7 +39,11 @@ func ConsumeCPU(millicores int, durationSec int) { func ConsumeMem(megabytes int, durationSec int) { log.Printf("ConsumeMem megabytes: %v, durationSec: %v", megabytes, durationSec) - // not implemented + megabytesString := strconv.Itoa(megabytes) + "M" + durationSecString := strconv.Itoa(durationSec) + // creating new consume memory process + consumeMem := exec.Command(consumeMemBinary, "-m", "1", "--vm-bytes", megabytesString, "--vm-hang", "0", "-t", durationSecString) + consumeMem.Start() } func GetCurrentStatus() { diff --git a/test/integration/service_account_test.go b/test/integration/service_account_test.go index 7218b547f8a..fe3ae4fd2db 100644 --- a/test/integration/service_account_test.go +++ b/test/integration/service_account_test.go @@ -45,7 +45,7 @@ import ( "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/master" "k8s.io/kubernetes/pkg/tools/etcdtest" - "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" serviceaccountadmission "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount" "k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/union" @@ -170,7 +170,7 @@ func TestServiceAccountTokenAutoCreate(t *testing.T) { } // Wait for tokens to be deleted - tokensToCleanup := util.NewStringSet(token1Name, token2Name, token3Name) + tokensToCleanup := sets.NewString(token1Name, token2Name, token3Name) err = wait.Poll(time.Second, 10*time.Second, func() (bool, error) { // Get all secrets in the namespace secrets, err := c.Secrets(ns).List(labels.Everything(), fields.Everything())