move experimental/v1 to experimental/v1alpha1;

use "group/version" in many places where used to expect "version" only.
Chao Xu 2015-09-16 22:15:05 -07:00
parent a518a27354
commit ae1293418b
53 changed files with 750 additions and 450 deletions
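
The crux of the change is that identifiers like `experimental/v1alpha1` now carry both the API group and the version. A minimal Go sketch (not part of the commit) of how such a string splits into group and version, mirroring the `path.Split` + `strings.TrimRight` pattern used by the generators below; the sample inputs are only illustrative:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// splitGroupVersion mirrors the pattern used by the generators in this
// commit: path.Split("experimental/v1alpha1") returns ("experimental/",
// "v1alpha1"), and the trailing "/" is then trimmed from the group.
func splitGroupVersion(groupVersion string) (group, version string) {
	group, version = path.Split(groupVersion)
	group = strings.TrimRight(group, "/")
	return group, version
}

func main() {
	for _, gv := range []string{"v1", "experimental/v1alpha1", "experimental/"} {
		group, version := splitGroupVersion(gv)
		fmt.Printf("%q -> group=%q version=%q\n", gv, group, version)
	}
}
```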


@@ -63,7 +63,7 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
 TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}"
 KUBELET_TEST_ARGS="--max-pods=100 $TEST_CLUSTER_LOG_LEVEL"
-APISERVER_TEST_ARGS="--runtime-config=experimental/v1 ${TEST_CLUSTER_LOG_LEVEL}"
+APISERVER_TEST_ARGS="--runtime-config=experimental/v1alpha1 ${TEST_CLUSTER_LOG_LEVEL}"
 CONTROLLER_MANAGER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}"
 SCHEDULER_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}"
 KUBEPROXY_TEST_ARGS="${TEST_CLUSTER_LOG_LEVEL}"


@@ -55,10 +55,10 @@ function join_csv {
 function verify-prereqs {
   if [[ "${ENABLE_EXPERIMENTAL_API}" == "true" ]]; then
     if [[ -z "${RUNTIME_CONFIG}" ]]; then
-      RUNTIME_CONFIG="experimental/v1=true"
+      RUNTIME_CONFIG="experimental/v1alpha1=true"
     else
-      # TODO: add checking if RUNTIME_CONFIG contains "experimental/v1=false" and appending "experimental/v1=true" if not.
-      if echo "${RUNTIME_CONFIG}" | grep -q -v "experimental/v1=true"; then
+      # TODO: add checking if RUNTIME_CONFIG contains "experimental/v1alpha1=false" and appending "experimental/v1alpha1=true" if not.
+      if echo "${RUNTIME_CONFIG}" | grep -q -v "experimental/v1alpha1=true"; then
         echo "Experimental API should be turned on, but is not turned on in RUNTIME_CONFIG!"
         exit 1
       fi


@@ -28,7 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	_ "k8s.io/kubernetes/pkg/api/v1"
 	_ "k8s.io/kubernetes/pkg/apis/experimental"
-	_ "k8s.io/kubernetes/pkg/apis/experimental/v1"
+	_ "k8s.io/kubernetes/pkg/apis/experimental/v1alpha1"
 	pkg_runtime "k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/sets"
@@ -94,7 +94,7 @@ func main() {
 	generator.AddImport(path.Join(pkgBase, "api/resource"))
 	// TODO(wojtek-t): Change the overwrites to a flag.
 	generator.OverwritePackage(version, "")
-	for _, knownType := range api.Scheme.KnownTypes(version) {
+	for _, knownType := range api.Scheme.KnownTypes(*groupVersion) {
 		if knownType.PkgPath() != versionPath {
 			continue
 		}


@@ -28,7 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	_ "k8s.io/kubernetes/pkg/api/v1"
 	_ "k8s.io/kubernetes/pkg/apis/experimental"
-	_ "k8s.io/kubernetes/pkg/apis/experimental/v1"
+	_ "k8s.io/kubernetes/pkg/apis/experimental/v1alpha1"
 	pkg_runtime "k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/sets"
@@ -47,7 +47,7 @@ var (
 // types inside the api package don't need to say "api.Scheme"; all others do.
 func destScheme(group, version string) string {
-	if group == "api" && version == "" {
+	if group == "" && version == "" {
 		return "Scheme"
 	}
 	return "api.Scheme"
@@ -92,7 +92,13 @@ func main() {
 	group, version := path.Split(*groupVersion)
 	group = strings.TrimRight(group, "/")
 	registerTo := destScheme(group, version)
-	pkgname := group
+	var pkgname string
+	if group == "" {
+		// the internal version of v1 is registered in package api
+		pkgname = "api"
+	} else {
+		pkgname = group
+	}
 	if len(version) != 0 {
 		pkgname = version
 	}
@@ -115,7 +121,14 @@ func main() {
 			generator.OverwritePackage(vals[0], vals[1])
 		}
 	}
-	for _, knownType := range api.Scheme.KnownTypes(version) {
+	var schemeVersion string
+	if version == "" {
+		// This occurs when we generate deep-copy for internal version.
+		schemeVersion = ""
+	} else {
+		schemeVersion = *groupVersion
+	}
+	for _, knownType := range api.Scheme.KnownTypes(schemeVersion) {
 		if knownType.PkgPath() != versionPath {
 			continue
 		}


@@ -126,20 +126,21 @@ func startComponents(firstManifestURL, secondManifestURL string) (string, string
 		glog.Fatalf("Failed to connect to etcd")
 	}
-	cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Default.Version()})
+	cl := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Default.GroupAndVersion()})
 	// TODO: caesarxuchao: hacky way to specify version of Experimental client.
 	// We will fix this by supporting multiple group versions in Config
-	cl.ExperimentalClient = client.NewExperimentalOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Experimental.Version()})
+	cl.ExperimentalClient = client.NewExperimentalOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Experimental.GroupAndVersion()})
 	storageVersions := make(map[string]string)
-	etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("").InterfacesFor, testapi.Default.Version(), etcdtest.PathPrefix())
-	storageVersions[""] = testapi.Default.Version()
+	etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("").InterfacesFor, testapi.Default.GroupAndVersion(), etcdtest.PathPrefix())
+	storageVersions[""] = testapi.Default.GroupAndVersion()
 	if err != nil {
 		glog.Fatalf("Unable to get etcd storage: %v", err)
 	}
-	expEtcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("experimental").InterfacesFor, testapi.Experimental.Version(), etcdtest.PathPrefix())
-	storageVersions["experimental"] = testapi.Experimental.Version()
+	expEtcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("experimental").InterfacesFor, testapi.Experimental.GroupAndVersion(), etcdtest.PathPrefix())
+	storageVersions["experimental"] = testapi.Experimental.GroupAndVersion()
 	if err != nil {
 		glog.Fatalf("Unable to get etcd storage for experimental: %v", err)
 	}
@@ -973,10 +974,10 @@ func main() {
 	// Wait for the synchronization threads to come up.
 	time.Sleep(time.Second * 10)
-	kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: testapi.Default.Version()})
+	kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, Version: testapi.Default.GroupAndVersion()})
 	// TODO: caesarxuchao: hacky way to specify version of Experimental client.
 	// We will fix this by supporting multiple group versions in Config
-	kubeClient.ExperimentalClient = client.NewExperimentalOrDie(&client.Config{Host: apiServerURL, Version: testapi.Experimental.Version()})
+	kubeClient.ExperimentalClient = client.NewExperimentalOrDie(&client.Config{Host: apiServerURL, Version: testapi.Experimental.GroupAndVersion()})
 	// Run tests in parallel
 	testFuncs := []testFunc{


@@ -329,9 +329,9 @@ func (s *APIServer) Run(_ []string) error {
 	disableV1 := disableAllAPIs
 	disableV1 = !s.getRuntimeConfigValue("api/v1", !disableV1)
-	// "experimental/v1={true|false} allows users to enable/disable the experimental API.
+	// "experimental/v1alpha1={true|false} allows users to enable/disable the experimental API.
 	// This takes preference over api/all, if specified.
-	enableExp := s.getRuntimeConfigValue("experimental/v1", false)
+	enableExp := s.getRuntimeConfigValue("experimental/v1alpha1", false)
 	clientConfig := &client.Config{
 		Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)),
@@ -363,7 +363,7 @@ func (s *APIServer) Run(_ []string) error {
 		glog.Fatalf("experimental API is enabled in runtime config, but not enabled in the environment variable KUBE_API_VERSIONS. Error: %v", err)
 	}
 	if s.ExpStorageVersion == "" {
-		s.ExpStorageVersion = g.Version
+		s.ExpStorageVersion = g.GroupVersion
 	}
 	expEtcdStorage, err = newEtcd(s.EtcdConfigFile, s.EtcdServerList, g.InterfacesFor, s.ExpStorageVersion, s.EtcdPathPrefix)
 	if err != nil {


@@ -114,7 +114,7 @@ For example, if a user creates:
 ```yaml
 metadata:
   name: cron-tab.example.com
-apiVersion: experimental/v1
+apiVersion: experimental/v1alpha1
 kind: ThirdPartyResource
 description: "A specification of a Pod to run on a cron style schedule"
 versions:


@@ -1,4 +1,4 @@
-apiVersion: v1
+apiVersion: experimental/v1alpha1
 kind: Deployment
 metadata:
   name: nginx-deployment


@@ -43,10 +43,10 @@ EOF
 }
 # TODO(lavalamp): get this list by listing the pkg/apis/ directory?
-DEFAULT_GROUP_VERSIONS="api/v1 experimental/v1"
+DEFAULT_GROUP_VERSIONS="v1 experimental/v1alpha1"
 VERSIONS=${VERSIONS:-$DEFAULT_GROUP_VERSIONS}
 for ver in $VERSIONS; do
   # Ensure that the version being processed is registered by setting
   # KUBE_API_VERSIONS.
-  KUBE_API_VERSIONS="${ver##*/}" generate_version "${ver}"
+  KUBE_API_VERSIONS="${ver}" generate_version "${ver}"
 done


@@ -49,11 +49,14 @@ function generate_deep_copies() {
   for ver in ${group_versions}; do
     # Ensure that the version being processed is registered by setting
     # KUBE_API_VERSIONS.
-    apiVersions="${ver##*/}"
+    if [ -z ${ver##*/} ]; then
+      apiVersions=""
+    fi
     KUBE_API_VERSIONS="${apiVersions}" generate_version "${ver}"
   done
 }
-DEFAULT_VERSIONS="api/ api/v1 experimental/ experimental/v1"
+# v1 is in the group ""
+DEFAULT_VERSIONS="/ v1 experimental/ experimental/v1alpha1"
 VERSIONS=${VERSIONS:-$DEFAULT_VERSIONS}
 generate_deep_copies "$VERSIONS"


@@ -50,7 +50,7 @@ kube::etcd::start
 # Start kube-apiserver
 kube::log::status "Starting kube-apiserver"
-KUBE_API_VERSIONS="v1,experimental/v1" "${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
+KUBE_API_VERSIONS="v1,experimental/v1alpha1" "${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
   --address="127.0.0.1" \
   --public-address-override="127.0.0.1" \
   --port="${API_PORT}" \


@@ -242,10 +242,11 @@ kube::util::analytics-link() {
 # Takes a group/version and returns the path to its location on disk, sans
 # "pkg". E.g.:
-# * default behavior: experimental/v1 -> apis/experimental/v1
+# * default behavior: experimental/v1alpha1 -> apis/experimental/v1alpha1
 # * legacy behavior: api/v1 -> api/v1
 # * Special handling for only a group: experimental -> apis/experimental
 # * Special handling for only "api" group: api -> api
+# * Special handling for when both group and version are "": / -> api
 # * Very special handling for "v1": v1 -> api/v1
 kube::util::group-version-to-pkg-path() {
   local group_version="$1"
@@ -253,6 +254,10 @@ kube::util::group-version-to-pkg-path() {
   # TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
   # moving the results to pkg/apis/api.
   case "${group_version}" in
+    # both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API.
+    /)
+      echo "api"
+      ;;
     v1)
       echo "api/v1"
       ;;
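
For reference, the group/version-to-package mapping that `kube::util::group-version-to-pkg-path` implements is spelled out in the comments above. A minimal Go sketch of the same convention (illustrative only, not part of the commit; `pkgPath` is a made-up name):

```go
package main

import (
	"fmt"
	"strings"
)

// pkgPath sketches the convention documented for
// kube::util::group-version-to-pkg-path:
//   "/" or "api"            -> "api"
//   "v1" or "api/v1"        -> "api/v1"
//   "experimental"          -> "apis/experimental"
//   "experimental/v1alpha1" -> "apis/experimental/v1alpha1"
func pkgPath(groupVersion string) string {
	switch groupVersion {
	case "/", "api":
		return "api"
	case "v1", "api/v1":
		return "api/v1"
	default:
		// default behavior: group[/version] lives under pkg/apis/.
		return "apis/" + strings.TrimSuffix(groupVersion, "/")
	}
}

func main() {
	for _, gv := range []string{"/", "v1", "api/v1", "experimental", "experimental/v1alpha1"} {
		fmt.Printf("%q -> %q\n", gv, pkgPath(gv))
	}
}
```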


@@ -127,7 +127,7 @@ kube::util::wait_for_url "http://127.0.0.1:${KUBELET_HEALTHZ_PORT}/healthz" "kub
 # Start kube-apiserver
 kube::log::status "Starting kube-apiserver"
-KUBE_API_VERSIONS="v1,experimental/v1" "${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
+KUBE_API_VERSIONS="v1,experimental/v1alpha1" "${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \
   --address="127.0.0.1" \
   --public-address-override="127.0.0.1" \
   --port="${API_PORT}" \
@@ -829,7 +829,7 @@ kube_api_versions=(
   v1
 )
 for version in "${kube_api_versions[@]}"; do
-  KUBE_API_VERSIONS="v1,experimental/v1" runTests "${version}"
+  KUBE_API_VERSIONS="v1,experimental/v1alpha1" runTests "${version}"
 done
 kube::log::status "TEST PASSED"


@@ -58,7 +58,7 @@ KUBE_GOVERALLS_BIN=${KUBE_GOVERALLS_BIN:-}
 # "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
 # TODO: It's going to be:
 # KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,experimental/v1alpha1"}
-KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,experimental/v1"}
+KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,experimental/v1alpha1"}
 # once we have multiple group supports
 # Run tests with the standard (registry) and a custom etcd prefix
 # (kubernetes.io/registry).
@@ -291,7 +291,7 @@ for (( i=0, j=0; ; )); do
   # KUBE_TEST_API sets the version of each group to be tested. KUBE_API_VERSIONS
   # register the groups/versions as supported by k8s. So KUBE_API_VERSIONS
   # needs to be the superset of KUBE_TEST_API.
-  KUBE_TEST_API="${apiVersion}" KUBE_API_VERSIONS="v1,experimental/v1" ETCD_PREFIX=${etcdPrefix} runTests "$@"
+  KUBE_TEST_API="${apiVersion}" KUBE_API_VERSIONS="v1,experimental/v1alpha1" ETCD_PREFIX=${etcdPrefix} runTests "$@"
   i=${i}+1
   j=${j}+1
   if [[ i -eq ${apiVersionsCount} ]] && [[ j -eq ${etcdPrefixesCount} ]]; then


@@ -29,7 +29,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
 # "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
 # TODO: It's going to be:
 # KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,experimental/v1alpha1"}
-KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,experimental/v1"}
+KUBE_TEST_API_VERSIONS=${KUBE_TEST_API_VERSIONS:-"v1,experimental/v1alpha1"}
 KUBE_INTEGRATION_TEST_MAX_CONCURRENCY=${KUBE_INTEGRATION_TEST_MAX_CONCURRENCY:-"-1"}
 LOG_LEVEL=${LOG_LEVEL:-2}
@@ -49,18 +49,18 @@ runTests() {
   KUBE_GOFLAGS="-tags 'integration no-docker' " \
     KUBE_RACE="" \
     KUBE_TEST_API_VERSIONS="$1" \
-    KUBE_API_VERSIONS="v1,experimental/v1" \
+    KUBE_API_VERSIONS="v1,experimental/v1alpha1" \
     "${KUBE_ROOT}/hack/test-go.sh" test/integration
   kube::log::status "Running integration test scenario"
-  KUBE_API_VERSIONS="v1,experimental/v1" KUBE_TEST_API_VERSIONS="$1" "${KUBE_OUTPUT_HOSTBIN}/integration" --v=${LOG_LEVEL} \
+  KUBE_API_VERSIONS="v1,experimental/v1alpha1" KUBE_TEST_API_VERSIONS="$1" "${KUBE_OUTPUT_HOSTBIN}/integration" --v=${LOG_LEVEL} \
     --max-concurrency="${KUBE_INTEGRATION_TEST_MAX_CONCURRENCY}"
   cleanup
 }
-KUBE_API_VERSIONS="v1,experimental/v1" "${KUBE_ROOT}/hack/build-go.sh" "$@" cmd/integration
+KUBE_API_VERSIONS="v1,experimental/v1alpha1" "${KUBE_ROOT}/hack/build-go.sh" "$@" cmd/integration
 # Run cleanup to stop etcd on interrupt or other kill signal.
 trap cleanup EXIT


@@ -56,7 +56,7 @@ EOF
   mv "$TMPFILE" "pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/types_swagger_doc_generated.go"
 }
-GROUP_VERSIONS=(api/unversioned api/v1 experimental/v1)
+GROUP_VERSIONS=(api/unversioned api/v1 experimental/v1alpha1)
 # To avoid compile errors, remove the currently existing files.
 for group_version in "${GROUP_VERSIONS[@]}"; do
   rm -f "pkg/$(kube::util::group-version-to-pkg-path "${group_version}")/types_swagger_doc_generated.go"


@@ -58,10 +58,13 @@ func init() {
 		Codec: runtime.CodecFor(api.Scheme, groupVersion),
 	}
 	var versions []string
+	var groupVersions []string
 	for i := len(registeredGroupVersions) - 1; i >= 0; i-- {
 		versions = append(versions, apiutil.GetVersion(registeredGroupVersions[i]))
+		groupVersions = append(groupVersions, registeredGroupVersions[i])
 	}
 	groupMeta.Versions = versions
+	groupMeta.GroupVersions = groupVersions
 	groupMeta.SelfLinker = runtime.SelfLinker(accessor)
@@ -89,7 +92,7 @@ func init() {
 		"ThirdPartyResourceData",
 		"ThirdPartyResourceList")
-	mapper := api.NewDefaultRESTMapper("api", versions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
+	mapper := api.NewDefaultRESTMapper("", versions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
 	// setup aliases for groups of resources
 	mapper.AddResourceAlias("all", userResources...)
 	groupMeta.RESTMapper = mapper


@@ -96,6 +96,10 @@ type GroupMeta struct {
 	// items when presented with a set of versions to choose.
 	Versions []string
+	// GroupVersions is Group + Versions. This is to avoid string concatenation
+	// in many places.
+	GroupVersions []string
 	// Codec is the default codec for serializing output that should use
 	// the latest supported version. Use this Codec when writing to
 	// disk, a data store that is not dynamically versioned, or in tests.


@@ -32,12 +32,12 @@ var RegisteredVersions []string
 func init() {
 	// TODO: caesarxuchao: rename this variable to validGroupVersions
 	validAPIVersions := map[string]bool{
 		"v1": true,
-		"experimental/v1": true,
+		"experimental/v1alpha1": true,
 	}
 	// The default list of supported api versions, in order of most preferred to the least.
-	defaultSupportedVersions := "v1,experimental/v1"
+	defaultSupportedVersions := "v1,experimental/v1alpha1"
 	// Env var KUBE_API_VERSIONS is a comma separated list of API versions that should be registered in the scheme.
 	// The versions should be in the order of most preferred to the least.
 	supportedVersions := os.Getenv("KUBE_API_VERSIONS")


@@ -33,7 +33,7 @@ import (
 	"k8s.io/kubernetes/pkg/util/sets"
 	_ "k8s.io/kubernetes/pkg/apis/experimental"
-	_ "k8s.io/kubernetes/pkg/apis/experimental/v1"
+	_ "k8s.io/kubernetes/pkg/apis/experimental/v1alpha1"
 	flag "github.com/spf13/pflag"
 )
@@ -90,10 +90,16 @@ func roundTripSame(t *testing.T, item runtime.Object, except ...string) {
 	set := sets.NewString(except...)
 	seed := rand.Int63()
 	fuzzInternalObject(t, "", item, seed)
+	codec, err := testapi.GetCodecForObject(item)
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
 	version := testapi.Default.Version()
 	if !set.Has(version) {
 		fuzzInternalObject(t, version, item, seed)
-		roundTrip(t, testapi.Default.Codec(), item)
+		roundTrip(t, codec, item)
 	}
 }


@@ -64,10 +64,10 @@ func init() {
 	if _, ok := Groups[""]; !ok {
 		// TODO: The second latest.GroupOrDie("").Version will be latest.GroupVersion after we
 		// have multiple group support
-		Groups[""] = TestGroup{"", latest.GroupOrDie("").Version, latest.GroupOrDie("").Version}
+		Groups[""] = TestGroup{"", latest.GroupOrDie("").Version, latest.GroupOrDie("").GroupVersion}
 	}
 	if _, ok := Groups["experimental"]; !ok {
-		Groups["experimental"] = TestGroup{"experimental", latest.GroupOrDie("experimental").Version, latest.GroupOrDie("experimental").Version}
+		Groups["experimental"] = TestGroup{"experimental", latest.GroupOrDie("experimental").Version, latest.GroupOrDie("experimental").GroupVersion}
 	}
 	Default = Groups[""]
@@ -91,14 +91,14 @@ func (g TestGroup) GroupAndVersion() string {
 func (g TestGroup) Codec() runtime.Codec {
 	// TODO: caesarxuchao: Restructure the body once we have a central `latest`.
 	if g.Group == "" {
-		interfaces, err := latest.GroupOrDie("").InterfacesFor(g.VersionUnderTest)
+		interfaces, err := latest.GroupOrDie("").InterfacesFor(g.GroupVersionUnderTest)
 		if err != nil {
 			panic(err)
 		}
 		return interfaces.Codec
 	}
 	if g.Group == "experimental" {
-		interfaces, err := latest.GroupOrDie("experimental").InterfacesFor(g.VersionUnderTest)
+		interfaces, err := latest.GroupOrDie("experimental").InterfacesFor(g.GroupVersionUnderTest)
 		if err != nil {
 			panic(err)
 		}


@@ -23,6 +23,7 @@ import (
 	api "k8s.io/kubernetes/pkg/api"
 	resource "k8s.io/kubernetes/pkg/api/resource"
+	experimental "k8s.io/kubernetes/pkg/apis/experimental"
 	conversion "k8s.io/kubernetes/pkg/conversion"
 )
@@ -2461,6 +2462,15 @@ func convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *Volu
 	return nil
 }
+func convert_v1_APIVersion_To_experimental_APIVersion(in *APIVersion, out *experimental.APIVersion, s conversion.Scope) error {
+	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+		defaulting.(func(*APIVersion))(in)
+	}
+	out.Name = in.Name
+	out.APIGroup = in.APIGroup
+	return nil
+}
 func convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error {
 	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
 		defaulting.(func(*AWSElasticBlockStoreVolumeSource))(in)
@@ -4748,6 +4758,69 @@ func convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *TCPSocketAction, out
 	return nil
 }
+func convert_v1_ThirdPartyResource_To_experimental_ThirdPartyResource(in *ThirdPartyResource, out *experimental.ThirdPartyResource, s conversion.Scope) error {
+	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+		defaulting.(func(*ThirdPartyResource))(in)
+	}
+	if err := s.Convert(&in.TypeMeta, &out.TypeMeta, 0); err != nil {
+		return err
+	}
+	if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+		return err
+	}
+	out.Description = in.Description
+	if in.Versions != nil {
+		out.Versions = make([]experimental.APIVersion, len(in.Versions))
+		for i := range in.Versions {
+			if err := convert_v1_APIVersion_To_experimental_APIVersion(&in.Versions[i], &out.Versions[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Versions = nil
+	}
+	return nil
+}
+func convert_v1_ThirdPartyResourceData_To_experimental_ThirdPartyResourceData(in *ThirdPartyResourceData, out *experimental.ThirdPartyResourceData, s conversion.Scope) error {
+	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+		defaulting.(func(*ThirdPartyResourceData))(in)
+	}
+	if err := s.Convert(&in.TypeMeta, &out.TypeMeta, 0); err != nil {
+		return err
+	}
+	if err := convert_v1_ObjectMeta_To_api_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+		return err
+	}
+	if err := s.Convert(&in.Data, &out.Data, 0); err != nil {
+		return err
+	}
+	return nil
+}
+func convert_v1_ThirdPartyResourceList_To_experimental_ThirdPartyResourceList(in *ThirdPartyResourceList, out *experimental.ThirdPartyResourceList, s conversion.Scope) error {
+	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+		defaulting.(func(*ThirdPartyResourceList))(in)
+	}
+	if err := s.Convert(&in.TypeMeta, &out.TypeMeta, 0); err != nil {
+		return err
+	}
+	if err := s.Convert(&in.ListMeta, &out.ListMeta, 0); err != nil {
+		return err
+	}
+	if in.Items != nil {
+		out.Items = make([]experimental.ThirdPartyResource, len(in.Items))
+		for i := range in.Items {
+			if err := convert_v1_ThirdPartyResource_To_experimental_ThirdPartyResource(&in.Items[i], &out.Items[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Items = nil
+	}
+	return nil
+}
 func convert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error {
 	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
 		defaulting.(func(*Volume))(in)
@@ -4896,6 +4969,78 @@ func convert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.Volu
 	return nil
 }
+func convert_experimental_APIVersion_To_v1_APIVersion(in *experimental.APIVersion, out *APIVersion, s conversion.Scope) error {
+	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+		defaulting.(func(*experimental.APIVersion))(in)
+	}
+	out.Name = in.Name
+	out.APIGroup = in.APIGroup
+	return nil
+}
+func convert_experimental_ThirdPartyResource_To_v1_ThirdPartyResource(in *experimental.ThirdPartyResource, out *ThirdPartyResource, s conversion.Scope) error {
+	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+		defaulting.(func(*experimental.ThirdPartyResource))(in)
+	}
+	if err := s.Convert(&in.TypeMeta, &out.TypeMeta, 0); err != nil {
+		return err
+	}
+	if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+		return err
+	}
+	out.Description = in.Description
+	if in.Versions != nil {
+		out.Versions = make([]APIVersion, len(in.Versions))
+		for i := range in.Versions {
+			if err := convert_experimental_APIVersion_To_v1_APIVersion(&in.Versions[i], &out.Versions[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Versions = nil
+	}
+	return nil
+}
+func convert_experimental_ThirdPartyResourceData_To_v1_ThirdPartyResourceData(in *experimental.ThirdPartyResourceData, out *ThirdPartyResourceData, s conversion.Scope) error {
+	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+		defaulting.(func(*experimental.ThirdPartyResourceData))(in)
+	}
+	if err := s.Convert(&in.TypeMeta, &out.TypeMeta, 0); err != nil {
+		return err
+	}
+	if err := convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
+		return err
+	}
+	if err := s.Convert(&in.Data, &out.Data, 0); err != nil {
+		return err
+	}
+	return nil
+}
+func convert_experimental_ThirdPartyResourceList_To_v1_ThirdPartyResourceList(in *experimental.ThirdPartyResourceList, out *ThirdPartyResourceList, s conversion.Scope) error {
+	if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
+		defaulting.(func(*experimental.ThirdPartyResourceList))(in)
+	}
+	if err := s.Convert(&in.TypeMeta, &out.TypeMeta, 0); err != nil {
+		return err
+	}
+	if err := s.Convert(&in.ListMeta, &out.ListMeta, 0); err != nil {
+		return err
+	}
+	if in.Items != nil {
+		out.Items = make([]ThirdPartyResource, len(in.Items))
+		for i := range in.Items {
+			if err := convert_experimental_ThirdPartyResource_To_v1_ThirdPartyResource(&in.Items[i], &out.Items[i], s); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Items = nil
+	}
+	return nil
+}
 func init() {
 	err := api.Scheme.AddGeneratedConversionFuncs(
 		convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource,
@@ -5013,6 +5158,11 @@ func init() {
 		convert_api_VolumeMount_To_v1_VolumeMount,
 		convert_api_VolumeSource_To_v1_VolumeSource,
 		convert_api_Volume_To_v1_Volume,
+		convert_experimental_APIVersion_To_v1_APIVersion,
+		convert_experimental_ThirdPartyResourceData_To_v1_ThirdPartyResourceData,
+		convert_experimental_ThirdPartyResourceList_To_v1_ThirdPartyResourceList,
+		convert_experimental_ThirdPartyResource_To_v1_ThirdPartyResource,
+		convert_v1_APIVersion_To_experimental_APIVersion,
 		convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource,
 		convert_v1_Binding_To_api_Binding,
 		convert_v1_Capabilities_To_api_Capabilities,
@@ -5125,6 +5275,9 @@ func init() {
 		convert_v1_ServiceStatus_To_api_ServiceStatus,
 		convert_v1_Service_To_api_Service,
 		convert_v1_TCPSocketAction_To_api_TCPSocketAction,
+		convert_v1_ThirdPartyResourceData_To_experimental_ThirdPartyResourceData,
+		convert_v1_ThirdPartyResourceList_To_experimental_ThirdPartyResourceList,
+		convert_v1_ThirdPartyResource_To_experimental_ThirdPartyResource,
 		convert_v1_VolumeMount_To_api_VolumeMount,
 		convert_v1_VolumeSource_To_api_VolumeSource,
 		convert_v1_Volume_To_api_Volume,


@@ -67,6 +67,12 @@ func deepCopy_unversioned_TypeMeta(in unversioned.TypeMeta, out *unversioned.Typ
 	return nil
 }
+func deepCopy_v1_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error {
+	out.Name = in.Name
+	out.APIGroup = in.APIGroup
+	return nil
+}
 func deepCopy_v1_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error {
 	out.VolumeID = in.VolumeID
 	out.FSType = in.FSType
@@ -2117,6 +2123,65 @@ func deepCopy_v1_TCPSocketAction(in TCPSocketAction, out *TCPSocketAction, c *co
 	return nil
 }
+func deepCopy_v1_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error {
+	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
+		return err
+	}
+	if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+		return err
+	}
+	out.Description = in.Description
+	if in.Versions != nil {
+		out.Versions = make([]APIVersion, len(in.Versions))
+		for i := range in.Versions {
+			if err := deepCopy_v1_APIVersion(in.Versions[i], &out.Versions[i], c); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Versions = nil
+	}
+	return nil
+}
+func deepCopy_v1_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error {
+	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
+		return err
+	}
+	if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
+		return err
+	}
+	if in.Data != nil {
+		out.Data = make([]uint8, len(in.Data))
+		for i := range in.Data {
+			out.Data[i] = in.Data[i]
+		}
+	} else {
+		out.Data = nil
+	}
+	return nil
+}
+func deepCopy_v1_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error {
+	if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
+		return err
+	}
+	if err := deepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
+		return err
+	}
+	if in.Items != nil {
+		out.Items = make([]ThirdPartyResource, len(in.Items))
+		for i := range in.Items {
+			if err := deepCopy_v1_ThirdPartyResource(in.Items[i], &out.Items[i], c); err != nil {
+				return err
+			}
+		}
+	} else {
+		out.Items = nil
+	}
+	return nil
+}
 func deepCopy_v1_Volume(in Volume, out *Volume, c *conversion.Cloner) error {
 	out.Name = in.Name
 	if err := deepCopy_v1_VolumeSource(in.VolumeSource, &out.VolumeSource, c); err != nil {
@@ -2281,6 +2346,7 @@ func init() {
 		deepCopy_unversioned_ListMeta,
 		deepCopy_unversioned_Time,
 		deepCopy_unversioned_TypeMeta,
+		deepCopy_v1_APIVersion,
 		deepCopy_v1_AWSElasticBlockStoreVolumeSource,
 		deepCopy_v1_Binding,
 		deepCopy_v1_Capabilities,
@@ -2395,6 +2461,9 @@ func init() {
 		deepCopy_v1_ServiceSpec,
 		deepCopy_v1_ServiceStatus,
 		deepCopy_v1_TCPSocketAction,
+		deepCopy_v1_ThirdPartyResource,
+		deepCopy_v1_ThirdPartyResourceData,
+		deepCopy_v1_ThirdPartyResourceList,
 		deepCopy_v1_Volume,
 		deepCopy_v1_VolumeMount,
 		deepCopy_v1_VolumeSource,


@@ -20,10 +20,11 @@ import (
 	"encoding/json"
 	"fmt"
 	"reflect"
-	"strings"
+	"regexp"
 	"github.com/emicklei/go-restful/swagger"
 	"github.com/golang/glog"
+	apiutil "k8s.io/kubernetes/pkg/api/util"
 	"k8s.io/kubernetes/pkg/util/errors"
 	errs "k8s.io/kubernetes/pkg/util/fielderrors"
 	"k8s.io/kubernetes/pkg/util/yaml"
@@ -79,22 +80,23 @@ func (s *SwaggerSchema) ValidateBytes(data []byte) error {
 	if !ok {
 		return fmt.Errorf("error in unmarshaling data %s", string(data))
 	}
-	apiVersion := fields["apiVersion"]
-	if apiVersion == nil {
+	groupVersion := fields["apiVersion"]
+	if groupVersion == nil {
 		return fmt.Errorf("apiVersion not set")
 	}
 	kind := fields["kind"]
 	if kind == nil {
 		return fmt.Errorf("kind not set")
 	}
-	allErrs := s.ValidateObject(obj, apiVersion.(string), "", apiVersion.(string)+"."+kind.(string))
+	version := apiutil.GetVersion(groupVersion.(string))
+	allErrs := s.ValidateObject(obj, "", version+"."+kind.(string))
 	if len(allErrs) == 1 {
 		return allErrs[0]
 	}
 	return errors.NewAggregate(allErrs)
 }
-func (s *SwaggerSchema) ValidateObject(obj interface{}, apiVersion, fieldName, typeName string) errs.ValidationErrorList {
+func (s *SwaggerSchema) ValidateObject(obj interface{}, fieldName, typeName string) errs.ValidationErrorList {
 	allErrs := errs.ValidationErrorList{}
 	models := s.api.Models
 	// TODO: handle required fields here too.
@@ -139,7 +141,7 @@ func (s *SwaggerSchema) ValidateObject(obj interface{}, apiVersion, fieldName, t
 			glog.V(2).Infof("Skipping nil field: %s", key)
 			continue
 		}
-		errs := s.validateField(value, apiVersion, fieldName+key, fieldType, &details)
+		errs := s.validateField(value, fieldName+key, fieldType, &details)
 		if len(errs) > 0 {
 			allErrs = append(allErrs, errs...)
 		}
@@ -147,9 +149,22 @@ func (s *SwaggerSchema) ValidateObject(obj interface{}, apiVersion, fieldName, t
 	return allErrs
 }
-func (s *SwaggerSchema) validateField(value interface{}, apiVersion, fieldName, fieldType string, fieldDetails *swagger.ModelProperty) errs.ValidationErrorList {
-	if strings.HasPrefix(fieldType, apiVersion) {
-		return s.ValidateObject(value, apiVersion, fieldName, fieldType)
+// This matches type name in the swagger spec, such as "v1.Binding".
+var versionRegexp = regexp.MustCompile(`^v.+\..*`)
+func (s *SwaggerSchema) validateField(value interface{}, fieldName, fieldType string, fieldDetails *swagger.ModelProperty) errs.ValidationErrorList {
+	// TODO: caesarxuchao: because we have multiple group/versions and objects
+	// may reference objects in other group, the commented out way of checking
+	// if a filedType is a type defined by us is outdated. We use a hacky way
+	// for now.
+	// TODO: the type name in the swagger spec is something like "v1.Binding",
+	// and the "v1" is generated from the package name, not the groupVersion of
+	// the type. We need to fix go-restful to embed the group name in the type
+	// name, otherwise we couldn't handle identically named types in different
+	// groups correctly.
+	if versionRegexp.MatchString(fieldType) {
+		// if strings.HasPrefix(fieldType, apiVersion) {
+		return s.ValidateObject(value, fieldName, fieldType)
 	}
 	allErrs := errs.ValidationErrorList{}
 	switch fieldType {
@@ -176,7 +191,7 @@ func (s *SwaggerSchema) validateField(value interface{}, apiVersion, fieldName,
 		arrType = *fieldDetails.Items.Type
 	}
 	for ix := range arr {
-		errs := s.validateField(arr[ix], apiVersion, fmt.Sprintf("%s[%d]", fieldName, ix), arrType, nil)
+		errs := s.validateField(arr[ix], fmt.Sprintf("%s[%d]", fieldName, ix), arrType, nil)
 		if len(errs) > 0 {
 			allErrs = append(allErrs, errs...)
 		}


@@ -125,3 +125,31 @@ func TestValid(t *testing.T) {
 		}
 	}
 }
+func TestVersionRegex(t *testing.T) {
+	testCases := []struct {
+		typeName string
+		match    bool
+	}{
+		{
+			typeName: "v1.Binding",
+			match:    true,
+		},
+		{
+			typeName: "v1alpha1.Binding",
+			match:    true,
+		},
+		{
+			typeName: "Binding",
+			match:    false,
+		},
+	}
+	for _, test := range testCases {
+		if versionRegexp.MatchString(test.typeName) && !test.match {
+			t.Errorf("unexpected error: expect %s not to match the regular expression", test.typeName)
+		}
+		if !versionRegexp.MatchString(test.typeName) && test.match {
+			t.Errorf("unexpected error: expect %s to match the regular expression", test.typeName)
+		}
+	}
+}


@@ -30,7 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/registered"
 	apiutil "k8s.io/kubernetes/pkg/api/util"
 	_ "k8s.io/kubernetes/pkg/apis/experimental"
-	"k8s.io/kubernetes/pkg/apis/experimental/v1"
+	"k8s.io/kubernetes/pkg/apis/experimental/v1alpha1"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/sets"
 )
@@ -51,14 +51,16 @@ func init() {
 		GroupVersion: groupVersion,
 		Group: apiutil.GetGroup(groupVersion),
 		Version: apiutil.GetVersion(groupVersion),
-		// TODO: caesarxuchao: change it to groupVersion when we support multiple groups
-		Codec: runtime.CodecFor(api.Scheme, apiutil.GetVersion(groupVersion)),
+		Codec: runtime.CodecFor(api.Scheme, groupVersion),
 	}
 	var versions []string
+	var groupVersions []string
 	for i := len(registeredGroupVersions) - 1; i >= 0; i-- {
 		versions = append(versions, apiutil.GetVersion(registeredGroupVersions[i]))
+		groupVersions = append(groupVersions, registeredGroupVersions[i])
 	}
 	groupMeta.Versions = versions
+	groupMeta.GroupVersions = groupVersions
 	groupMeta.SelfLinker = runtime.SelfLinker(accessor)
@@ -68,7 +70,7 @@ func init() {
 	ignoredKinds := sets.NewString()
-	groupMeta.RESTMapper = api.NewDefaultRESTMapper("experimental", versions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
+	groupMeta.RESTMapper = api.NewDefaultRESTMapper("experimental", groupVersions, interfacesFor, importPrefix, ignoredKinds, rootScoped)
 	api.RegisterRESTMapper(groupMeta.RESTMapper)
 	groupMeta.InterfacesFor = interfacesFor
 }
@@ -77,9 +79,9 @@ func init() {
 // string, or an error if the version is not known.
 func interfacesFor(version string) (*meta.VersionInterfaces, error) {
 	switch version {
-	case "v1":
+	case "experimental/v1alpha1":
 		return &meta.VersionInterfaces{
-			Codec: v1.Codec,
+			Codec: v1alpha1.Codec,
 			ObjectConvertor: api.Scheme,
 			MetadataAccessor: accessor,
 		}, nil


@@ -58,7 +58,7 @@ func TestCodec(t *testing.T) {
 	if err := json.Unmarshal(data, &other); err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
-	if other.APIVersion != latest.GroupOrDie("experimental").Version || other.Kind != "DaemonSet" {
+	if other.APIVersion != latest.GroupOrDie("experimental").GroupVersion || other.Kind != "DaemonSet" {
 		t.Errorf("unexpected unmarshalled object %#v", other)
 	}
 }
@@ -67,24 +67,24 @@ func TestInterfacesFor(t *testing.T) {
 	if _, err := latest.GroupOrDie("experimental").InterfacesFor(""); err == nil {
 		t.Fatalf("unexpected non-error: %v", err)
 	}
-	for i, version := range append([]string{latest.GroupOrDie("experimental").Version}, latest.GroupOrDie("experimental").Versions...) {
-		if vi, err := latest.GroupOrDie("experimental").InterfacesFor(version); err != nil || vi == nil {
+	for i, groupVersion := range append([]string{latest.GroupOrDie("experimental").GroupVersion}, latest.GroupOrDie("experimental").GroupVersions...) {
+		if vi, err := latest.GroupOrDie("experimental").InterfacesFor(groupVersion); err != nil || vi == nil {
 			t.Fatalf("%d: unexpected result: %v", i, err)
 		}
 	}
 }
 func TestRESTMapper(t *testing.T) {
-	if v, k, err := latest.GroupOrDie("experimental").RESTMapper.VersionAndKindForResource("horizontalpodautoscalers"); err != nil || v != "v1" || k != "HorizontalPodAutoscaler" {
+	if v, k, err := latest.GroupOrDie("experimental").RESTMapper.VersionAndKindForResource("horizontalpodautoscalers"); err != nil || v != "experimental/v1alpha1" || k != "HorizontalPodAutoscaler" {
 		t.Errorf("unexpected version mapping: %s %s %v", v, k, err)
 	}
-	if m, err := latest.GroupOrDie("experimental").RESTMapper.RESTMapping("DaemonSet", ""); err != nil || m.APIVersion != "v1" || m.Resource != "daemonsets" {
+	if m, err := latest.GroupOrDie("experimental").RESTMapper.RESTMapping("DaemonSet", ""); err != nil || m.APIVersion != "experimental/v1alpha1" || m.Resource != "daemonsets" {
 		t.Errorf("unexpected version mapping: %#v %v", m, err)
 	}
-	for _, version := range latest.GroupOrDie("experimental").Versions {
-		mapping, err := latest.GroupOrDie("experimental").RESTMapper.RESTMapping("HorizontalPodAutoscaler", version)
+	for _, groupVersion := range latest.GroupOrDie("experimental").GroupVersions {
+		mapping, err := latest.GroupOrDie("experimental").RESTMapper.RESTMapping("HorizontalPodAutoscaler", groupVersion)
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -92,11 +92,11 @@ func TestRESTMapper(t *testing.T) {
 		if mapping.Resource != "horizontalpodautoscalers" {
 			t.Errorf("incorrect resource name: %#v", mapping)
 		}
-		if mapping.APIVersion != version {
-			t.Errorf("incorrect version: %v", mapping)
+		if mapping.APIVersion != groupVersion {
+			t.Errorf("incorrect groupVersion: %v", mapping)
 		}
-		interfaces, _ := latest.GroupOrDie("experimental").InterfacesFor(version)
+		interfaces, _ := latest.GroupOrDie("experimental").InterfacesFor(groupVersion)
 		if mapping.Codec != interfaces.Codec {
 			t.Errorf("unexpected codec: %#v, expected: %#v", mapping, interfaces)
 		}


@@ -15,7 +15,7 @@ limitations under the License.
 */
 /*
-This file (together with pkg/apis/experimental/v1/types.go) contain the experimental
+This file (together with pkg/apis/experimental/v1alpha1/types.go) contain the experimental
 types in kubernetes. These API objects are experimental, meaning that the
 APIs may be broken at any time by the kubernetes team.


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package v1 package v1alpha1
import ( import (
"reflect" "reflect"
@ -31,12 +31,12 @@ func addConversionFuncs() {
err := api.Scheme.AddConversionFuncs( err := api.Scheme.AddConversionFuncs(
convert_api_PodSpec_To_v1_PodSpec, convert_api_PodSpec_To_v1_PodSpec,
convert_v1_PodSpec_To_api_PodSpec, convert_v1_PodSpec_To_api_PodSpec,
convert_experimental_DeploymentSpec_To_v1_DeploymentSpec, convert_experimental_DeploymentSpec_To_v1alpha1_DeploymentSpec,
convert_v1_DeploymentSpec_To_experimental_DeploymentSpec, convert_v1alpha1_DeploymentSpec_To_experimental_DeploymentSpec,
convert_experimental_DeploymentStrategy_To_v1_DeploymentStrategy, convert_experimental_DeploymentStrategy_To_v1alpha1_DeploymentStrategy,
convert_v1_DeploymentStrategy_To_experimental_DeploymentStrategy, convert_v1alpha1_DeploymentStrategy_To_experimental_DeploymentStrategy,
convert_experimental_RollingUpdateDeployment_To_v1_RollingUpdateDeployment, convert_experimental_RollingUpdateDeployment_To_v1alpha1_RollingUpdateDeployment,
convert_v1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment, convert_v1alpha1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment,
) )
if err != nil { if err != nil {
// If one of the conversion functions is malformed, detect it immediately. // If one of the conversion functions is malformed, detect it immediately.
@ -182,7 +182,7 @@ func convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conve
return nil return nil
} }
func convert_experimental_DeploymentSpec_To_v1_DeploymentSpec(in *experimental.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error { func convert_experimental_DeploymentSpec_To_v1alpha1_DeploymentSpec(in *experimental.DeploymentSpec, out *DeploymentSpec, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*experimental.DeploymentSpec))(in) defaulting.(func(*experimental.DeploymentSpec))(in)
} }
@ -204,7 +204,7 @@ func convert_experimental_DeploymentSpec_To_v1_DeploymentSpec(in *experimental.D
} else { } else {
out.Template = nil out.Template = nil
} }
if err := convert_experimental_DeploymentStrategy_To_v1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { if err := convert_experimental_DeploymentStrategy_To_v1alpha1_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err return err
} }
out.UniqueLabelKey = new(string) out.UniqueLabelKey = new(string)
@ -212,7 +212,7 @@ func convert_experimental_DeploymentSpec_To_v1_DeploymentSpec(in *experimental.D
return nil return nil
} }
func convert_v1_DeploymentSpec_To_experimental_DeploymentSpec(in *DeploymentSpec, out *experimental.DeploymentSpec, s conversion.Scope) error { func convert_v1alpha1_DeploymentSpec_To_experimental_DeploymentSpec(in *DeploymentSpec, out *experimental.DeploymentSpec, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*DeploymentSpec))(in) defaulting.(func(*DeploymentSpec))(in)
} }
@ -235,7 +235,7 @@ func convert_v1_DeploymentSpec_To_experimental_DeploymentSpec(in *DeploymentSpec
} else { } else {
out.Template = nil out.Template = nil
} }
if err := convert_v1_DeploymentStrategy_To_experimental_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil { if err := convert_v1alpha1_DeploymentStrategy_To_experimental_DeploymentStrategy(&in.Strategy, &out.Strategy, s); err != nil {
return err return err
} }
if in.UniqueLabelKey != nil { if in.UniqueLabelKey != nil {
@ -244,14 +244,14 @@ func convert_v1_DeploymentSpec_To_experimental_DeploymentSpec(in *DeploymentSpec
return nil return nil
} }
func convert_experimental_DeploymentStrategy_To_v1_DeploymentStrategy(in *experimental.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error { func convert_experimental_DeploymentStrategy_To_v1alpha1_DeploymentStrategy(in *experimental.DeploymentStrategy, out *DeploymentStrategy, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*experimental.DeploymentStrategy))(in) defaulting.(func(*experimental.DeploymentStrategy))(in)
} }
out.Type = DeploymentStrategyType(in.Type) out.Type = DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil { if in.RollingUpdate != nil {
out.RollingUpdate = new(RollingUpdateDeployment) out.RollingUpdate = new(RollingUpdateDeployment)
if err := convert_experimental_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { if err := convert_experimental_RollingUpdateDeployment_To_v1alpha1_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil {
return err return err
} }
} else { } else {
@ -260,14 +260,14 @@ func convert_experimental_DeploymentStrategy_To_v1_DeploymentStrategy(in *experi
return nil return nil
} }
func convert_v1_DeploymentStrategy_To_experimental_DeploymentStrategy(in *DeploymentStrategy, out *experimental.DeploymentStrategy, s conversion.Scope) error { func convert_v1alpha1_DeploymentStrategy_To_experimental_DeploymentStrategy(in *DeploymentStrategy, out *experimental.DeploymentStrategy, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*DeploymentStrategy))(in) defaulting.(func(*DeploymentStrategy))(in)
} }
out.Type = experimental.DeploymentStrategyType(in.Type) out.Type = experimental.DeploymentStrategyType(in.Type)
if in.RollingUpdate != nil { if in.RollingUpdate != nil {
out.RollingUpdate = new(experimental.RollingUpdateDeployment) out.RollingUpdate = new(experimental.RollingUpdateDeployment)
if err := convert_v1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil { if err := convert_v1alpha1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment(in.RollingUpdate, out.RollingUpdate, s); err != nil {
return err return err
} }
} else { } else {
@ -276,7 +276,7 @@ func convert_v1_DeploymentStrategy_To_experimental_DeploymentStrategy(in *Deploy
return nil return nil
} }
func convert_experimental_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(in *experimental.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error { func convert_experimental_RollingUpdateDeployment_To_v1alpha1_RollingUpdateDeployment(in *experimental.RollingUpdateDeployment, out *RollingUpdateDeployment, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*experimental.RollingUpdateDeployment))(in) defaulting.(func(*experimental.RollingUpdateDeployment))(in)
} }
@ -296,7 +296,7 @@ func convert_experimental_RollingUpdateDeployment_To_v1_RollingUpdateDeployment(
return nil return nil
} }
func convert_v1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment(in *RollingUpdateDeployment, out *experimental.RollingUpdateDeployment, s conversion.Scope) error { func convert_v1alpha1_RollingUpdateDeployment_To_experimental_RollingUpdateDeployment(in *RollingUpdateDeployment, out *experimental.RollingUpdateDeployment, s conversion.Scope) error {
if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found {
defaulting.(func(*RollingUpdateDeployment))(in) defaulting.(func(*RollingUpdateDeployment))(in)
} }
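
The conversion helpers renamed above all share one hand-written shape: scalar fields are copied straight across, while pointer fields are re-allocated on the output side before recursing, so the converted object never aliases its input. A minimal, self-contained sketch of that shape — hypothetical Strategy types standing in for the real experimental structs, not the generated code itself:

package main

import "fmt"

// Hypothetical versioned and internal types, standing in for the
// v1alpha1 and experimental structs converted above.
type v1alpha1Strategy struct {
    Type          string
    RollingUpdate *v1alpha1RollingUpdate
}
type v1alpha1RollingUpdate struct{ MaxUnavailable int }

type internalStrategy struct {
    Type          string
    RollingUpdate *internalRollingUpdate
}
type internalRollingUpdate struct{ MaxUnavailable int }

// convert_v1alpha1_Strategy_To_internal_Strategy mirrors the generated
// naming scheme: scalars are copied, pointers are re-allocated so the
// output never shares memory with the input.
func convert_v1alpha1_Strategy_To_internal_Strategy(in *v1alpha1Strategy, out *internalStrategy) error {
    out.Type = in.Type
    if in.RollingUpdate != nil {
        out.RollingUpdate = &internalRollingUpdate{MaxUnavailable: in.RollingUpdate.MaxUnavailable}
    } else {
        out.RollingUpdate = nil
    }
    return nil
}

func main() {
    in := v1alpha1Strategy{Type: "RollingUpdate", RollingUpdate: &v1alpha1RollingUpdate{MaxUnavailable: 1}}
    var out internalStrategy
    if err := convert_v1alpha1_Strategy_To_internal_Strategy(&in, &out); err != nil {
        fmt.Println("conversion failed:", err)
        return
    }
    fmt.Printf("%s %d\n", out.Type, out.RollingUpdate.MaxUnavailable) // RollingUpdate 1
}
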

View File

@ -16,7 +16,7 @@ limitations under the License.
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh. // DO NOT EDIT. THIS FILE IS AUTO-GENERATED BY $KUBEROOT/hack/update-generated-deep-copies.sh.
package v1 package v1alpha1
import ( import (
time "time" time "time"
@ -819,29 +819,29 @@ func deepCopy_v1_VolumeSource(in v1.VolumeSource, out *v1.VolumeSource, c *conve
return nil return nil
} }
func deepCopy_v1_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error { func deepCopy_v1alpha1_APIVersion(in APIVersion, out *APIVersion, c *conversion.Cloner) error {
out.Name = in.Name out.Name = in.Name
out.APIGroup = in.APIGroup out.APIGroup = in.APIGroup
return nil return nil
} }
func deepCopy_v1_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error { func deepCopy_v1alpha1_DaemonSet(in DaemonSet, out *DaemonSet, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err return err
} }
if err := deepCopy_v1_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil { if err := deepCopy_v1alpha1_DaemonSetSpec(in.Spec, &out.Spec, c); err != nil {
return err return err
} }
if err := deepCopy_v1_DaemonSetStatus(in.Status, &out.Status, c); err != nil { if err := deepCopy_v1alpha1_DaemonSetStatus(in.Status, &out.Status, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_v1_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error { func deepCopy_v1alpha1_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@ -851,7 +851,7 @@ func deepCopy_v1_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversi
if in.Items != nil { if in.Items != nil {
out.Items = make([]DaemonSet, len(in.Items)) out.Items = make([]DaemonSet, len(in.Items))
for i := range in.Items { for i := range in.Items {
if err := deepCopy_v1_DaemonSet(in.Items[i], &out.Items[i], c); err != nil { if err := deepCopy_v1alpha1_DaemonSet(in.Items[i], &out.Items[i], c); err != nil {
return err return err
} }
} }
@ -861,7 +861,7 @@ func deepCopy_v1_DaemonSetList(in DaemonSetList, out *DaemonSetList, c *conversi
return nil return nil
} }
func deepCopy_v1_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error { func deepCopy_v1alpha1_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversion.Cloner) error {
if in.Selector != nil { if in.Selector != nil {
out.Selector = make(map[string]string) out.Selector = make(map[string]string)
for key, val := range in.Selector { for key, val := range in.Selector {
@ -881,30 +881,30 @@ func deepCopy_v1_DaemonSetSpec(in DaemonSetSpec, out *DaemonSetSpec, c *conversi
return nil return nil
} }
func deepCopy_v1_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error { func deepCopy_v1alpha1_DaemonSetStatus(in DaemonSetStatus, out *DaemonSetStatus, c *conversion.Cloner) error {
out.CurrentNumberScheduled = in.CurrentNumberScheduled out.CurrentNumberScheduled = in.CurrentNumberScheduled
out.NumberMisscheduled = in.NumberMisscheduled out.NumberMisscheduled = in.NumberMisscheduled
out.DesiredNumberScheduled = in.DesiredNumberScheduled out.DesiredNumberScheduled = in.DesiredNumberScheduled
return nil return nil
} }
func deepCopy_v1_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error { func deepCopy_v1alpha1_Deployment(in Deployment, out *Deployment, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err return err
} }
if err := deepCopy_v1_DeploymentSpec(in.Spec, &out.Spec, c); err != nil { if err := deepCopy_v1alpha1_DeploymentSpec(in.Spec, &out.Spec, c); err != nil {
return err return err
} }
if err := deepCopy_v1_DeploymentStatus(in.Status, &out.Status, c); err != nil { if err := deepCopy_v1alpha1_DeploymentStatus(in.Status, &out.Status, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_v1_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error { func deepCopy_v1alpha1_DeploymentList(in DeploymentList, out *DeploymentList, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@ -914,7 +914,7 @@ func deepCopy_v1_DeploymentList(in DeploymentList, out *DeploymentList, c *conve
if in.Items != nil { if in.Items != nil {
out.Items = make([]Deployment, len(in.Items)) out.Items = make([]Deployment, len(in.Items))
for i := range in.Items { for i := range in.Items {
if err := deepCopy_v1_Deployment(in.Items[i], &out.Items[i], c); err != nil { if err := deepCopy_v1alpha1_Deployment(in.Items[i], &out.Items[i], c); err != nil {
return err return err
} }
} }
@ -924,7 +924,7 @@ func deepCopy_v1_DeploymentList(in DeploymentList, out *DeploymentList, c *conve
return nil return nil
} }
func deepCopy_v1_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error { func deepCopy_v1alpha1_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conversion.Cloner) error {
if in.Replicas != nil { if in.Replicas != nil {
out.Replicas = new(int) out.Replicas = new(int)
*out.Replicas = *in.Replicas *out.Replicas = *in.Replicas
@ -947,7 +947,7 @@ func deepCopy_v1_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conve
} else { } else {
out.Template = nil out.Template = nil
} }
if err := deepCopy_v1_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil { if err := deepCopy_v1alpha1_DeploymentStrategy(in.Strategy, &out.Strategy, c); err != nil {
return err return err
} }
if in.UniqueLabelKey != nil { if in.UniqueLabelKey != nil {
@ -959,17 +959,17 @@ func deepCopy_v1_DeploymentSpec(in DeploymentSpec, out *DeploymentSpec, c *conve
return nil return nil
} }
func deepCopy_v1_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error { func deepCopy_v1alpha1_DeploymentStatus(in DeploymentStatus, out *DeploymentStatus, c *conversion.Cloner) error {
out.Replicas = in.Replicas out.Replicas = in.Replicas
out.UpdatedReplicas = in.UpdatedReplicas out.UpdatedReplicas = in.UpdatedReplicas
return nil return nil
} }
func deepCopy_v1_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error { func deepCopy_v1alpha1_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrategy, c *conversion.Cloner) error {
out.Type = in.Type out.Type = in.Type
if in.RollingUpdate != nil { if in.RollingUpdate != nil {
out.RollingUpdate = new(RollingUpdateDeployment) out.RollingUpdate = new(RollingUpdateDeployment)
if err := deepCopy_v1_RollingUpdateDeployment(*in.RollingUpdate, out.RollingUpdate, c); err != nil { if err := deepCopy_v1alpha1_RollingUpdateDeployment(*in.RollingUpdate, out.RollingUpdate, c); err != nil {
return err return err
} }
} else { } else {
@ -978,19 +978,19 @@ func deepCopy_v1_DeploymentStrategy(in DeploymentStrategy, out *DeploymentStrate
return nil return nil
} }
func deepCopy_v1_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error { func deepCopy_v1alpha1_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *HorizontalPodAutoscaler, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err return err
} }
if err := deepCopy_v1_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil { if err := deepCopy_v1alpha1_HorizontalPodAutoscalerSpec(in.Spec, &out.Spec, c); err != nil {
return err return err
} }
if in.Status != nil { if in.Status != nil {
out.Status = new(HorizontalPodAutoscalerStatus) out.Status = new(HorizontalPodAutoscalerStatus)
if err := deepCopy_v1_HorizontalPodAutoscalerStatus(*in.Status, out.Status, c); err != nil { if err := deepCopy_v1alpha1_HorizontalPodAutoscalerStatus(*in.Status, out.Status, c); err != nil {
return err return err
} }
} else { } else {
@ -999,7 +999,7 @@ func deepCopy_v1_HorizontalPodAutoscaler(in HorizontalPodAutoscaler, out *Horizo
return nil return nil
} }
func deepCopy_v1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error { func deepCopy_v1alpha1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out *HorizontalPodAutoscalerList, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@ -1009,7 +1009,7 @@ func deepCopy_v1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out
if in.Items != nil { if in.Items != nil {
out.Items = make([]HorizontalPodAutoscaler, len(in.Items)) out.Items = make([]HorizontalPodAutoscaler, len(in.Items))
for i := range in.Items { for i := range in.Items {
if err := deepCopy_v1_HorizontalPodAutoscaler(in.Items[i], &out.Items[i], c); err != nil { if err := deepCopy_v1alpha1_HorizontalPodAutoscaler(in.Items[i], &out.Items[i], c); err != nil {
return err return err
} }
} }
@ -1019,10 +1019,10 @@ func deepCopy_v1_HorizontalPodAutoscalerList(in HorizontalPodAutoscalerList, out
return nil return nil
} }
func deepCopy_v1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error { func deepCopy_v1alpha1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out *HorizontalPodAutoscalerSpec, c *conversion.Cloner) error {
if in.ScaleRef != nil { if in.ScaleRef != nil {
out.ScaleRef = new(SubresourceReference) out.ScaleRef = new(SubresourceReference)
if err := deepCopy_v1_SubresourceReference(*in.ScaleRef, out.ScaleRef, c); err != nil { if err := deepCopy_v1alpha1_SubresourceReference(*in.ScaleRef, out.ScaleRef, c); err != nil {
return err return err
} }
} else { } else {
@ -1030,18 +1030,18 @@ func deepCopy_v1_HorizontalPodAutoscalerSpec(in HorizontalPodAutoscalerSpec, out
} }
out.MinReplicas = in.MinReplicas out.MinReplicas = in.MinReplicas
out.MaxReplicas = in.MaxReplicas out.MaxReplicas = in.MaxReplicas
if err := deepCopy_v1_ResourceConsumption(in.Target, &out.Target, c); err != nil { if err := deepCopy_v1alpha1_ResourceConsumption(in.Target, &out.Target, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_v1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error { func deepCopy_v1alpha1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus, out *HorizontalPodAutoscalerStatus, c *conversion.Cloner) error {
out.CurrentReplicas = in.CurrentReplicas out.CurrentReplicas = in.CurrentReplicas
out.DesiredReplicas = in.DesiredReplicas out.DesiredReplicas = in.DesiredReplicas
if in.CurrentConsumption != nil { if in.CurrentConsumption != nil {
out.CurrentConsumption = new(ResourceConsumption) out.CurrentConsumption = new(ResourceConsumption)
if err := deepCopy_v1_ResourceConsumption(*in.CurrentConsumption, out.CurrentConsumption, c); err != nil { if err := deepCopy_v1alpha1_ResourceConsumption(*in.CurrentConsumption, out.CurrentConsumption, c); err != nil {
return err return err
} }
} else { } else {
@ -1058,23 +1058,23 @@ func deepCopy_v1_HorizontalPodAutoscalerStatus(in HorizontalPodAutoscalerStatus,
return nil return nil
} }
func deepCopy_v1_Ingress(in Ingress, out *Ingress, c *conversion.Cloner) error { func deepCopy_v1alpha1_Ingress(in Ingress, out *Ingress, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err return err
} }
if err := deepCopy_v1_IngressSpec(in.Spec, &out.Spec, c); err != nil { if err := deepCopy_v1alpha1_IngressSpec(in.Spec, &out.Spec, c); err != nil {
return err return err
} }
if err := deepCopy_v1_IngressStatus(in.Status, &out.Status, c); err != nil { if err := deepCopy_v1alpha1_IngressStatus(in.Status, &out.Status, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_v1_IngressBackend(in IngressBackend, out *IngressBackend, c *conversion.Cloner) error { func deepCopy_v1alpha1_IngressBackend(in IngressBackend, out *IngressBackend, c *conversion.Cloner) error {
if err := deepCopy_v1_LocalObjectReference(in.ServiceRef, &out.ServiceRef, c); err != nil { if err := deepCopy_v1_LocalObjectReference(in.ServiceRef, &out.ServiceRef, c); err != nil {
return err return err
} }
@ -1085,7 +1085,7 @@ func deepCopy_v1_IngressBackend(in IngressBackend, out *IngressBackend, c *conve
return nil return nil
} }
func deepCopy_v1_IngressList(in IngressList, out *IngressList, c *conversion.Cloner) error { func deepCopy_v1alpha1_IngressList(in IngressList, out *IngressList, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@ -1095,7 +1095,7 @@ func deepCopy_v1_IngressList(in IngressList, out *IngressList, c *conversion.Clo
if in.Items != nil { if in.Items != nil {
out.Items = make([]Ingress, len(in.Items)) out.Items = make([]Ingress, len(in.Items))
for i := range in.Items { for i := range in.Items {
if err := deepCopy_v1_Ingress(in.Items[i], &out.Items[i], c); err != nil { if err := deepCopy_v1alpha1_Ingress(in.Items[i], &out.Items[i], c); err != nil {
return err return err
} }
} }
@ -1105,20 +1105,20 @@ func deepCopy_v1_IngressList(in IngressList, out *IngressList, c *conversion.Clo
return nil return nil
} }
func deepCopy_v1_IngressPath(in IngressPath, out *IngressPath, c *conversion.Cloner) error { func deepCopy_v1alpha1_IngressPath(in IngressPath, out *IngressPath, c *conversion.Cloner) error {
out.Path = in.Path out.Path = in.Path
if err := deepCopy_v1_IngressBackend(in.Backend, &out.Backend, c); err != nil { if err := deepCopy_v1alpha1_IngressBackend(in.Backend, &out.Backend, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_v1_IngressRule(in IngressRule, out *IngressRule, c *conversion.Cloner) error { func deepCopy_v1alpha1_IngressRule(in IngressRule, out *IngressRule, c *conversion.Cloner) error {
out.Host = in.Host out.Host = in.Host
if in.Paths != nil { if in.Paths != nil {
out.Paths = make([]IngressPath, len(in.Paths)) out.Paths = make([]IngressPath, len(in.Paths))
for i := range in.Paths { for i := range in.Paths {
if err := deepCopy_v1_IngressPath(in.Paths[i], &out.Paths[i], c); err != nil { if err := deepCopy_v1alpha1_IngressPath(in.Paths[i], &out.Paths[i], c); err != nil {
return err return err
} }
} }
@ -1128,11 +1128,11 @@ func deepCopy_v1_IngressRule(in IngressRule, out *IngressRule, c *conversion.Clo
return nil return nil
} }
func deepCopy_v1_IngressSpec(in IngressSpec, out *IngressSpec, c *conversion.Cloner) error { func deepCopy_v1alpha1_IngressSpec(in IngressSpec, out *IngressSpec, c *conversion.Cloner) error {
if in.Rules != nil { if in.Rules != nil {
out.Rules = make([]IngressRule, len(in.Rules)) out.Rules = make([]IngressRule, len(in.Rules))
for i := range in.Rules { for i := range in.Rules {
if err := deepCopy_v1_IngressRule(in.Rules[i], &out.Rules[i], c); err != nil { if err := deepCopy_v1alpha1_IngressRule(in.Rules[i], &out.Rules[i], c); err != nil {
return err return err
} }
} }
@ -1142,30 +1142,30 @@ func deepCopy_v1_IngressSpec(in IngressSpec, out *IngressSpec, c *conversion.Clo
return nil return nil
} }
func deepCopy_v1_IngressStatus(in IngressStatus, out *IngressStatus, c *conversion.Cloner) error { func deepCopy_v1alpha1_IngressStatus(in IngressStatus, out *IngressStatus, c *conversion.Cloner) error {
if err := deepCopy_v1_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil { if err := deepCopy_v1_LoadBalancerStatus(in.LoadBalancer, &out.LoadBalancer, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_v1_Job(in Job, out *Job, c *conversion.Cloner) error { func deepCopy_v1alpha1_Job(in Job, out *Job, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err return err
} }
if err := deepCopy_v1_JobSpec(in.Spec, &out.Spec, c); err != nil { if err := deepCopy_v1alpha1_JobSpec(in.Spec, &out.Spec, c); err != nil {
return err return err
} }
if err := deepCopy_v1_JobStatus(in.Status, &out.Status, c); err != nil { if err := deepCopy_v1alpha1_JobStatus(in.Status, &out.Status, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_v1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error { func deepCopy_v1alpha1_JobCondition(in JobCondition, out *JobCondition, c *conversion.Cloner) error {
out.Type = in.Type out.Type = in.Type
out.Status = in.Status out.Status = in.Status
if err := deepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil { if err := deepCopy_unversioned_Time(in.LastProbeTime, &out.LastProbeTime, c); err != nil {
@ -1179,7 +1179,7 @@ func deepCopy_v1_JobCondition(in JobCondition, out *JobCondition, c *conversion.
return nil return nil
} }
func deepCopy_v1_JobList(in JobList, out *JobList, c *conversion.Cloner) error { func deepCopy_v1alpha1_JobList(in JobList, out *JobList, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@ -1189,7 +1189,7 @@ func deepCopy_v1_JobList(in JobList, out *JobList, c *conversion.Cloner) error {
if in.Items != nil { if in.Items != nil {
out.Items = make([]Job, len(in.Items)) out.Items = make([]Job, len(in.Items))
for i := range in.Items { for i := range in.Items {
if err := deepCopy_v1_Job(in.Items[i], &out.Items[i], c); err != nil { if err := deepCopy_v1alpha1_Job(in.Items[i], &out.Items[i], c); err != nil {
return err return err
} }
} }
@ -1199,7 +1199,7 @@ func deepCopy_v1_JobList(in JobList, out *JobList, c *conversion.Cloner) error {
return nil return nil
} }
func deepCopy_v1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error { func deepCopy_v1alpha1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error {
if in.Parallelism != nil { if in.Parallelism != nil {
out.Parallelism = new(int) out.Parallelism = new(int)
*out.Parallelism = *in.Parallelism *out.Parallelism = *in.Parallelism
@ -1231,11 +1231,11 @@ func deepCopy_v1_JobSpec(in JobSpec, out *JobSpec, c *conversion.Cloner) error {
return nil return nil
} }
func deepCopy_v1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error { func deepCopy_v1alpha1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) error {
if in.Conditions != nil { if in.Conditions != nil {
out.Conditions = make([]JobCondition, len(in.Conditions)) out.Conditions = make([]JobCondition, len(in.Conditions))
for i := range in.Conditions { for i := range in.Conditions {
if err := deepCopy_v1_JobCondition(in.Conditions[i], &out.Conditions[i], c); err != nil { if err := deepCopy_v1alpha1_JobCondition(in.Conditions[i], &out.Conditions[i], c); err != nil {
return err return err
} }
} }
@ -1264,14 +1264,14 @@ func deepCopy_v1_JobStatus(in JobStatus, out *JobStatus, c *conversion.Cloner) e
return nil return nil
} }
func deepCopy_v1_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error { func deepCopy_v1alpha1_ReplicationControllerDummy(in ReplicationControllerDummy, out *ReplicationControllerDummy, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_v1_ResourceConsumption(in ResourceConsumption, out *ResourceConsumption, c *conversion.Cloner) error { func deepCopy_v1alpha1_ResourceConsumption(in ResourceConsumption, out *ResourceConsumption, c *conversion.Cloner) error {
out.Resource = in.Resource out.Resource = in.Resource
if err := deepCopy_resource_Quantity(in.Quantity, &out.Quantity, c); err != nil { if err := deepCopy_resource_Quantity(in.Quantity, &out.Quantity, c); err != nil {
return err return err
@ -1279,7 +1279,7 @@ func deepCopy_v1_ResourceConsumption(in ResourceConsumption, out *ResourceConsum
return nil return nil
} }
func deepCopy_v1_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error { func deepCopy_v1alpha1_RollingUpdateDeployment(in RollingUpdateDeployment, out *RollingUpdateDeployment, c *conversion.Cloner) error {
if in.MaxUnavailable != nil { if in.MaxUnavailable != nil {
out.MaxUnavailable = new(util.IntOrString) out.MaxUnavailable = new(util.IntOrString)
if err := deepCopy_util_IntOrString(*in.MaxUnavailable, out.MaxUnavailable, c); err != nil { if err := deepCopy_util_IntOrString(*in.MaxUnavailable, out.MaxUnavailable, c); err != nil {
@ -1300,28 +1300,28 @@ func deepCopy_v1_RollingUpdateDeployment(in RollingUpdateDeployment, out *Rollin
return nil return nil
} }
func deepCopy_v1_Scale(in Scale, out *Scale, c *conversion.Cloner) error { func deepCopy_v1alpha1_Scale(in Scale, out *Scale, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil { if err := deepCopy_v1_ObjectMeta(in.ObjectMeta, &out.ObjectMeta, c); err != nil {
return err return err
} }
if err := deepCopy_v1_ScaleSpec(in.Spec, &out.Spec, c); err != nil { if err := deepCopy_v1alpha1_ScaleSpec(in.Spec, &out.Spec, c); err != nil {
return err return err
} }
if err := deepCopy_v1_ScaleStatus(in.Status, &out.Status, c); err != nil { if err := deepCopy_v1alpha1_ScaleStatus(in.Status, &out.Status, c); err != nil {
return err return err
} }
return nil return nil
} }
func deepCopy_v1_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error { func deepCopy_v1alpha1_ScaleSpec(in ScaleSpec, out *ScaleSpec, c *conversion.Cloner) error {
out.Replicas = in.Replicas out.Replicas = in.Replicas
return nil return nil
} }
func deepCopy_v1_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error { func deepCopy_v1alpha1_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Cloner) error {
out.Replicas = in.Replicas out.Replicas = in.Replicas
if in.Selector != nil { if in.Selector != nil {
out.Selector = make(map[string]string) out.Selector = make(map[string]string)
@ -1334,7 +1334,7 @@ func deepCopy_v1_ScaleStatus(in ScaleStatus, out *ScaleStatus, c *conversion.Clo
return nil return nil
} }
func deepCopy_v1_SubresourceReference(in SubresourceReference, out *SubresourceReference, c *conversion.Cloner) error { func deepCopy_v1alpha1_SubresourceReference(in SubresourceReference, out *SubresourceReference, c *conversion.Cloner) error {
out.Kind = in.Kind out.Kind = in.Kind
out.Namespace = in.Namespace out.Namespace = in.Namespace
out.Name = in.Name out.Name = in.Name
@ -1343,7 +1343,7 @@ func deepCopy_v1_SubresourceReference(in SubresourceReference, out *SubresourceR
return nil return nil
} }
func deepCopy_v1_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error { func deepCopy_v1alpha1_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResource, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@ -1354,7 +1354,7 @@ func deepCopy_v1_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResour
if in.Versions != nil { if in.Versions != nil {
out.Versions = make([]APIVersion, len(in.Versions)) out.Versions = make([]APIVersion, len(in.Versions))
for i := range in.Versions { for i := range in.Versions {
if err := deepCopy_v1_APIVersion(in.Versions[i], &out.Versions[i], c); err != nil { if err := deepCopy_v1alpha1_APIVersion(in.Versions[i], &out.Versions[i], c); err != nil {
return err return err
} }
} }
@ -1364,7 +1364,7 @@ func deepCopy_v1_ThirdPartyResource(in ThirdPartyResource, out *ThirdPartyResour
return nil return nil
} }
func deepCopy_v1_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error { func deepCopy_v1alpha1_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPartyResourceData, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@ -1382,7 +1382,7 @@ func deepCopy_v1_ThirdPartyResourceData(in ThirdPartyResourceData, out *ThirdPar
return nil return nil
} }
func deepCopy_v1_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error { func deepCopy_v1alpha1_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *ThirdPartyResourceDataList, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@ -1392,7 +1392,7 @@ func deepCopy_v1_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *
if in.Items != nil { if in.Items != nil {
out.Items = make([]ThirdPartyResourceData, len(in.Items)) out.Items = make([]ThirdPartyResourceData, len(in.Items))
for i := range in.Items { for i := range in.Items {
if err := deepCopy_v1_ThirdPartyResourceData(in.Items[i], &out.Items[i], c); err != nil { if err := deepCopy_v1alpha1_ThirdPartyResourceData(in.Items[i], &out.Items[i], c); err != nil {
return err return err
} }
} }
@ -1402,7 +1402,7 @@ func deepCopy_v1_ThirdPartyResourceDataList(in ThirdPartyResourceDataList, out *
return nil return nil
} }
func deepCopy_v1_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error { func deepCopy_v1alpha1_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPartyResourceList, c *conversion.Cloner) error {
if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil { if err := deepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err return err
} }
@ -1412,7 +1412,7 @@ func deepCopy_v1_ThirdPartyResourceList(in ThirdPartyResourceList, out *ThirdPar
if in.Items != nil { if in.Items != nil {
out.Items = make([]ThirdPartyResource, len(in.Items)) out.Items = make([]ThirdPartyResource, len(in.Items))
for i := range in.Items { for i := range in.Items {
if err := deepCopy_v1_ThirdPartyResource(in.Items[i], &out.Items[i], c); err != nil { if err := deepCopy_v1alpha1_ThirdPartyResource(in.Items[i], &out.Items[i], c); err != nil {
return err return err
} }
} }
@ -1475,43 +1475,43 @@ func init() {
deepCopy_v1_Volume, deepCopy_v1_Volume,
deepCopy_v1_VolumeMount, deepCopy_v1_VolumeMount,
deepCopy_v1_VolumeSource, deepCopy_v1_VolumeSource,
deepCopy_v1_APIVersion, deepCopy_v1alpha1_APIVersion,
deepCopy_v1_DaemonSet, deepCopy_v1alpha1_DaemonSet,
deepCopy_v1_DaemonSetList, deepCopy_v1alpha1_DaemonSetList,
deepCopy_v1_DaemonSetSpec, deepCopy_v1alpha1_DaemonSetSpec,
deepCopy_v1_DaemonSetStatus, deepCopy_v1alpha1_DaemonSetStatus,
deepCopy_v1_Deployment, deepCopy_v1alpha1_Deployment,
deepCopy_v1_DeploymentList, deepCopy_v1alpha1_DeploymentList,
deepCopy_v1_DeploymentSpec, deepCopy_v1alpha1_DeploymentSpec,
deepCopy_v1_DeploymentStatus, deepCopy_v1alpha1_DeploymentStatus,
deepCopy_v1_DeploymentStrategy, deepCopy_v1alpha1_DeploymentStrategy,
deepCopy_v1_HorizontalPodAutoscaler, deepCopy_v1alpha1_HorizontalPodAutoscaler,
deepCopy_v1_HorizontalPodAutoscalerList, deepCopy_v1alpha1_HorizontalPodAutoscalerList,
deepCopy_v1_HorizontalPodAutoscalerSpec, deepCopy_v1alpha1_HorizontalPodAutoscalerSpec,
deepCopy_v1_HorizontalPodAutoscalerStatus, deepCopy_v1alpha1_HorizontalPodAutoscalerStatus,
deepCopy_v1_Ingress, deepCopy_v1alpha1_Ingress,
deepCopy_v1_IngressBackend, deepCopy_v1alpha1_IngressBackend,
deepCopy_v1_IngressList, deepCopy_v1alpha1_IngressList,
deepCopy_v1_IngressPath, deepCopy_v1alpha1_IngressPath,
deepCopy_v1_IngressRule, deepCopy_v1alpha1_IngressRule,
deepCopy_v1_IngressSpec, deepCopy_v1alpha1_IngressSpec,
deepCopy_v1_IngressStatus, deepCopy_v1alpha1_IngressStatus,
deepCopy_v1_Job, deepCopy_v1alpha1_Job,
deepCopy_v1_JobCondition, deepCopy_v1alpha1_JobCondition,
deepCopy_v1_JobList, deepCopy_v1alpha1_JobList,
deepCopy_v1_JobSpec, deepCopy_v1alpha1_JobSpec,
deepCopy_v1_JobStatus, deepCopy_v1alpha1_JobStatus,
deepCopy_v1_ReplicationControllerDummy, deepCopy_v1alpha1_ReplicationControllerDummy,
deepCopy_v1_ResourceConsumption, deepCopy_v1alpha1_ResourceConsumption,
deepCopy_v1_RollingUpdateDeployment, deepCopy_v1alpha1_RollingUpdateDeployment,
deepCopy_v1_Scale, deepCopy_v1alpha1_Scale,
deepCopy_v1_ScaleSpec, deepCopy_v1alpha1_ScaleSpec,
deepCopy_v1_ScaleStatus, deepCopy_v1alpha1_ScaleStatus,
deepCopy_v1_SubresourceReference, deepCopy_v1alpha1_SubresourceReference,
deepCopy_v1_ThirdPartyResource, deepCopy_v1alpha1_ThirdPartyResource,
deepCopy_v1_ThirdPartyResourceData, deepCopy_v1alpha1_ThirdPartyResourceData,
deepCopy_v1_ThirdPartyResourceDataList, deepCopy_v1alpha1_ThirdPartyResourceDataList,
deepCopy_v1_ThirdPartyResourceList, deepCopy_v1alpha1_ThirdPartyResourceList,
deepCopy_util_IntOrString, deepCopy_util_IntOrString,
) )
if err != nil { if err != nil {

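
The deep-copy functions renamed in this file are registered in one batch inside init(), and each one follows the rule the hunks above make visible: plain fields are assigned, while slices, maps and pointers get fresh allocations so the copy shares no storage with the original. A small stand-alone illustration of that rule with a made-up type (not the generated code):

package main

import "fmt"

// Hypothetical type carrying the three field kinds the generated
// deep-copy functions have to treat specially.
type SetSpec struct {
    Selector map[string]string
    Items    []string
    Replicas *int
}

// deepCopySetSpec allocates new backing storage for the map, slice and
// pointer so that mutating the copy cannot affect the original.
func deepCopySetSpec(in SetSpec, out *SetSpec) error {
    if in.Selector != nil {
        out.Selector = make(map[string]string, len(in.Selector))
        for k, v := range in.Selector {
            out.Selector[k] = v
        }
    }
    if in.Items != nil {
        out.Items = make([]string, len(in.Items))
        copy(out.Items, in.Items)
    }
    if in.Replicas != nil {
        out.Replicas = new(int)
        *out.Replicas = *in.Replicas
    }
    return nil
}

func main() {
    n := 3
    orig := SetSpec{Selector: map[string]string{"app": "demo"}, Items: []string{"a"}, Replicas: &n}
    var cp SetSpec
    if err := deepCopySetSpec(orig, &cp); err != nil {
        fmt.Println(err)
        return
    }
    *cp.Replicas = 5
    fmt.Println(*orig.Replicas, *cp.Replicas) // 3 5 — the copy is independent
}
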
View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package v1 package v1alpha1
import ( import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package v1 package v1alpha1
import ( import (
"reflect" "reflect"
@ -258,7 +258,7 @@ func TestSetDefaultJob(t *testing.T) {
} }
func roundTrip(t *testing.T, obj runtime.Object) runtime.Object { func roundTrip(t *testing.T, obj runtime.Object) runtime.Object {
data, err := v1.Codec.Encode(obj) data, err := Codec.Encode(obj)
if err != nil { if err != nil {
t.Errorf("%v\n %#v", err, obj) t.Errorf("%v\n %#v", err, obj)
return nil return nil

View File

@ -14,14 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package v1 package v1alpha1
import ( import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
) )
var Codec = runtime.CodecFor(api.Scheme, "v1") var Codec = runtime.CodecFor(api.Scheme, "experimental/v1alpha1")
func init() { func init() {
addKnownTypes() addKnownTypes()
@ -31,7 +31,7 @@ func init() {
// Adds the list of known types to api.Scheme. // Adds the list of known types to api.Scheme.
func addKnownTypes() { func addKnownTypes() {
api.Scheme.AddKnownTypes("v1", api.Scheme.AddKnownTypes("experimental/v1alpha1",
&Deployment{}, &Deployment{},
&DeploymentList{}, &DeploymentList{},
&HorizontalPodAutoscaler{}, &HorizontalPodAutoscaler{},

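
This register.go change is the heart of the rename: the codec and the known types are now keyed by the full "experimental/v1alpha1" group/version string rather than a bare "v1". A rough, hypothetical sketch of a registry keyed that way — a toy stand-in for the idea, not the pkg/runtime scheme:

package main

import (
    "fmt"
    "reflect"
)

// miniScheme is a toy type registry keyed by a "group/version" string
// such as "experimental/v1alpha1".
type miniScheme struct {
    types map[string]map[string]reflect.Type
}

func newMiniScheme() *miniScheme {
    return &miniScheme{types: map[string]map[string]reflect.Type{}}
}

// AddKnownTypes registers each object's kind under the given group/version.
func (s *miniScheme) AddKnownTypes(groupVersion string, objs ...interface{}) {
    if s.types[groupVersion] == nil {
        s.types[groupVersion] = map[string]reflect.Type{}
    }
    for _, obj := range objs {
        t := reflect.TypeOf(obj).Elem()
        s.types[groupVersion][t.Name()] = t
    }
}

// KnownTypes returns the kinds registered for a group/version.
func (s *miniScheme) KnownTypes(groupVersion string) map[string]reflect.Type {
    return s.types[groupVersion]
}

type Deployment struct{}
type Job struct{}

func main() {
    s := newMiniScheme()
    s.AddKnownTypes("experimental/v1alpha1", &Deployment{}, &Job{})
    for kind := range s.KnownTypes("experimental/v1alpha1") {
        fmt.Println(kind)
    }
    fmt.Println(len(s.KnownTypes("v1"))) // 0 — nothing registered under the bare version
}
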
View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package v1 package v1alpha1
import ( import (
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package v1 package v1alpha1
// This file contains a collection of methods that can be used from go-resful to // This file contains a collection of methods that can be used from go-resful to
// generate Swagger API documentation for its models. Please read this PR for more // generate Swagger API documentation for its models. Please read this PR for more

View File

@ -129,13 +129,14 @@ func setExperimentalDefaults(config *Config) error {
if err != nil { if err != nil {
return err return err
} }
config.Prefix = "apis/" + g.Group config.Prefix = "apis/"
if config.UserAgent == "" { if config.UserAgent == "" {
config.UserAgent = DefaultKubernetesUserAgent() config.UserAgent = DefaultKubernetesUserAgent()
} }
if config.Version == "" { // TODO: Unconditionally set the config.Version, until we fix the config.
config.Version = g.Version //if config.Version == "" {
} config.Version = g.GroupVersion
//}
versionInterfaces, err := g.InterfacesFor(config.Version) versionInterfaces, err := g.InterfacesFor(config.Version)
if err != nil { if err != nil {

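
For the experimental client the prefix becomes the constant "apis/" and config.Version now carries the whole group/version, so (assuming the client joins Prefix and Version into its base path) the request URL still comes out as /apis/<group>/<version>. A quick illustration of the old and new compositions, with values taken from this diff:

package main

import (
    "fmt"
    "path"
)

func main() {
    // Before: the group lived in the prefix and Version held only the version.
    oldBase := "/" + path.Join("apis/experimental", "v1")
    // After: the prefix is constant and Version carries "group/version".
    newBase := "/" + path.Join("apis/", "experimental/v1alpha1")

    fmt.Println(oldBase) // /apis/experimental/v1
    fmt.Println(newBase) // /apis/experimental/v1alpha1
}
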
View File

@ -21,7 +21,6 @@ import (
"sync" "sync"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/latest"
"k8s.io/kubernetes/pkg/api/registered" "k8s.io/kubernetes/pkg/api/registered"
client "k8s.io/kubernetes/pkg/client/unversioned" client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -39,7 +38,7 @@ func NewSimpleFake(objects ...runtime.Object) *Fake {
} }
fakeClient := &Fake{} fakeClient := &Fake{}
fakeClient.AddReactor("*", "*", ObjectReaction(o, latest.GroupOrDie("").RESTMapper)) fakeClient.AddReactor("*", "*", ObjectReaction(o, api.RESTMapper))
return fakeClient return fakeClient
} }

View File

@ -158,7 +158,7 @@ func addPods(podStore cache.Store, nodeName string, label map[string]string, num
} }
func newTestController() (*DaemonSetsController, *FakePodControl) { func newTestController() (*DaemonSetsController, *FakePodControl) {
client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Experimental.Version()}) client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.GroupAndVersion()})
manager := NewDaemonSetsController(client) manager := NewDaemonSetsController(client)
podControl := &FakePodControl{} podControl := &FakePodControl{}
manager.podControl = podControl manager.podControl = podControl

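
Tests like this one now hand the client testapi.Default.GroupAndVersion() where they used to pass a bare version string, so code downstream has to be able to take the group and the version apart again. One plausible way to split such a string — not code from this commit, and treating a slash-less "v1" as the group-less legacy API is an assumption:

package main

import (
    "fmt"
    "strings"
)

// splitGroupVersion decomposes "group/version" into its parts; a string
// with no slash is treated as the legacy, group-less API ("" + version).
func splitGroupVersion(gv string) (group, version string) {
    if i := strings.Index(gv, "/"); i >= 0 {
        return gv[:i], gv[i+1:]
    }
    return "", gv
}

func main() {
    for _, gv := range []string{"experimental/v1alpha1", "v1"} {
        g, v := splitGroupVersion(gv)
        fmt.Printf("%q -> group=%q version=%q\n", gv, g, v)
    }
}
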
View File

@ -205,7 +205,7 @@ func TestControllerSyncJob(t *testing.T) {
for name, tc := range testCases { for name, tc := range testCases {
// job manager setup // job manager setup
client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Experimental.Version()}) client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.GroupAndVersion()})
manager := NewJobController(client) manager := NewJobController(client)
fakePodControl := FakePodControl{err: tc.podControllerError} fakePodControl := FakePodControl{err: tc.podControllerError}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@ -269,7 +269,7 @@ func TestControllerSyncJob(t *testing.T) {
} }
func TestSyncJobDeleted(t *testing.T) { func TestSyncJobDeleted(t *testing.T) {
client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Experimental.Version()}) client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.GroupAndVersion()})
manager := NewJobController(client) manager := NewJobController(client)
fakePodControl := FakePodControl{} fakePodControl := FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@ -289,7 +289,7 @@ func TestSyncJobDeleted(t *testing.T) {
} }
func TestSyncJobUpdateRequeue(t *testing.T) { func TestSyncJobUpdateRequeue(t *testing.T) {
client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Experimental.Version()}) client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.GroupAndVersion()})
manager := NewJobController(client) manager := NewJobController(client)
fakePodControl := FakePodControl{} fakePodControl := FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@ -319,7 +319,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
} }
func TestJobPodLookup(t *testing.T) { func TestJobPodLookup(t *testing.T) {
client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Experimental.Version()}) client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.GroupAndVersion()})
manager := NewJobController(client) manager := NewJobController(client)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
testCases := []struct { testCases := []struct {
@ -399,7 +399,7 @@ func (fe FakeJobExpectations) SatisfiedExpectations(controllerKey string) bool {
// TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods // TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations. // and checking expectations.
func TestSyncJobExpectations(t *testing.T) { func TestSyncJobExpectations(t *testing.T) {
client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Experimental.Version()}) client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.GroupAndVersion()})
manager := NewJobController(client) manager := NewJobController(client)
fakePodControl := FakePodControl{} fakePodControl := FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl

View File

@ -31,7 +31,6 @@ import (
"github.com/spf13/pflag" "github.com/spf13/pflag"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/latest"
"k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/registered" "k8s.io/kubernetes/pkg/api/registered"
"k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/api/validation"
@ -140,7 +139,7 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
return nil, err return nil, err
} }
switch group { switch group {
case "api": case "":
return client.RESTClient, nil return client.RESTClient, nil
case "experimental": case "experimental":
return client.ExperimentalClient.RESTClient, nil return client.ExperimentalClient.RESTClient, nil
@ -309,9 +308,9 @@ type schemaClient interface {
Get() *client.Request Get() *client.Request
} }
func getSchemaAndValidate(c schemaClient, data []byte, group, version, cacheDir string) (err error) { func getSchemaAndValidate(c schemaClient, data []byte, prefix, groupVersion, cacheDir string) (err error) {
var schemaData []byte var schemaData []byte
cacheFile := path.Join(cacheDir, group, version, schemaFileName) cacheFile := path.Join(cacheDir, prefix, groupVersion, schemaFileName)
if len(cacheDir) != 0 { if len(cacheDir) != 0 {
if schemaData, err = ioutil.ReadFile(cacheFile); err != nil && !os.IsNotExist(err) { if schemaData, err = ioutil.ReadFile(cacheFile); err != nil && !os.IsNotExist(err) {
@ -320,14 +319,14 @@ func getSchemaAndValidate(c schemaClient, data []byte, group, version, cacheDir
} }
if schemaData == nil { if schemaData == nil {
schemaData, err = c.Get(). schemaData, err = c.Get().
AbsPath("/swaggerapi", group, version). AbsPath("/swaggerapi", prefix, groupVersion).
Do(). Do().
Raw() Raw()
if err != nil { if err != nil {
return err return err
} }
if len(cacheDir) != 0 { if len(cacheDir) != 0 {
if err = os.MkdirAll(path.Join(cacheDir, group, version), 0755); err != nil { if err = os.MkdirAll(path.Join(cacheDir, prefix, groupVersion), 0755); err != nil {
return err return err
} }
tmpFile, err := ioutil.TempFile(cacheDir, "schema") tmpFile, err := ioutil.TempFile(cacheDir, "schema")
@ -363,14 +362,10 @@ func (c *clientSwaggerSchema) ValidateBytes(data []byte) error {
return fmt.Errorf("could not find api group for %s: %v", kind, err) return fmt.Errorf("could not find api group for %s: %v", kind, err)
} }
if group == "experimental" { if group == "experimental" {
g, err := latest.Group(group)
if err != nil {
return err
}
if c.c.ExperimentalClient == nil { if c.c.ExperimentalClient == nil {
return errors.New("unable to validate: no experimental client") return errors.New("unable to validate: no experimental client")
} }
return getSchemaAndValidate(c.c.ExperimentalClient.RESTClient, data, "apis/"+g.Group, version, c.cacheDir) return getSchemaAndValidate(c.c.ExperimentalClient.RESTClient, data, "apis/", version, c.cacheDir)
} }
return getSchemaAndValidate(c.c.RESTClient, data, "api", version, c.cacheDir) return getSchemaAndValidate(c.c.RESTClient, data, "api", version, c.cacheDir)
} }
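
getSchemaAndValidate now receives a prefix and a full group/version, and both flow into the /swaggerapi request path and the on-disk cache layout. A small sketch of the cache path that results; the "schema.json" file name and the cache directory are assumptions, only the directory layout comes from the diff:

package main

import (
    "fmt"
    "path"
)

func main() {
    cacheDir := "/tmp/kubectl-schema-cache" // hypothetical cache directory
    prefix := "apis/"
    groupVersion := "experimental/v1alpha1"
    schemaFileName := "schema.json" // assumed name of the cached file

    fmt.Println(path.Join(cacheDir, prefix, groupVersion, schemaFileName))
    // /tmp/kubectl-schema-cache/apis/experimental/v1alpha1/schema.json
}
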

View File

@ -109,7 +109,7 @@ func DescriberFor(group string, kind string, c *client.Client) (Describer, bool)
var ok bool var ok bool
switch group { switch group {
case "api": case "":
f, ok = describerMap(c)[kind] f, ok = describerMap(c)[kind]
case "experimental": case "experimental":
f, ok = expDescriberMap(c)[kind] f, ok = expDescriberMap(c)[kind]

View File

@ -985,18 +985,21 @@ func (m *Master) experimental(c *Config) *apiserver.APIGroupVersion {
strings.ToLower("jobs/status"): jobStatusStorage, strings.ToLower("jobs/status"): jobStatusStorage,
} }
expMeta := latest.GroupOrDie("experimental")
return &apiserver.APIGroupVersion{ return &apiserver.APIGroupVersion{
Root: m.apiGroupPrefix + "/" + latest.GroupOrDie("experimental").Group, Root: m.apiGroupPrefix,
Creater: api.Scheme, Creater: api.Scheme,
Convertor: api.Scheme, Convertor: api.Scheme,
Typer: api.Scheme, Typer: api.Scheme,
Mapper: latest.GroupOrDie("experimental").RESTMapper, Mapper: expMeta.RESTMapper,
Codec: latest.GroupOrDie("experimental").Codec, Codec: expMeta.Codec,
Linker: latest.GroupOrDie("experimental").SelfLinker, Linker: expMeta.SelfLinker,
Storage: storage, Storage: storage,
Version: latest.GroupOrDie("experimental").Version, Version: expMeta.GroupVersion,
ServerVersion: latest.GroupOrDie("").GroupVersion,
Admit: m.admissionControl, Admit: m.admissionControl,
Context: m.requestContextMapper, Context: m.requestContextMapper,

View File

@ -289,11 +289,11 @@ func TestExpapi(t *testing.T) {
master, config, assert := setUp(t) master, config, assert := setUp(t)
expAPIGroup := master.experimental(&config) expAPIGroup := master.experimental(&config)
assert.Equal(expAPIGroup.Root, master.apiGroupPrefix+"/"+latest.GroupOrDie("experimental").Group) assert.Equal(expAPIGroup.Root, master.apiGroupPrefix)
assert.Equal(expAPIGroup.Mapper, latest.GroupOrDie("experimental").RESTMapper) assert.Equal(expAPIGroup.Mapper, latest.GroupOrDie("experimental").RESTMapper)
assert.Equal(expAPIGroup.Codec, latest.GroupOrDie("experimental").Codec) assert.Equal(expAPIGroup.Codec, latest.GroupOrDie("experimental").Codec)
assert.Equal(expAPIGroup.Linker, latest.GroupOrDie("experimental").SelfLinker) assert.Equal(expAPIGroup.Linker, latest.GroupOrDie("experimental").SelfLinker)
assert.Equal(expAPIGroup.Version, latest.GroupOrDie("experimental").Version) assert.Equal(expAPIGroup.Version, latest.GroupOrDie("experimental").GroupVersion)
} }
// TestSecondsSinceSync verifies that proper results are returned // TestSecondsSinceSync verifies that proper results are returned

View File

@ -30,7 +30,7 @@ import (
) )
func newStorage(t *testing.T) (*ScaleREST, *tools.FakeEtcdClient) { func newStorage(t *testing.T) (*ScaleREST, *tools.FakeEtcdClient) {
etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "experimental") etcdStorage, fakeClient := registrytest.NewEtcdStorage(t, "")
return NewStorage(etcdStorage).Scale, fakeClient return NewStorage(etcdStorage).Scale, fakeClient
} }
@ -82,7 +82,7 @@ func TestGet(t *testing.T) {
ctx := api.WithNamespace(api.NewContext(), "test") ctx := api.WithNamespace(api.NewContext(), "test")
key := etcdtest.AddPrefix("/controllers/test/foo") key := etcdtest.AddPrefix("/controllers/test/foo")
if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Experimental.Codec(), &validController), 0); err != nil { if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Default.Codec(), &validController), 0); err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
@ -102,7 +102,7 @@ func TestUpdate(t *testing.T) {
ctx := api.WithNamespace(api.NewContext(), "test") ctx := api.WithNamespace(api.NewContext(), "test")
key := etcdtest.AddPrefix("/controllers/test/foo") key := etcdtest.AddPrefix("/controllers/test/foo")
if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Experimental.Codec(), &validController), 0); err != nil { if _, err := fakeClient.Set(key, runtime.EncodeOrDie(testapi.Default.Codec(), &validController), 0); err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
replicas := 12 replicas := 12

View File

@ -22,8 +22,8 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/apis/experimental"
// Ensure that experimental/v1 package is initialized. // Ensure that experimental/v1alpha1 package is initialized.
_ "k8s.io/kubernetes/pkg/apis/experimental/v1" _ "k8s.io/kubernetes/pkg/apis/experimental/v1alpha1"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/registry/registrytest"

View File

@ -21,8 +21,8 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/apis/experimental"
// Ensure that experimental/v1 package is initialized. // Ensure that experimental/v1alpha1 package is initialized.
_ "k8s.io/kubernetes/pkg/apis/experimental/v1" _ "k8s.io/kubernetes/pkg/apis/experimental/v1alpha1"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/registry/registrytest"

View File

@ -21,8 +21,8 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/apis/experimental"
// Ensure that experimental/v1 package is initialized. // Ensure that experimental/v1alpha1 package is initialized.
_ "k8s.io/kubernetes/pkg/apis/experimental/v1" _ "k8s.io/kubernetes/pkg/apis/experimental/v1alpha1"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/registry/registrytest"

View File

@ -57,7 +57,7 @@ func (t *thirdPartyResourceDataMapper) RESTMapping(kind string, versions ...stri
if kind != "ThirdPartyResourceData" { if kind != "ThirdPartyResourceData" {
return nil, fmt.Errorf("unknown kind %s expected %s", kind, t.kind) return nil, fmt.Errorf("unknown kind %s expected %s", kind, t.kind)
} }
mapping, err := t.mapper.RESTMapping("ThirdPartyResourceData", latest.GroupOrDie("experimental").Version) mapping, err := t.mapper.RESTMapping("ThirdPartyResourceData", latest.GroupOrDie("experimental").GroupVersion)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -21,8 +21,8 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/experimental" "k8s.io/kubernetes/pkg/apis/experimental"
// Ensure that experimental/v1 package is initialized. // Ensure that experimental/v1alpha1 package is initialized.
_ "k8s.io/kubernetes/pkg/apis/experimental/v1" _ "k8s.io/kubernetes/pkg/apis/experimental/v1alpha1"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/registry/registrytest" "k8s.io/kubernetes/pkg/registry/registrytest"

View File

@ -6,7 +6,7 @@ matrix:
include: include:
- go: 1.4 - go: 1.4
env: env:
- KUBE_TEST_API_VERSIONS=v1,experimental/v1 KUBE_TEST_ETCD_PREFIXES=registry - KUBE_TEST_API_VERSIONS=v1,experimental/v1alpha1 KUBE_TEST_ETCD_PREFIXES=registry
- KUBE_JUNIT_REPORT_DIR="${SHIPPABLE_REPO_DIR}/shippable/testresults" - KUBE_JUNIT_REPORT_DIR="${SHIPPABLE_REPO_DIR}/shippable/testresults"
- CI_NAME="shippable" - CI_NAME="shippable"
- CI_BUILD_NUMBER="$BUILD_NUMBER" - CI_BUILD_NUMBER="$BUILD_NUMBER"
@ -17,7 +17,7 @@ matrix:
- secure: hfh1Kwl2XYUlJCn4dtKSG0C9yXl5TtksVOY74OeqolvDAdVj4sc+GJD3Bywsp91CJe8YMEnkt9rN0WGI+gPVMcjTmZ9tMUxKiNNBP8m5oLRFbdgKOkNuXjpjpFHHWGAnNhMmh9vjI+ehADo+QIpU1fGxd3yO4tmIJ1qoK3QqvUrOZ1RwUubRXoeVn3xy3LK5yg4vP5ruitbNeWMw/RZZ7D6czvqvEfCgV6b4mdNDRMiqlUJNkaTRc3em1APXr30yagDV3a7hXLq3HdlyFwvF+9pmB4AKhQctyjPN4zvvPd0/gJXq3ZHXSlZXOZBMPXHlSS5pizfSInNszyZyrP3+/w== - secure: hfh1Kwl2XYUlJCn4dtKSG0C9yXl5TtksVOY74OeqolvDAdVj4sc+GJD3Bywsp91CJe8YMEnkt9rN0WGI+gPVMcjTmZ9tMUxKiNNBP8m5oLRFbdgKOkNuXjpjpFHHWGAnNhMmh9vjI+ehADo+QIpU1fGxd3yO4tmIJ1qoK3QqvUrOZ1RwUubRXoeVn3xy3LK5yg4vP5ruitbNeWMw/RZZ7D6czvqvEfCgV6b4mdNDRMiqlUJNkaTRc3em1APXr30yagDV3a7hXLq3HdlyFwvF+9pmB4AKhQctyjPN4zvvPd0/gJXq3ZHXSlZXOZBMPXHlSS5pizfSInNszyZyrP3+/w==
- go: 1.3 - go: 1.3
env: env:
- KUBE_TEST_API_VERSIONS=v1,experimental/v1 KUBE_TEST_ETCD_PREFIXES=kubernetes.io/registry - KUBE_TEST_API_VERSIONS=v1,experimental/v1alpha1 KUBE_TEST_ETCD_PREFIXES=kubernetes.io/registry
- KUBE_JUNIT_REPORT_DIR="${SHIPPABLE_REPO_DIR}/shippable/testresults" - KUBE_JUNIT_REPORT_DIR="${SHIPPABLE_REPO_DIR}/shippable/testresults"
- CI_NAME="shippable" - CI_NAME="shippable"
- CI_BUILD_NUMBER="$BUILD_NUMBER" - CI_BUILD_NUMBER="$BUILD_NUMBER"

View File

@ -131,13 +131,13 @@ func startMasterOrDie(masterConfig *master.Config) (*master.Master, *httptest.Se
if masterConfig == nil { if masterConfig == nil {
etcdClient := NewEtcdClient() etcdClient := NewEtcdClient()
storageVersions := make(map[string]string) storageVersions := make(map[string]string)
etcdStorage, err = master.NewEtcdStorage(etcdClient, latest.GroupOrDie("").InterfacesFor, latest.GroupOrDie("").Version, etcdtest.PathPrefix()) etcdStorage, err = master.NewEtcdStorage(etcdClient, latest.GroupOrDie("").InterfacesFor, latest.GroupOrDie("").GroupVersion, etcdtest.PathPrefix())
storageVersions[""] = latest.GroupOrDie("").Version storageVersions[""] = latest.GroupOrDie("").GroupVersion
if err != nil { if err != nil {
glog.Fatalf("Failed to create etcd storage for master %v", err) glog.Fatalf("Failed to create etcd storage for master %v", err)
} }
expEtcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("experimental").InterfacesFor, latest.GroupOrDie("experimental").Version, etcdtest.PathPrefix()) expEtcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("experimental").InterfacesFor, latest.GroupOrDie("experimental").GroupVersion, etcdtest.PathPrefix())
storageVersions["experimental"] = latest.GroupOrDie("experimental").Version storageVersions["experimental"] = latest.GroupOrDie("experimental").GroupVersion
if err != nil { if err != nil {
glog.Fatalf("Failed to create etcd storage for master %v", err) glog.Fatalf("Failed to create etcd storage for master %v", err)
} }
@ -275,13 +275,13 @@ func StartPods(numPods int, host string, restClient *client.Client) error {
func RunAMaster(t *testing.T) (*master.Master, *httptest.Server) { func RunAMaster(t *testing.T) (*master.Master, *httptest.Server) {
etcdClient := NewEtcdClient() etcdClient := NewEtcdClient()
storageVersions := make(map[string]string) storageVersions := make(map[string]string)
etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("").InterfacesFor, testapi.Default.Version(), etcdtest.PathPrefix()) etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("").InterfacesFor, testapi.Default.GroupAndVersion(), etcdtest.PathPrefix())
storageVersions[""] = testapi.Default.Version() storageVersions[""] = testapi.Default.Version()
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
expEtcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("experimental").InterfacesFor, latest.GroupOrDie("experimental").Version, etcdtest.PathPrefix()) expEtcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("experimental").InterfacesFor, testapi.Experimental.GroupAndVersion(), etcdtest.PathPrefix())
storageVersions["experimental"] = testapi.Experimental.Version() storageVersions["experimental"] = testapi.Experimental.GroupAndVersion()
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }

View File

@ -38,8 +38,8 @@ func TestKubectlValidation(t *testing.T) {
// The following test the experimental api. // The following test the experimental api.
// TODO: Replace with something more robust. These may move. // TODO: Replace with something more robust. These may move.
{`{"apiVersion": "v1", "kind": "DaemonSet"}`, false}, {`{"apiVersion": "experimental/v1alpha1", "kind": "DaemonSet"}`, false},
{`{"apiVersion": "v1", "kind": "Job"}`, false}, {`{"apiVersion": "experimental/v1alpha1", "kind": "Job"}`, false},
{`{"apiVersion": "vNotAVersion", "kind": "Job"}`, true}, {`{"apiVersion": "vNotAVersion", "kind": "Job"}`, true},
} }
components := framework.NewMasterComponents(&framework.Config{}) components := framework.NewMasterComponents(&framework.Config{})
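
The validation cases above now spell out the full apiVersion "experimental/v1alpha1" for the moved kinds. As a toy reference for what such a minimal object decodes to — this is not the kubectl validation path, just encoding/json run over the same literal:

package main

import (
    "encoding/json"
    "fmt"
)

type typeMeta struct {
    APIVersion string `json:"apiVersion"`
    Kind       string `json:"kind"`
}

func main() {
    doc := `{"apiVersion": "experimental/v1alpha1", "kind": "Job"}`
    var tm typeMeta
    if err := json.Unmarshal([]byte(doc), &tm); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println(tm.APIVersion, tm.Kind) // experimental/v1alpha1 Job
}
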