Merge branch 'master' into upgrade_aliases_branch
@@ -114,7 +114,7 @@ data:
 time_format %Y-%m-%dT%H:%M:%S.%NZ
 </pattern>
 <pattern>
-format /^(?<time>.+) (?<stream>stdout|stderr) (?<log>.*)$/
+format /^(?<time>.+) (?<stream>stdout|stderr) (?<tag>.*) (?<log>.*)$/
 time_format %Y-%m-%dT%H:%M:%S.%N%:z
 </pattern>
 </source>

@@ -58,7 +58,7 @@ data:
 time_format %Y-%m-%dT%H:%M:%S.%NZ
 </pattern>
 <pattern>
-format /^(?<time>.+) (?<stream>stdout|stderr) (?<log>.*)$/
+format /^(?<time>.+) (?<stream>stdout|stderr) (?<tag>.*) (?<log>.*)$/
 time_format %Y-%m-%dT%H:%M:%S.%N%:z
 </pattern>
 </source>
|
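For context on the one-line change in the two hunks above: the new fluentd pattern inserts a <tag> capture between the stream and the log payload. A rough Go translation of that regex follows; Go spells named groups as (?P<name>...) rather than Ruby's (?<name>...), and the sample line is invented for illustration.

package main

import (
	"fmt"
	"regexp"
)

// Go translation of the new fluentd pattern shown in the hunks above.
var logLine = regexp.MustCompile(`^(?P<time>.+) (?P<stream>stdout|stderr) (?P<tag>.*) (?P<log>.*)$`)

func main() {
	// Sample line invented for illustration.
	sample := "2018-02-05T13:14:15.003Z stdout F hello"
	m := logLine.FindStringSubmatch(sample)
	if m == nil {
		fmt.Println("no match")
		return
	}
	for i, name := range logLine.SubexpNames() {
		if name != "" {
			fmt.Printf("%-6s = %q\n", name, m[i])
		}
	}
}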
@@ -442,6 +442,10 @@ func CreateControllerContext(s *options.CMServer, rootClientBuilder, clientBuild
 }
 }

+if informerUserCloud, ok := cloud.(cloudprovider.InformerUser); ok {
+	informerUserCloud.SetInformers(sharedInformers)
+}
+
 ctx := ControllerContext{
 ClientBuilder: clientBuilder,
 InformerFactory: sharedInformers,
|
@@ -35,6 +35,7 @@ go_library(
|
||||
"//cmd/kubeadm/app/phases/bootstraptoken/clusterinfo:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/bootstraptoken/node:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/certs:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/controlplane:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/etcd:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/kubeconfig:go_default_library",
|
||||
@@ -46,10 +47,14 @@ go_library(
|
||||
"//cmd/kubeadm/app/util/apiclient:go_default_library",
|
||||
"//cmd/kubeadm/app/util/config:go_default_library",
|
||||
"//cmd/kubeadm/app/util/kubeconfig:go_default_library",
|
||||
"//cmd/kubeadm/app/util/pubkeypin:go_default_library",
|
||||
"//pkg/api/legacyscheme:go_default_library",
|
||||
"//pkg/bootstrap/api:go_default_library",
|
||||
"//pkg/util/normalizer:go_default_library",
|
||||
"//vendor/github.com/spf13/cobra:go_default_library",
|
||||
"//vendor/github.com/spf13/pflag:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
],
|
||||
|
@@ -17,13 +17,74 @@ limitations under the License.
|
||||
package phases
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
|
||||
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
|
||||
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/clusterinfo"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
|
||||
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
|
||||
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
|
||||
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/util/pubkeypin"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
bootstrapapi "k8s.io/kubernetes/pkg/bootstrap/api"
|
||||
"k8s.io/kubernetes/pkg/util/normalizer"
|
||||
)
|
||||
|
||||
var (
|
||||
allTokenLongDesc = normalizer.LongDesc(`
|
||||
Bootstrap tokens are used for establishing bidirectional trust between a node joining
|
||||
the cluster and the master node.
|
||||
|
||||
This command makes all the configurations required to make bootstrap tokens work
|
||||
and then creates an initial token.
|
||||
` + cmdutil.AlphaDisclaimer)
|
||||
|
||||
allTokenExamples = normalizer.Examples(`
|
||||
# Makes all the bootstrap token configurations and creates an initial token, functionally
|
||||
# equivalent to what is generated by kubeadm init.
|
||||
kubeadm alpha phase bootstrap-token all
|
||||
`)
|
||||
|
||||
createTokenLongDesc = normalizer.LongDesc(`
|
||||
Creates a bootstrap token. If no token value is given, kubeadm will generate a random token instead.
|
||||
|
||||
Alternatively, you can use kubeadm token.
|
||||
` + cmdutil.AlphaDisclaimer)
|
||||
|
||||
clusterInfoLongDesc = fmt.Sprintf(normalizer.LongDesc(`
|
||||
Uploads the %q ConfigMap in the %q namespace, populating it with cluster information extracted from the
|
||||
given kubeconfig file. The ConfigMap is used for the node bootstrap process in its initial phases,
|
||||
before the client trusts the API server.
|
||||
|
||||
See online documentation about Authenticating with Bootstrap Tokens for more details.
|
||||
`+cmdutil.AlphaDisclaimer), bootstrapapi.ConfigMapClusterInfo, metav1.NamespacePublic)
|
||||
|
||||
nodePostCSRsLongDesc = normalizer.LongDesc(`
|
||||
Configures RBAC rules to allow node bootstrap tokens to post a certificate signing request,
|
||||
thus enabling nodes joining the cluster to request long term certificate credentials.
|
||||
|
||||
See online documentation about TLS bootstrapping for more details.
|
||||
` + cmdutil.AlphaDisclaimer)
|
||||
|
||||
nodeAutoApproveLongDesc = normalizer.LongDesc(`
|
||||
Configures RBAC rules to allow the csrapprover controller to automatically approve
|
||||
certificate signing requests generated by nodes joining the cluster.
|
||||
It also configures RBAC rules for certificate rotation (with auto-approval of new certificates).
|
||||
|
||||
See online documentation about TLS bootstrapping for more details.
|
||||
` + cmdutil.AlphaDisclaimer)
|
||||
)
|
||||
|
||||
// NewCmdBootstrapToken returns the Cobra command for running the bootstrap-token phase
|
||||
@@ -31,37 +92,131 @@ func NewCmdBootstrapToken() *cobra.Command {
|
||||
var kubeConfigFile string
|
||||
cmd := &cobra.Command{
|
||||
Use: "bootstrap-token",
|
||||
Short: "Manage kubeadm-specific bootstrap token functions.",
|
||||
Short: "Manage kubeadm-specific bootstrap token functions",
|
||||
Long: cmdutil.MacroCommandLongDescription,
|
||||
Aliases: []string{"bootstraptoken"},
|
||||
RunE: cmdutil.SubCmdRunE("bootstrap-token"),
|
||||
}
|
||||
|
||||
cmd.PersistentFlags().StringVar(&kubeConfigFile, "kubeconfig", "/etc/kubernetes/admin.conf", "The KubeConfig file to use when talking to the cluster.")
|
||||
cmd.PersistentFlags().StringVar(&kubeConfigFile, "kubeconfig", "/etc/kubernetes/admin.conf", "The KubeConfig file to use when talking to the cluster")
|
||||
|
||||
// Add subcommands
|
||||
cmd.AddCommand(NewSubCmdBootstrapTokenAll(&kubeConfigFile))
|
||||
cmd.AddCommand(NewSubCmdBootstrapToken(&kubeConfigFile))
|
||||
cmd.AddCommand(NewSubCmdClusterInfo(&kubeConfigFile))
|
||||
cmd.AddCommand(NewSubCmdNodeBootstrapToken(&kubeConfigFile))
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// NewSubCmdBootstrapTokenAll returns the Cobra command for running the token all sub-phase
|
||||
func NewSubCmdBootstrapTokenAll(kubeConfigFile *string) *cobra.Command {
|
||||
cfg := &kubeadmapiext.MasterConfiguration{
|
||||
// KubernetesVersion is not used by bootstrap-token, but we set this explicitly to avoid
|
||||
// the lookup of the version from the internet when executing ConfigFileAndDefaultsToInternalConfig
|
||||
KubernetesVersion: "v1.9.0",
|
||||
}
|
||||
|
||||
// Default values for the cobra help text
|
||||
legacyscheme.Scheme.Default(cfg)
|
||||
|
||||
var cfgPath, description string
|
||||
var usages, extraGroups []string
|
||||
var skipTokenPrint bool
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "all",
|
||||
Short: "Makes all the bootstrap token configurations and creates an initial token",
|
||||
Long: allTokenLongDesc,
|
||||
Example: allTokenExamples,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
err := validation.ValidateMixedArguments(cmd.Flags())
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
client, err := kubeconfigutil.ClientSetFromFile(*kubeConfigFile)
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
// Creates the bootstrap token
|
||||
err = createBootstrapToken(client, cfgPath, cfg, description, usages, extraGroups, skipTokenPrint)
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
// Create the cluster-info ConfigMap or update if it already exists
|
||||
err = clusterinfo.CreateBootstrapConfigMapIfNotExists(client, *kubeConfigFile)
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
// Create the RBAC rules that expose the cluster-info ConfigMap properly
|
||||
err = clusterinfo.CreateClusterInfoRBACRules(client)
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
// Create RBAC rules that make the bootstrap tokens able to post CSRs
|
||||
err = node.AllowBootstrapTokensToPostCSRs(client)
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
// Create RBAC rules that make the bootstrap tokens able to get their CSRs approved automatically
|
||||
err = node.AutoApproveNodeBootstrapTokens(client)
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
// Create/update RBAC rules that allow the nodes to rotate certificates and get their CSRs approved automatically
|
||||
err = node.AutoApproveNodeCertificateRotation(client)
|
||||
kubeadmutil.CheckErr(err)
|
||||
},
|
||||
}
|
||||
|
||||
// Adds flags to the command
|
||||
addBootstrapTokenFlags(cmd.Flags(), cfg, &cfgPath, &description, &usages, &extraGroups, &skipTokenPrint)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// NewSubCmdBootstrapToken returns the Cobra command for running the create token phase
|
||||
func NewSubCmdBootstrapToken(kubeConfigFile *string) *cobra.Command {
|
||||
cfg := &kubeadmapiext.MasterConfiguration{
|
||||
// KubernetesVersion is not used by bootstrap-token, but we set this explicitly to avoid
|
||||
// the lookup of the version from the internet when executing ConfigFileAndDefaultsToInternalConfig
|
||||
KubernetesVersion: "v1.9.0",
|
||||
}
|
||||
|
||||
// Default values for the cobra help text
|
||||
legacyscheme.Scheme.Default(cfg)
|
||||
|
||||
var cfgPath, description string
|
||||
var usages, extraGroups []string
|
||||
var skipTokenPrint bool
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "create",
|
||||
Short: "Creates a bootstrap token to be used for node joining",
|
||||
Long: createTokenLongDesc,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
err := validation.ValidateMixedArguments(cmd.Flags())
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
client, err := kubeconfigutil.ClientSetFromFile(*kubeConfigFile)
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
err = createBootstrapToken(client, cfgPath, cfg, description, usages, extraGroups, skipTokenPrint)
|
||||
kubeadmutil.CheckErr(err)
|
||||
},
|
||||
}
|
||||
|
||||
// Adds flags to the command
|
||||
addBootstrapTokenFlags(cmd.Flags(), cfg, &cfgPath, &description, &usages, &extraGroups, &skipTokenPrint)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
// NewSubCmdClusterInfo returns the Cobra command for running the cluster-info sub-phase
|
||||
func NewSubCmdClusterInfo(kubeConfigFile *string) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "cluster-info <clusterinfo-file>",
|
||||
Short: "Uploads and exposes the cluster-info ConfigMap publicly from the given cluster-info file.",
|
||||
Use: "cluster-info",
|
||||
Short: "Uploads the cluster-info ConfigMap from the given kubeconfig file",
|
||||
Long: clusterInfoLongDesc,
|
||||
Aliases: []string{"clusterinfo"},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
err := cmdutil.ValidateExactArgNumber(args, []string{"clusterinfo-file"})
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
client, err := kubeconfigutil.ClientSetFromFile(*kubeConfigFile)
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
// Here it's safe to get args[0], since we've validated that the argument exists above in validateExactArgNumber
|
||||
clusterInfoFile := args[0]
|
||||
// Create the cluster-info ConfigMap or update if it already exists
|
||||
err = clusterinfo.CreateBootstrapConfigMapIfNotExists(client, clusterInfoFile)
|
||||
err = clusterinfo.CreateBootstrapConfigMapIfNotExists(client, *kubeConfigFile)
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
// Create the RBAC rules that expose the cluster-info ConfigMap properly
|
||||
@@ -76,9 +231,9 @@ func NewSubCmdClusterInfo(kubeConfigFile *string) *cobra.Command {
|
||||
func NewSubCmdNodeBootstrapToken(kubeConfigFile *string) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "node",
|
||||
Short: "Manages node bootstrap tokens.",
|
||||
Short: "Configures the node bootstrap process",
|
||||
Aliases: []string{"clusterinfo"},
|
||||
RunE: cmdutil.SubCmdRunE("node"),
|
||||
Long: cmdutil.MacroCommandLongDescription,
|
||||
}
|
||||
|
||||
cmd.AddCommand(NewSubCmdNodeBootstrapTokenPostCSRs(kubeConfigFile))
|
||||
@@ -91,11 +246,13 @@ func NewSubCmdNodeBootstrapToken(kubeConfigFile *string) *cobra.Command {
|
||||
func NewSubCmdNodeBootstrapTokenPostCSRs(kubeConfigFile *string) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "allow-post-csrs",
|
||||
Short: "Configure RBAC to allow node bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials.",
|
||||
Short: "Configures RBAC to allow node bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials",
|
||||
Long: nodePostCSRsLongDesc,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
client, err := kubeconfigutil.ClientSetFromFile(*kubeConfigFile)
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
// Create RBAC rules that make the bootstrap tokens able to post CSRs
|
||||
err = node.AllowBootstrapTokensToPostCSRs(client)
|
||||
kubeadmutil.CheckErr(err)
|
||||
},
|
||||
@@ -107,14 +264,95 @@ func NewSubCmdNodeBootstrapTokenPostCSRs(kubeConfigFile *string) *cobra.Command
|
||||
func NewSubCmdNodeBootstrapTokenAutoApprove(kubeConfigFile *string) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "allow-auto-approve",
|
||||
Short: "Configure RBAC rules to allow the csrapprover controller automatically approve CSRs from a node bootstrap token.",
|
||||
Short: "Configures RBAC rules to allow the csrapprover controller automatically approve CSRs from a node bootstrap token",
|
||||
Long: nodeAutoApproveLongDesc,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
client, err := kubeconfigutil.ClientSetFromFile(*kubeConfigFile)
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
// Create RBAC rules that make the bootstrap tokens able to get their CSRs approved automatically
|
||||
err = node.AutoApproveNodeBootstrapTokens(client)
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
// Create/update RBAC rules that allow the nodes to rotate certificates and get their CSRs approved automatically
|
||||
err = node.AutoApproveNodeCertificateRotation(client)
|
||||
kubeadmutil.CheckErr(err)
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addBootstrapTokenFlags(flagSet *pflag.FlagSet, cfg *kubeadmapiext.MasterConfiguration, cfgPath, description *string, usages, extraGroups *[]string, skipTokenPrint *bool) {
|
||||
flagSet.StringVar(
|
||||
cfgPath, "config", *cfgPath,
|
||||
"Path to kubeadm config file (WARNING: Usage of a configuration file is experimental)",
|
||||
)
|
||||
flagSet.StringVar(
|
||||
&cfg.CertificatesDir, "cert-dir", cfg.CertificatesDir,
|
||||
"The path where certificates are stored",
|
||||
)
|
||||
flagSet.StringVar(
|
||||
&cfg.Token, "token", cfg.Token,
|
||||
"The token to use for establishing bidirectional trust between nodes and masters",
|
||||
)
|
||||
flagSet.DurationVar(
|
||||
&cfg.TokenTTL.Duration, "ttl", kubeadmconstants.DefaultTokenDuration,
|
||||
"The duration before the token is automatically deleted (e.g. 1s, 2m, 3h). If set to '0', the token will never expire",
|
||||
)
|
||||
flagSet.StringSliceVar(
|
||||
usages, "usages", kubeadmconstants.DefaultTokenUsages,
|
||||
fmt.Sprintf("Describes the ways in which this token can be used. You can pass --usages multiple times or provide a comma separated list of options. Valid options: [%s]", strings.Join(kubeadmconstants.DefaultTokenUsages, ",")),
|
||||
)
|
||||
flagSet.StringSliceVar(
|
||||
extraGroups, "groups", []string{kubeadmconstants.NodeBootstrapTokenAuthGroup},
|
||||
fmt.Sprintf("Extra groups that this token will authenticate as when used for authentication. Must match %q", bootstrapapi.BootstrapGroupPattern),
|
||||
)
|
||||
flagSet.StringVar(
|
||||
description, "description", "The default bootstrap token generated by 'kubeadm init'.",
|
||||
"A human friendly description of how this token is used.",
|
||||
)
|
||||
flagSet.BoolVar(
|
||||
skipTokenPrint, "skip-token-print", *skipTokenPrint,
|
||||
"Skip printing of the bootstrap token",
|
||||
)
|
||||
}
|
||||
|
||||
func createBootstrapToken(client clientset.Interface, cfgPath string, cfg *kubeadmapiext.MasterConfiguration, description string, usages, extraGroups []string, skipTokenPrint bool) error {
|
||||
// adding groups only makes sense for authentication
|
||||
usagesSet := sets.NewString(usages...)
|
||||
usageAuthentication := strings.TrimPrefix(bootstrapapi.BootstrapTokenUsageAuthentication, bootstrapapi.BootstrapTokenUsagePrefix)
|
||||
if len(extraGroups) > 0 && !usagesSet.Has(usageAuthentication) {
|
||||
return fmt.Errorf("--groups cannot be specified unless --usages includes %q", usageAuthentication)
|
||||
}
|
||||
|
||||
// validate any extra group names
|
||||
for _, group := range extraGroups {
|
||||
if err := bootstrapapi.ValidateBootstrapGroupName(group); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// This call returns the ready-to-use configuration based on the configuration file that might or might not exist and the default cfg populated by flags
|
||||
internalcfg, err := configutil.ConfigFileAndDefaultsToInternalConfig(cfgPath, cfg)
|
||||
kubeadmutil.CheckErr(err)
|
||||
|
||||
// Load the CA certificate so we can pin its public key
|
||||
caCert, err := pkiutil.TryLoadCertFromDisk(internalcfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error loading ca cert from disk: %v", err)
|
||||
}
|
||||
|
||||
// Creates or updates the token
|
||||
if err := node.UpdateOrCreateToken(client, internalcfg.Token, false, internalcfg.TokenTTL.Duration, usages, extraGroups, description); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("[bootstraptoken] Bootstrap token Created")
|
||||
if skipTokenPrint {
|
||||
internalcfg.Token = "{token}"
|
||||
}
|
||||
fmt.Println("[bootstraptoken] You can now join any number of machines by running:")
|
||||
fmt.Printf("[bootstraptoken] kubeadm join {master} --token %s --discovery-token-ca-cert-hash %s \n", internalcfg.Token, pubkeypin.Hash(caCert))
|
||||
|
||||
return nil
|
||||
}
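The join hint printed above pins the cluster CA by public key. Below is a hedged sketch of what such a pin can look like, assuming a SHA-256 digest over the certificate's SubjectPublicKeyInfo; the exact output format of kubeadm's pubkeypin.Hash is not shown in this diff, and the certificate bytes are a placeholder.

package main

import (
	"crypto/sha256"
	"crypto/x509"
	"encoding/hex"
	"fmt"
)

// hashCAPublicKey sketches a public-key pin: SHA-256 of the certificate's
// SubjectPublicKeyInfo, hex encoded, prefixed with the hash name. This is an
// assumption for illustration, not necessarily pubkeypin.Hash verbatim.
func hashCAPublicKey(cert *x509.Certificate) string {
	sum := sha256.Sum256(cert.RawSubjectPublicKeyInfo)
	return "sha256:" + hex.EncodeToString(sum[:])
}

func main() {
	// With a real *x509.Certificate loaded from the cluster CA, the pin would be
	// passed as: kubeadm join {master} --token {token} --discovery-token-ca-cert-hash {pin}
	cert := &x509.Certificate{RawSubjectPublicKeyInfo: []byte("example SPKI bytes")}
	fmt.Println(hashCAPublicKey(cert))
}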
|
||||
|
@@ -12,7 +12,9 @@ docs/admin/kubeadm_alpha_phase_addon_all.md
|
||||
docs/admin/kubeadm_alpha_phase_addon_kube-dns.md
|
||||
docs/admin/kubeadm_alpha_phase_addon_kube-proxy.md
|
||||
docs/admin/kubeadm_alpha_phase_bootstrap-token.md
|
||||
docs/admin/kubeadm_alpha_phase_bootstrap-token_all.md
|
||||
docs/admin/kubeadm_alpha_phase_bootstrap-token_cluster-info.md
|
||||
docs/admin/kubeadm_alpha_phase_bootstrap-token_create.md
|
||||
docs/admin/kubeadm_alpha_phase_bootstrap-token_node.md
|
||||
docs/admin/kubeadm_alpha_phase_bootstrap-token_node_allow-auto-approve.md
|
||||
docs/admin/kubeadm_alpha_phase_bootstrap-token_node_allow-post-csrs.md
|
||||
@@ -73,7 +75,9 @@ docs/man/man1/kubeadm-alpha-phase-addon-all.1
|
||||
docs/man/man1/kubeadm-alpha-phase-addon-kube-dns.1
|
||||
docs/man/man1/kubeadm-alpha-phase-addon-kube-proxy.1
|
||||
docs/man/man1/kubeadm-alpha-phase-addon.1
|
||||
docs/man/man1/kubeadm-alpha-phase-bootstrap-token-all.1
|
||||
docs/man/man1/kubeadm-alpha-phase-bootstrap-token-cluster-info.1
|
||||
docs/man/man1/kubeadm-alpha-phase-bootstrap-token-create.1
|
||||
docs/man/man1/kubeadm-alpha-phase-bootstrap-token-node-allow-auto-approve.1
|
||||
docs/man/man1/kubeadm-alpha-phase-bootstrap-token-node-allow-post-csrs.1
|
||||
docs/man/man1/kubeadm-alpha-phase-bootstrap-token-node.1
|
||||
|
docs/admin/kubeadm_alpha_phase_bootstrap-token_all.md (new file, +3 lines)
@@ -0,0 +1,3 @@
|
||||
This file is autogenerated, but we've stopped checking such files into the
|
||||
repository to reduce the need for rebases. Please run hack/generate-docs.sh to
|
||||
populate this file.
|
docs/admin/kubeadm_alpha_phase_bootstrap-token_create.md (new file, +3 lines)
@@ -0,0 +1,3 @@
|
||||
This file is autogenerated, but we've stopped checking such files into the
|
||||
repository to reduce the need for rebases. Please run hack/generate-docs.sh to
|
||||
populate this file.
|
docs/man/man1/kubeadm-alpha-phase-bootstrap-token-all.1 (new file, +3 lines)
@@ -0,0 +1,3 @@
|
||||
This file is autogenerated, but we've stopped checking such files into the
|
||||
repository to reduce the need for rebases. Please run hack/generate-docs.sh to
|
||||
populate this file.
|
@@ -0,0 +1,3 @@
|
||||
This file is autogenerated, but we've stopped checking such files into the
|
||||
repository to reduce the need for rebases. Please run hack/generate-docs.sh to
|
||||
populate this file.
|
@@ -37,6 +37,12 @@ func IsHugePageResourceName(name core.ResourceName) bool {
|
||||
return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix)
|
||||
}
|
||||
|
||||
// IsQuotaHugePageResourceName returns true if the resource name has the quota
|
||||
// related huge page resource prefix.
|
||||
func IsQuotaHugePageResourceName(name core.ResourceName) bool {
|
||||
return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) || strings.HasPrefix(string(name), core.ResourceRequestsHugePagesPrefix)
|
||||
}
|
||||
|
||||
// HugePageResourceName returns a ResourceName with the canonical hugepage
|
||||
// prefix prepended for the specified page size. The page size is converted
|
||||
// to its canonical representation.
|
||||
@@ -217,7 +223,7 @@ var standardQuotaResources = sets.NewString(
|
||||
// IsStandardQuotaResourceName returns true if the resource is known to
|
||||
// the quota tracking system
|
||||
func IsStandardQuotaResourceName(str string) bool {
|
||||
return standardQuotaResources.Has(str)
|
||||
return standardQuotaResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str))
|
||||
}
|
||||
|
||||
var standardResources = sets.NewString(
|
||||
@@ -245,7 +251,7 @@ var standardResources = sets.NewString(
|
||||
|
||||
// IsStandardResourceName returns true if the resource is known to the system
|
||||
func IsStandardResourceName(str string) bool {
|
||||
return standardResources.Has(str) || IsHugePageResourceName(core.ResourceName(str))
|
||||
return standardResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str))
|
||||
}
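A minimal, self-contained sketch of the prefix checks behind IsHugePageResourceName and IsQuotaHugePageResourceName above; the constant values mirror the prefixes visible in this diff and are assumptions for illustration, not the canonical core package definitions.

package main

import (
	"fmt"
	"strings"
)

// Assumed prefix values, mirroring core.ResourceHugePagesPrefix and the
// ResourceRequestsHugePagesPrefix constant added later in this diff.
const (
	hugePagesPrefix         = "hugepages-"
	requestsHugePagesPrefix = "requests.hugepages-"
)

// isHugePageResourceName reports whether name is a hugepage resource, e.g. "hugepages-2Mi".
func isHugePageResourceName(name string) bool {
	return strings.HasPrefix(name, hugePagesPrefix)
}

// isQuotaHugePageResourceName also accepts the quota form, e.g. "requests.hugepages-2Mi".
func isQuotaHugePageResourceName(name string) bool {
	return isHugePageResourceName(name) || strings.HasPrefix(name, requestsHugePagesPrefix)
}

func main() {
	for _, n := range []string{"hugepages-2Mi", "requests.hugepages-2Mi", "cpu"} {
		fmt.Printf("%-24s hugepage=%v quotaHugepage=%v\n",
			n, isHugePageResourceName(n), isQuotaHugePageResourceName(n))
	}
}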
|
||||
|
||||
var integerResources = sets.NewString(
|
||||
|
@@ -59,6 +59,7 @@ func TestIsStandardResource(t *testing.T) {
|
||||
{"blah", false},
|
||||
{"x.y.z", false},
|
||||
{"hugepages-2Mi", true},
|
||||
{"requests.hugepages-2Mi", true},
|
||||
}
|
||||
for i, tc := range testCases {
|
||||
if IsStandardResourceName(tc.input) != tc.output {
|
||||
|
@@ -4071,6 +4071,13 @@ const (
|
||||
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
|
||||
)
|
||||
|
||||
// The following identify resource prefixes for Kubernetes object types
|
||||
const (
|
||||
// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
|
||||
// As burst is not supported for HugePages, we only quota its request and ignore the limit.
|
||||
ResourceRequestsHugePagesPrefix = "requests.hugepages-"
|
||||
)
|
||||
|
||||
// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
|
||||
type ResourceQuotaScope string
|
||||
|
||||
|
@@ -18,6 +18,7 @@ go_library(
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
@@ -23,6 +23,7 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
)
|
||||
|
||||
@@ -49,6 +50,11 @@ type Interface interface {
|
||||
HasClusterID() bool
|
||||
}
|
||||
|
||||
type InformerUser interface {
|
||||
// SetInformers sets the informer on the cloud object.
|
||||
SetInformers(informerFactory informers.SharedInformerFactory)
|
||||
}
|
||||
|
||||
// Clusters is an abstract, pluggable interface for clusters of containers.
|
||||
type Clusters interface {
|
||||
// ListClusters lists the names of the available clusters.
|
||||
|
@@ -25,10 +25,14 @@ import (
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"regexp"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// awsInstanceRegMatch represents Regex Match for AWS instance.
|
||||
var awsInstanceRegMatch = regexp.MustCompile("^i-[^/]*$")
|
||||
|
||||
// awsInstanceID represents the ID of the instance in the AWS API, e.g. i-12345678
|
||||
// The "traditional" format is "i-12345678"
|
||||
// A new longer format is also being introduced: "i-12345678abcdef01"
|
||||
@@ -76,8 +80,7 @@ func (name kubernetesInstanceID) mapToAWSInstanceID() (awsInstanceID, error) {
|
||||
|
||||
// We sanity check the resulting instance ID; the two known formats are
|
||||
// i-12345678 and i-12345678abcdef01
|
||||
// TODO: Regex match?
|
||||
if awsID == "" || strings.Contains(awsID, "/") || !strings.HasPrefix(awsID, "i-") {
|
||||
if awsID == "" || !awsInstanceRegMatch.MatchString(awsID) {
|
||||
return "", fmt.Errorf("Invalid format for AWS instance (%s)", name)
|
||||
}
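As an illustration of the stricter check introduced above, here is a small standalone sketch that applies the same ^i-[^/]*$ pattern to the two ID formats named in the comments plus a couple of invented invalid inputs.

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as awsInstanceRegMatch in the diff above.
var instanceIDRe = regexp.MustCompile("^i-[^/]*$")

// isValidAWSInstanceID mirrors the new check: non-empty and matching the i-... shape.
func isValidAWSInstanceID(id string) bool {
	return id != "" && instanceIDRe.MatchString(id)
}

func main() {
	// The first two are the documented formats; the rest are invented invalid inputs.
	for _, id := range []string{"i-12345678", "i-12345678abcdef01", "i-123/456", ""} {
		fmt.Printf("%-24q valid=%v\n", id, isValidAWSInstanceID(id))
	}
}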
|
||||
|
||||
|
@@ -21,6 +21,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/request"
|
||||
"github.com/golang/glog"
|
||||
@@ -53,7 +54,15 @@ func (c *CrossRequestRetryDelay) BeforeSign(r *request.Request) {
|
||||
if delay > 0 {
|
||||
glog.Warningf("Inserting delay before AWS request (%s) to avoid RequestLimitExceeded: %s",
|
||||
describeRequest(r), delay.String())
|
||||
r.Config.SleepDelay(delay)
|
||||
|
||||
if sleepFn := r.Config.SleepDelay; sleepFn != nil {
|
||||
// Support SleepDelay for backwards compatibility
|
||||
sleepFn(delay)
|
||||
} else if err := aws.SleepWithContext(r.Context(), delay); err != nil {
|
||||
r.Error = awserr.New(request.CanceledErrorCode, "request context canceled", err)
|
||||
r.Retryable = aws.Bool(false)
|
||||
return
|
||||
}
|
||||
|
||||
// Avoid clock skew problems
|
||||
r.Time = now
|
||||
|
@@ -19,11 +19,15 @@ package aws
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
)
|
||||
|
||||
// awsVolumeRegMatch represents Regex Match for AWS volume.
|
||||
var awsVolumeRegMatch = regexp.MustCompile("^vol-[^/]*$")
|
||||
|
||||
// awsVolumeID represents the ID of the volume in the AWS API, e.g. vol-12345678
|
||||
// The "traditional" format is "vol-12345678"
|
||||
// A new longer format is also being introduced: "vol-12345678abcdef01"
|
||||
@@ -75,8 +79,7 @@ func (name KubernetesVolumeID) mapToAWSVolumeID() (awsVolumeID, error) {
|
||||
|
||||
// We sanity check the resulting volume; the two known formats are
|
||||
// vol-12345678 and vol-12345678abcdef01
|
||||
// TODO: Regex match?
|
||||
if strings.Contains(awsID, "/") || !strings.HasPrefix(awsID, "vol-") {
|
||||
if !awsVolumeRegMatch.MatchString(awsID) {
|
||||
return "", fmt.Errorf("Invalid format for AWS volume (%s)", name)
|
||||
}
|
||||
|
||||
|
@@ -75,6 +75,7 @@ go_library(
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
|
@@ -34,13 +34,16 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/flowcontrol"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
"k8s.io/kubernetes/pkg/version"
|
||||
|
||||
"github.com/golang/glog"
|
||||
@@ -99,18 +102,21 @@ type GCECloud struct {
|
||||
// for the cloudprovider to start watching the configmap.
|
||||
ClusterID ClusterID
|
||||
|
||||
service *compute.Service
|
||||
serviceBeta *computebeta.Service
|
||||
serviceAlpha *computealpha.Service
|
||||
containerService *container.Service
|
||||
client clientset.Interface
|
||||
clientBuilder controller.ControllerClientBuilder
|
||||
eventBroadcaster record.EventBroadcaster
|
||||
eventRecorder record.EventRecorder
|
||||
projectID string
|
||||
region string
|
||||
localZone string // The zone in which we are running
|
||||
managedZones []string // List of zones we are spanning (for multi-AZ clusters, primarily when running on master)
|
||||
service *compute.Service
|
||||
serviceBeta *computebeta.Service
|
||||
serviceAlpha *computealpha.Service
|
||||
containerService *container.Service
|
||||
client clientset.Interface
|
||||
clientBuilder controller.ControllerClientBuilder
|
||||
eventBroadcaster record.EventBroadcaster
|
||||
eventRecorder record.EventRecorder
|
||||
projectID string
|
||||
region string
|
||||
localZone string // The zone in which we are running
|
||||
// managedZones will be set to the 1 zone if running a single zone cluster
|
||||
// it will be set to ALL zones in region for any multi-zone cluster
|
||||
// Use GetAllCurrentZones to get only zones that contain nodes
|
||||
managedZones []string
|
||||
networkURL string
|
||||
isLegacyNetwork bool
|
||||
subnetworkURL string
|
||||
@@ -125,6 +131,12 @@ type GCECloud struct {
|
||||
useMetadataServer bool
|
||||
operationPollRateLimiter flowcontrol.RateLimiter
|
||||
manager diskServiceManager
|
||||
// Lock for access to nodeZones
|
||||
nodeZonesLock sync.Mutex
|
||||
// nodeZones is a mapping from Zone to a sets.String of Node's names in the Zone
|
||||
// it is updated by the nodeInformer
|
||||
nodeZones map[string]sets.String
|
||||
nodeInformerSynced cache.InformerSynced
|
||||
// sharedResourceLock is used to serialize GCE operations that may mutate shared state to
|
||||
// prevent inconsistencies. For example, load balancers manipulation methods will take the
|
||||
// lock to prevent shared resources from being prematurely deleted while the operation is
|
||||
@@ -470,6 +482,7 @@ func CreateGCECloud(config *CloudConfig) (*GCECloud, error) {
|
||||
useMetadataServer: config.UseMetadataServer,
|
||||
operationPollRateLimiter: operationPollRateLimiter,
|
||||
AlphaFeatureGate: config.AlphaFeatureGate,
|
||||
nodeZones: map[string]sets.String{},
|
||||
}
|
||||
|
||||
gce.manager = &gceServiceManager{gce}
|
||||
@@ -582,6 +595,68 @@ func (gce *GCECloud) IsLegacyNetwork() bool {
|
||||
return gce.isLegacyNetwork
|
||||
}
|
||||
|
||||
func (gce *GCECloud) SetInformers(informerFactory informers.SharedInformerFactory) {
|
||||
glog.Infof("Setting up informers for GCECloud")
|
||||
nodeInformer := informerFactory.Core().V1().Nodes().Informer()
|
||||
nodeInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
node := obj.(*v1.Node)
|
||||
gce.updateNodeZones(nil, node)
|
||||
},
|
||||
UpdateFunc: func(prev, obj interface{}) {
|
||||
prevNode := prev.(*v1.Node)
|
||||
newNode := obj.(*v1.Node)
|
||||
if newNode.Labels[kubeletapis.LabelZoneFailureDomain] ==
|
||||
prevNode.Labels[kubeletapis.LabelZoneFailureDomain] {
|
||||
return
|
||||
}
|
||||
gce.updateNodeZones(prevNode, newNode)
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
node, isNode := obj.(*v1.Node)
|
||||
// We can get DeletedFinalStateUnknown instead of *v1.Node here
|
||||
// and we need to handle that correctly.
|
||||
if !isNode {
|
||||
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
glog.Errorf("Received unexpected object: %v", obj)
|
||||
return
|
||||
}
|
||||
node, ok = deletedState.Obj.(*v1.Node)
|
||||
if !ok {
|
||||
glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
|
||||
return
|
||||
}
|
||||
}
|
||||
gce.updateNodeZones(node, nil)
|
||||
},
|
||||
})
|
||||
gce.nodeInformerSynced = nodeInformer.HasSynced
|
||||
}
|
||||
|
||||
func (gce *GCECloud) updateNodeZones(prevNode, newNode *v1.Node) {
|
||||
gce.nodeZonesLock.Lock()
|
||||
defer gce.nodeZonesLock.Unlock()
|
||||
if prevNode != nil {
|
||||
prevZone, ok := prevNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
|
||||
if ok {
|
||||
gce.nodeZones[prevZone].Delete(prevNode.ObjectMeta.Name)
|
||||
if gce.nodeZones[prevZone].Len() == 0 {
|
||||
gce.nodeZones[prevZone] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if newNode != nil {
|
||||
newZone, ok := newNode.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
|
||||
if ok {
|
||||
if gce.nodeZones[newZone] == nil {
|
||||
gce.nodeZones[newZone] = sets.NewString()
|
||||
}
|
||||
gce.nodeZones[newZone].Insert(newNode.ObjectMeta.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Known-useless DNS search path.
|
||||
var uselessDNSSearchRE = regexp.MustCompile(`^[0-9]+.google.internal.$`)
|
||||
|
||||
|
@@ -690,11 +690,14 @@ func (gce *GCECloud) DisksAreAttached(diskNames []string, nodeName types.NodeNam
|
||||
// JSON in Description field.
|
||||
func (gce *GCECloud) CreateDisk(
|
||||
name string, diskType string, zone string, sizeGb int64, tags map[string]string) error {
|
||||
|
||||
// Do not allow creation of PDs in zones that are not managed. Such PDs
|
||||
// then cannot be deleted by DeleteDisk.
|
||||
if isManaged := gce.verifyZoneIsManaged(zone); !isManaged {
|
||||
return fmt.Errorf("kubernetes does not manage zone %q", zone)
|
||||
// Do not allow creation of PDs in zones that do not have nodes. Such PDs
|
||||
// are not currently usable.
|
||||
curZones, err := gce.GetAllCurrentZones()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !curZones.Has(zone) {
|
||||
return fmt.Errorf("kubernetes does not have a node in zone %q", zone)
|
||||
}
|
||||
|
||||
tagsStr, err := gce.encodeDiskTags(tags)
|
||||
@@ -733,17 +736,16 @@ func (gce *GCECloud) CreateDisk(
|
||||
func (gce *GCECloud) CreateRegionalDisk(
|
||||
name string, diskType string, replicaZones sets.String, sizeGb int64, tags map[string]string) error {
|
||||
|
||||
// Do not allow creation of PDs in zones that are not managed. Such PDs
|
||||
// then cannot be deleted by DeleteDisk.
|
||||
unmanagedZones := []string{}
|
||||
for _, zone := range replicaZones.UnsortedList() {
|
||||
if isManaged := gce.verifyZoneIsManaged(zone); !isManaged {
|
||||
unmanagedZones = append(unmanagedZones, zone)
|
||||
}
|
||||
// Do not allow creation of PDs in zones that do not have nodes. Such PDs
|
||||
// are not currently usable. This functionality should be reverted to checking
|
||||
// against managed zones if we want users to be able to create RegionalDisks
|
||||
// in zones where there are no nodes
|
||||
curZones, err := gce.GetAllCurrentZones()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(unmanagedZones) > 0 {
|
||||
return fmt.Errorf("kubernetes does not manage specified zones: %q. Managed Zones: %q", unmanagedZones, gce.managedZones)
|
||||
if !curZones.IsSuperset(replicaZones) {
|
||||
return fmt.Errorf("kubernetes does not have nodes in specified zones: %q. Zones that contain nodes: %q", replicaZones.Difference(curZones), curZones)
|
||||
}
|
||||
|
||||
tagsStr, err := gce.encodeDiskTags(tags)
|
||||
@@ -776,16 +778,6 @@ func (gce *GCECloud) CreateRegionalDisk(
|
||||
return err
|
||||
}
|
||||
|
||||
func (gce *GCECloud) verifyZoneIsManaged(zone string) bool {
|
||||
for _, managedZone := range gce.managedZones {
|
||||
if zone == managedZone {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func getDiskType(diskType string) (string, error) {
|
||||
switch diskType {
|
||||
case DiskTypeSSD, DiskTypeStandard:
|
||||
|
@@ -37,16 +37,19 @@ func TestCreateDisk_Basic(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{"zone1"},
|
||||
projectID: gceProjectId,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: []string{"zone1"},
|
||||
projectID: gceProjectId,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
|
||||
diskName := "disk"
|
||||
@@ -95,16 +98,20 @@ func TestCreateRegionalDisk_Basic(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1", "zone3", "zone2"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{AlphaFeatureGCEDisk})
|
||||
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{"zone1", "zone3", "zone2"},
|
||||
projectID: gceProjectId,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
projectID: gceProjectId,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
|
||||
diskName := "disk"
|
||||
@@ -153,15 +160,18 @@ func TestCreateDisk_DiskAlreadyExists(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{"zone1"},
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
|
||||
// Inject disk AlreadyExists error.
|
||||
@@ -184,8 +194,13 @@ func TestCreateDisk_WrongZone(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
gce := GCECloud{manager: fakeManager, managedZones: []string{"zone1"}}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true }}
|
||||
|
||||
diskName := "disk"
|
||||
diskType := DiskTypeSSD
|
||||
@@ -204,8 +219,13 @@ func TestCreateDisk_NoManagedZone(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
gce := GCECloud{manager: fakeManager, managedZones: []string{}}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true }}
|
||||
|
||||
diskName := "disk"
|
||||
diskType := DiskTypeSSD
|
||||
@@ -224,8 +244,12 @@ func TestCreateDisk_BadDiskType(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
gce := GCECloud{manager: fakeManager, managedZones: []string{"zone1"}}
|
||||
gce := GCECloud{manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true }}
|
||||
|
||||
diskName := "disk"
|
||||
diskType := "arbitrary-disk"
|
||||
@@ -245,15 +269,18 @@ func TestCreateDisk_MultiZone(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1", "zone2", "zone3"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{"zone1", "zone2", "zone3"},
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
|
||||
diskName := "disk"
|
||||
@@ -274,15 +301,18 @@ func TestDeleteDisk_Basic(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{"zone1"},
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
diskName := "disk"
|
||||
diskType := DiskTypeSSD
|
||||
@@ -311,15 +341,18 @@ func TestDeleteDisk_NotFound(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{"zone1"},
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
diskName := "disk"
|
||||
|
||||
@@ -336,15 +369,18 @@ func TestDeleteDisk_ResourceBeingUsed(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{"zone1"},
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
diskName := "disk"
|
||||
diskType := DiskTypeSSD
|
||||
@@ -367,15 +403,18 @@ func TestDeleteDisk_SameDiskMultiZone(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1", "zone2", "zone3"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{"zone1", "zone2", "zone3"},
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
diskName := "disk"
|
||||
diskType := DiskTypeSSD
|
||||
@@ -401,15 +440,18 @@ func TestDeleteDisk_DiffDiskMultiZone(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"zone1"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{"zone1"},
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
diskName := "disk"
|
||||
diskType := DiskTypeSSD
|
||||
@@ -435,19 +477,22 @@ func TestGetAutoLabelsForPD_Basic(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "us-central1"
|
||||
zone := "us-central1-c"
|
||||
zonesWithNodes := []string{zone}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
diskName := "disk"
|
||||
diskType := DiskTypeSSD
|
||||
zone := "us-central1-c"
|
||||
const sizeGb int64 = 128
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{zone},
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
|
||||
gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
|
||||
@@ -472,19 +517,22 @@ func TestGetAutoLabelsForPD_NoZone(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "europe-west1"
|
||||
zone := "europe-west1-d"
|
||||
zonesWithNodes := []string{zone}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
diskName := "disk"
|
||||
diskType := DiskTypeStandard
|
||||
zone := "europe-west1-d"
|
||||
const sizeGb int64 = 128
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
if featureGateErr != nil {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{zone},
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
|
||||
|
||||
@@ -508,10 +556,14 @@ func TestGetAutoLabelsForPD_DiskNotFound(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zone := "asia-northeast1-a"
|
||||
zonesWithNodes := []string{zone}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
diskName := "disk"
|
||||
zone := "asia-northeast1-a"
|
||||
gce := GCECloud{manager: fakeManager, managedZones: []string{zone}}
|
||||
gce := GCECloud{manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true }}
|
||||
|
||||
/* Act */
|
||||
_, err := gce.GetAutoLabelsForPD(diskName, zone)
|
||||
@@ -526,6 +578,7 @@ func TestGetAutoLabelsForPD_DiskNotFoundAndNoZone(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
diskName := "disk"
|
||||
alphaFeatureGate, featureGateErr := NewAlphaFeatureGate([]string{})
|
||||
@@ -533,9 +586,11 @@ func TestGetAutoLabelsForPD_DiskNotFoundAndNoZone(t *testing.T) {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{},
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
|
||||
/* Act */
|
||||
@@ -551,6 +606,7 @@ func TestGetAutoLabelsForPD_DupDisk(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "us-west1"
|
||||
zonesWithNodes := []string{"us-west1-b", "asia-southeast1-a"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
diskName := "disk"
|
||||
diskType := DiskTypeStandard
|
||||
@@ -562,9 +618,11 @@ func TestGetAutoLabelsForPD_DupDisk(t *testing.T) {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{"us-west1-b", "asia-southeast1-a"},
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
for _, zone := range gce.managedZones {
|
||||
gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
|
||||
@@ -590,6 +648,7 @@ func TestGetAutoLabelsForPD_DupDiskNoZone(t *testing.T) {
|
||||
/* Arrange */
|
||||
gceProjectId := "test-project"
|
||||
gceRegion := "fake-region"
|
||||
zonesWithNodes := []string{"us-west1-b", "asia-southeast1-a"}
|
||||
fakeManager := newFakeManager(gceProjectId, gceRegion)
|
||||
diskName := "disk"
|
||||
diskType := DiskTypeStandard
|
||||
@@ -600,9 +659,11 @@ func TestGetAutoLabelsForPD_DupDiskNoZone(t *testing.T) {
|
||||
t.Error(featureGateErr)
|
||||
}
|
||||
gce := GCECloud{
|
||||
manager: fakeManager,
|
||||
managedZones: []string{"us-west1-b", "asia-southeast1-a"},
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
manager: fakeManager,
|
||||
managedZones: zonesWithNodes,
|
||||
AlphaFeatureGate: alphaFeatureGate,
|
||||
nodeZones: createNodeZones(zonesWithNodes),
|
||||
nodeInformerSynced: func() bool { return true },
|
||||
}
|
||||
for _, zone := range gce.managedZones {
|
||||
gce.CreateDisk(diskName, diskType, zone, sizeGb, nil)
|
||||
@@ -925,3 +986,11 @@ func (manager *FakeServiceManager) WaitForRegionalOp(
|
||||
}
|
||||
return manager.waitForOpError
|
||||
}
|
||||
|
||||
func createNodeZones(zones []string) map[string]sets.String {
|
||||
nodeZones := map[string]sets.String{}
|
||||
for _, zone := range zones {
|
||||
nodeZones[zone] = sets.NewString("dummynode")
|
||||
}
|
||||
return nodeZones
|
||||
}
|
||||
|
@@ -273,35 +273,39 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(user string, keyData []byte) error
|
||||
})
|
||||
}
|
||||
|
||||
// GetAllZones returns all the zones in which nodes are running
|
||||
func (gce *GCECloud) GetAllZones() (sets.String, error) {
|
||||
// Fast-path for non-multizone
|
||||
if len(gce.managedZones) == 1 {
|
||||
return sets.NewString(gce.managedZones...), nil
|
||||
// GetAllCurrentZones returns all the zones in which k8s nodes are currently running
|
||||
func (gce *GCECloud) GetAllCurrentZones() (sets.String, error) {
|
||||
if gce.nodeInformerSynced == nil {
|
||||
glog.Warningf("GCECloud object does not have informers set, should only happen in E2E binary.")
|
||||
return gce.GetAllZonesFromCloudProvider()
|
||||
}
|
||||
gce.nodeZonesLock.Lock()
|
||||
defer gce.nodeZonesLock.Unlock()
|
||||
if !gce.nodeInformerSynced() {
|
||||
return nil, fmt.Errorf("Node informer is not synced when trying to GetAllCurrentZones")
|
||||
}
|
||||
zones := sets.NewString()
|
||||
for zone, nodes := range gce.nodeZones {
|
||||
if len(nodes) > 0 {
|
||||
zones.Insert(zone)
|
||||
}
|
||||
}
|
||||
return zones, nil
|
||||
}
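GetAllCurrentZones reads the zone-to-nodes map that the informer handlers above keep up to date. Below is a small standalone illustration of that bookkeeping; the zoneTracker type and the node names are invented for the example and stand in for the nodeZones map of sets.String.

package main

import (
	"fmt"
	"sort"
)

// zoneTracker is an illustrative stand-in for the nodeZones map kept by GCECloud:
// zone name -> set of node names currently in that zone.
type zoneTracker map[string]map[string]struct{}

func (z zoneTracker) add(zone, node string) {
	if z[zone] == nil {
		z[zone] = map[string]struct{}{}
	}
	z[zone][node] = struct{}{}
}

func (z zoneTracker) remove(zone, node string) {
	delete(z[zone], node) // deleting from a nil set is a no-op
}

// currentZones mirrors GetAllCurrentZones: only zones that still contain at least one node.
func (z zoneTracker) currentZones() []string {
	var zones []string
	for zone, nodes := range z {
		if len(nodes) > 0 {
			zones = append(zones, zone)
		}
	}
	sort.Strings(zones)
	return zones
}

func main() {
	zt := zoneTracker{}
	zt.add("us-central1-b", "node-1")
	zt.add("us-central1-c", "node-2")
	zt.remove("us-central1-c", "node-2")
	fmt.Println(zt.currentZones()) // [us-central1-b]
}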
|
||||
|
||||
// TODO: Caching, but this is currently only called when we are creating a volume,
|
||||
// which is a relatively infrequent operation, and this is only # zones API calls
|
||||
// GetAllZonesFromCloudProvider returns all the zones in which nodes are running
|
||||
// Only use this in E2E tests to get zones, on real clusters this will
|
||||
// get all zones with compute instances in them even if not k8s instances!!!
|
||||
// ex. I have k8s nodes in us-central1-c and us-central1-b. I also have
|
||||
// a non-k8s compute instance in us-central1-a. This func will return a, b, and c.
|
||||
func (gce *GCECloud) GetAllZonesFromCloudProvider() (sets.String, error) {
|
||||
zones := sets.NewString()
|
||||
|
||||
// TODO: Parallelize, although O(zones) so not too bad (N <= 3 typically)
|
||||
for _, zone := range gce.managedZones {
|
||||
mc := newInstancesMetricContext("list", zone)
|
||||
// We only retrieve one page in each zone - we only care about existence
|
||||
listCall := gce.service.Instances.List(gce.projectID, zone)
|
||||
|
||||
// No filter: We assume that a zone is either used or unused
|
||||
// We could only consider running nodes (like we do in List above),
|
||||
// but probably if instances are starting we still want to consider them.
|
||||
// I think we should wait until we have a reason to make the
|
||||
// call one way or the other; we generally can't guarantee correct
|
||||
// volume spreading if the set of zones is changing
|
||||
// (and volume spreading is currently only a heuristic).
|
||||
// Long term we want to replace GetAllZones (which primarily supports volume
|
||||
// spreading) with a scheduler policy that is able to see the global state of
|
||||
// volumes and the health of zones.
|
||||
|
||||
// Just a minimal set of fields - we only care about existence
|
||||
listCall = listCall.Fields("items(name)")
|
||||
res, err := listCall.Do()
|
||||
if err != nil {
|
||||
@@ -317,6 +321,16 @@ func (gce *GCECloud) GetAllZones() (sets.String, error) {
|
||||
return zones, nil
|
||||
}
|
||||
|
||||
// InsertInstance creates a new instance on GCP
|
||||
func (gce *GCECloud) InsertInstance(project string, zone string, rb *compute.Instance) error {
|
||||
mc := newInstancesMetricContext("create", zone)
|
||||
op, err := gce.service.Instances.Insert(project, zone, rb).Do()
|
||||
if err != nil {
|
||||
return mc.Observe(err)
|
||||
}
|
||||
return gce.waitForZoneOp(op, zone, mc)
|
||||
}
|
||||
|
||||
// ListInstanceNames returns a string of instance names separated by spaces.
|
||||
func (gce *GCECloud) ListInstanceNames(project, zone string) (string, error) {
|
||||
res, err := gce.service.Instances.List(project, zone).Fields("items(name)").Do()
|
||||
|
@@ -20,6 +20,7 @@ go_library(
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/volume/events:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/util/goroutinemap:go_default_library",
|
||||
"//pkg/util/goroutinemap/exponentialbackoff:go_default_library",
|
||||
"//pkg/util/io:go_default_library",
|
||||
@@ -37,6 +38,7 @@ go_library(
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers/storage/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
@@ -80,6 +82,7 @@ go_test(
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/informers:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
|
||||
|
@@ -21,6 +21,7 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
storage "k8s.io/api/storage/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
)
|
||||
|
||||
// Test single call to syncClaim and syncVolume methods.
|
||||
@@ -33,6 +34,8 @@ func TestSync(t *testing.T) {
|
||||
"foo": "true",
|
||||
"bar": "false",
|
||||
}
|
||||
modeBlock := v1.PersistentVolumeBlock
|
||||
modeFile := v1.PersistentVolumeFilesystem
|
||||
|
||||
tests := []controllerTest{
|
||||
// [Unit test set 1] User did not care which PV they get.
|
||||
@@ -517,10 +520,216 @@ func TestSync(t *testing.T) {
|
||||
newClaimArray("claim13-5", "uid13-5", "1Gi", "volume13-5", v1.ClaimBound, nil, annBoundByController, annBindCompleted),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
|
||||
// All of these should bind as feature set is not enabled for BlockVolume
|
||||
// meaning volumeMode will be ignored and dropped
|
||||
{
|
||||
// syncVolume binds a requested block claim to a block volume
|
||||
"14-1 - binding to volumeMode block",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "volume14-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds a requested filesystem claim to a filesystem volume
|
||||
"14-2 - binding to volumeMode filesystem",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "uid14-2", "claim14-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "volume14-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds an unspecified volumemode for claim to a specified filesystem volume
|
||||
"14-3 - binding to volumeMode filesystem using default for claim",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "uid14-3", "claim14-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(nil, newClaimArray("claim14-3", "uid14-3", "10Gi", "volume14-3", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume
|
||||
"14-4 - binding to unspecified volumeMode using requested filesystem for claim",
|
||||
withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(nil, newVolumeArray("volume14-4", "10Gi", "uid14-4", "claim14-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "volume14-4", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds a requested filesystem claim to an unspecified volumeMode for volume
|
||||
"14-5 - binding different volumeModes should be ignored",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-5", "10Gi", "uid14-5", "claim14-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5", "uid14-5", "10Gi", "volume14-5", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
}
|
||||
|
||||
runSyncTests(t, tests, []*storage.StorageClass{})
|
||||
}
|
||||
|
||||
func TestSyncAlphaBlockVolume(t *testing.T) {
|
||||
modeBlock := v1.PersistentVolumeBlock
|
||||
modeFile := v1.PersistentVolumeFilesystem
|
||||
|
||||
// Tests assume defaulting, so feature enabled will never have nil volumeMode
|
||||
tests := []controllerTest{
|
||||
// PVC with VolumeMode
|
||||
{
|
||||
// syncVolume binds a requested block claim to a block volume
|
||||
"14-1 - binding to volumeMode block",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-1", "10Gi", "uid14-1", "claim14-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-1", "uid14-1", "10Gi", "volume14-1", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds a requested filesystem claim to a filesystem volume
|
||||
"14-2 - binding to volumeMode filesystem",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-2", "10Gi", "uid14-2", "claim14-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-2", "uid14-2", "10Gi", "volume14-2", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// failed syncVolume do not bind to an unspecified volumemode for claim to a specified filesystem volume
|
||||
"14-3 - do not bind pv volumeMode filesystem and pvc volumeMode block",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-3", "uid14-3", "10Gi", "", v1.ClaimPending, nil)),
|
||||
[]string{"Normal FailedBinding"},
|
||||
noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// failed syncVolume do not bind a requested filesystem claim to an unspecified volumeMode for volume
|
||||
"14-4 - do not bind pv volumeMode block and pvc volumeMode filesystem",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-4", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-4", "uid14-4", "10Gi", "", v1.ClaimPending, nil)),
|
||||
[]string{"Normal FailedBinding"},
|
||||
noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// failed syncVolume do not bind when matching class but not matching volumeModes
|
||||
"14-5 - do not bind when matching class but not volumeMode",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, &classGold)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-5", "uid14-5", "10Gi", "", v1.ClaimPending, &classGold)),
|
||||
[]string{"Warning ProvisioningFailed"},
|
||||
noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// failed syncVolume do not bind when matching volumeModes but class does not match
|
||||
"14-5-1 - do not bind when matching volumeModes but class does not match",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-1", "uid14-5-1", "10Gi", "", v1.ClaimPending, &classSilver)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-1", "uid14-5-1", "10Gi", "", v1.ClaimPending, &classSilver)),
|
||||
[]string{"Warning ProvisioningFailed"},
|
||||
noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// failed syncVolume do not bind when pvc is prebound to pv with matching volumeModes but class does not match
|
||||
"14-5-2 - do not bind when pvc is prebound to pv with matching volumeModes but class does not match",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-5-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classGold)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-2", "uid14-5-2", "10Gi", "volume14-5-2", v1.ClaimPending, &classSilver)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-5-2", "uid14-5-2", "10Gi", "volume14-5-2", v1.ClaimPending, &classSilver)),
|
||||
[]string{"Warning VolumeMismatch"},
|
||||
noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume bind when pv is prebound and volumeModes match
|
||||
"14-7 - bind when pv volume is prebound and volumeModes match",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-7", "10Gi", "", "claim14-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-7", "10Gi", "uid14-7", "claim14-7", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-7", "uid14-7", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-7", "uid14-7", "10Gi", "volume14-7", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// failed syncVolume do not bind when pvc is prebound to pv with mismatching volumeModes
|
||||
"14-8 - do not bind when pvc is prebound to pv with mismatching volumeModes",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8", "uid14-8", "10Gi", "volume14-8", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8", "uid14-8", "10Gi", "volume14-8", v1.ClaimPending, nil)),
|
||||
[]string{"Warning VolumeMismatch"},
|
||||
noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// failed syncVolume do not bind when pvc is prebound to pv with mismatching volumeModes
|
||||
"14-8-1 - do not bind when pv is prebound to pvc with mismatching volumeModes",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-8-1", "10Gi", "", "claim14-8-1", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8-1", "uid14-8-1", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-8-1", "uid14-8-1", "10Gi", "", v1.ClaimPending, nil)),
|
||||
[]string{"Normal FailedBinding"},
|
||||
noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds when pvc is prebound to pv with matching volumeModes block
|
||||
"14-9 - bind when pvc is prebound to pv with matching volumeModes block",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-9", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-9", "10Gi", "uid14-9", "claim14-9", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-9", "uid14-9", "10Gi", "volume14-9", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-9", "uid14-9", "10Gi", "volume14-9", v1.ClaimBound, nil, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds when pv is prebound to pvc with matching volumeModes block
|
||||
"14-10 - bind when pv is prebound to pvc with matching volumeModes block",
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-10", "10Gi", "", "claim14-10", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeBlock, newVolumeArray("volume14-10", "10Gi", "uid14-10", "claim14-10", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-10", "uid14-10", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeBlock, newClaimArray("claim14-10", "uid14-10", "10Gi", "volume14-10", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds when pvc is prebound to pv with matching volumeModes filesystem
|
||||
"14-11 - bind when pvc is prebound to pv with matching volumeModes filesystem",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-11", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-11", "10Gi", "uid14-11", "claim14-11", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty, annBoundByController)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-11", "uid14-11", "10Gi", "volume14-11", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-11", "uid14-11", "10Gi", "volume14-11", v1.ClaimBound, nil, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncVolume binds when pv is prebound to pvc with matching volumeModes filesystem
|
||||
"14-12 - bind when pv is prebound to pvc with matching volumeModes filesystem",
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-12", "10Gi", "", "claim14-12", v1.VolumePending, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withVolumeVolumeMode(&modeFile, newVolumeArray("volume14-12", "10Gi", "uid14-12", "claim14-12", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, classEmpty)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-12", "uid14-12", "10Gi", "", v1.ClaimPending, nil)),
|
||||
withClaimVolumeMode(&modeFile, newClaimArray("claim14-12", "uid14-12", "10Gi", "volume14-12", v1.ClaimBound, nil, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
}
|
||||
|
||||
err := utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
|
||||
if err != nil {
|
||||
t.Errorf("Failed to enable feature gate for BlockVolume: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
runSyncTests(t, tests, []*storage.StorageClass{})
|
||||
|
||||
err1 := utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
|
||||
if err1 != nil {
|
||||
t.Errorf("Failed to disable feature gate for BlockVolume: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
|
||||
// volume/claims. The test follows this pattern:
|
||||
// 0. Load the controller with initial data.
|
||||
|
@@ -680,6 +680,22 @@ func withLabelSelector(labels map[string]string, claims []*v1.PersistentVolumeCl
|
||||
return claims
|
||||
}
|
||||
|
||||
// withVolumeVolumeMode applies the given VolumeMode to the first volume in the array and
|
||||
// returns the array. Meant to be used to compose volumes specified inline in
|
||||
// a test.
|
||||
func withVolumeVolumeMode(mode *v1.PersistentVolumeMode, volumes []*v1.PersistentVolume) []*v1.PersistentVolume {
|
||||
volumes[0].Spec.VolumeMode = mode
|
||||
return volumes
|
||||
}
|
||||
|
||||
// withClaimVolumeMode applies the given VolumeMode to the first claim in the array and
|
||||
// returns the array. Meant to be used to compose volumes specified inline in
|
||||
// a test.
|
||||
func withClaimVolumeMode(mode *v1.PersistentVolumeMode, claims []*v1.PersistentVolumeClaim) []*v1.PersistentVolumeClaim {
|
||||
claims[0].Spec.VolumeMode = mode
|
||||
return claims
|
||||
}
|
||||
|
||||
// withExpectedCapacity sets the claim.Spec.Capacity of the first claim in the
|
||||
// array to given value and returns the array. Meant to be used to compose
|
||||
// claims specified inline in a test.
|
||||
|
@@ -24,8 +24,10 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
)
|
||||
|
||||
@@ -116,6 +118,16 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *v1.PersistentVol
|
||||
// - find the smallest matching one if there is no volume pre-bound to
|
||||
// the claim.
|
||||
for _, volume := range volumes {
|
||||
// check if volumeModes do not match (Alpha and feature gate protected)
|
||||
isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error checking if volumeMode was a mismatch: %v", err)
|
||||
}
|
||||
// filter out mismatching volumeModes
|
||||
if isMisMatch {
|
||||
continue
|
||||
}
|
||||
|
||||
if isVolumeBoundToClaim(volume, claim) {
|
||||
// this claim and volume are pre-bound; return
|
||||
// the volume if the size request is satisfied,
|
||||
@@ -157,6 +169,27 @@ func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *v1.PersistentVol
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// checkVolumeModeMatches is a convenience method that checks volumeMode for PersistentVolume
|
||||
// and PersistentVolumeClaims along with making sure that the Alpha feature gate BlockVolume is
|
||||
// enabled.
|
||||
// This is Alpha and could change in the future.
|
||||
func checkVolumeModeMisMatches(pvcSpec *v1.PersistentVolumeClaimSpec, pvSpec *v1.PersistentVolumeSpec) (bool, error) {
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
|
||||
if pvSpec.VolumeMode != nil && pvcSpec.VolumeMode != nil {
|
||||
requestedVolumeMode := *pvcSpec.VolumeMode
|
||||
pvVolumeMode := *pvSpec.VolumeMode
|
||||
return requestedVolumeMode != pvVolumeMode, nil
|
||||
} else {
|
||||
// This also should retrun an error, this means that
|
||||
// the defaulting has failed.
|
||||
return true, fmt.Errorf("api defaulting for volumeMode failed")
|
||||
}
|
||||
} else {
|
||||
// feature gate is disabled
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// findBestMatchForClaim is a convenience method that finds a volume by the claim's AccessModes and requests for Storage
|
||||
func (pvIndex *persistentVolumeOrderedIndex) findBestMatchForClaim(claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
|
||||
return pvIndex.findByClaim(claim)
|
||||
|
@@ -23,6 +23,7 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
ref "k8s.io/client-go/tools/reference"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
@@ -50,6 +51,28 @@ func makePVC(size string, modfn func(*v1.PersistentVolumeClaim)) *v1.PersistentV
|
||||
return &pvc
|
||||
}
|
||||
|
||||
func makeVolumeModePVC(size string, mode *v1.PersistentVolumeMode, modfn func(*v1.PersistentVolumeClaim)) *v1.PersistentVolumeClaim {
|
||||
pvc := v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "claim01",
|
||||
Namespace: "myns",
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
VolumeMode: mode,
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(size),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if modfn != nil {
|
||||
modfn(&pvc)
|
||||
}
|
||||
return &pvc
|
||||
}
|
||||
|
||||
func TestMatchVolume(t *testing.T) {
|
||||
volList := newPersistentVolumeOrderedIndex()
|
||||
for _, pv := range createTestVolumes() {
|
||||
@@ -669,6 +692,249 @@ func testVolume(name, size string) *v1.PersistentVolume {
|
||||
}
|
||||
}
|
||||
|
||||
func createVolumeModeBlockTestVolume() *v1.PersistentVolume {
|
||||
blockMode := v1.PersistentVolumeBlock
|
||||
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "local-1",
|
||||
Name: "block",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Local: &v1.LocalVolumeSource{},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
VolumeMode: &blockMode,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createVolumeModeFilesystemTestVolume() *v1.PersistentVolume {
|
||||
filesystemMode := v1.PersistentVolumeFilesystem
|
||||
|
||||
return &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: "local-1",
|
||||
Name: "block",
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G"),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
Local: &v1.LocalVolumeSource{},
|
||||
},
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{
|
||||
v1.ReadWriteOnce,
|
||||
},
|
||||
VolumeMode: &filesystemMode,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createTestVolOrderedIndex(pv *v1.PersistentVolume) persistentVolumeOrderedIndex {
|
||||
volFile := newPersistentVolumeOrderedIndex()
|
||||
volFile.store.Add(pv)
|
||||
return volFile
|
||||
}
|
||||
|
||||
func toggleBlockVolumeFeature(toggleFlag bool, t *testing.T) {
|
||||
if toggleFlag {
|
||||
// Enable alpha feature BlockVolume
|
||||
err := utilfeature.DefaultFeatureGate.Set("BlockVolume=true")
|
||||
if err != nil {
|
||||
t.Errorf("Failed to enable feature gate for BlockVolume: %v", err)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err := utilfeature.DefaultFeatureGate.Set("BlockVolume=false")
|
||||
if err != nil {
|
||||
t.Errorf("Failed to disable feature gate for BlockVolume: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAlphaVolumeModeCheck(t *testing.T) {
|
||||
|
||||
blockMode := v1.PersistentVolumeBlock
|
||||
filesystemMode := v1.PersistentVolumeFilesystem
|
||||
|
||||
// If feature gate is enabled, VolumeMode will always be defaulted
|
||||
// If feature gate is disabled, VolumeMode is dropped by API and ignored
|
||||
scenarios := map[string]struct {
|
||||
isExpectedMisMatch bool
|
||||
vol *v1.PersistentVolume
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
enableBlock bool
|
||||
}{
|
||||
"feature enabled - pvc block and pv filesystem": {
|
||||
isExpectedMisMatch: true,
|
||||
vol: createVolumeModeFilesystemTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &blockMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"feature enabled - pvc filesystem and pv block": {
|
||||
isExpectedMisMatch: true,
|
||||
vol: createVolumeModeBlockTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"feature enabled - pvc block and pv block": {
|
||||
isExpectedMisMatch: false,
|
||||
vol: createVolumeModeBlockTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &blockMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"feature enabled - pvc filesystem and pv filesystem": {
|
||||
isExpectedMisMatch: false,
|
||||
vol: createVolumeModeFilesystemTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"feature disabled - pvc block and pv filesystem": {
|
||||
isExpectedMisMatch: false,
|
||||
vol: createVolumeModeFilesystemTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &blockMode, nil),
|
||||
enableBlock: false,
|
||||
},
|
||||
"feature disabled - pvc filesystem and pv block": {
|
||||
isExpectedMisMatch: false,
|
||||
vol: createVolumeModeBlockTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: false,
|
||||
},
|
||||
"feature disabled - pvc block and pv block": {
|
||||
isExpectedMisMatch: false,
|
||||
vol: createVolumeModeBlockTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &blockMode, nil),
|
||||
enableBlock: false,
|
||||
},
|
||||
"feature disabled - pvc filesystem and pv filesystem": {
|
||||
isExpectedMisMatch: false,
|
||||
vol: createVolumeModeFilesystemTestVolume(),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: false,
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
toggleBlockVolumeFeature(scenario.enableBlock, t)
|
||||
expectedMisMatch, err := checkVolumeModeMisMatches(&scenario.pvc.Spec, &scenario.vol.Spec)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected failure for checkVolumeModeMisMatches: %v", err)
|
||||
}
|
||||
// expected to match but either got an error or no returned pvmatch
|
||||
if expectedMisMatch && !scenario.isExpectedMisMatch {
|
||||
t.Errorf("Unexpected failure for scenario, expected not to mismatch on modes but did: %s", name)
|
||||
}
|
||||
if !expectedMisMatch && scenario.isExpectedMisMatch {
|
||||
t.Errorf("Unexpected failure for scenario, did not mismatch on mode when expected to mismatch: %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestAlphaFilteringVolumeModes(t *testing.T) {
|
||||
blockMode := v1.PersistentVolumeBlock
|
||||
filesystemMode := v1.PersistentVolumeFilesystem
|
||||
|
||||
// If feature gate is enabled, VolumeMode will always be defaulted
|
||||
// If feature gate is disabled, VolumeMode is dropped by API and ignored
|
||||
scenarios := map[string]struct {
|
||||
isExpectedMatch bool
|
||||
vol persistentVolumeOrderedIndex
|
||||
pvc *v1.PersistentVolumeClaim
|
||||
enableBlock bool
|
||||
}{
|
||||
"1-1 feature enabled - pvc block and pv filesystem": {
|
||||
isExpectedMatch: false,
|
||||
vol: createTestVolOrderedIndex(createVolumeModeFilesystemTestVolume()),
|
||||
pvc: makeVolumeModePVC("8G", &blockMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"1-2 feature enabled - pvc filesystem and pv block": {
|
||||
isExpectedMatch: false,
|
||||
vol: createTestVolOrderedIndex(createVolumeModeBlockTestVolume()),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"1-3 feature enabled - pvc block and pv no mode with default filesystem": {
|
||||
isExpectedMatch: false,
|
||||
vol: createTestVolOrderedIndex(createVolumeModeFilesystemTestVolume()),
|
||||
pvc: makeVolumeModePVC("8G", &blockMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"1-4 feature enabled - pvc no mode defaulted to filesystem and pv block": {
|
||||
isExpectedMatch: false,
|
||||
vol: createTestVolOrderedIndex(createVolumeModeBlockTestVolume()),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"1-5 feature enabled - pvc block and pv block": {
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(createVolumeModeBlockTestVolume()),
|
||||
pvc: makeVolumeModePVC("8G", &blockMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"1-6 feature enabled - pvc filesystem and pv filesystem": {
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(createVolumeModeFilesystemTestVolume()),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"1-7 feature enabled - pvc mode is nil and defaulted and pv mode is nil and defaulted": {
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(createVolumeModeFilesystemTestVolume()),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: true,
|
||||
},
|
||||
"2-1 feature disabled - pvc mode is nil and pv mode is nil": {
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(testVolume("nomode-1", "8G")),
|
||||
pvc: makeVolumeModePVC("8G", nil, nil),
|
||||
enableBlock: false,
|
||||
},
|
||||
"2-2 feature disabled - pvc mode is block and pv mode is block - fields should be dropped by api and not analyzed with gate disabled": {
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(createVolumeModeBlockTestVolume()),
|
||||
pvc: makeVolumeModePVC("8G", &blockMode, nil),
|
||||
enableBlock: false,
|
||||
},
|
||||
"2-3 feature disabled - pvc mode is filesystem and pv mode is filesystem - fields should be dropped by api and not analyzed with gate disabled": {
|
||||
isExpectedMatch: true,
|
||||
vol: createTestVolOrderedIndex(createVolumeModeFilesystemTestVolume()),
|
||||
pvc: makeVolumeModePVC("8G", &filesystemMode, nil),
|
||||
enableBlock: false,
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
toggleBlockVolumeFeature(scenario.enableBlock, t)
|
||||
pvmatch, err := scenario.vol.findBestMatchForClaim(scenario.pvc)
|
||||
// expected to match but either got an error or no returned pvmatch
|
||||
if pvmatch == nil && scenario.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for scenario, no matching volume: %s", name)
|
||||
}
|
||||
if err != nil && scenario.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for scenario: %s - %+v", name, err)
|
||||
}
|
||||
// expected to not match but either got an error or a returned pvmatch
|
||||
if pvmatch != nil && !scenario.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for scenario, expected no matching volume: %s", name)
|
||||
}
|
||||
if err != nil && !scenario.isExpectedMatch {
|
||||
t.Errorf("Unexpected failure for scenario: %s - %+v", name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindingPreboundVolumes(t *testing.T) {
|
||||
claim := &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
|
@@ -231,6 +231,10 @@ func (ctrl *PersistentVolumeController) syncClaim(claim *v1.PersistentVolumeClai
|
||||
func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) error {
|
||||
requestedQty := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
|
||||
requestedSize := requestedQty.Value()
|
||||
isMisMatch, err := checkVolumeModeMisMatches(&claim.Spec, &volume.Spec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error checking if volumeMode was a mismatch: %v", err)
|
||||
}
|
||||
|
||||
volumeQty := volume.Spec.Capacity[v1.ResourceStorage]
|
||||
volumeSize := volumeQty.Value()
|
||||
@@ -243,6 +247,10 @@ func checkVolumeSatisfyClaim(volume *v1.PersistentVolume, claim *v1.PersistentVo
|
||||
return fmt.Errorf("Class of volume[%s] is not the same as claim[%v]", volume.Name, claimToClaimKey(claim))
|
||||
}
|
||||
|
||||
if isMisMatch {
|
||||
return fmt.Errorf("VolumeMode[%v] of volume[%s] is incompatible with VolumeMode[%v] of claim[%v]", volume.Spec.VolumeMode, volume.Name, claim.Spec.VolumeMode, claim.Name)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@@ -548,7 +548,7 @@ func Example_printPodHideTerminated() {
|
||||
podList := newAllPhasePodList()
|
||||
// filter pods
|
||||
filterFuncs := f.DefaultResourceFilterFunc()
|
||||
filterOpts := f.DefaultResourceFilterOptions(cmd, false)
|
||||
filterOpts := cmdutil.ExtractCmdPrintOptions(cmd, false)
|
||||
_, filteredPodList, errs := cmdutil.FilterResourceList(podList, filterFuncs, filterOpts)
|
||||
if errs != nil {
|
||||
fmt.Printf("Unexpected filter error: %v\n", errs)
|
||||
|
@@ -81,7 +81,8 @@ func NewCmdConfigView(out, errOut io.Writer, ConfigAccess clientcmd.ConfigAccess
|
||||
cmd.Flags().Set("output", defaultOutputFormat)
|
||||
}
|
||||
|
||||
printer, err := cmdutil.PrinterForCommand(cmd, nil, meta.NewDefaultRESTMapper(nil, nil), latest.Scheme, nil, []runtime.Decoder{latest.Codec}, printers.PrintOptions{})
|
||||
printOpts := cmdutil.ExtractCmdPrintOptions(cmd, false)
|
||||
printer, err := cmdutil.PrinterForOptions(meta.NewDefaultRESTMapper(nil, nil), latest.Scheme, nil, []runtime.Decoder{latest.Codec}, printOpts)
|
||||
cmdutil.CheckErr(err)
|
||||
printer = printers.NewVersionedPrinter(printer, latest.Scheme, latest.ExternalVersion)
|
||||
|
||||
|
@@ -162,7 +162,7 @@ func (o *ConvertOptions) Complete(f cmdutil.Factory, out io.Writer, cmd *cobra.C
|
||||
cmd.Flags().Set("output", outputFormat)
|
||||
}
|
||||
o.encoder = f.JSONEncoder()
|
||||
o.printer, err = f.PrinterForCommand(cmd, o.local, nil, printers.PrintOptions{})
|
||||
o.printer, err = f.PrinterForOptions(cmdutil.ExtractCmdPrintOptions(cmd, false))
|
||||
return err
|
||||
}
|
||||
|
||||
|
@@ -254,12 +254,13 @@ func (options *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []str
|
||||
return err
|
||||
}
|
||||
|
||||
printer, err := f.PrinterForCommand(cmd, false, nil, printers.PrintOptions{})
|
||||
printOpts := cmdutil.ExtractCmdPrintOptions(cmd, options.AllNamespaces)
|
||||
printer, err := f.PrinterForOptions(printOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
filterOpts := f.DefaultResourceFilterOptions(cmd, options.AllNamespaces)
|
||||
filterOpts := cmdutil.ExtractCmdPrintOptions(cmd, options.AllNamespaces)
|
||||
filterFuncs := f.DefaultResourceFilterFunc()
|
||||
if r.TargetsSingleItems() {
|
||||
filterFuncs = nil
|
||||
@@ -330,14 +331,14 @@ func (options *GetOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []str
|
||||
printWithNamespace = false
|
||||
}
|
||||
|
||||
var outputOpts *printers.OutputOptions
|
||||
printOpts := cmdutil.ExtractCmdPrintOptions(cmd, printWithNamespace)
|
||||
// if cmd does not specify output format and useOpenAPIPrintColumnFlagLabel flag is true,
|
||||
// then get the default output options for this mapping from OpenAPI schema.
|
||||
if !cmdSpecifiesOutputFmt(cmd) && useOpenAPIPrintColumns {
|
||||
outputOpts, _ = outputOptsForMappingFromOpenAPI(f, mapping)
|
||||
updatePrintOptionsForOpenAPI(f, mapping, printOpts)
|
||||
}
|
||||
|
||||
printer, err = f.PrinterForMapping(cmd, false, outputOpts, mapping, printWithNamespace)
|
||||
printer, err = f.PrinterForMapping(printOpts, mapping)
|
||||
if err != nil {
|
||||
if !errs.Has(err.Error()) {
|
||||
errs.Insert(err.Error())
|
||||
@@ -470,7 +471,7 @@ func (options *GetOptions) watch(f cmdutil.Factory, cmd *cobra.Command, args []s
|
||||
return i18n.Errorf("watch is only supported on individual resources and resource collections - %d resources were found", len(infos))
|
||||
}
|
||||
|
||||
filterOpts := f.DefaultResourceFilterOptions(cmd, options.AllNamespaces)
|
||||
filterOpts := cmdutil.ExtractCmdPrintOptions(cmd, options.AllNamespaces)
|
||||
filterFuncs := f.DefaultResourceFilterFunc()
|
||||
if r.TargetsSingleItems() {
|
||||
filterFuncs = nil
|
||||
@@ -478,7 +479,8 @@ func (options *GetOptions) watch(f cmdutil.Factory, cmd *cobra.Command, args []s
|
||||
|
||||
info := infos[0]
|
||||
mapping := info.ResourceMapping()
|
||||
printer, err := f.PrinterForMapping(cmd, false, nil, mapping, options.AllNamespaces)
|
||||
printOpts := cmdutil.ExtractCmdPrintOptions(cmd, options.AllNamespaces)
|
||||
printer, err := f.PrinterForMapping(printOpts, mapping)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -661,44 +663,45 @@ func cmdSpecifiesOutputFmt(cmd *cobra.Command) bool {
|
||||
}
|
||||
|
||||
// outputOptsForMappingFromOpenAPI looks for the output format metatadata in the
|
||||
// openapi schema and returns the output options for the mapping if found.
|
||||
func outputOptsForMappingFromOpenAPI(f cmdutil.Factory, mapping *meta.RESTMapping) (*printers.OutputOptions, bool) {
|
||||
// openapi schema and modifies the passed print options for the mapping if found.
|
||||
func updatePrintOptionsForOpenAPI(f cmdutil.Factory, mapping *meta.RESTMapping, printOpts *printers.PrintOptions) bool {
|
||||
|
||||
// user has not specified any output format, check if OpenAPI has
|
||||
// default specification to print this resource type
|
||||
api, err := f.OpenAPISchema()
|
||||
if err != nil {
|
||||
// Error getting schema
|
||||
return nil, false
|
||||
return false
|
||||
}
|
||||
// Found openapi metadata for this resource
|
||||
schema := api.LookupResource(mapping.GroupVersionKind)
|
||||
if schema == nil {
|
||||
// Schema not found, return empty columns
|
||||
return nil, false
|
||||
return false
|
||||
}
|
||||
|
||||
columns, found := openapi.GetPrintColumns(schema.GetExtensions())
|
||||
if !found {
|
||||
// Extension not found, return empty columns
|
||||
return nil, false
|
||||
return false
|
||||
}
|
||||
|
||||
return outputOptsFromStr(columns)
|
||||
return outputOptsFromStr(columns, printOpts)
|
||||
}
|
||||
|
||||
// outputOptsFromStr parses the print-column metadata and generates printer.OutputOptions object.
|
||||
func outputOptsFromStr(columnStr string) (*printers.OutputOptions, bool) {
|
||||
func outputOptsFromStr(columnStr string, printOpts *printers.PrintOptions) bool {
|
||||
if columnStr == "" {
|
||||
return nil, false
|
||||
return false
|
||||
}
|
||||
parts := strings.SplitN(columnStr, "=", 2)
|
||||
if len(parts) < 2 {
|
||||
return nil, false
|
||||
return false
|
||||
}
|
||||
return &printers.OutputOptions{
|
||||
FmtType: parts[0],
|
||||
FmtArg: parts[1],
|
||||
AllowMissingKeys: true,
|
||||
}, true
|
||||
|
||||
printOpts.OutputFormatType = parts[0]
|
||||
printOpts.OutputFormatArgument = parts[1]
|
||||
printOpts.AllowMissingKeys = true
|
||||
|
||||
return true
|
||||
}
|
||||
|
@@ -352,17 +352,17 @@ func (f *FakeFactory) Describer(*meta.RESTMapping) (printers.Describer, error) {
|
||||
return f.tf.Describer, f.tf.Err
|
||||
}
|
||||
|
||||
func (f *FakeFactory) PrinterForCommand(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, options printers.PrintOptions) (printers.ResourcePrinter, error) {
|
||||
func (f *FakeFactory) PrinterForOptions(options *printers.PrintOptions) (printers.ResourcePrinter, error) {
|
||||
return f.tf.Printer, f.tf.Err
|
||||
}
|
||||
|
||||
func (f *FakeFactory) PrintResourceInfoForCommand(cmd *cobra.Command, info *resource.Info, out io.Writer) error {
|
||||
printer, err := f.PrinterForCommand(cmd, false, nil, printers.PrintOptions{})
|
||||
printer, err := f.PrinterForOptions(&printers.PrintOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !printer.IsGeneric() {
|
||||
printer, err = f.PrinterForMapping(cmd, false, nil, nil, false)
|
||||
printer, err = f.PrinterForMapping(&printers.PrintOptions{}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -513,7 +513,7 @@ func (f *FakeFactory) PrintObject(cmd *cobra.Command, isLocal bool, mapper meta.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *FakeFactory) PrinterForMapping(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, mapping *meta.RESTMapping, withNamespace bool) (printers.ResourcePrinter, error) {
|
||||
func (f *FakeFactory) PrinterForMapping(printOpts *printers.PrintOptions, mapping *meta.RESTMapping) (printers.ResourcePrinter, error) {
|
||||
return f.tf.Printer, f.tf.Err
|
||||
}
|
||||
|
||||
@@ -744,17 +744,17 @@ func (f *fakeAPIFactory) UnstructuredClientForMapping(m *meta.RESTMapping) (reso
|
||||
return f.tf.UnstructuredClient, f.tf.Err
|
||||
}
|
||||
|
||||
func (f *fakeAPIFactory) PrinterForCommand(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, options printers.PrintOptions) (printers.ResourcePrinter, error) {
|
||||
func (f *fakeAPIFactory) PrinterForOptions(options *printers.PrintOptions) (printers.ResourcePrinter, error) {
|
||||
return f.tf.Printer, f.tf.Err
|
||||
}
|
||||
|
||||
func (f *fakeAPIFactory) PrintResourceInfoForCommand(cmd *cobra.Command, info *resource.Info, out io.Writer) error {
|
||||
printer, err := f.PrinterForCommand(cmd, false, nil, printers.PrintOptions{})
|
||||
printer, err := f.PrinterForOptions(&printers.PrintOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !printer.IsGeneric() {
|
||||
printer, err = f.PrinterForMapping(cmd, false, nil, nil, false)
|
||||
printer, err = f.PrinterForMapping(&printers.PrintOptions{}, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -851,14 +851,14 @@ func (f *fakeAPIFactory) PrintObject(cmd *cobra.Command, isLocal bool, mapper me
|
||||
return err
|
||||
}
|
||||
|
||||
printer, err := f.PrinterForMapping(cmd, isLocal, nil, mapping, false)
|
||||
printer, err := f.PrinterForMapping(&printers.PrintOptions{}, mapping)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return printer.PrintObj(obj, out)
|
||||
}
|
||||
|
||||
func (f *fakeAPIFactory) PrinterForMapping(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, mapping *meta.RESTMapping, withNamespace bool) (printers.ResourcePrinter, error) {
|
||||
func (f *fakeAPIFactory) PrinterForMapping(outputOpts *printers.PrintOptions, mapping *meta.RESTMapping) (printers.ResourcePrinter, error) {
|
||||
return f.tf.Printer, f.tf.Err
|
||||
}
|
||||
|
||||
|
@@ -140,8 +140,6 @@ type ClientAccessFactory interface {
|
||||
// BindExternalFlags adds any flags defined by external projects (not part of pflags)
|
||||
BindExternalFlags(flags *pflag.FlagSet)
|
||||
|
||||
// TODO: Break the dependency on cmd here.
|
||||
DefaultResourceFilterOptions(cmd *cobra.Command, withNamespace bool) *printers.PrintOptions
|
||||
// DefaultResourceFilterFunc returns a collection of FilterFuncs suitable for filtering specific resource types.
|
||||
DefaultResourceFilterFunc() kubectl.Filters
|
||||
|
||||
@@ -232,13 +230,12 @@ type BuilderFactory interface {
|
||||
// PrinterForCommand returns the default printer for the command. It requires that certain options
|
||||
// are declared on the command (see AddPrinterFlags). Returns a printer, or an error if a printer
|
||||
// could not be found.
|
||||
// TODO: Break the dependency on cmd here.
|
||||
PrinterForCommand(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, options printers.PrintOptions) (printers.ResourcePrinter, error)
|
||||
PrinterForOptions(options *printers.PrintOptions) (printers.ResourcePrinter, error)
|
||||
// PrinterForMapping returns a printer suitable for displaying the provided resource type.
|
||||
// Requires that printer flags have been added to cmd (see AddPrinterFlags).
|
||||
// Returns a printer, true if the printer is generic (is not internal), or
|
||||
// an error if a printer could not be found.
|
||||
PrinterForMapping(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, mapping *meta.RESTMapping, withNamespace bool) (printers.ResourcePrinter, error)
|
||||
PrinterForMapping(options *printers.PrintOptions, mapping *meta.RESTMapping) (printers.ResourcePrinter, error)
|
||||
// PrintObject prints an api object given command line flags to modify the output format
|
||||
PrintObject(cmd *cobra.Command, isLocal bool, mapper meta.RESTMapper, obj runtime.Object, out io.Writer) error
|
||||
// PrintResourceInfoForCommand receives a *cobra.Command and a *resource.Info and
|
||||
|
@@ -47,7 +47,7 @@ func NewBuilderFactory(clientAccessFactory ClientAccessFactory, objectMappingFac
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *ring2Factory) PrinterForCommand(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, options printers.PrintOptions) (printers.ResourcePrinter, error) {
|
||||
func (f *ring2Factory) PrinterForOptions(options *printers.PrintOptions) (printers.ResourcePrinter, error) {
|
||||
var mapper meta.RESTMapper
|
||||
var typer runtime.ObjectTyper
|
||||
|
||||
@@ -56,27 +56,11 @@ func (f *ring2Factory) PrinterForCommand(cmd *cobra.Command, isLocal bool, outpu
|
||||
// TODO: used by the custom column implementation and the name implementation, break this dependency
|
||||
decoders := []runtime.Decoder{f.clientAccessFactory.Decoder(true), unstructured.UnstructuredJSONScheme}
|
||||
encoder := f.clientAccessFactory.JSONEncoder()
|
||||
return PrinterForCommand(cmd, outputOpts, mapper, typer, encoder, decoders, options)
|
||||
return PrinterForOptions(mapper, typer, encoder, decoders, options)
|
||||
}
|
||||
|
||||
func (f *ring2Factory) PrinterForMapping(cmd *cobra.Command, isLocal bool, outputOpts *printers.OutputOptions, mapping *meta.RESTMapping, withNamespace bool) (printers.ResourcePrinter, error) {
|
||||
// Some callers do not have "label-columns" so we can't use the GetFlagStringSlice() helper
|
||||
columnLabel, err := cmd.Flags().GetStringSlice("label-columns")
|
||||
if err != nil {
|
||||
columnLabel = []string{}
|
||||
}
|
||||
|
||||
options := printers.PrintOptions{
|
||||
NoHeaders: GetFlagBool(cmd, "no-headers"),
|
||||
WithNamespace: withNamespace,
|
||||
Wide: GetWideFlag(cmd),
|
||||
ShowAll: GetFlagBool(cmd, "show-all"),
|
||||
ShowLabels: GetFlagBool(cmd, "show-labels"),
|
||||
AbsoluteTimestamps: isWatch(cmd),
|
||||
ColumnLabels: columnLabel,
|
||||
}
|
||||
|
||||
printer, err := f.PrinterForCommand(cmd, isLocal, outputOpts, options)
|
||||
func (f *ring2Factory) PrinterForMapping(options *printers.PrintOptions, mapping *meta.RESTMapping) (printers.ResourcePrinter, error) {
|
||||
printer, err := f.PrinterForOptions(options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -140,7 +124,7 @@ func (f *ring2Factory) PrintObject(cmd *cobra.Command, isLocal bool, mapper meta
|
||||
return err
|
||||
}
|
||||
|
||||
printer, err := f.PrinterForMapping(cmd, isLocal, nil, mapping, false)
|
||||
printer, err := f.PrinterForMapping(ExtractCmdPrintOptions(cmd, false), mapping)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -148,12 +132,13 @@ func (f *ring2Factory) PrintObject(cmd *cobra.Command, isLocal bool, mapper meta
|
||||
}
|
||||
|
||||
func (f *ring2Factory) PrintResourceInfoForCommand(cmd *cobra.Command, info *resource.Info, out io.Writer) error {
|
||||
printer, err := f.PrinterForCommand(cmd, false, nil, printers.PrintOptions{})
|
||||
printOpts := ExtractCmdPrintOptions(cmd, false)
|
||||
printer, err := f.PrinterForOptions(printOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !printer.IsGeneric() {
|
||||
printer, err = f.PrinterForMapping(cmd, false, nil, nil, false)
|
||||
printer, err = f.PrinterForMapping(printOpts, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -411,24 +411,6 @@ func (f *ring0Factory) BindExternalFlags(flags *pflag.FlagSet) {
|
||||
flags.AddGoFlagSet(flag.CommandLine)
|
||||
}
|
||||
|
||||
func (f *ring0Factory) DefaultResourceFilterOptions(cmd *cobra.Command, withNamespace bool) *printers.PrintOptions {
|
||||
columnLabel, err := cmd.Flags().GetStringSlice("label-columns")
|
||||
if err != nil {
|
||||
columnLabel = []string{}
|
||||
}
|
||||
opts := &printers.PrintOptions{
|
||||
NoHeaders: GetFlagBool(cmd, "no-headers"),
|
||||
WithNamespace: withNamespace,
|
||||
Wide: GetWideFlag(cmd),
|
||||
ShowAll: GetFlagBool(cmd, "show-all"),
|
||||
ShowLabels: GetFlagBool(cmd, "show-labels"),
|
||||
AbsoluteTimestamps: isWatch(cmd),
|
||||
ColumnLabels: columnLabel,
|
||||
}
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
func (f *ring0Factory) DefaultResourceFilterFunc() kubectl.Filters {
|
||||
return kubectl.NewResourceFilter()
|
||||
}
|
||||
|
@@ -81,23 +81,11 @@ func ValidateOutputArgs(cmd *cobra.Command) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PrinterForCommand returns the printer for the outputOptions (if given) or
|
||||
// PrinterForOptions returns the printer for the outputOptions (if given) or
|
||||
// returns the default printer for the command. Requires that printer flags have
|
||||
// been added to cmd (see AddPrinterFlags).
|
||||
// TODO: remove the dependency on cmd object
|
||||
func PrinterForCommand(cmd *cobra.Command, outputOpts *printers.OutputOptions, mapper meta.RESTMapper, typer runtime.ObjectTyper, encoder runtime.Encoder, decoders []runtime.Decoder, options printers.PrintOptions) (printers.ResourcePrinter, error) {
|
||||
|
||||
if outputOpts == nil {
|
||||
outputOpts = extractOutputOptions(cmd)
|
||||
}
|
||||
|
||||
// this function may be invoked by a command that did not call AddPrinterFlags first, so we need
|
||||
// to be safe about how we access the no-headers flag
|
||||
noHeaders := false
|
||||
if cmd.Flags().Lookup("no-headers") != nil {
|
||||
noHeaders = GetFlagBool(cmd, "no-headers")
|
||||
}
|
||||
printer, err := printers.GetStandardPrinter(outputOpts, noHeaders, mapper, typer, encoder, decoders, options)
|
||||
func PrinterForOptions(mapper meta.RESTMapper, typer runtime.ObjectTyper, encoder runtime.Encoder, decoders []runtime.Decoder, options *printers.PrintOptions) (printers.ResourcePrinter, error) {
|
||||
printer, err := printers.GetStandardPrinter(mapper, typer, encoder, decoders, *options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -109,19 +97,39 @@ func PrinterForCommand(cmd *cobra.Command, outputOpts *printers.OutputOptions, m
|
||||
printersinternal.AddHandlers(humanReadablePrinter)
|
||||
}
|
||||
|
||||
return maybeWrapSortingPrinter(cmd, printer), nil
|
||||
return maybeWrapSortingPrinter(printer, *options), nil
|
||||
}
|
||||
|
||||
// extractOutputOptions parses printer specific commandline args and returns
|
||||
// printers.OutputsOptions object.
|
||||
func extractOutputOptions(cmd *cobra.Command) *printers.OutputOptions {
|
||||
// ExtractCmdPrintOptions parses printer specific commandline args and
|
||||
// returns a PrintOptions object.
|
||||
// Requires that printer flags have been added to cmd (see AddPrinterFlags)
|
||||
func ExtractCmdPrintOptions(cmd *cobra.Command, withNamespace bool) *printers.PrintOptions {
|
||||
flags := cmd.Flags()
|
||||
|
||||
columnLabel, err := flags.GetStringSlice("label-columns")
|
||||
if err != nil {
|
||||
columnLabel = []string{}
|
||||
}
|
||||
|
||||
options := &printers.PrintOptions{
|
||||
NoHeaders: GetFlagBool(cmd, "no-headers"),
|
||||
Wide: GetWideFlag(cmd),
|
||||
ShowAll: GetFlagBool(cmd, "show-all"),
|
||||
ShowLabels: GetFlagBool(cmd, "show-labels"),
|
||||
AbsoluteTimestamps: isWatch(cmd),
|
||||
ColumnLabels: columnLabel,
|
||||
WithNamespace: withNamespace,
|
||||
}
|
||||
|
||||
var outputFormat string
|
||||
if flags.Lookup("output") != nil {
|
||||
outputFormat = GetFlagString(cmd, "output")
|
||||
}
|
||||
|
||||
if flags.Lookup("sort-by") != nil {
|
||||
options.SortBy = GetFlagString(cmd, "sort-by")
|
||||
}
|
||||
|
||||
// templates are logically optional for specifying a format.
|
||||
// TODO once https://github.com/kubernetes/kubernetes/issues/12668 is fixed, this should fall back to GetFlagString
|
||||
var templateFile string
|
||||
@@ -146,29 +154,21 @@ func extractOutputOptions(cmd *cobra.Command) *printers.OutputOptions {
|
||||
|
||||
// this function may be invoked by a command that did not call AddPrinterFlags first, so we need
|
||||
// to be safe about how we access the allow-missing-template-keys flag
|
||||
allowMissingTemplateKeys := false
|
||||
if flags.Lookup("allow-missing-template-keys") != nil {
|
||||
allowMissingTemplateKeys = GetFlagBool(cmd, "allow-missing-template-keys")
|
||||
options.AllowMissingKeys = GetFlagBool(cmd, "allow-missing-template-keys")
|
||||
}
|
||||
|
||||
return &printers.OutputOptions{
|
||||
FmtType: outputFormat,
|
||||
FmtArg: templateFile,
|
||||
AllowMissingKeys: allowMissingTemplateKeys,
|
||||
}
|
||||
options.OutputFormatType = outputFormat
|
||||
options.OutputFormatArgument = templateFile
|
||||
|
||||
return options
|
||||
}
|
||||
|
||||
func maybeWrapSortingPrinter(cmd *cobra.Command, printer printers.ResourcePrinter) printers.ResourcePrinter {
|
||||
sorting, err := cmd.Flags().GetString("sort-by")
|
||||
if err != nil {
|
||||
// error can happen on missing flag or bad flag type. In either case, this command didn't intent to sort
|
||||
return printer
|
||||
}
|
||||
|
||||
if len(sorting) != 0 {
|
||||
func maybeWrapSortingPrinter(printer printers.ResourcePrinter, printOpts printers.PrintOptions) printers.ResourcePrinter {
|
||||
if len(printOpts.SortBy) != 0 {
|
||||
return &kubectl.SortingPrinter{
|
||||
Delegate: printer,
|
||||
SortField: fmt.Sprintf("{%s}", sorting),
|
||||
SortField: fmt.Sprintf("{%s}", printOpts.SortBy),
|
||||
}
|
||||
}
|
||||
return printer
|
||||
|
@@ -25,3 +25,31 @@ const (
    // NetworkReady means the runtime network is up and ready to accept containers which require network.
    NetworkReady = "NetworkReady"
)

// LogStreamType is the type of the stream in CRI container log.
type LogStreamType string

const (
    // Stdout is the stream type for stdout.
    Stdout LogStreamType = "stdout"
    // Stderr is the stream type for stderr.
    Stderr LogStreamType = "stderr"
)

// LogTag is the tag of a log line in CRI container log.
// Currently defined log tags:
// * First tag: Partial/Full - P/F.
// The field in the container log format can be extended to include multiple
// tags by using a delimiter, but changes should be rare. If it becomes clear
// that better extensibility is desired, a more extensible format (e.g., json)
// should be adopted as a replacement and/or addition.
type LogTag string

const (
    // LogTagPartial means the line is part of multiple lines.
    LogTagPartial LogTag = "P"
    // LogTagFull means the line is a single full line or the end of multiple lines.
    LogTagFull LogTag = "F"
    // LogTagDelimiter is the delimiter for different log tags.
    LogTagDelimiter = ":"
)

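The constants above describe the CRI log line layout used throughout this change: `<RFC3339Nano timestamp> <stream> <tag(s)> <log>`. Below is a minimal standalone sketch of splitting such a line, for illustration only; the kubelet's real parser, parseCRILog, appears later in this diff.

// Standalone illustration (not kubelet code) of the CRI log line layout
// "<timestamp> <stream> <tag> <log>" described by the constants above.
package main

import (
    "fmt"
    "strings"
    "time"
)

func main() {
    line := "2016-10-06T00:17:09.669794202Z stdout P log content 1"
    parts := strings.SplitN(line, " ", 4) // timestamp, stream, tag(s), remaining log text
    ts, err := time.Parse(time.RFC3339Nano, parts[0])
    if err != nil {
        panic(err)
    }
    // Only the first tag decides partial ("P") vs full ("F"); extra tags are ":"-delimited.
    partial := strings.Split(parts[2], ":")[0] == "P"
    fmt.Println(ts.UTC(), parts[1], partial, parts[3])
}
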
@@ -233,7 +233,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act

    activePods := podFunc()
    // make observations and get a function to derive pod usage stats relative to those observations.
    observations, statsFunc, err := makeSignalObservations(m.summaryProvider, capacityProvider, activePods, *m.dedicatedImageFs)
    observations, statsFunc, err := makeSignalObservations(m.summaryProvider, capacityProvider, activePods)
    if err != nil {
        glog.Errorf("eviction manager: unexpected err: %v", err)
        return nil

@@ -1436,297 +1436,3 @@ func TestAllocatableMemoryPressure(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestAllocatableNodeFsPressure
|
||||
func TestAllocatableNodeFsPressure(t *testing.T) {
|
||||
utilfeature.DefaultFeatureGate.Set("LocalStorageCapacityIsolation=True")
|
||||
enablePodPriority(true)
|
||||
podMaker := makePodWithDiskStats
|
||||
summaryStatsMaker := makeDiskStats
|
||||
|
||||
podsToMake := []podToMake{
|
||||
{name: "low-priority-high-usage", priority: lowPriority, requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsUsed: "900Mi"},
|
||||
{name: "below-requests", priority: defaultPriority, requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), logsFsUsed: "50Mi"},
|
||||
{name: "above-requests", priority: defaultPriority, requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "2Gi"), rootFsUsed: "1750Mi"},
|
||||
{name: "high-priority-high-usage", priority: highPriority, requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsUsed: "400Mi"},
|
||||
{name: "low-priority-low-usage", priority: lowPriority, requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsUsed: "100Mi"},
|
||||
}
|
||||
pods := []*v1.Pod{}
|
||||
podStats := map[*v1.Pod]statsapi.PodStats{}
|
||||
for _, podToMake := range podsToMake {
|
||||
pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
|
||||
pods = append(pods, pod)
|
||||
podStats[pod] = podStat
|
||||
}
|
||||
podToEvict := pods[0]
|
||||
activePodsFunc := func() []*v1.Pod {
|
||||
return pods
|
||||
}
|
||||
|
||||
fakeClock := clock.NewFakeClock(time.Now())
|
||||
podKiller := &mockPodKiller{}
|
||||
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
|
||||
capacityProvider := newMockCapacityProvider(newEphemeralStorageResourceList("6Gi", "1000m", "10Gi"), newEphemeralStorageResourceList("1Gi", "1000m", "10Gi"))
|
||||
diskGC := &mockDiskGC{imageBytesFreed: int64(0), err: nil}
|
||||
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
|
||||
|
||||
config := Config{
|
||||
MaxPodGracePeriodSeconds: 5,
|
||||
PressureTransitionPeriod: time.Minute * 5,
|
||||
Thresholds: []evictionapi.Threshold{
|
||||
{
|
||||
Signal: evictionapi.SignalAllocatableNodeFsAvailable,
|
||||
Operator: evictionapi.OpLessThan,
|
||||
Value: evictionapi.ThresholdValue{
|
||||
Quantity: quantityMustParse("1Ki"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("3Gi", "6Gi", podStats)}
|
||||
manager := &managerImpl{
|
||||
clock: fakeClock,
|
||||
killPodFunc: podKiller.killPodNow,
|
||||
imageGC: diskGC,
|
||||
containerGC: diskGC,
|
||||
config: config,
|
||||
recorder: &record.FakeRecorder{},
|
||||
summaryProvider: summaryProvider,
|
||||
nodeRef: nodeRef,
|
||||
nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
|
||||
thresholdsFirstObservedAt: thresholdsObservedAt{},
|
||||
}
|
||||
|
||||
// create a best effort pod to test admission
|
||||
bestEffortPodToAdmit, _ := podMaker("best-admit", defaultPriority, newEphemeralStorageResourceList("", "", ""), newEphemeralStorageResourceList("", "", ""), "0Gi", "", "")
|
||||
burstablePodToAdmit, _ := podMaker("burst-admit", defaultPriority, newEphemeralStorageResourceList("1Gi", "", ""), newEphemeralStorageResourceList("1Gi", "", ""), "1Gi", "", "")
|
||||
|
||||
// synchronize
|
||||
manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
|
||||
|
||||
// we should not have disk pressure
|
||||
if manager.IsUnderDiskPressure() {
|
||||
t.Fatalf("Manager should not report disk pressure")
|
||||
}
|
||||
|
||||
// try to admit our pods (they should succeed)
|
||||
expected := []bool{true, true}
|
||||
for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
|
||||
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
|
||||
t.Fatalf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
|
||||
}
|
||||
}
|
||||
|
||||
// induce disk pressure!
|
||||
fakeClock.Step(1 * time.Minute)
|
||||
pod, podStat := podMaker("guaranteed-high-2", defaultPriority, newEphemeralStorageResourceList("2000Mi", "100m", "1Gi"), newEphemeralStorageResourceList("2000Mi", "100m", "1Gi"), "2000Mi", "", "")
|
||||
podStats[pod] = podStat
|
||||
pods = append(pods, pod)
|
||||
summaryProvider.result = summaryStatsMaker("6Gi", "6Gi", podStats)
|
||||
manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
|
||||
|
||||
// we should have disk pressure
|
||||
if !manager.IsUnderDiskPressure() {
|
||||
t.Fatalf("Manager should report disk pressure")
|
||||
}
|
||||
|
||||
// check the right pod was killed
|
||||
if podKiller.pod != podToEvict {
|
||||
t.Fatalf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, podToEvict.Name)
|
||||
}
|
||||
observedGracePeriod := *podKiller.gracePeriodOverride
|
||||
if observedGracePeriod != int64(0) {
|
||||
t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
|
||||
}
|
||||
// reset state
|
||||
podKiller.pod = nil
|
||||
podKiller.gracePeriodOverride = nil
|
||||
|
||||
// try to admit our pod (should fail)
|
||||
expected = []bool{false, false}
|
||||
for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
|
||||
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
|
||||
t.Fatalf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
|
||||
}
|
||||
}
|
||||
|
||||
// reduce disk pressure
|
||||
fakeClock.Step(1 * time.Minute)
|
||||
pods[5] = pods[len(pods)-1]
|
||||
pods = pods[:len(pods)-1]
|
||||
|
||||
// we should have disk pressure (because transition period not yet met)
|
||||
if !manager.IsUnderDiskPressure() {
|
||||
t.Fatalf("Manager should report disk pressure")
|
||||
}
|
||||
|
||||
// try to admit our pod (should fail)
|
||||
expected = []bool{false, false}
|
||||
for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
|
||||
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
|
||||
t.Fatalf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
|
||||
}
|
||||
}
|
||||
|
||||
// move the clock past transition period to ensure that we stop reporting pressure
|
||||
fakeClock.Step(5 * time.Minute)
|
||||
manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
|
||||
|
||||
// we should not have disk pressure (because transition period met)
|
||||
if manager.IsUnderDiskPressure() {
|
||||
t.Fatalf("Manager should not report disk pressure")
|
||||
}
|
||||
|
||||
// no pod should have been killed
|
||||
if podKiller.pod != nil {
|
||||
t.Fatalf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod.Name)
|
||||
}
|
||||
|
||||
// all pods should admit now
|
||||
expected = []bool{true, true}
|
||||
for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
|
||||
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
|
||||
t.Fatalf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeReclaimForAllocatableFuncs(t *testing.T) {
|
||||
utilfeature.DefaultFeatureGate.Set("LocalStorageCapacityIsolation=True")
|
||||
enablePodPriority(true)
|
||||
podMaker := makePodWithDiskStats
|
||||
summaryStatsMaker := makeDiskStats
|
||||
podsToMake := []podToMake{
|
||||
{name: "low-priority-high-usage", priority: lowPriority, requests: newResourceList("100m", "1Gi"), limits: newResourceList("100m", "1Gi"), rootFsUsed: "900Mi"},
|
||||
{name: "below-requests", priority: defaultPriority, requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "1Gi"), logsFsUsed: "50Mi"},
|
||||
{name: "above-requests", priority: defaultPriority, requests: newResourceList("100m", "100Mi"), limits: newResourceList("200m", "2Gi"), rootFsUsed: "1750Mi"},
|
||||
{name: "high-priority-high-usage", priority: highPriority, requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsUsed: "400Mi"},
|
||||
{name: "low-priority-low-usage", priority: lowPriority, requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsUsed: "100Mi"},
|
||||
}
|
||||
pods := []*v1.Pod{}
|
||||
podStats := map[*v1.Pod]statsapi.PodStats{}
|
||||
for _, podToMake := range podsToMake {
|
||||
pod, podStat := podMaker(podToMake.name, podToMake.priority, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
|
||||
pods = append(pods, pod)
|
||||
podStats[pod] = podStat
|
||||
}
|
||||
podToEvict := pods[0]
|
||||
activePodsFunc := func() []*v1.Pod {
|
||||
return pods
|
||||
}
|
||||
|
||||
fakeClock := clock.NewFakeClock(time.Now())
|
||||
podKiller := &mockPodKiller{}
|
||||
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
|
||||
capacityProvider := newMockCapacityProvider(newEphemeralStorageResourceList("6Gi", "1000m", "10Gi"), newEphemeralStorageResourceList("1Gi", "1000m", "10Gi"))
|
||||
imageGcFree := resource.MustParse("800Mi")
|
||||
diskGC := &mockDiskGC{imageBytesFreed: imageGcFree.Value(), err: nil}
|
||||
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
|
||||
|
||||
config := Config{
|
||||
MaxPodGracePeriodSeconds: 5,
|
||||
PressureTransitionPeriod: time.Minute * 5,
|
||||
Thresholds: []evictionapi.Threshold{
|
||||
{
|
||||
Signal: evictionapi.SignalAllocatableNodeFsAvailable,
|
||||
Operator: evictionapi.OpLessThan,
|
||||
Value: evictionapi.ThresholdValue{
|
||||
Quantity: quantityMustParse("10Mi"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
summaryProvider := &fakeSummaryProvider{result: summaryStatsMaker("6Gi", "6Gi", podStats)}
|
||||
manager := &managerImpl{
|
||||
clock: fakeClock,
|
||||
killPodFunc: podKiller.killPodNow,
|
||||
imageGC: diskGC,
|
||||
containerGC: diskGC,
|
||||
config: config,
|
||||
recorder: &record.FakeRecorder{},
|
||||
summaryProvider: summaryProvider,
|
||||
nodeRef: nodeRef,
|
||||
nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
|
||||
thresholdsFirstObservedAt: thresholdsObservedAt{},
|
||||
}
|
||||
|
||||
// synchronize
|
||||
manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
|
||||
|
||||
// we should not have disk pressure
|
||||
if manager.IsUnderDiskPressure() {
|
||||
t.Errorf("Manager should not report disk pressure")
|
||||
}
|
||||
|
||||
// induce hard threshold
|
||||
fakeClock.Step(1 * time.Minute)
|
||||
|
||||
pod, podStat := podMaker("guaranteed-high-2", defaultPriority, newEphemeralStorageResourceList("2000Mi", "100m", "1Gi"), newEphemeralStorageResourceList("2000Mi", "100m", "1Gi"), "2000Mi", "", "")
|
||||
podStats[pod] = podStat
|
||||
pods = append(pods, pod)
|
||||
summaryProvider.result = summaryStatsMaker("6Gi", "6Gi", podStats)
|
||||
manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
|
||||
|
||||
// we should have disk pressure
|
||||
if !manager.IsUnderDiskPressure() {
|
||||
t.Fatalf("Manager should report disk pressure since soft threshold was met")
|
||||
}
|
||||
|
||||
// verify image gc was invoked
|
||||
if !diskGC.imageGCInvoked || !diskGC.containerGCInvoked {
|
||||
t.Fatalf("Manager should have invoked image gc")
|
||||
}
|
||||
|
||||
// verify a pod was killed even though image gc was invoked
|
||||
if podKiller.pod == nil {
|
||||
t.Fatalf("Manager should have killed a pod, but not killed")
|
||||
}
|
||||
// check the right pod was killed
|
||||
if podKiller.pod != podToEvict {
|
||||
t.Fatalf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, podToEvict.Name)
|
||||
}
|
||||
observedGracePeriod := *podKiller.gracePeriodOverride
|
||||
if observedGracePeriod != int64(0) {
|
||||
t.Fatalf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
|
||||
}
|
||||
|
||||
// reset state
|
||||
diskGC.imageGCInvoked = false
|
||||
diskGC.containerGCInvoked = false
|
||||
podKiller.pod = nil
|
||||
podKiller.gracePeriodOverride = nil
|
||||
|
||||
// reduce disk pressure
|
||||
fakeClock.Step(1 * time.Minute)
|
||||
pods[5] = pods[len(pods)-1]
|
||||
pods = pods[:len(pods)-1]
|
||||
|
||||
// we should have disk pressure (because transition period not yet met)
|
||||
if !manager.IsUnderDiskPressure() {
|
||||
t.Fatalf("Manager should report disk pressure")
|
||||
}
|
||||
|
||||
// move the clock past transition period to ensure that we stop reporting pressure
|
||||
fakeClock.Step(5 * time.Minute)
|
||||
manager.synchronize(diskInfoProvider, activePodsFunc, capacityProvider)
|
||||
|
||||
// we should not have disk pressure (because transition period met)
|
||||
if manager.IsUnderDiskPressure() {
|
||||
t.Fatalf("Manager should not report disk pressure")
|
||||
}
|
||||
|
||||
// no pod should have been killed
|
||||
if podKiller.pod != nil {
|
||||
t.Fatalf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod.Name)
|
||||
}
|
||||
|
||||
// no image gc should have occurred
|
||||
if diskGC.imageGCInvoked || diskGC.containerGCInvoked {
|
||||
t.Errorf("Manager chose to perform image gc when it was not neeed")
|
||||
}
|
||||
|
||||
// no pod should have been killed
|
||||
if podKiller.pod != nil {
|
||||
t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod.Name)
|
||||
}
|
||||
}
|
||||
|
@@ -73,13 +73,11 @@ func init() {
|
||||
signalToNodeCondition[evictionapi.SignalNodeFsAvailable] = v1.NodeDiskPressure
|
||||
signalToNodeCondition[evictionapi.SignalImageFsInodesFree] = v1.NodeDiskPressure
|
||||
signalToNodeCondition[evictionapi.SignalNodeFsInodesFree] = v1.NodeDiskPressure
|
||||
signalToNodeCondition[evictionapi.SignalAllocatableNodeFsAvailable] = v1.NodeDiskPressure
|
||||
|
||||
// map signals to resources (and vice-versa)
|
||||
signalToResource = map[evictionapi.Signal]v1.ResourceName{}
|
||||
signalToResource[evictionapi.SignalMemoryAvailable] = v1.ResourceMemory
|
||||
signalToResource[evictionapi.SignalAllocatableMemoryAvailable] = v1.ResourceMemory
|
||||
signalToResource[evictionapi.SignalAllocatableNodeFsAvailable] = resourceNodeFs
|
||||
signalToResource[evictionapi.SignalImageFsAvailable] = resourceImageFs
|
||||
signalToResource[evictionapi.SignalImageFsInodesFree] = resourceImageFsInodes
|
||||
signalToResource[evictionapi.SignalNodeFsAvailable] = resourceNodeFs
|
||||
@@ -212,16 +210,6 @@ func getAllocatableThreshold(allocatableConfig []string) []evictionapi.Threshold
|
||||
Quantity: resource.NewQuantity(int64(0), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
{
|
||||
Signal: evictionapi.SignalAllocatableNodeFsAvailable,
|
||||
Operator: evictionapi.OpLessThan,
|
||||
Value: evictionapi.ThresholdValue{
|
||||
Quantity: resource.NewQuantity(int64(0), resource.BinarySI),
|
||||
},
|
||||
MinReclaim: &evictionapi.ThresholdValue{
|
||||
Quantity: resource.NewQuantity(int64(0), resource.BinarySI),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -704,7 +692,7 @@ func (a byEvictionPriority) Less(i, j int) bool {
|
||||
}
|
||||
|
||||
// makeSignalObservations derives observations using the specified summary provider.
|
||||
func makeSignalObservations(summaryProvider stats.SummaryProvider, capacityProvider CapacityProvider, pods []*v1.Pod, withImageFs bool) (signalObservations, statsFunc, error) {
|
||||
func makeSignalObservations(summaryProvider stats.SummaryProvider, capacityProvider CapacityProvider, pods []*v1.Pod) (signalObservations, statsFunc, error) {
|
||||
summary, err := summaryProvider.Get()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@@ -756,11 +744,11 @@ func makeSignalObservations(summaryProvider stats.SummaryProvider, capacityProvi
|
||||
}
|
||||
}
|
||||
|
||||
nodeCapacity := capacityProvider.GetCapacity()
|
||||
allocatableReservation := capacityProvider.GetNodeAllocatableReservation()
|
||||
|
||||
memoryAllocatableCapacity, memoryAllocatableAvailable, exist := getResourceAllocatable(nodeCapacity, allocatableReservation, v1.ResourceMemory)
|
||||
if exist {
|
||||
if memoryAllocatableCapacity, ok := capacityProvider.GetCapacity()[v1.ResourceMemory]; ok {
|
||||
memoryAllocatableAvailable := memoryAllocatableCapacity.Copy()
|
||||
if reserved, exists := capacityProvider.GetNodeAllocatableReservation()[v1.ResourceMemory]; exists {
|
||||
memoryAllocatableAvailable.Sub(reserved)
|
||||
}
|
||||
for _, pod := range summary.Pods {
|
||||
mu, err := podMemoryUsage(pod)
|
||||
if err == nil {
|
||||
@@ -769,55 +757,15 @@ func makeSignalObservations(summaryProvider stats.SummaryProvider, capacityProvi
|
||||
}
|
||||
result[evictionapi.SignalAllocatableMemoryAvailable] = signalObservation{
|
||||
available: memoryAllocatableAvailable,
|
||||
capacity: memoryAllocatableCapacity,
|
||||
}
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
|
||||
ephemeralStorageCapacity, ephemeralStorageAllocatable, exist := getResourceAllocatable(nodeCapacity, allocatableReservation, v1.ResourceEphemeralStorage)
|
||||
if exist {
|
||||
for _, pod := range pods {
|
||||
podStat, ok := statsFunc(pod)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
fsStatsSet := []fsStatsType{}
|
||||
if withImageFs {
|
||||
fsStatsSet = []fsStatsType{fsStatsLogs, fsStatsLocalVolumeSource}
|
||||
} else {
|
||||
fsStatsSet = []fsStatsType{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource}
|
||||
}
|
||||
|
||||
usage, err := podDiskUsage(podStat, pod, fsStatsSet)
|
||||
if err != nil {
|
||||
glog.Warningf("eviction manager: error getting pod disk usage %v", err)
|
||||
continue
|
||||
}
|
||||
ephemeralStorageAllocatable.Sub(usage[resourceDisk])
|
||||
}
|
||||
result[evictionapi.SignalAllocatableNodeFsAvailable] = signalObservation{
|
||||
available: ephemeralStorageAllocatable,
|
||||
capacity: ephemeralStorageCapacity,
|
||||
}
|
||||
capacity: &memoryAllocatableCapacity,
|
||||
}
|
||||
} else {
|
||||
glog.Errorf("Could not find capacity information for resource %v", v1.ResourceMemory)
|
||||
}
|
||||
|
||||
return result, statsFunc, nil
|
||||
}
|
||||
|
||||
func getResourceAllocatable(capacity v1.ResourceList, reservation v1.ResourceList, resourceName v1.ResourceName) (*resource.Quantity, *resource.Quantity, bool) {
|
||||
if capacity, ok := capacity[resourceName]; ok {
|
||||
allocate := capacity.Copy()
|
||||
if reserved, exists := reservation[resourceName]; exists {
|
||||
allocate.Sub(reserved)
|
||||
}
|
||||
return capacity.Copy(), allocate, true
|
||||
}
|
||||
glog.Errorf("Could not find capacity information for resource %v", resourceName)
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// thresholdsMet returns the set of thresholds that were met independent of grace period
|
||||
func thresholdsMet(thresholds []evictionapi.Threshold, observations signalObservations, enforceMinReclaim bool) []evictionapi.Threshold {
|
||||
results := []evictionapi.Threshold{}
|
||||
|
@@ -78,16 +78,6 @@ func TestParseThresholdConfig(t *testing.T) {
|
||||
Quantity: quantityMustParse("0"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Signal: evictionapi.SignalAllocatableNodeFsAvailable,
|
||||
Operator: evictionapi.OpLessThan,
|
||||
Value: evictionapi.ThresholdValue{
|
||||
Quantity: quantityMustParse("0"),
|
||||
},
|
||||
MinReclaim: &evictionapi.ThresholdValue{
|
||||
Quantity: quantityMustParse("0"),
|
||||
},
|
||||
},
|
||||
{
|
||||
Signal: evictionapi.SignalMemoryAvailable,
|
||||
Operator: evictionapi.OpLessThan,
|
||||
@@ -793,7 +783,7 @@ func TestMakeSignalObservations(t *testing.T) {
|
||||
if res.CmpInt64(int64(allocatableMemoryCapacity)) != 0 {
|
||||
t.Errorf("Expected Threshold %v to be equal to value %v", res.Value(), allocatableMemoryCapacity)
|
||||
}
|
||||
actualObservations, statsFunc, err := makeSignalObservations(provider, capacityProvider, pods, false)
|
||||
actualObservations, statsFunc, err := makeSignalObservations(provider, capacityProvider, pods)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected err: %v", err)
|
||||
}
|
||||
|
@@ -22,6 +22,7 @@ go_test(
|
||||
importpath = "k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs",
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
|
@@ -45,13 +45,7 @@ import (
// * If the rotation is using copytruncate, we'll be reading at the original position and get nothing.
// TODO(random-liu): Support log rotation.

// streamType is the type of the stream.
type streamType string

const (
    stderrType streamType = "stderr"
    stdoutType streamType = "stdout"

    // timeFormat is the time format used in the log.
    timeFormat = time.RFC3339Nano
    // blockSize is the block size used in tail.
@@ -66,14 +60,16 @@ const (

var (
    // eol is the end-of-line sign in the log.
    eol = []byte{'\n'}
    // delimiter is the delimiter for timestamp and streamtype in log line.
    // delimiter is the delimiter for timestamp and stream type in log line.
    delimiter = []byte{' '}
    // tagDelimiter is the delimiter for log tags.
    tagDelimiter = []byte(runtimeapi.LogTagDelimiter)
)

// logMessage is the CRI internal log type.
type logMessage struct {
    timestamp time.Time
    stream    streamType
    stream    runtimeapi.LogStreamType
    log       []byte
}

@@ -126,8 +122,8 @@ var parseFuncs = []parseFunc{
}

// parseCRILog parses logs in CRI log format. CRI Log format example:
// 2016-10-06T00:17:09.669794202Z stdout log content 1
// 2016-10-06T00:17:09.669794203Z stderr log content 2
// 2016-10-06T00:17:09.669794202Z stdout P log content 1
// 2016-10-06T00:17:09.669794203Z stderr F log content 2
func parseCRILog(log []byte, msg *logMessage) error {
    var err error
    // Parse timestamp
@@ -146,11 +142,25 @@ func parseCRILog(log []byte, msg *logMessage) error {
    if idx < 0 {
        return fmt.Errorf("stream type is not found")
    }
    msg.stream = streamType(log[:idx])
    if msg.stream != stdoutType && msg.stream != stderrType {
    msg.stream = runtimeapi.LogStreamType(log[:idx])
    if msg.stream != runtimeapi.Stdout && msg.stream != runtimeapi.Stderr {
        return fmt.Errorf("unexpected stream type %q", msg.stream)
    }

    // Parse log tag
    log = log[idx+1:]
    idx = bytes.Index(log, delimiter)
    if idx < 0 {
        return fmt.Errorf("log tag is not found")
    }
    // Keep this forward compatible.
    tags := bytes.Split(log[:idx], tagDelimiter)
    partial := (runtimeapi.LogTag(tags[0]) == runtimeapi.LogTagPartial)
    // Trim the trailing newline if this is a partial line.
    if partial && len(log) > 0 && log[len(log)-1] == '\n' {
        log = log[:len(log)-1]
    }

    // Get log content
    msg.log = log[idx+1:]

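parseCRILog above only marks a line as partial; logical lines are rebuilt as entries are written out. As a hedged, standalone illustration of the intended semantics (not the kubelet's writer), partial ("P") entries accumulate until a full/end ("F") entry closes the logical line:

// Standalone sketch of joining CRI partial log entries into logical lines.
package main

import (
    "fmt"
    "strings"
)

func main() {
    entries := []struct{ tag, text string }{
        {"P", "a very long line, part 1, "},
        {"P", "part 2, "},
        {"F", "part 3"},
        {"F", "a short single-entry line"},
    }
    var buf strings.Builder
    for _, e := range entries {
        buf.WriteString(e.text)
        if strings.Split(e.tag, ":")[0] == "F" { // first tag ends the logical line
            fmt.Println(buf.String())
            buf.Reset()
        }
    }
}
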
@@ -170,7 +180,7 @@ func parseDockerJSONLog(log []byte, msg *logMessage) error {
|
||||
return fmt.Errorf("failed with %v to unmarshal log %q", err, l)
|
||||
}
|
||||
msg.timestamp = l.Created
|
||||
msg.stream = streamType(l.Stream)
|
||||
msg.stream = runtimeapi.LogStreamType(l.Stream)
|
||||
msg.log = []byte(l.Log)
|
||||
return nil
|
||||
}
|
||||
@@ -230,9 +240,9 @@ func (w *logWriter) write(msg *logMessage) error {
|
||||
// Get the proper stream to write to.
|
||||
var stream io.Writer
|
||||
switch msg.stream {
|
||||
case stdoutType:
|
||||
case runtimeapi.Stdout:
|
||||
stream = w.stdout
|
||||
case stderrType:
|
||||
case runtimeapi.Stderr:
|
||||
stream = w.stderr
|
||||
default:
|
||||
return fmt.Errorf("unexpected stream type %q", msg.stream)
|
||||
@@ -277,63 +287,47 @@ func ReadLogs(path, containerID string, opts *LogOptions, runtimeService interna
|
||||
// Do not create watcher here because it is not needed if `Follow` is false.
|
||||
var watcher *fsnotify.Watcher
|
||||
var parse parseFunc
|
||||
var stop bool
|
||||
writer := newLogWriter(stdout, stderr, opts)
|
||||
msg := &logMessage{}
|
||||
for {
|
||||
if stop {
|
||||
glog.V(2).Infof("Finish parsing log file %q", path)
|
||||
return nil
|
||||
}
|
||||
l, err := r.ReadBytes(eol[0])
|
||||
if err != nil {
|
||||
if err != io.EOF { // This is a real error
|
||||
return fmt.Errorf("failed to read log file %q: %v", path, err)
|
||||
}
|
||||
if !opts.follow {
|
||||
// Return directly when reading to the end if not follow.
|
||||
if len(l) > 0 {
|
||||
glog.Warningf("Incomplete line in log file %q: %q", path, l)
|
||||
if parse == nil {
|
||||
// Initialize the log parsing function.
|
||||
parse, err = getParseFunc(l)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get parse function: %v", err)
|
||||
}
|
||||
if opts.follow {
|
||||
// Reset seek so that if this is an incomplete line,
|
||||
// it will be read again.
|
||||
if _, err := f.Seek(-int64(len(l)), os.SEEK_CUR); err != nil {
|
||||
return fmt.Errorf("failed to reset seek in log file %q: %v", path, err)
|
||||
}
|
||||
if watcher == nil {
|
||||
// Initialize the watcher if it has not been initialized yet.
|
||||
if watcher, err = fsnotify.NewWatcher(); err != nil {
|
||||
return fmt.Errorf("failed to create fsnotify watcher: %v", err)
|
||||
}
|
||||
// Log a warning and exit if we can't parse the partial line.
|
||||
if err := parse(l, msg); err != nil {
|
||||
glog.Warningf("Failed with err %v when parsing partial line for log file %q: %q", err, path, l)
|
||||
return nil
|
||||
}
|
||||
// Write the log line into the stream.
|
||||
if err := writer.write(msg); err != nil {
|
||||
if err == errMaximumWrite {
|
||||
glog.V(2).Infof("Finish parsing log file %q, hit bytes limit %d(bytes)", path, opts.bytes)
|
||||
return nil
|
||||
}
|
||||
glog.Errorf("Failed with err %v when writing partial log for log file %q: %+v", err, path, msg)
|
||||
return err
|
||||
defer watcher.Close()
|
||||
if err := watcher.Add(f.Name()); err != nil {
|
||||
return fmt.Errorf("failed to watch file %q: %v", f.Name(), err)
|
||||
}
|
||||
}
|
||||
glog.V(2).Infof("Finish parsing log file %q", path)
|
||||
return nil
|
||||
}
|
||||
// Reset seek so that if this is an incomplete line,
|
||||
// it will be read again.
|
||||
if _, err := f.Seek(-int64(len(l)), os.SEEK_CUR); err != nil {
|
||||
return fmt.Errorf("failed to reset seek in log file %q: %v", path, err)
|
||||
}
|
||||
if watcher == nil {
|
||||
// Initialize the watcher if it has not been initialized yet.
|
||||
if watcher, err = fsnotify.NewWatcher(); err != nil {
|
||||
return fmt.Errorf("failed to create fsnotify watcher: %v", err)
|
||||
}
|
||||
defer watcher.Close()
|
||||
if err := watcher.Add(f.Name()); err != nil {
|
||||
return fmt.Errorf("failed to watch file %q: %v", f.Name(), err)
|
||||
// Wait until the next log change.
|
||||
if found, err := waitLogs(containerID, watcher, runtimeService); !found {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Wait until the next log change.
|
||||
if found, err := waitLogs(containerID, watcher, runtimeService); !found {
|
||||
return err
|
||||
// Should stop after writing the remaining content.
|
||||
stop = true
|
||||
if len(l) == 0 {
|
||||
continue
|
||||
}
|
||||
continue
|
||||
glog.Warningf("Incomplete line in log file %q: %q", path, l)
|
||||
}
|
||||
if parse == nil {
|
||||
// Initialize the log parsing function.
|
||||
|
@@ -25,6 +25,7 @@ import (
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
)
|
||||
|
||||
func TestLogOptions(t *testing.T) {
|
||||
@@ -78,7 +79,7 @@ func TestParseLog(t *testing.T) {
|
||||
line: `{"log":"docker stdout test log","stream":"stdout","time":"2016-10-20T18:39:20.57606443Z"}` + "\n",
|
||||
msg: &logMessage{
|
||||
timestamp: timestamp,
|
||||
stream: stdoutType,
|
||||
stream: runtimeapi.Stdout,
|
||||
log: []byte("docker stdout test log"),
|
||||
},
|
||||
},
|
||||
@@ -86,23 +87,23 @@ func TestParseLog(t *testing.T) {
|
||||
line: `{"log":"docker stderr test log","stream":"stderr","time":"2016-10-20T18:39:20.57606443Z"}` + "\n",
|
||||
msg: &logMessage{
|
||||
timestamp: timestamp,
|
||||
stream: stderrType,
|
||||
stream: runtimeapi.Stderr,
|
||||
log: []byte("docker stderr test log"),
|
||||
},
|
||||
},
|
||||
{ // CRI log format stdout
|
||||
line: "2016-10-20T18:39:20.57606443Z stdout cri stdout test log\n",
|
||||
line: "2016-10-20T18:39:20.57606443Z stdout F cri stdout test log\n",
|
||||
msg: &logMessage{
|
||||
timestamp: timestamp,
|
||||
stream: stdoutType,
|
||||
stream: runtimeapi.Stdout,
|
||||
log: []byte("cri stdout test log\n"),
|
||||
},
|
||||
},
|
||||
{ // CRI log format stderr
|
||||
line: "2016-10-20T18:39:20.57606443Z stderr cri stderr test log\n",
|
||||
line: "2016-10-20T18:39:20.57606443Z stderr F cri stderr test log\n",
|
||||
msg: &logMessage{
|
||||
timestamp: timestamp,
|
||||
stream: stderrType,
|
||||
stream: runtimeapi.Stderr,
|
||||
log: []byte("cri stderr test log\n"),
|
||||
},
|
||||
},
|
||||
@@ -111,6 +112,22 @@ func TestParseLog(t *testing.T) {
|
||||
msg: &logMessage{},
|
||||
err: true,
|
||||
},
|
||||
{ // Partial CRI log line
|
||||
line: "2016-10-20T18:39:20.57606443Z stdout P cri stdout partial test log\n",
|
||||
msg: &logMessage{
|
||||
timestamp: timestamp,
|
||||
stream: runtimeapi.Stdout,
|
||||
log: []byte("cri stdout partial test log"),
|
||||
},
|
||||
},
|
||||
{ // Partial CRI log line with multiple log tags.
|
||||
line: "2016-10-20T18:39:20.57606443Z stdout P:TAG1:TAG2 cri stdout partial test log\n",
|
||||
msg: &logMessage{
|
||||
timestamp: timestamp,
|
||||
stream: runtimeapi.Stdout,
|
||||
log: []byte("cri stdout partial test log"),
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Logf("TestCase #%d: %+v", c, test)
|
||||
parse, err := getParseFunc([]byte(test.line))
|
||||
@@ -130,26 +147,26 @@ func TestWriteLogs(t *testing.T) {
|
||||
log := "abcdefg\n"
|
||||
|
||||
for c, test := range []struct {
|
||||
stream streamType
|
||||
stream runtimeapi.LogStreamType
|
||||
since time.Time
|
||||
timestamp bool
|
||||
expectStdout string
|
||||
expectStderr string
|
||||
}{
|
||||
{ // stderr log
|
||||
stream: stderrType,
|
||||
stream: runtimeapi.Stderr,
|
||||
expectStderr: log,
|
||||
},
|
||||
{ // stdout log
|
||||
stream: stdoutType,
|
||||
stream: runtimeapi.Stdout,
|
||||
expectStdout: log,
|
||||
},
|
||||
{ // since is after timestamp
|
||||
stream: stdoutType,
|
||||
stream: runtimeapi.Stdout,
|
||||
since: timestamp.Add(1 * time.Second),
|
||||
},
|
||||
{ // timestamp enabled
|
||||
stream: stderrType,
|
||||
stream: runtimeapi.Stderr,
|
||||
timestamp: true,
|
||||
expectStderr: timestamp.Format(timeFormat) + " " + log,
|
||||
},
|
||||
@@ -226,13 +243,13 @@ func TestWriteLogsWithBytesLimit(t *testing.T) {
|
||||
stderrBuf := bytes.NewBuffer(nil)
|
||||
w := newLogWriter(stdoutBuf, stderrBuf, &LogOptions{timestamp: test.timestamp, bytes: int64(test.bytes)})
|
||||
for i := 0; i < test.stdoutLines; i++ {
|
||||
msg.stream = stdoutType
|
||||
msg.stream = runtimeapi.Stdout
|
||||
if err := w.write(msg); err != nil {
|
||||
assert.EqualError(t, err, errMaximumWrite.Error())
|
||||
}
|
||||
}
|
||||
for i := 0; i < test.stderrLines; i++ {
|
||||
msg.stream = stderrType
|
||||
msg.stream = runtimeapi.Stderr
|
||||
if err := w.write(msg); err != nil {
|
||||
assert.EqualError(t, err, errMaximumWrite.Error())
|
||||
}
|
||||
|
@@ -57,6 +57,10 @@ func (fn ResourcePrinterFunc) IsGeneric() bool {
}

type PrintOptions struct {
    // supported Format types can be found in pkg/printers/printers.go
    OutputFormatType     string
    OutputFormatArgument string

    NoHeaders     bool
    WithNamespace bool
    WithKind      bool
@@ -66,6 +70,11 @@ type PrintOptions struct {
    AbsoluteTimestamps bool
    Kind               string
    ColumnLabels       []string

    SortBy string

    // indicates if it is OK to ignore missing keys for rendering an output template.
    AllowMissingKeys bool
}

// Describer generates output for the named resource or an error
@@ -100,13 +109,3 @@ type ErrNoDescriber struct {
func (e ErrNoDescriber) Error() string {
    return fmt.Sprintf("no describer has been defined for %v", e.Types)
}

// OutputOptions represents resource output options which is used to generate a resource printer.
type OutputOptions struct {
    // supported Format types can be found in pkg/printers/printers.go
    FmtType string
    FmtArg  string

    // indicates if it is OK to ignore missing keys for rendering an output template.
    AllowMissingKeys bool
}

@@ -96,7 +96,7 @@ func TestPrintDefault(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range printerTests {
|
||||
printer, err := printers.GetStandardPrinter(&printers.OutputOptions{AllowMissingKeys: false}, false, nil, nil, legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), []runtime.Decoder{legacyscheme.Codecs.UniversalDecoder(), unstructured.UnstructuredJSONScheme}, printers.PrintOptions{})
|
||||
printer, err := printers.GetStandardPrinter(nil, nil, legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), []runtime.Decoder{legacyscheme.Codecs.UniversalDecoder(), unstructured.UnstructuredJSONScheme}, printers.PrintOptions{AllowMissingKeys: false})
|
||||
if err != nil {
|
||||
t.Errorf("in %s, unexpected error: %#v", test.Name, err)
|
||||
}
|
||||
@@ -254,24 +254,24 @@ func TestPrinter(t *testing.T) {
|
||||
|
||||
printerTests := []struct {
|
||||
Name string
|
||||
OutputOpts *printers.OutputOptions
|
||||
PrintOpts *printers.PrintOptions
|
||||
Input runtime.Object
|
||||
OutputVersions []schema.GroupVersion
|
||||
Expect string
|
||||
}{
|
||||
{"test json", &printers.OutputOptions{FmtType: "json", AllowMissingKeys: true}, simpleTest, nil, "{\n \"Data\": \"foo\"\n}\n"},
|
||||
{"test yaml", &printers.OutputOptions{FmtType: "yaml", AllowMissingKeys: true}, simpleTest, nil, "Data: foo\n"},
|
||||
{"test template", &printers.OutputOptions{FmtType: "template", FmtArg: "{{if .id}}{{.id}}{{end}}{{if .metadata.name}}{{.metadata.name}}{{end}}", AllowMissingKeys: true},
|
||||
{"test json", &printers.PrintOptions{OutputFormatType: "json", AllowMissingKeys: true}, simpleTest, nil, "{\n \"Data\": \"foo\"\n}\n"},
|
||||
{"test yaml", &printers.PrintOptions{OutputFormatType: "yaml", AllowMissingKeys: true}, simpleTest, nil, "Data: foo\n"},
|
||||
{"test template", &printers.PrintOptions{OutputFormatType: "template", OutputFormatArgument: "{{if .id}}{{.id}}{{end}}{{if .metadata.name}}{{.metadata.name}}{{end}}", AllowMissingKeys: true},
|
||||
podTest, []schema.GroupVersion{v1.SchemeGroupVersion}, "foo"},
|
||||
{"test jsonpath", &printers.OutputOptions{FmtType: "jsonpath", FmtArg: "{.metadata.name}", AllowMissingKeys: true}, podTest, []schema.GroupVersion{v1.SchemeGroupVersion}, "foo"},
|
||||
{"test jsonpath list", &printers.OutputOptions{FmtType: "jsonpath", FmtArg: "{.items[*].metadata.name}", AllowMissingKeys: true}, podListTest, []schema.GroupVersion{v1.SchemeGroupVersion}, "foo bar"},
|
||||
{"test jsonpath empty list", &printers.OutputOptions{FmtType: "jsonpath", FmtArg: "{.items[*].metadata.name}", AllowMissingKeys: true}, emptyListTest, []schema.GroupVersion{v1.SchemeGroupVersion}, ""},
|
||||
{"test name", &printers.OutputOptions{FmtType: "name", AllowMissingKeys: true}, podTest, []schema.GroupVersion{v1.SchemeGroupVersion}, "pods/foo\n"},
|
||||
{"emits versioned objects", &printers.OutputOptions{FmtType: "template", FmtArg: "{{.kind}}", AllowMissingKeys: true}, testapi, []schema.GroupVersion{v1.SchemeGroupVersion}, "Pod"},
|
||||
{"test jsonpath", &printers.PrintOptions{OutputFormatType: "jsonpath", OutputFormatArgument: "{.metadata.name}", AllowMissingKeys: true}, podTest, []schema.GroupVersion{v1.SchemeGroupVersion}, "foo"},
|
||||
{"test jsonpath list", &printers.PrintOptions{OutputFormatType: "jsonpath", OutputFormatArgument: "{.items[*].metadata.name}", AllowMissingKeys: true}, podListTest, []schema.GroupVersion{v1.SchemeGroupVersion}, "foo bar"},
|
||||
{"test jsonpath empty list", &printers.PrintOptions{OutputFormatType: "jsonpath", OutputFormatArgument: "{.items[*].metadata.name}", AllowMissingKeys: true}, emptyListTest, []schema.GroupVersion{v1.SchemeGroupVersion}, ""},
|
||||
{"test name", &printers.PrintOptions{OutputFormatType: "name", AllowMissingKeys: true}, podTest, []schema.GroupVersion{v1.SchemeGroupVersion}, "pods/foo\n"},
|
||||
{"emits versioned objects", &printers.PrintOptions{OutputFormatType: "template", OutputFormatArgument: "{{.kind}}", AllowMissingKeys: true}, testapi, []schema.GroupVersion{v1.SchemeGroupVersion}, "Pod"},
|
||||
}
|
||||
for _, test := range printerTests {
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
printer, err := printers.GetStandardPrinter(test.OutputOpts, false, legacyscheme.Registry.RESTMapper(legacyscheme.Registry.EnabledVersions()...), legacyscheme.Scheme, legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), []runtime.Decoder{legacyscheme.Codecs.UniversalDecoder(), unstructured.UnstructuredJSONScheme}, printers.PrintOptions{})
|
||||
printer, err := printers.GetStandardPrinter(legacyscheme.Registry.RESTMapper(legacyscheme.Registry.EnabledVersions()...), legacyscheme.Scheme, legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), []runtime.Decoder{legacyscheme.Codecs.UniversalDecoder(), unstructured.UnstructuredJSONScheme}, *test.PrintOpts)
|
||||
if err != nil {
|
||||
t.Errorf("in %s, unexpected error: %#v", test.Name, err)
|
||||
}
|
||||
@@ -290,18 +290,18 @@ func TestPrinter(t *testing.T) {
|
||||
|
||||
func TestBadPrinter(t *testing.T) {
|
||||
badPrinterTests := []struct {
|
||||
Name string
|
||||
OutputOpts *printers.OutputOptions
|
||||
Error error
|
||||
Name string
|
||||
PrintOpts *printers.PrintOptions
|
||||
Error error
|
||||
}{
|
||||
{"empty template", &printers.OutputOptions{FmtType: "template", AllowMissingKeys: false}, fmt.Errorf("template format specified but no template given")},
|
||||
{"bad template", &printers.OutputOptions{FmtType: "template", FmtArg: "{{ .Name", AllowMissingKeys: false}, fmt.Errorf("error parsing template {{ .Name, template: output:1: unclosed action\n")},
|
||||
{"bad templatefile", &printers.OutputOptions{FmtType: "templatefile", AllowMissingKeys: false}, fmt.Errorf("templatefile format specified but no template file given")},
|
||||
{"bad jsonpath", &printers.OutputOptions{FmtType: "jsonpath", FmtArg: "{.Name", AllowMissingKeys: false}, fmt.Errorf("error parsing jsonpath {.Name, unclosed action\n")},
|
||||
{"unknown format", &printers.OutputOptions{FmtType: "anUnknownFormat", FmtArg: "", AllowMissingKeys: false}, fmt.Errorf("output format \"anUnknownFormat\" not recognized")},
|
||||
{"empty template", &printers.PrintOptions{OutputFormatType: "template", AllowMissingKeys: false}, fmt.Errorf("template format specified but no template given")},
|
||||
{"bad template", &printers.PrintOptions{OutputFormatType: "template", OutputFormatArgument: "{{ .Name", AllowMissingKeys: false}, fmt.Errorf("error parsing template {{ .Name, template: output:1: unclosed action\n")},
|
||||
{"bad templatefile", &printers.PrintOptions{OutputFormatType: "templatefile", AllowMissingKeys: false}, fmt.Errorf("templatefile format specified but no template file given")},
|
||||
{"bad jsonpath", &printers.PrintOptions{OutputFormatType: "jsonpath", OutputFormatArgument: "{.Name", AllowMissingKeys: false}, fmt.Errorf("error parsing jsonpath {.Name, unclosed action\n")},
|
||||
{"unknown format", &printers.PrintOptions{OutputFormatType: "anUnknownFormat", OutputFormatArgument: "", AllowMissingKeys: false}, fmt.Errorf("output format \"anUnknownFormat\" not recognized")},
|
||||
}
|
||||
for _, test := range badPrinterTests {
|
||||
_, err := printers.GetStandardPrinter(test.OutputOpts, false, legacyscheme.Registry.RESTMapper(legacyscheme.Registry.EnabledVersions()...), legacyscheme.Scheme, legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), []runtime.Decoder{legacyscheme.Codecs.UniversalDecoder(), unstructured.UnstructuredJSONScheme}, printers.PrintOptions{})
|
||||
_, err := printers.GetStandardPrinter(legacyscheme.Registry.RESTMapper(legacyscheme.Registry.EnabledVersions()...), legacyscheme.Scheme, legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), []runtime.Decoder{legacyscheme.Codecs.UniversalDecoder(), unstructured.UnstructuredJSONScheme}, *test.PrintOpts)
|
||||
if err == nil || err.Error() != test.Error.Error() {
|
||||
t.Errorf("in %s, expect %s, got %s", test.Name, test.Error, err)
|
||||
}
|
||||
@@ -494,8 +494,8 @@ func TestNamePrinter(t *testing.T) {
|
||||
},
|
||||
"pods/foo\npods/bar\n"},
|
||||
}
|
||||
outputOpts := &printers.OutputOptions{FmtType: "name", AllowMissingKeys: false}
|
||||
printer, _ := printers.GetStandardPrinter(outputOpts, false, legacyscheme.Registry.RESTMapper(legacyscheme.Registry.EnabledVersions()...), legacyscheme.Scheme, legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), []runtime.Decoder{legacyscheme.Codecs.UniversalDecoder(), unstructured.UnstructuredJSONScheme}, printers.PrintOptions{})
|
||||
printOpts := &printers.PrintOptions{OutputFormatType: "name", AllowMissingKeys: false}
|
||||
printer, _ := printers.GetStandardPrinter(legacyscheme.Registry.RESTMapper(legacyscheme.Registry.EnabledVersions()...), legacyscheme.Scheme, legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), []runtime.Decoder{legacyscheme.Codecs.UniversalDecoder(), unstructured.UnstructuredJSONScheme}, *printOpts)
|
||||
for name, item := range tests {
|
||||
buff := &bytes.Buffer{}
|
||||
err := printer.PrintObj(item.obj, buff)
|
||||
@@ -2793,20 +2793,20 @@ func TestPrintPodDisruptionBudget(t *testing.T) {
|
||||
|
||||
func TestAllowMissingKeys(t *testing.T) {
|
||||
tests := []struct {
|
||||
Name string
|
||||
OutputOpts *printers.OutputOptions
|
||||
Input runtime.Object
|
||||
Expect string
|
||||
Error string
|
||||
Name string
|
||||
PrintOpts *printers.PrintOptions
|
||||
Input runtime.Object
|
||||
Expect string
|
||||
Error string
|
||||
}{
|
||||
{"test template, allow missing keys", &printers.OutputOptions{FmtType: "template", FmtArg: "{{.blarg}}", AllowMissingKeys: true}, &api.Pod{}, "<no value>", ""},
|
||||
{"test template, strict", &printers.OutputOptions{FmtType: "template", FmtArg: "{{.blarg}}", AllowMissingKeys: false}, &api.Pod{}, "", `error executing template "{{.blarg}}": template: output:1:2: executing "output" at <.blarg>: map has no entry for key "blarg"`},
|
||||
{"test jsonpath, allow missing keys", &printers.OutputOptions{FmtType: "jsonpath", FmtArg: "{.blarg}", AllowMissingKeys: true}, &api.Pod{}, "", ""},
|
||||
{"test jsonpath, strict", &printers.OutputOptions{FmtType: "jsonpath", FmtArg: "{.blarg}", AllowMissingKeys: false}, &api.Pod{}, "", "error executing jsonpath \"{.blarg}\": blarg is not found\n"},
|
||||
{"test template, allow missing keys", &printers.PrintOptions{OutputFormatType: "template", OutputFormatArgument: "{{.blarg}}", AllowMissingKeys: true}, &api.Pod{}, "<no value>", ""},
|
||||
{"test template, strict", &printers.PrintOptions{OutputFormatType: "template", OutputFormatArgument: "{{.blarg}}", AllowMissingKeys: false}, &api.Pod{}, "", `error executing template "{{.blarg}}": template: output:1:2: executing "output" at <.blarg>: map has no entry for key "blarg"`},
|
||||
{"test jsonpath, allow missing keys", &printers.PrintOptions{OutputFormatType: "jsonpath", OutputFormatArgument: "{.blarg}", AllowMissingKeys: true}, &api.Pod{}, "", ""},
|
||||
{"test jsonpath, strict", &printers.PrintOptions{OutputFormatType: "jsonpath", OutputFormatArgument: "{.blarg}", AllowMissingKeys: false}, &api.Pod{}, "", "error executing jsonpath \"{.blarg}\": blarg is not found\n"},
|
||||
}
|
||||
for _, test := range tests {
|
||||
buf := bytes.NewBuffer([]byte{})
|
||||
printer, err := printers.GetStandardPrinter(test.OutputOpts, false, legacyscheme.Registry.RESTMapper(legacyscheme.Registry.EnabledVersions()...), legacyscheme.Scheme, legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), []runtime.Decoder{legacyscheme.Codecs.UniversalDecoder(), unstructured.UnstructuredJSONScheme}, printers.PrintOptions{})
|
||||
printer, err := printers.GetStandardPrinter(legacyscheme.Registry.RESTMapper(legacyscheme.Registry.EnabledVersions()...), legacyscheme.Scheme, legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...), []runtime.Decoder{legacyscheme.Codecs.UniversalDecoder(), unstructured.UnstructuredJSONScheme}, *test.PrintOpts)
|
||||
if err != nil {
|
||||
t.Errorf("in %s, unexpected error: %#v", test.Name, err)
|
||||
}
|
||||
|
@@ -29,12 +29,8 @@ import (
// a printer or an error. The printer is agnostic to schema versions, so you must
// send arguments to PrintObj in the version you wish them to be shown using a
// VersionedPrinter (typically when generic is true).
func GetStandardPrinter(outputOpts *OutputOptions, noHeaders bool, mapper meta.RESTMapper, typer runtime.ObjectTyper, encoder runtime.Encoder, decoders []runtime.Decoder, options PrintOptions) (ResourcePrinter, error) {
    if outputOpts == nil {
        return nil, fmt.Errorf("no output options specified")
    }

    format, formatArgument, allowMissingTemplateKeys := outputOpts.FmtType, outputOpts.FmtArg, outputOpts.AllowMissingKeys
func GetStandardPrinter(mapper meta.RESTMapper, typer runtime.ObjectTyper, encoder runtime.Encoder, decoders []runtime.Decoder, options PrintOptions) (ResourcePrinter, error) {
    format, formatArgument, allowMissingTemplateKeys := options.OutputFormatType, options.OutputFormatArgument, options.AllowMissingKeys

    var printer ResourcePrinter
    switch format {
@@ -106,7 +102,7 @@ func GetStandardPrinter(outputOpts *OutputOptions, noHeaders bool, mapper meta.R

    case "custom-columns":
        var err error
        if printer, err = NewCustomColumnsPrinterFromSpec(formatArgument, decoders[0], noHeaders); err != nil {
        if printer, err = NewCustomColumnsPrinterFromSpec(formatArgument, decoders[0], options.NoHeaders); err != nil {
            return nil, err
        }

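For orientation, here is a hedged sketch of a call site under the new signature, modeled on the test updates later in this diff (import paths as used elsewhere in this tree; treat it as illustrative rather than canonical):

package example

import (
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/printers"
)

// newJSONPathPrinter builds a printer the way the updated tests do: the output
// format, its argument, and AllowMissingKeys now travel on PrintOptions.
func newJSONPathPrinter() (printers.ResourcePrinter, error) {
    opts := printers.PrintOptions{
        OutputFormatType:     "jsonpath",
        OutputFormatArgument: "{.metadata.name}",
        AllowMissingKeys:     true,
    }
    return printers.GetStandardPrinter(
        legacyscheme.Registry.RESTMapper(legacyscheme.Registry.EnabledVersions()...),
        legacyscheme.Scheme,
        legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.EnabledVersions()...),
        []runtime.Decoder{legacyscheme.Codecs.UniversalDecoder(), unstructured.UnstructuredJSONScheme},
        opts,
    )
}
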
@@ -58,6 +58,30 @@ var podResources = []api.ResourceName{
    api.ResourcePods,
}

// podResourcePrefixes are the set of prefixes for resources (Hugepages, and other
// potential extended resources with a specific prefix) managed by quota associated with pods.
var podResourcePrefixes = []string{
    api.ResourceHugePagesPrefix,
    api.ResourceRequestsHugePagesPrefix,
}

// requestedResourcePrefixes are the set of prefixes for resources
// that might be declared in pod's Resources.Requests/Limits
var requestedResourcePrefixes = []string{
    api.ResourceHugePagesPrefix,
}

const (
    requestsPrefix = "requests."
    limitsPrefix   = "limits."
)

// maskResourceWithPrefix masks a resource with a certain prefix
// e.g. hugepages-XXX -> requests.hugepages-XXX
func maskResourceWithPrefix(resource api.ResourceName, prefix string) api.ResourceName {
    return api.ResourceName(fmt.Sprintf("%s%s", prefix, string(resource)))
}

// NOTE: it was a mistake, but if a quota tracks cpu or memory related resources,
// the incoming pod is required to have those values set. we should not repeat
// this mistake for other future resources (gpus, ephemeral-storage, etc.).

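The prefix constants above exist so that a hugepages request is charged under both its plain name and its "requests."-masked name (the usage helper later in this diff applies exactly this mapping). A standalone, hedged sketch of the mapping, with local stand-ins for the constants:

// Standalone illustration: the local constants mirror api.ResourceHugePagesPrefix and
// the requestsPrefix constant added above; this is not the evaluator's actual code.
package main

import "fmt"

const (
    hugePagesPrefix = "hugepages-"
    requestsPrefix  = "requests."
)

func maskResourceWithPrefix(resource, prefix string) string {
    return prefix + resource
}

func main() {
    requested := hugePagesPrefix + "2Mi"
    fmt.Println(requested, "->", maskResourceWithPrefix(requested, requestsPrefix))
    // prints: hugepages-2Mi -> requests.hugepages-2Mi
}
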
@@ -157,7 +181,14 @@ func (p *podEvaluator) Matches(resourceQuota *api.ResourceQuota, item runtime.Ob
|
||||
|
||||
// MatchingResources takes the input specified list of resources and returns the set of resources it matches.
|
||||
func (p *podEvaluator) MatchingResources(input []api.ResourceName) []api.ResourceName {
|
||||
return quota.Intersection(input, podResources)
|
||||
result := quota.Intersection(input, podResources)
|
||||
for _, resource := range input {
|
||||
if quota.ContainsPrefix(podResourcePrefixes, resource) {
|
||||
result = append(result, resource)
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Usage knows how to measure usage associated with pods
|
||||
@@ -212,6 +243,18 @@ func podComputeUsageHelper(requests api.ResourceList, limits api.ResourceList) a
|
||||
if limit, found := limits[api.ResourceEphemeralStorage]; found {
|
||||
result[api.ResourceLimitsEphemeralStorage] = limit
|
||||
}
|
||||
for resource, request := range requests {
|
||||
if quota.ContainsPrefix(requestedResourcePrefixes, resource) {
|
||||
result[resource] = request
|
||||
result[maskResourceWithPrefix(resource, requestsPrefix)] = request
|
||||
}
|
||||
}
|
||||
for resource, limit := range limits {
|
||||
if quota.ContainsPrefix(requestedResourcePrefixes, resource) {
|
||||
result[maskResourceWithPrefix(resource, limitsPrefix)] = limit
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
|
@@ -175,6 +175,23 @@ func TestPodEvaluatorUsage(t *testing.T) {
|
||||
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
|
||||
},
|
||||
},
|
||||
"init container hugepages": {
|
||||
pod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
InitContainers: []api.Container{{
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceName(api.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("100Mi")},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
usage: api.ResourceList{
|
||||
api.ResourceName(api.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"),
|
||||
api.ResourceName(api.ResourceRequestsHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"),
|
||||
api.ResourcePods: resource.MustParse("1"),
|
||||
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
|
||||
},
|
||||
},
|
||||
"container CPU": {
|
||||
pod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
@@ -232,6 +249,23 @@ func TestPodEvaluatorUsage(t *testing.T) {
|
||||
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
|
||||
},
|
||||
},
|
||||
"container hugepages": {
|
||||
pod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceName(api.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("100Mi")},
|
||||
},
|
||||
}},
|
||||
},
|
||||
},
|
||||
usage: api.ResourceList{
|
||||
api.ResourceName(api.ResourceHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"),
|
||||
api.ResourceName(api.ResourceRequestsHugePagesPrefix + "2Mi"): resource.MustParse("100Mi"),
|
||||
api.ResourcePods: resource.MustParse("1"),
|
||||
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
|
||||
},
|
||||
},
|
||||
"init container maximums override sum of containers": {
|
||||
pod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
|
@@ -17,6 +17,8 @@ limitations under the License.
package quota

import (
    "strings"

    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/apimachinery/pkg/util/sets"
@@ -196,6 +198,16 @@ func Contains(items []api.ResourceName, item api.ResourceName) bool {
    return ToSet(items).Has(string(item))
}

// ContainsPrefix returns true if the specified item has a prefix contained in the given prefix set
func ContainsPrefix(prefixSet []string, item api.ResourceName) bool {
    for _, prefix := range prefixSet {
        if strings.HasPrefix(string(item), prefix) {
            return true
        }
    }
    return false
}

// Intersection returns the intersection of both list of resources
func Intersection(a []api.ResourceName, b []api.ResourceName) []api.ResourceName {
    setA := ToSet(a)

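A brief hedged usage sketch of the new ContainsPrefix helper, in the spirit of how the pod evaluator's MatchingResources uses it earlier in this diff (the fragment assumes the quota package import; resource names are illustrative):

// Illustrative fragment: an item matches when any listed prefix is a prefix of its name.
tracked := quota.ContainsPrefix(
    []string{"hugepages-", "requests.hugepages-"},
    api.ResourceName("requests.hugepages-2Mi"),
) // tracked == true
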
@@ -226,6 +226,30 @@ func TestContains(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestContainsPrefix(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
a []string
|
||||
b api.ResourceName
|
||||
expected bool
|
||||
}{
|
||||
"does-not-contain": {
|
||||
a: []string{api.ResourceHugePagesPrefix},
|
||||
b: api.ResourceCPU,
|
||||
expected: false,
|
||||
},
|
||||
"does-contain": {
|
||||
a: []string{api.ResourceHugePagesPrefix},
|
||||
b: api.ResourceName(api.ResourceHugePagesPrefix + "2Mi"),
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
for testName, testCase := range testCases {
|
||||
if actual := ContainsPrefix(testCase.a, testCase.b); actual != testCase.expected {
|
||||
t.Errorf("%s expected: %v, actual: %v", testName, testCase.expected, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsZero(t *testing.T) {
|
||||
testCases := map[string]struct {
|
||||
a api.ResourceList
|
||||
|
@@ -153,7 +153,7 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
|
||||
// 000 - neither "zone", "zones", or "replica-zones" specified
|
||||
// Pick a zone randomly selected from all active zones where
|
||||
// Kubernetes cluster has a node.
|
||||
zones, err = cloud.GetAllZones()
|
||||
zones, err = cloud.GetAllCurrentZones()
|
||||
if err != nil {
|
||||
glog.V(2).Infof("error getting zone information from GCE: %v", err)
|
||||
return "", 0, nil, "", err
|
||||
|
@@ -197,16 +197,11 @@ func (plugin *glusterfsPlugin) newUnmounterInternal(volName string, podUID types
|
||||
}
|
||||
|
||||
func (plugin *glusterfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
|
||||
glusterfsVolume := &v1.Volume{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Glusterfs: &v1.GlusterfsVolumeSource{
|
||||
EndpointsName: volumeName,
|
||||
Path: volumeName,
|
||||
},
|
||||
},
|
||||
}
|
||||
return volume.NewSpecFromVolume(glusterfsVolume), nil
|
||||
|
||||
// To reconstruct the volume spec we need the endpoint, but fetching the endpoint from the mount
// string looks to be impossible, so return an error.
|
||||
|
||||
return nil, fmt.Errorf("impossible to reconstruct glusterfs volume spec from volume mountpath")
|
||||
}
|
||||
|
||||
// Glusterfs volumes represent a bare host file or directory mount of a Glusterfs export.
|
||||
|
@@ -129,15 +129,19 @@ func TestAdmissionNamespaceExists(t *testing.T) {
|
||||
|
||||
// TestIgnoreAdmission validates that a request is ignored if it is not a create
|
||||
func TestIgnoreAdmission(t *testing.T) {
|
||||
namespace := "test"
|
||||
mockClient := newMockClientForTest([]string{})
|
||||
handler, informerFactory, err := newHandlerForTest(mockClient)
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error initializing handler: %v", err)
|
||||
}
|
||||
informerFactory.Start(wait.NeverStop)
|
||||
chainHandler := admission.NewChainHandler(handler)
|
||||
|
||||
if handler.Handles(admission.Update) {
|
||||
t.Errorf("expected not to handle Update")
|
||||
pod := newPod(namespace)
|
||||
err = chainHandler.Admit(admission.NewAttributesRecord(&pod, nil, api.Kind("Pod").WithVersion("version"), pod.Namespace, pod.Name, api.Resource("pods").WithVersion("version"), "", admission.Update, nil))
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error returned from admission handler")
|
||||
}
|
||||
if hasCreateNamespaceAction(mockClient) {
|
||||
t.Errorf("unexpected create namespace action")
|
||||
|
@@ -78,7 +78,7 @@ func mockVolumeLabels(labels map[string]string) *mockVolumes {
|
||||
// TestAdmission
|
||||
func TestAdmission(t *testing.T) {
|
||||
pvHandler := NewPersistentVolumeLabel()
|
||||
handler := admission.NewChainHandler(admission.NewNamedHandler("pv", pvHandler))
|
||||
handler := admission.NewChainHandler(pvHandler)
|
||||
ignoredPV := api.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "noncloud", Namespace: "myns"},
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
|
@@ -4598,6 +4598,13 @@ const (
|
||||
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
|
||||
)
|
||||
|
||||
// The following identify resource prefix for Kubernetes object types
|
||||
const (
|
||||
// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
|
||||
// As burst is not supported for HugePages, we would only quota its request, and ignore the limit.
|
||||
ResourceRequestsHugePagesPrefix = "requests.hugepages-"
|
||||
)
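// Illustrative note: the full quota resource name is this prefix plus the page
// size, e.g. ResourceRequestsHugePagesPrefix + "2Mi" gives "requests.hugepages-2Mi",
// which is the form exercised by the quota tests above.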
|
||||
|
||||
// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
|
||||
type ResourceQuotaScope string
|
||||
|
||||
|
@@ -866,6 +866,10 @@
|
||||
"ImportPath": "k8s.io/apiserver/pkg/admission/initializer",
|
||||
"Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/apiserver/pkg/admission/metrics",
|
||||
"Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/apiserver/pkg/admission/plugin/initialization",
|
||||
"Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
|
||||
|
@@ -58,8 +58,24 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} {
|
||||
case reflect.Interface, reflect.Map, reflect.Slice, reflect.Ptr:
|
||||
isValue = false
|
||||
}
|
||||
if isValue || c.Intn(5) == 0 {
|
||||
if isValue || c.Intn(10) == 0 {
|
||||
c.Fuzz(vobj.Field(i).Addr().Interface())
|
||||
|
||||
// JSON keys must not contain escape char with our JSON codec (jsoniter)
|
||||
// TODO: remove this when/if we moved from jsoniter.ConfigFastest to ConfigCompatibleWithStandardLibrary
|
||||
if field.Type.Kind() == reflect.Map {
|
||||
keys := append([]reflect.Value(nil), vobj.Field(i).MapKeys()...)
|
||||
for _, k := range keys {
|
||||
stripped := toJSONString(k.String())
|
||||
if stripped == k.String() {
|
||||
continue
|
||||
}
|
||||
// set new key
|
||||
vobj.Field(i).SetMapIndex(reflect.ValueOf(stripped), vobj.Field(i).MapIndex(k))
|
||||
// remove old
|
||||
vobj.Field(i).SetMapIndex(k, reflect.Value{})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -109,3 +125,13 @@ func Funcs(codecs runtimeserializer.CodecFactory) []interface{} {
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func toJSONString(s string) string {
|
||||
return strings.Map(func(r rune) rune {
|
||||
// replace chars which are not supported in keys by jsoniter.ConfigFastest
|
||||
if r == '\\' || r == '"' {
|
||||
return 'x'
|
||||
}
|
||||
return r
|
||||
}, s)
|
||||
}
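// Quick illustration (assumption about inputs): toJSONString(`a"b\c`) returns
// "axbxc", so fuzzed map keys never contain '"' or '\' and stay representable
// as JSON object keys under jsoniter.ConfigFastest.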
|
||||
|
@@ -13,15 +13,10 @@ go_test(
|
||||
"config_test.go",
|
||||
"errors_test.go",
|
||||
"handler_test.go",
|
||||
"metrics_test.go",
|
||||
"testutil_test.go",
|
||||
],
|
||||
importpath = "k8s.io/apiserver/pkg/admission",
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_model/go:go_default_library",
|
||||
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
@@ -38,15 +33,12 @@ go_library(
|
||||
"errors.go",
|
||||
"handler.go",
|
||||
"interfaces.go",
|
||||
"metrics.go",
|
||||
"plugins.go",
|
||||
],
|
||||
importpath = "k8s.io/apiserver/pkg/admission",
|
||||
deps = [
|
||||
"//vendor/github.com/ghodss/yaml:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/k8s.io/api/admissionregistration/v1alpha1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apimachinery/announced:go_default_library",
|
||||
@@ -76,6 +68,7 @@ filegroup(
|
||||
":package-srcs",
|
||||
"//staging/src/k8s.io/apiserver/pkg/admission/configuration:all-srcs",
|
||||
"//staging/src/k8s.io/apiserver/pkg/admission/initializer:all-srcs",
|
||||
"//staging/src/k8s.io/apiserver/pkg/admission/metrics:all-srcs",
|
||||
"//staging/src/k8s.io/apiserver/pkg/admission/plugin/initialization:all-srcs",
|
||||
"//staging/src/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle:all-srcs",
|
||||
"//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/config:all-srcs",
|
||||
|
@@ -16,46 +16,23 @@ limitations under the License.
|
||||
|
||||
package admission
|
||||
|
||||
import "time"
|
||||
|
||||
// chainAdmissionHandler is an instance of admission.NamedHandler that performs admission control using
|
||||
// a chain of admission handlers
|
||||
type chainAdmissionHandler []NamedHandler
|
||||
type chainAdmissionHandler []Interface
|
||||
|
||||
// NewChainHandler creates a new chain handler from an array of handlers. Used for testing.
|
||||
func NewChainHandler(handlers ...NamedHandler) chainAdmissionHandler {
|
||||
func NewChainHandler(handlers ...Interface) chainAdmissionHandler {
|
||||
return chainAdmissionHandler(handlers)
|
||||
}
|
||||
|
||||
func NewNamedHandler(name string, i Interface) NamedHandler {
|
||||
return &pluginHandler{
|
||||
i: i,
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
stepValidate = "validate"
|
||||
stepAdmit = "admit"
|
||||
)
|
||||
|
||||
// Admit performs an admission control check using a chain of handlers, and returns immediately on first error
|
||||
func (admissionHandler chainAdmissionHandler) Admit(a Attributes) error {
|
||||
start := time.Now()
|
||||
err := admissionHandler.admit(a)
|
||||
Metrics.ObserveAdmissionStep(time.Since(start), err != nil, a, stepAdmit)
|
||||
return err
|
||||
}
|
||||
|
||||
func (admissionHandler chainAdmissionHandler) admit(a Attributes) error {
|
||||
for _, handler := range admissionHandler {
|
||||
if !handler.Interface().Handles(a.GetOperation()) {
|
||||
if !handler.Handles(a.GetOperation()) {
|
||||
continue
|
||||
}
|
||||
if mutator, ok := handler.Interface().(MutationInterface); ok {
|
||||
t := time.Now()
|
||||
if mutator, ok := handler.(MutationInterface); ok {
|
||||
err := mutator.Admit(a)
|
||||
Metrics.ObserveAdmissionController(time.Since(t), err != nil, handler, a, stepAdmit)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -66,21 +43,12 @@ func (admissionHandler chainAdmissionHandler) admit(a Attributes) error {
|
||||
|
||||
// Validate performs an admission control check using a chain of handlers, and returns immediately on first error
|
||||
func (admissionHandler chainAdmissionHandler) Validate(a Attributes) error {
|
||||
start := time.Now()
|
||||
err := admissionHandler.validate(a)
|
||||
Metrics.ObserveAdmissionStep(time.Since(start), err != nil, a, stepValidate)
|
||||
return err
|
||||
}
|
||||
|
||||
func (admissionHandler chainAdmissionHandler) validate(a Attributes) (err error) {
|
||||
for _, handler := range admissionHandler {
|
||||
if !handler.Interface().Handles(a.GetOperation()) {
|
||||
if !handler.Handles(a.GetOperation()) {
|
||||
continue
|
||||
}
|
||||
if validator, ok := handler.Interface().(ValidationInterface); ok {
|
||||
t := time.Now()
|
||||
if validator, ok := handler.(ValidationInterface); ok {
|
||||
err := validator.Validate(a)
|
||||
Metrics.ObserveAdmissionController(time.Since(t), err != nil, handler, a, stepValidate)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -92,7 +60,7 @@ func (admissionHandler chainAdmissionHandler) validate(a Attributes) (err error)
|
||||
// Handles will return true if any of the handlers handles the given operation
|
||||
func (admissionHandler chainAdmissionHandler) Handles(operation Operation) bool {
|
||||
for _, handler := range admissionHandler {
|
||||
if handler.Interface().Handles(operation) {
|
||||
if handler.Handles(operation) {
|
||||
return true
|
||||
}
|
||||
}
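// Usage sketch (illustrative, mirroring the tests below): a chain is now built
// directly from Interface values and used like any single plugin, e.g.
//
//	chain := NewChainHandler(pluginA, pluginB)
//	if chain.Handles(a.GetOperation()) {
//		if err := chain.Admit(a); err != nil {
//			return err
//		}
//	}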
|
||||
|
@@ -17,12 +17,45 @@ limitations under the License.
|
||||
package admission
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
type FakeHandler struct {
|
||||
*Handler
|
||||
name string
|
||||
admit, admitCalled bool
|
||||
validate, validateCalled bool
|
||||
}
|
||||
|
||||
func (h *FakeHandler) Admit(a Attributes) (err error) {
|
||||
h.admitCalled = true
|
||||
if h.admit {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Don't admit")
|
||||
}
|
||||
|
||||
func (h *FakeHandler) Validate(a Attributes) (err error) {
|
||||
h.validateCalled = true
|
||||
if h.validate {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Don't validate")
|
||||
}
|
||||
|
||||
func makeHandler(name string, accept bool, ops ...Operation) *FakeHandler {
|
||||
return &FakeHandler{
|
||||
name: name,
|
||||
admit: accept,
|
||||
validate: accept,
|
||||
Handler: NewHandler(ops...),
|
||||
}
|
||||
}
|
||||
|
||||
func TestAdmitAndValidate(t *testing.T) {
|
||||
sysns := metav1.NamespaceSystem
|
||||
otherns := "default"
|
||||
@@ -38,10 +71,10 @@ func TestAdmitAndValidate(t *testing.T) {
|
||||
name: "all accept",
|
||||
ns: sysns,
|
||||
operation: Create,
|
||||
chain: []NamedHandler{
|
||||
makeNamedHandler("a", true, Update, Delete, Create),
|
||||
makeNamedHandler("b", true, Delete, Create),
|
||||
makeNamedHandler("c", true, Create),
|
||||
chain: []Interface{
|
||||
makeHandler("a", true, Update, Delete, Create),
|
||||
makeHandler("b", true, Delete, Create),
|
||||
makeHandler("c", true, Create),
|
||||
},
|
||||
calls: map[string]bool{"a": true, "b": true, "c": true},
|
||||
accept: true,
|
||||
@@ -50,10 +83,10 @@ func TestAdmitAndValidate(t *testing.T) {
|
||||
name: "ignore handler",
|
||||
ns: otherns,
|
||||
operation: Create,
|
||||
chain: []NamedHandler{
|
||||
makeNamedHandler("a", true, Update, Delete, Create),
|
||||
makeNamedHandler("b", false, Delete),
|
||||
makeNamedHandler("c", true, Create),
|
||||
chain: []Interface{
|
||||
makeHandler("a", true, Update, Delete, Create),
|
||||
makeHandler("b", false, Delete),
|
||||
makeHandler("c", true, Create),
|
||||
},
|
||||
calls: map[string]bool{"a": true, "c": true},
|
||||
accept: true,
|
||||
@@ -62,10 +95,10 @@ func TestAdmitAndValidate(t *testing.T) {
|
||||
name: "ignore all",
|
||||
ns: sysns,
|
||||
operation: Connect,
|
||||
chain: []NamedHandler{
|
||||
makeNamedHandler("a", true, Update, Delete, Create),
|
||||
makeNamedHandler("b", false, Delete),
|
||||
makeNamedHandler("c", true, Create),
|
||||
chain: []Interface{
|
||||
makeHandler("a", true, Update, Delete, Create),
|
||||
makeHandler("b", false, Delete),
|
||||
makeHandler("c", true, Create),
|
||||
},
|
||||
calls: map[string]bool{},
|
||||
accept: true,
|
||||
@@ -74,17 +107,16 @@ func TestAdmitAndValidate(t *testing.T) {
|
||||
name: "reject one",
|
||||
ns: otherns,
|
||||
operation: Delete,
|
||||
chain: []NamedHandler{
|
||||
makeNamedHandler("a", true, Update, Delete, Create),
|
||||
makeNamedHandler("b", false, Delete),
|
||||
makeNamedHandler("c", true, Create),
|
||||
chain: []Interface{
|
||||
makeHandler("a", true, Update, Delete, Create),
|
||||
makeHandler("b", false, Delete),
|
||||
makeHandler("c", true, Create),
|
||||
},
|
||||
calls: map[string]bool{"a": true, "b": true},
|
||||
accept: false,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
Metrics.reset()
|
||||
t.Logf("testcase = %s", test.name)
|
||||
// call admit and check that validate was not called at all
|
||||
err := test.chain.Admit(NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, test.ns, "", schema.GroupVersionResource{}, "", test.operation, nil))
|
||||
@@ -93,26 +125,20 @@ func TestAdmitAndValidate(t *testing.T) {
|
||||
t.Errorf("unexpected result of admit call: %v", accepted)
|
||||
}
|
||||
for _, h := range test.chain {
|
||||
fake := h.Interface().(*FakeHandler)
|
||||
_, shouldBeCalled := test.calls[h.Name()]
|
||||
fake := h.(*FakeHandler)
|
||||
_, shouldBeCalled := test.calls[fake.name]
|
||||
if shouldBeCalled != fake.admitCalled {
|
||||
t.Errorf("admit handler %s not called as expected: %v", h.Name(), fake.admitCalled)
|
||||
t.Errorf("admit handler %s not called as expected: %v", fake.name, fake.admitCalled)
|
||||
continue
|
||||
}
|
||||
if fake.validateCalled {
|
||||
t.Errorf("validate handler %s called during admit", h.Name())
|
||||
t.Errorf("validate handler %s called during admit", fake.name)
|
||||
}
|
||||
|
||||
// reset value for validation test
|
||||
fake.admitCalled = false
|
||||
}
|
||||
|
||||
labelFilter := map[string]string{
|
||||
"type": "admit",
|
||||
}
|
||||
|
||||
checkAdmitAndValidateMetrics(t, labelFilter, test.accept, test.calls)
|
||||
Metrics.reset()
|
||||
// call validate and check that admit was not called at all
|
||||
err = test.chain.Validate(NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, test.ns, "", schema.GroupVersionResource{}, "", test.operation, nil))
|
||||
accepted = (err == nil)
|
||||
@@ -120,65 +146,26 @@ func TestAdmitAndValidate(t *testing.T) {
|
||||
t.Errorf("unexpected result of validate call: %v\n", accepted)
|
||||
}
|
||||
for _, h := range test.chain {
|
||||
fake := h.Interface().(*FakeHandler)
|
||||
fake := h.(*FakeHandler)
|
||||
|
||||
_, shouldBeCalled := test.calls[h.Name()]
|
||||
_, shouldBeCalled := test.calls[fake.name]
|
||||
if shouldBeCalled != fake.validateCalled {
|
||||
t.Errorf("validate handler %s not called as expected: %v", h.Name(), fake.validateCalled)
|
||||
t.Errorf("validate handler %s not called as expected: %v", fake.name, fake.validateCalled)
|
||||
continue
|
||||
}
|
||||
|
||||
if fake.admitCalled {
|
||||
t.Errorf("mutating handler unexpectedly called: %s", h.Name())
|
||||
t.Errorf("mutating handler unexpectedly called: %s", fake.name)
|
||||
}
|
||||
}
|
||||
|
||||
labelFilter = map[string]string{
|
||||
"type": "validate",
|
||||
}
|
||||
|
||||
checkAdmitAndValidateMetrics(t, labelFilter, test.accept, test.calls)
|
||||
}
|
||||
}
|
||||
|
||||
func checkAdmitAndValidateMetrics(t *testing.T, labelFilter map[string]string, accept bool, calls map[string]bool) {
|
||||
acceptFilter := map[string]string{"rejected": "false"}
|
||||
for k, v := range labelFilter {
|
||||
acceptFilter[k] = v
|
||||
}
|
||||
|
||||
rejectFilter := map[string]string{"rejected": "true"}
|
||||
for k, v := range labelFilter {
|
||||
rejectFilter[k] = v
|
||||
}
|
||||
|
||||
if accept {
|
||||
// Ensure exactly one admission end-to-end admission accept should have been recorded.
|
||||
expectHistogramCountTotal(t, "apiserver_admission_step_admission_latencies_seconds", acceptFilter, 1)
|
||||
|
||||
// Ensure the expected count of admission controllers have been executed.
|
||||
expectHistogramCountTotal(t, "apiserver_admission_controller_admission_latencies_seconds", acceptFilter, len(calls))
|
||||
} else {
|
||||
// When not accepted, ensure exactly one end-to-end rejection has been recorded.
|
||||
expectHistogramCountTotal(t, "apiserver_admission_step_admission_latencies_seconds", rejectFilter, 1)
|
||||
if len(calls) > 0 {
|
||||
if len(calls) > 1 {
|
||||
// When not accepted, ensure that all but the last controller had been accepted, since
|
||||
// the chain stops execution at the first rejection.
|
||||
expectHistogramCountTotal(t, "apiserver_admission_controller_admission_latencies_seconds", acceptFilter, len(calls)-1)
|
||||
}
|
||||
|
||||
// When not accepted, ensure exactly one controller has been rejected.
|
||||
expectHistogramCountTotal(t, "apiserver_admission_controller_admission_latencies_seconds", rejectFilter, 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHandles(t *testing.T) {
|
||||
chain := chainAdmissionHandler{
|
||||
makeNamedHandler("a", true, Update, Delete, Create),
|
||||
makeNamedHandler("b", true, Delete, Create),
|
||||
makeNamedHandler("c", true, Create),
|
||||
makeHandler("a", true, Update, Delete, Create),
|
||||
makeHandler("b", true, Delete, Create),
|
||||
makeHandler("c", true, Create),
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
|
staging/src/k8s.io/apiserver/pkg/admission/metrics/BUILD (new file, 42 lines)
@@ -0,0 +1,42 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["metrics.go"],
|
||||
importpath = "k8s.io/apiserver/pkg/admission/metrics",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"metrics_test.go",
|
||||
"testutil_test.go",
|
||||
],
|
||||
importpath = "k8s.io/apiserver/pkg/admission/metrics",
|
||||
library = ":go_default_library",
|
||||
deps = [
|
||||
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
|
||||
"//vendor/github.com/prometheus/client_model/go:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
@@ -14,16 +14,16 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package admission
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/admissionregistration/v1alpha1"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -39,10 +39,64 @@ var (
|
||||
Metrics = newAdmissionMetrics()
|
||||
)
|
||||
|
||||
// NamedHandler requires each admission.Interface be named, primarly for metrics tracking purposes.
|
||||
type NamedHandler interface {
|
||||
Interface() Interface
|
||||
Name() string
|
||||
// ObserverFunc is a func that emits metrics.
|
||||
type ObserverFunc func(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string)
|
||||
|
||||
const (
|
||||
stepValidate = "validate"
|
||||
stepAdmit = "admit"
|
||||
)
|
||||
|
||||
// WithControllerMetrics is a decorator for named admission handlers.
|
||||
func WithControllerMetrics(i admission.Interface, name string) admission.Interface {
|
||||
return WithMetrics(i, Metrics.ObserveAdmissionController, name)
|
||||
}
|
||||
|
||||
// WithStepMetrics is a decorator for a whole admission phase, i.e. the admit or validate admission step.
|
||||
func WithStepMetrics(i admission.Interface) admission.Interface {
|
||||
return WithMetrics(i, Metrics.ObserveAdmissionStep)
|
||||
}
|
||||
|
||||
// WithMetrics is a decorator for admission handlers with a generic observer func.
|
||||
func WithMetrics(i admission.Interface, observer ObserverFunc, extraLabels ...string) admission.Interface {
|
||||
return &pluginHandlerWithMetrics{
|
||||
Interface: i,
|
||||
observer: observer,
|
||||
extraLabels: extraLabels,
|
||||
}
|
||||
}
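// Wiring sketch (illustrative; the plugin name is an example): individual
// controllers get per-controller metrics, while a whole chain gets per-step
// metrics, e.g.
//
//	instrumented := WithControllerMetrics(plugin, "NamespaceLifecycle")
//	step := WithStepMetrics(chain)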
|
||||
|
||||
// pluginHandlerWithMetrics decorates an admission handler with metrics.
|
||||
type pluginHandlerWithMetrics struct {
|
||||
admission.Interface
|
||||
observer ObserverFunc
|
||||
extraLabels []string
|
||||
}
|
||||
|
||||
// Admit performs a mutating admission control check and emits metrics.
|
||||
func (p pluginHandlerWithMetrics) Admit(a admission.Attributes) error {
|
||||
mutatingHandler, ok := p.Interface.(admission.MutationInterface)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
err := mutatingHandler.Admit(a)
|
||||
p.observer(time.Since(start), err != nil, a, stepAdmit, p.extraLabels...)
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate performs a non-mutating admission control check and emits metrics.
|
||||
func (p pluginHandlerWithMetrics) Validate(a admission.Attributes) error {
|
||||
validatingHandler, ok := p.Interface.(admission.ValidationInterface)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
err := validatingHandler.Validate(a)
|
||||
p.observer(time.Since(start), err != nil, a, stepValidate, p.extraLabels...)
|
||||
return err
|
||||
}
|
||||
|
||||
// AdmissionMetrics instruments admission with prometheus metrics.
|
||||
@@ -83,22 +137,21 @@ func (m *AdmissionMetrics) reset() {
|
||||
}
|
||||
|
||||
// ObserveAdmissionStep records admission-related metrics for an admission step, identified by step type.
|
||||
func (m *AdmissionMetrics) ObserveAdmissionStep(elapsed time.Duration, rejected bool, attr Attributes, stepType string) {
|
||||
func (m *AdmissionMetrics) ObserveAdmissionStep(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) {
|
||||
gvr := attr.GetResource()
|
||||
m.step.observe(elapsed, stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))
|
||||
m.step.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))...)
|
||||
}
|
||||
|
||||
// ObserveAdmissionController records admission-related metrics for a built-in admission controller, identified by its plugin handler name.
|
||||
func (m *AdmissionMetrics) ObserveAdmissionController(elapsed time.Duration, rejected bool, handler NamedHandler, attr Attributes, stepType string) {
|
||||
func (m *AdmissionMetrics) ObserveAdmissionController(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) {
|
||||
gvr := attr.GetResource()
|
||||
m.controller.observe(elapsed, handler.Name(), stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))
|
||||
m.controller.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))...)
|
||||
}
|
||||
|
||||
// ObserveWebhook records admission-related metrics for an admission webhook.
|
||||
func (m *AdmissionMetrics) ObserveWebhook(elapsed time.Duration, rejected bool, hook *v1alpha1.Webhook, attr Attributes) {
|
||||
t := "admit" // TODO: pass in type (validate|admit) once mutating webhook functionality has been implemented
|
||||
func (m *AdmissionMetrics) ObserveWebhook(elapsed time.Duration, rejected bool, attr admission.Attributes, stepType string, extraLabels ...string) {
|
||||
gvr := attr.GetResource()
|
||||
m.webhook.observe(elapsed, hook.Name, t, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))
|
||||
m.webhook.observe(elapsed, append(extraLabels, stepType, string(attr.GetOperation()), gvr.Group, gvr.Version, gvr.Resource, attr.GetSubresource(), strconv.FormatBool(rejected))...)
|
||||
}
|
||||
|
||||
type metricSet struct {
|
@@ -0,0 +1,250 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
)
|
||||
|
||||
var (
|
||||
kind = schema.GroupVersionKind{Group: "kgroup", Version: "kversion", Kind: "kind"}
|
||||
resource = schema.GroupVersionResource{Group: "rgroup", Version: "rversion", Resource: "resource"}
|
||||
attr = admission.NewAttributesRecord(nil, nil, kind, "ns", "name", resource, "subresource", admission.Create, nil)
|
||||
)
|
||||
|
||||
func TestObserveAdmissionStep(t *testing.T) {
|
||||
Metrics.reset()
|
||||
handler := WithStepMetrics(&mutatingAndValidatingFakeHandler{admission.NewHandler(admission.Create), true, true})
|
||||
handler.(admission.MutationInterface).Admit(attr)
|
||||
handler.(admission.ValidationInterface).Validate(attr)
|
||||
wantLabels := map[string]string{
|
||||
"operation": string(admission.Create),
|
||||
"group": resource.Group,
|
||||
"version": resource.Version,
|
||||
"resource": resource.Resource,
|
||||
"subresource": "subresource",
|
||||
"type": "admit",
|
||||
"rejected": "false",
|
||||
}
|
||||
expectHistogramCountTotal(t, "apiserver_admission_step_admission_latencies_seconds", wantLabels, 1)
|
||||
expectFindMetric(t, "apiserver_admission_step_admission_latencies_seconds_summary", wantLabels)
|
||||
|
||||
wantLabels["type"] = "validate"
|
||||
expectHistogramCountTotal(t, "apiserver_admission_step_admission_latencies_seconds", wantLabels, 1)
|
||||
expectFindMetric(t, "apiserver_admission_step_admission_latencies_seconds_summary", wantLabels)
|
||||
}
|
||||
|
||||
func TestObserveAdmissionController(t *testing.T) {
|
||||
Metrics.reset()
|
||||
handler := WithControllerMetrics(&mutatingAndValidatingFakeHandler{admission.NewHandler(admission.Create), true, true}, "a")
|
||||
handler.(admission.MutationInterface).Admit(attr)
|
||||
handler.(admission.ValidationInterface).Validate(attr)
|
||||
wantLabels := map[string]string{
|
||||
"name": "a",
|
||||
"operation": string(admission.Create),
|
||||
"group": resource.Group,
|
||||
"version": resource.Version,
|
||||
"resource": resource.Resource,
|
||||
"subresource": "subresource",
|
||||
"type": "validate",
|
||||
"rejected": "false",
|
||||
}
|
||||
expectHistogramCountTotal(t, "apiserver_admission_controller_admission_latencies_seconds", wantLabels, 1)
|
||||
expectFindMetric(t, "apiserver_admission_controller_admission_latencies_seconds_summary", wantLabels)
|
||||
|
||||
wantLabels["type"] = "validate"
|
||||
expectHistogramCountTotal(t, "apiserver_admission_controller_admission_latencies_seconds", wantLabels, 1)
|
||||
expectFindMetric(t, "apiserver_admission_controller_admission_latencies_seconds_summary", wantLabels)
|
||||
}
|
||||
|
||||
func TestObserveWebhook(t *testing.T) {
|
||||
Metrics.reset()
|
||||
Metrics.ObserveWebhook(2*time.Second, false, attr, stepAdmit, "x")
|
||||
wantLabels := map[string]string{
|
||||
"name": "x",
|
||||
"operation": string(admission.Create),
|
||||
"group": resource.Group,
|
||||
"version": resource.Version,
|
||||
"resource": resource.Resource,
|
||||
"subresource": "subresource",
|
||||
"type": "admit",
|
||||
"rejected": "false",
|
||||
}
|
||||
expectHistogramCountTotal(t, "apiserver_admission_webhook_admission_latencies_seconds", wantLabels, 1)
|
||||
expectFindMetric(t, "apiserver_admission_webhook_admission_latencies_seconds_summary", wantLabels)
|
||||
}
|
||||
|
||||
func TestWithMetrics(t *testing.T) {
|
||||
Metrics.reset()
|
||||
|
||||
type Test struct {
|
||||
name string
|
||||
ns string
|
||||
operation admission.Operation
|
||||
handler admission.Interface
|
||||
admit, validate bool
|
||||
}
|
||||
for _, test := range []Test{
|
||||
{
|
||||
"both-interfaces-admit-and-validate",
|
||||
"some-ns",
|
||||
admission.Create,
|
||||
&mutatingAndValidatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), true, true},
|
||||
true, true,
|
||||
},
|
||||
{
|
||||
"both-interfaces-dont-admit",
|
||||
"some-ns",
|
||||
admission.Create,
|
||||
&mutatingAndValidatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), false, true},
|
||||
false, true,
|
||||
},
|
||||
{
|
||||
"both-interfaces-admit-dont-validate",
|
||||
"some-ns",
|
||||
admission.Create,
|
||||
&mutatingAndValidatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), true, false},
|
||||
true, false,
|
||||
},
|
||||
{
|
||||
"validate-interfaces-validate",
|
||||
"some-ns",
|
||||
admission.Create,
|
||||
&validatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), true},
|
||||
true, true,
|
||||
},
|
||||
{
|
||||
"validate-interfaces-dont-validate",
|
||||
"some-ns",
|
||||
admission.Create,
|
||||
&validatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), true},
|
||||
true, false,
|
||||
},
|
||||
{
|
||||
"mutating-interfaces-admit",
|
||||
"some-ns",
|
||||
admission.Create,
|
||||
&mutatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), true},
|
||||
true, true,
|
||||
},
|
||||
{
|
||||
"mutating-interfaces-dont-admit",
|
||||
"some-ns",
|
||||
admission.Create,
|
||||
&mutatingFakeHandler{admission.NewHandler(admission.Create, admission.Update), false},
|
||||
true, false,
|
||||
},
|
||||
} {
|
||||
Metrics.reset()
|
||||
|
||||
h := WithMetrics(test.handler, Metrics.ObserveAdmissionController, test.name)
|
||||
|
||||
// test mutation
|
||||
err := h.(admission.MutationInterface).Admit(admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, test.ns, "", schema.GroupVersionResource{}, "", test.operation, nil))
|
||||
if test.admit && err != nil {
|
||||
t.Errorf("expected admit to succeed, but failed: %v", err)
|
||||
continue
|
||||
} else if !test.admit && err == nil {
|
||||
t.Errorf("expected admit to fail, but it succeeded")
|
||||
continue
|
||||
}
|
||||
|
||||
filter := map[string]string{"rejected": "false"}
|
||||
if !test.admit {
|
||||
filter["rejected"] = "true"
|
||||
}
|
||||
if _, mutating := test.handler.(admission.MutationInterface); mutating {
|
||||
expectHistogramCountTotal(t, "apiserver_admission_controller_admission_latencies_seconds", filter, 1)
|
||||
} else {
|
||||
expectHistogramCountTotal(t, "apiserver_admission_controller_admission_latencies_seconds", filter, 0)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// skip validation step if mutation failed
|
||||
continue
|
||||
}
|
||||
|
||||
// test validation
|
||||
err = h.(admission.ValidationInterface).Validate(admission.NewAttributesRecord(nil, nil, schema.GroupVersionKind{}, test.ns, "", schema.GroupVersionResource{}, "", test.operation, nil))
|
||||
if test.validate && err != nil {
|
||||
t.Errorf("expected admit to succeed, but failed: %v", err)
|
||||
continue
|
||||
} else if !test.validate && err == nil {
|
||||
t.Errorf("expected validation to fail, but it succeeded")
|
||||
continue
|
||||
}
|
||||
|
||||
filter = map[string]string{"rejected": "false"}
|
||||
if !test.admit {
|
||||
filter["rejected"] = "true"
|
||||
}
|
||||
if _, validating := test.handler.(admission.ValidationInterface); validating {
|
||||
expectHistogramCountTotal(t, "apiserver_admission_controller_admission_latencies_seconds", filter, 1)
|
||||
} else {
|
||||
expectHistogramCountTotal(t, "apiserver_admission_controller_admission_latencies_seconds", filter, 0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type mutatingAndValidatingFakeHandler struct {
|
||||
*admission.Handler
|
||||
admit bool
|
||||
validate bool
|
||||
}
|
||||
|
||||
func (h *mutatingAndValidatingFakeHandler) Admit(a admission.Attributes) (err error) {
|
||||
if h.admit {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("don't admit")
|
||||
}
|
||||
|
||||
func (h *mutatingAndValidatingFakeHandler) Validate(a admission.Attributes) (err error) {
|
||||
if h.validate {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("don't validate")
|
||||
}
|
||||
|
||||
type validatingFakeHandler struct {
|
||||
*admission.Handler
|
||||
validate bool
|
||||
}
|
||||
|
||||
func (h *validatingFakeHandler) Validate(a admission.Attributes) (err error) {
|
||||
if h.validate {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("don't validate")
|
||||
}
|
||||
|
||||
type mutatingFakeHandler struct {
|
||||
*admission.Handler
|
||||
admit bool
|
||||
}
|
||||
|
||||
func (h *mutatingFakeHandler) Admit(a admission.Attributes) (err error) {
|
||||
if h.admit {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("don't admit")
|
||||
}
|
@@ -14,90 +14,15 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package admission
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
ptype "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// FakeHandler provide a mock implement both MutationInterface and ValidationInterface that tracks which
|
||||
// methods have been called and always returns an error if admit is false.
|
||||
type FakeHandler struct {
|
||||
*Handler
|
||||
admit bool
|
||||
admitCalled bool
|
||||
validateCalled bool
|
||||
}
|
||||
|
||||
func (h *FakeHandler) Admit(a Attributes) (err error) {
|
||||
h.admitCalled = true
|
||||
if h.admit {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Don't admit")
|
||||
}
|
||||
|
||||
func (h *FakeHandler) Validate(a Attributes) (err error) {
|
||||
h.validateCalled = true
|
||||
if h.admit {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Don't admit")
|
||||
}
|
||||
|
||||
func makeHandler(admit bool, ops ...Operation) *FakeHandler {
|
||||
return &FakeHandler{
|
||||
admit: admit,
|
||||
Handler: NewHandler(ops...),
|
||||
}
|
||||
}
|
||||
|
||||
func makeNamedHandler(name string, admit bool, ops ...Operation) NamedHandler {
|
||||
return &pluginHandler{
|
||||
i: &FakeHandler{
|
||||
admit: admit,
|
||||
Handler: NewHandler(ops...),
|
||||
},
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// FakeValidatingHandler provide a mock of ValidationInterface that tracks which
|
||||
// methods have been called and always returns an error if validate is false.
|
||||
type FakeValidatingHandler struct {
|
||||
*Handler
|
||||
validate, validateCalled bool
|
||||
}
|
||||
|
||||
func (h *FakeValidatingHandler) Validate(a Attributes) (err error) {
|
||||
h.validateCalled = true
|
||||
if h.validate {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Don't validate")
|
||||
}
|
||||
|
||||
func makeValidatingHandler(validate bool, ops ...Operation) *FakeValidatingHandler {
|
||||
return &FakeValidatingHandler{
|
||||
validate: validate,
|
||||
Handler: NewHandler(ops...),
|
||||
}
|
||||
}
|
||||
|
||||
func makeValidatingNamedHandler(name string, validate bool, ops ...Operation) NamedHandler {
|
||||
return &pluginHandler{
|
||||
i: &FakeValidatingHandler{
|
||||
validate: validate,
|
||||
Handler: NewHandler(ops...),
|
||||
},
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
func labelsMatch(metric *ptype.Metric, labelFilter map[string]string) bool {
|
||||
for _, lp := range metric.GetLabel() {
|
||||
if value, ok := labelFilter[lp.GetName()]; ok && lp.GetValue() != value {
|
@@ -1,84 +0,0 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package admission
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/admissionregistration/v1alpha1"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
var (
|
||||
kind = schema.GroupVersionKind{Group: "kgroup", Version: "kversion", Kind: "kind"}
|
||||
resource = schema.GroupVersionResource{Group: "rgroup", Version: "rversion", Resource: "resource"}
|
||||
attr = NewAttributesRecord(nil, nil, kind, "ns", "name", resource, "subresource", Create, nil)
|
||||
)
|
||||
|
||||
func TestObserveAdmissionStep(t *testing.T) {
|
||||
Metrics.reset()
|
||||
Metrics.ObserveAdmissionStep(2*time.Second, false, attr, "admit")
|
||||
wantLabels := map[string]string{
|
||||
"operation": string(Create),
|
||||
"group": resource.Group,
|
||||
"version": resource.Version,
|
||||
"resource": resource.Resource,
|
||||
"subresource": "subresource",
|
||||
"type": "admit",
|
||||
"rejected": "false",
|
||||
}
|
||||
expectHistogramCountTotal(t, "apiserver_admission_step_admission_latencies_seconds", wantLabels, 1)
|
||||
expectFindMetric(t, "apiserver_admission_step_admission_latencies_seconds_summary", wantLabels)
|
||||
}
|
||||
|
||||
func TestObserveAdmissionController(t *testing.T) {
|
||||
Metrics.reset()
|
||||
handler := makeValidatingNamedHandler("a", true, Create)
|
||||
Metrics.ObserveAdmissionController(2*time.Second, false, handler, attr, "validate")
|
||||
wantLabels := map[string]string{
|
||||
"name": "a",
|
||||
"operation": string(Create),
|
||||
"group": resource.Group,
|
||||
"version": resource.Version,
|
||||
"resource": resource.Resource,
|
||||
"subresource": "subresource",
|
||||
"type": "validate",
|
||||
"rejected": "false",
|
||||
}
|
||||
expectHistogramCountTotal(t, "apiserver_admission_controller_admission_latencies_seconds", wantLabels, 1)
|
||||
expectFindMetric(t, "apiserver_admission_controller_admission_latencies_seconds_summary", wantLabels)
|
||||
}
|
||||
|
||||
func TestObserveWebhook(t *testing.T) {
|
||||
Metrics.reset()
|
||||
hook := &v1alpha1.Webhook{Name: "x"}
|
||||
Metrics.ObserveWebhook(2*time.Second, false, hook, attr)
|
||||
wantLabels := map[string]string{
|
||||
"name": "x",
|
||||
"operation": string(Create),
|
||||
"group": resource.Group,
|
||||
"version": resource.Version,
|
||||
"resource": resource.Resource,
|
||||
"subresource": "subresource",
|
||||
"type": "admit",
|
||||
"rejected": "false",
|
||||
}
|
||||
expectHistogramCountTotal(t, "apiserver_admission_webhook_admission_latencies_seconds", wantLabels, 1)
|
||||
expectFindMetric(t, "apiserver_admission_webhook_admission_latencies_seconds_summary", wantLabels)
|
||||
}
|
@@ -23,6 +23,7 @@ go_library(
|
||||
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission/configuration:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission/metrics:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace:go_default_library",
|
||||
|
@@ -39,6 +39,7 @@ import (
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/admission/configuration"
|
||||
genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer"
|
||||
admissionmetrics "k8s.io/apiserver/pkg/admission/metrics"
|
||||
"k8s.io/apiserver/pkg/admission/plugin/webhook/config"
|
||||
webhookerrors "k8s.io/apiserver/pkg/admission/plugin/webhook/errors"
|
||||
"k8s.io/apiserver/pkg/admission/plugin/webhook/namespace"
|
||||
@@ -102,6 +103,8 @@ func NewMutatingWebhook(configFile io.Reader) (*MutatingWebhook, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ admission.MutationInterface = &MutatingWebhook{}
|
||||
|
||||
// MutatingWebhook is an implementation of admission.Interface.
|
||||
type MutatingWebhook struct {
|
||||
*admission.Handler
|
||||
@@ -238,7 +241,7 @@ func (a *MutatingWebhook) Admit(attr admission.Attributes) error {
|
||||
for _, hook := range relevantHooks {
|
||||
t := time.Now()
|
||||
err := a.callAttrMutatingHook(ctx, hook, versionedAttr)
|
||||
admission.Metrics.ObserveWebhook(time.Since(t), err != nil, hook, attr)
|
||||
admissionmetrics.Metrics.ObserveWebhook(time.Since(t), err != nil, attr, "admit", hook.Name)
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
|
@@ -21,6 +21,7 @@ go_library(
|
||||
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission/configuration:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission/metrics:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/config:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/errors:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/namespace:go_default_library",
|
||||
|
@@ -38,6 +38,7 @@ import (
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/admission/configuration"
|
||||
genericadmissioninit "k8s.io/apiserver/pkg/admission/initializer"
|
||||
admissionmetrics "k8s.io/apiserver/pkg/admission/metrics"
|
||||
"k8s.io/apiserver/pkg/admission/plugin/webhook/config"
|
||||
webhookerrors "k8s.io/apiserver/pkg/admission/plugin/webhook/errors"
|
||||
"k8s.io/apiserver/pkg/admission/plugin/webhook/namespace"
|
||||
@@ -101,6 +102,8 @@ func NewValidatingAdmissionWebhook(configFile io.Reader) (*ValidatingAdmissionWe
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ admission.ValidationInterface = &ValidatingAdmissionWebhook{}
|
||||
|
||||
// ValidatingAdmissionWebhook is an implementation of admission.Interface.
|
||||
type ValidatingAdmissionWebhook struct {
|
||||
*admission.Handler
|
||||
@@ -185,8 +188,8 @@ func (a *ValidatingAdmissionWebhook) loadConfiguration(attr admission.Attributes
|
||||
return hookConfig, nil
|
||||
}
|
||||
|
||||
// Admit makes an admission decision based on the request attributes.
|
||||
func (a *ValidatingAdmissionWebhook) Admit(attr admission.Attributes) error {
|
||||
// Validate makes an admission decision based on the request attributes.
|
||||
func (a *ValidatingAdmissionWebhook) Validate(attr admission.Attributes) error {
|
||||
hookConfig, err := a.loadConfiguration(attr)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -238,7 +241,7 @@ func (a *ValidatingAdmissionWebhook) Admit(attr admission.Attributes) error {
|
||||
|
||||
t := time.Now()
|
||||
err := a.callHook(ctx, hook, versionedAttr)
|
||||
admission.Metrics.ObserveWebhook(time.Since(t), err != nil, hook, attr)
|
||||
admissionmetrics.Metrics.ObserveWebhook(time.Since(t), err != nil, attr, "validating", hook.Name)
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
|
@@ -116,8 +116,8 @@ func (c urlConfigGenerator) ccfgURL(urlPath string) registrationv1alpha1.Webhook
|
||||
}
|
||||
}
|
||||
|
||||
// TestAdmit tests that ValidatingAdmissionWebhook#Admit works as expected
|
||||
func TestAdmit(t *testing.T) {
|
||||
// TestValidate tests that ValidatingAdmissionWebhook#Validate works as expected
|
||||
func TestValidate(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
v1alpha1.AddToScheme(scheme)
|
||||
corev1.AddToScheme(scheme)
|
||||
@@ -393,7 +393,7 @@ func TestAdmit(t *testing.T) {
|
||||
}
|
||||
t.Run(name, func(t *testing.T) {
|
||||
wh.hookSource = &tt.hookSource
|
||||
err = wh.Admit(admission.NewAttributesRecord(&object, &oldObject, kind, namespace, name, resource, subResource, operation, &userInfo))
|
||||
err = wh.Validate(admission.NewAttributesRecord(&object, &oldObject, kind, namespace, name, resource, subResource, operation, &userInfo))
|
||||
if tt.expectAllow != (err == nil) {
|
||||
t.Errorf("expected allowed=%v, but got err=%v", tt.expectAllow, err)
|
||||
}
|
||||
@@ -410,8 +410,8 @@ func TestAdmit(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestAdmitCachedClient tests that ValidatingAdmissionWebhook#Admit should cache restClient
|
||||
func TestAdmitCachedClient(t *testing.T) {
|
||||
// TestValidateCachedClient tests that ValidatingAdmissionWebhook#Validate should cache restClient
|
||||
func TestValidateCachedClient(t *testing.T) {
|
||||
scheme := runtime.NewScheme()
|
||||
v1alpha1.AddToScheme(scheme)
|
||||
corev1.AddToScheme(scheme)
|
||||
@@ -560,7 +560,7 @@ func TestAdmitCachedClient(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = wh.Admit(admission.NewAttributesRecord(&object, &oldObject, kind, namespace, testcase.name, resource, subResource, operation, &userInfo))
|
||||
err = wh.Validate(admission.NewAttributesRecord(&object, &oldObject, kind, namespace, testcase.name, resource, subResource, operation, &userInfo))
|
||||
if testcase.expectAllow != (err == nil) {
|
||||
t.Errorf("expected allowed=%v, but got err=%v", testcase.expectAllow, err)
|
||||
}
|
||||
|
@@ -39,20 +39,6 @@ type Plugins struct {
|
||||
registry map[string]Factory
|
||||
}
|
||||
|
||||
// pluginHandler associates name with a admission.Interface handler.
|
||||
type pluginHandler struct {
|
||||
i Interface
|
||||
name string
|
||||
}
|
||||
|
||||
func (h *pluginHandler) Interface() Interface {
|
||||
return h.i
|
||||
}
|
||||
|
||||
func (h *pluginHandler) Name() string {
|
||||
return h.name
|
||||
}
|
||||
|
||||
// All registered admission options.
|
||||
var (
|
||||
// PluginEnabledFn checks whether a plugin is enabled. By default, if you ask about it, it's enabled.
|
||||
@@ -132,10 +118,12 @@ func splitStream(config io.Reader) (io.Reader, io.Reader, error) {
|
||||
return bytes.NewBuffer(configBytes), bytes.NewBuffer(configBytes), nil
|
||||
}
|
||||
|
||||
type Decorator func(handler Interface, name string) Interface
|
||||
|
||||
// NewFromPlugins returns an admission.Interface that will enforce admission control decisions of all
|
||||
// the given plugins.
|
||||
func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigProvider, pluginInitializer PluginInitializer) (Interface, error) {
|
||||
handlers := []NamedHandler{}
|
||||
func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigProvider, pluginInitializer PluginInitializer, decorator Decorator) (Interface, error) {
|
||||
handlers := []Interface{}
|
||||
for _, pluginName := range pluginNames {
|
||||
pluginConfig, err := configProvider.ConfigFor(pluginName)
|
||||
if err != nil {
|
||||
@@ -147,8 +135,11 @@ func (ps *Plugins) NewFromPlugins(pluginNames []string, configProvider ConfigPro
|
||||
return nil, err
|
||||
}
|
||||
if plugin != nil {
|
||||
handler := &pluginHandler{i: plugin, name: pluginName}
|
||||
handlers = append(handlers, handler)
|
||||
if decorator != nil {
|
||||
handlers = append(handlers, decorator(plugin, pluginName))
|
||||
} else {
|
||||
handlers = append(handlers, plugin)
|
||||
}
|
||||
}
|
||||
}
|
||||
return chainAdmissionHandler(handlers), nil
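// Caller sketch (assumption about the call site): the decorator parameter lets
// the apiserver attach per-controller metrics, or pass nil to skip decoration:
//
//	chain, err := ps.NewFromPlugins(pluginNames, configProvider, pluginInitializer,
//		admissionmetrics.WithControllerMetrics)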
|
||||
|
@@ -153,6 +153,11 @@ type Policy struct {
|
||||
// The default audit level is None, but can be overridden by a catch-all rule at the end of the list.
|
||||
// PolicyRules are strictly ordered.
|
||||
Rules []PolicyRule
|
||||
|
||||
// OmitStages is a list of stages for which no events are created. Note that this can also
|
||||
// be specified per rule in which case the union of both are omitted.
|
||||
// +optional
|
||||
OmitStages []Stage
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
@@ -208,8 +213,10 @@ type PolicyRule struct {
|
||||
// +optional
|
||||
NonResourceURLs []string
|
||||
|
||||
// OmitStages specify events generated in which stages will not be emitted to backend.
|
||||
// OmitStages is a list of stages for which no events are created. Note that this can also
|
||||
// be specified policy wide in which case the union of both are omitted.
|
||||
// An empty list means no restrictions will apply.
|
||||
// +optional
|
||||
OmitStages []Stage
|
||||
}
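// Illustrative semantics (stage names are examples): with Policy.OmitStages =
// ["RequestReceived"] and a matching rule's OmitStages = ["ResponseStarted"],
// events for that rule are omitted at both stages, i.e. the union of the lists.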
|
||||
|
||||
|
@@ -414,6 +414,21 @@ func (m *Policy) MarshalTo(dAtA []byte) (int, error) {
|
||||
i += n
|
||||
}
|
||||
}
|
||||
if len(m.OmitStages) > 0 {
|
||||
for _, s := range m.OmitStages {
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
l >>= 7
|
||||
i++
|
||||
}
|
||||
dAtA[i] = uint8(l)
|
||||
i++
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
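// Worked example (assumption): for OmitStages = ["RequestReceived"], field 3
// with wire type 2 gives the key byte 0x1a (3<<3 | 2), followed by the varint
// length 0x0f (15) and the 15 UTF-8 bytes of "RequestReceived".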
|
||||
|
||||
@@ -723,6 +738,12 @@ func (m *Policy) Size() (n int) {
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
if len(m.OmitStages) > 0 {
|
||||
for _, s := range m.OmitStages {
|
||||
l = len(s)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
@@ -874,6 +895,7 @@ func (this *Policy) String() string {
|
||||
s := strings.Join([]string{`&Policy{`,
|
||||
`ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
|
||||
`Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`,
|
||||
`OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
@@ -2044,6 +2066,35 @@ func (m *Policy) Unmarshal(dAtA []byte) error {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
@@ -2570,80 +2621,80 @@ func init() {
|
||||
}
|
||||
|
||||
var fileDescriptorGenerated = []byte{
|
||||
// 1185 bytes of a gzipped FileDescriptorProto
|
||||
// 1190 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0x1b, 0x45,
|
||||
0x14, 0xcf, 0xd6, 0x71, 0x62, 0x4f, 0x1a, 0x27, 0x9d, 0x22, 0xba, 0xca, 0xc1, 0x36, 0x46, 0x42,
|
||||
0x11, 0x84, 0xdd, 0xa4, 0x04, 0x5a, 0x0e, 0x1c, 0x62, 0x15, 0x81, 0xa5, 0x34, 0x84, 0x49, 0x5c,
|
||||
0x89, 0x3f, 0x07, 0xd6, 0xf6, 0x8b, 0x3d, 0xc4, 0xde, 0x5d, 0x66, 0x66, 0x8d, 0x72, 0xe3, 0xc0,
|
||||
0x15, 0x89, 0x3b, 0x1f, 0xa6, 0xe2, 0x50, 0x29, 0xc7, 0x1e, 0x7b, 0xb2, 0x88, 0xf9, 0x16, 0x39,
|
||||
// (remaining gzipped FileDescriptorProto bytes not reproduced here; the old descriptor bytes are replaced by the regenerated descriptor bytes)
}
|
@@ -164,6 +164,11 @@ message Policy {
// The default audit level is None, but can be overridden by a catch-all rule at the end of the list.
// PolicyRules are strictly ordered.
repeated PolicyRule rules = 2;

// OmitStages is a list of stages for which no events are created. Note that this can also
// be specified per rule in which case the union of both are omitted.
// +optional
repeated string omitStages = 3;
}

// PolicyList is a list of audit Policies.
@@ -214,8 +219,10 @@ message PolicyRule {
// +optional
repeated string nonResourceURLs = 7;

// OmitStages specify events generated in which stages will not be emitted to backend.
// OmitStages is a list of stages for which no events are created. Note that this can also
// be specified policy wide in which case the union of both are omitted.
// An empty list means no restrictions will apply.
// +optional
repeated string omitStages = 8;
}

|
@@ -160,6 +160,11 @@ type Policy struct {
// The default audit level is None, but can be overridden by a catch-all rule at the end of the list.
// PolicyRules are strictly ordered.
Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`

// OmitStages is a list of stages for which no events are created. Note that this can also
// be specified per rule in which case the union of both are omitted.
// +optional
OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,3,rep,name=omitStages"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -215,6 +220,10 @@ type PolicyRule struct {
// +optional
NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,7,rep,name=nonResourceURLs"`

// OmitStages specify events generated in which stages will not be emitted to backend.
// OmitStages is a list of stages for which no events are created. Note that this can also
// be specified policy wide in which case the union of both are omitted.
// An empty list means no restrictions will apply.
// +optional
OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,8,rep,name=omitStages"`
}

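To make the relationship between the two OmitStages fields concrete, here is a minimal illustrative sketch built only from the types shown in this diff; the audit API import path is an assumption and not part of the change:

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/apis/audit" // assumed import path for the audit API group
)

func main() {
	// Policy-wide OmitStages applies to every rule; a rule's own OmitStages is
	// unioned with it (see the policy checker change later in this commit).
	p := audit.Policy{
		OmitStages: []audit.Stage{audit.StageRequestReceived},
		Rules: []audit.PolicyRule{{
			Level:      audit.LevelMetadata,
			OmitStages: []audit.Stage{audit.StagePanic},
		}},
	}
	fmt.Printf("policy omits %v, first rule omits %v\n", p.OmitStages, p.Rules[0].OmitStages)
}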
|
@@ -207,6 +207,7 @@ func autoConvert_audit_ObjectReference_To_v1alpha1_ObjectReference(in *audit.Obj
func autoConvert_v1alpha1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]audit.PolicyRule)(unsafe.Pointer(&in.Rules))
out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages))
return nil
}

@@ -218,6 +219,7 @@ func Convert_v1alpha1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s co
func autoConvert_audit_Policy_To_v1alpha1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules))
out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages))
return nil
}

|
@@ -195,6 +195,11 @@ func (in *Policy) DeepCopyInto(out *Policy) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.OmitStages != nil {
in, out := &in.OmitStages, &out.OmitStages
*out = make([]Stage, len(*in))
copy(*out, *in)
}
return
}

|
@@ -418,6 +418,21 @@ func (m *Policy) MarshalTo(dAtA []byte) (int, error) {
i += n
}
}
if len(m.OmitStages) > 0 {
for _, s := range m.OmitStages {
dAtA[i] = 0x1a
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
return i, nil
}

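The generated loop above writes the field tag 0x1a (field 3, wire type 2, matching omitStages = 3 in the proto) and then the string length as a protobuf varint: seven payload bits per byte, high bit set while more bytes follow. A small self-contained sketch of that length encoding (illustrative only, not part of the generated file):

package main

import "fmt"

// encodeVarint mirrors the length-prefix loop in the generated MarshalTo:
// emit 7 bits per byte and set the high bit while more bytes remain.
func encodeVarint(buf []byte, i int, v uint64) int {
	for v >= 1<<7 {
		buf[i] = uint8(v&0x7f | 0x80)
		v >>= 7
		i++
	}
	buf[i] = uint8(v)
	return i + 1
}

func main() {
	buf := make([]byte, 10)
	n := encodeVarint(buf, 0, 300)
	fmt.Printf("% x\n", buf[:n]) // prints "ac 02"
}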
@@ -729,6 +744,12 @@ func (m *Policy) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
if len(m.OmitStages) > 0 {
for _, s := range m.OmitStages {
l = len(s)
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}

@@ -881,6 +902,7 @@ func (this *Policy) String() string {
s := strings.Join([]string{`&Policy{`,
`ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
`Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`,
`OmitStages:` + fmt.Sprintf("%v", this.OmitStages) + `,`,
`}`,
}, "")
return s
@@ -2080,6 +2102,35 @@ func (m *Policy) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field OmitStages", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.OmitStages = append(m.OmitStages, Stage(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -2606,81 +2657,81 @@ func init() {
}

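The case 3 branch above is the mirror image of the encoder: it reassembles the varint length from 7-bit groups and then slices that many bytes as the stage value. A self-contained sketch of the decode step (illustrative only):

package main

import "fmt"

// decodeVarint mirrors the shift/accumulate loop in the generated Unmarshal.
func decodeVarint(buf []byte, i int) (uint64, int) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		b := buf[i]
		i++
		v |= (uint64(b) & 0x7f) << shift
		if b < 0x80 {
			break
		}
	}
	return v, i
}

func main() {
	v, next := decodeVarint([]byte{0xac, 0x02, 'x'}, 0)
	fmt.Println(v, next) // 300 2
}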
var fileDescriptorGenerated = []byte{
// 1211 bytes of a gzipped FileDescriptorProto
// 1216 bytes of a gzipped FileDescriptorProto
// (descriptor byte values not reproduced here; the 1211-byte descriptor is replaced by the regenerated 1216-byte descriptor)
}
|
@@ -174,6 +174,11 @@ message Policy {
// The default audit level is None, but can be overridden by a catch-all rule at the end of the list.
// PolicyRules are strictly ordered.
repeated PolicyRule rules = 2;

// OmitStages is a list of stages for which no events are created. Note that this can also
// be specified per rule in which case the union of both are omitted.
// +optional
repeated string omitStages = 3;
}

// PolicyList is a list of audit Policies.
@@ -224,8 +229,10 @@ message PolicyRule {
// +optional
repeated string nonResourceURLs = 7;

// OmitStages specify events generated in which stages will not be emitted to backend.
// OmitStages is a list of stages for which no events are created. Note that this can also
// be specified policy wide in which case the union of both are omitted.
// An empty list means no restrictions will apply.
// +optional
repeated string omitStages = 8;
}

|
@@ -156,6 +156,11 @@ type Policy struct {
// The default audit level is None, but can be overridden by a catch-all rule at the end of the list.
// PolicyRules are strictly ordered.
Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"`

// OmitStages is a list of stages for which no events are created. Note that this can also
// be specified per rule in which case the union of both are omitted.
// +optional
OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,3,rep,name=omitStages"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -211,8 +216,10 @@ type PolicyRule struct {
// +optional
NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,7,rep,name=nonResourceURLs"`

// OmitStages specify events generated in which stages will not be emitted to backend.
// OmitStages is a list of stages for which no events are created. Note that this can also
// be specified policy wide in which case the union of both are omitted.
// An empty list means no restrictions will apply.
// +optional
OmitStages []Stage `json:"omitStages,omitempty" protobuf:"bytes,8,rep,name=omitStages"`
}

|
@@ -202,6 +202,7 @@ func Convert_audit_ObjectReference_To_v1beta1_ObjectReference(in *audit.ObjectRe
func autoConvert_v1beta1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]audit.PolicyRule)(unsafe.Pointer(&in.Rules))
out.OmitStages = *(*[]audit.Stage)(unsafe.Pointer(&in.OmitStages))
return nil
}

@@ -213,6 +214,7 @@ func Convert_v1beta1_Policy_To_audit_Policy(in *Policy, out *audit.Policy, s con
func autoConvert_audit_Policy_To_v1beta1_Policy(in *audit.Policy, out *Policy, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
out.Rules = *(*[]PolicyRule)(unsafe.Pointer(&in.Rules))
out.OmitStages = *(*[]Stage)(unsafe.Pointer(&in.OmitStages))
return nil
}

|
@@ -195,6 +195,11 @@ func (in *Policy) DeepCopyInto(out *Policy) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.OmitStages != nil {
in, out := &in.OmitStages, &out.OmitStages
*out = make([]Stage, len(*in))
copy(*out, *in)
}
return
}

|
@@ -26,6 +26,7 @@ import (

func ValidatePolicy(policy *audit.Policy) field.ErrorList {
var allErrs field.ErrorList
allErrs = append(allErrs, validateOmitStages(policy.OmitStages, field.NewPath("omitStages"))...)
rulePath := field.NewPath("rules")
for i, rule := range policy.Rules {
allErrs = append(allErrs, validatePolicyRule(rule, rulePath.Index(i))...)
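A rough usage sketch of the new policy-wide check, mirroring the error cases in the test below; the import paths are assumptions:

package main

import (
	"fmt"

	"k8s.io/apiserver/pkg/apis/audit"            // assumed import path
	"k8s.io/apiserver/pkg/apis/audit/validation" // assumed import path
)

func main() {
	// "foo" is not a recognized stage, so the policy-wide omitStages entry is rejected.
	bad := audit.Policy{
		OmitStages: []audit.Stage{audit.Stage("foo")},
		Rules:      []audit.PolicyRule{{Level: audit.LevelMetadata}},
	}
	if errs := validation.ValidatePolicy(&bad); len(errs) > 0 {
		fmt.Println("policy rejected:", errs)
	}
}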
|
@@ -54,7 +54,9 @@ func TestValidatePolicy(t *testing.T) {
for _, rule := range validRules {
successCases = append(successCases, audit.Policy{Rules: []audit.PolicyRule{rule}})
}
successCases = append(successCases, audit.Policy{}) // Empty policy is valid.
successCases = append(successCases, audit.Policy{OmitStages: []audit.Stage{ // Policy with omitStages
audit.Stage("RequestReceived")}})
successCases = append(successCases, audit.Policy{Rules: validRules}) // Multiple rules.

for i, policy := range successCases {
@@ -113,7 +115,7 @@ func TestValidatePolicy(t *testing.T) {
Resources: []audit.GroupResources{{ResourceNames: []string{"leader"}}},
Namespaces: []string{"kube-system"},
},
{ // invalid omitStages
{ // invalid omitStages in rule
Level: audit.LevelMetadata,
OmitStages: []audit.Stage{
audit.Stage("foo"),
@@ -124,7 +126,21 @@ func TestValidatePolicy(t *testing.T) {
for _, rule := range invalidRules {
errorCases = append(errorCases, audit.Policy{Rules: []audit.PolicyRule{rule}})
}
errorCases = append(errorCases, audit.Policy{Rules: append(validRules, audit.PolicyRule{})}) // Multiple rules.

// Multiple rules.
errorCases = append(errorCases, audit.Policy{Rules: append(validRules, audit.PolicyRule{})})

// invalid omitStages in policy
policy := audit.Policy{OmitStages: []audit.Stage{
audit.Stage("foo"),
},
Rules: []audit.PolicyRule{
{
Level: audit.LevelMetadata,
},
},
}
errorCases = append(errorCases, policy)

for i, policy := range errorCases {
if errs := ValidatePolicy(&policy); len(errs) == 0 {
|
@@ -192,6 +192,11 @@ func (in *Policy) DeepCopyInto(out *Policy) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.OmitStages != nil {
in, out := &in.OmitStages, &out.OmitStages
*out = make([]Stage, len(*in))
copy(*out, *in)
}
return
}

|
@@ -36,9 +36,26 @@ type Checker interface {

// NewChecker creates a new policy checker.
func NewChecker(policy *audit.Policy) Checker {
for i, rule := range policy.Rules {
policy.Rules[i].OmitStages = unionStages(policy.OmitStages, rule.OmitStages)
}
return &policyChecker{*policy}
}

func unionStages(stageLists ...[]audit.Stage) []audit.Stage {
m := make(map[audit.Stage]bool)
for _, sl := range stageLists {
for _, s := range sl {
m[s] = true
}
}
result := make([]audit.Stage, 0, len(m))
for key := range m {
result = append(result, key)
}
return result
}

// FakeChecker creates a checker that returns a constant level for all requests (for testing).
func FakeChecker(level audit.Level, stage []audit.Stage) Checker {
return &fakeChecker{level, stage}
@@ -54,7 +71,7 @@ func (p *policyChecker) LevelAndStages(attrs authorizer.Attributes) (audit.Level
return rule.Level, rule.OmitStages
}
}
return DefaultAuditLevel, nil
return DefaultAuditLevel, p.OmitStages
}

// Check whether the rule matches the request attrs.
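The effect of the loop in NewChecker is easiest to see with concrete stage lists. A self-contained sketch of the same union and de-duplication semantics using local types (not the real audit package):

package main

import "fmt"

type stage string

// union mirrors unionStages above: merge the lists and drop duplicates.
func union(lists ...[]stage) []stage {
	seen := map[stage]bool{}
	var out []stage
	for _, l := range lists {
		for _, s := range l {
			if !seen[s] {
				seen[s] = true
				out = append(out, s)
			}
		}
	}
	return out
}

func main() {
	policyWide := []stage{"RequestReceived"}
	perRule := []stage{"Panic", "RequestReceived"}
	// The rule effectively omits both stages; order is not significant.
	fmt.Println(union(policyWide, perRule))
}

Note also the second change above: requests that match no rule now fall back to DefaultAuditLevel together with the policy-wide OmitStages instead of nil.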
|
@@ -28,12 +28,12 @@ import (
"k8s.io/apiserver/pkg/authorization/authorizer"
)

func TestChecker(t *testing.T) {
tim := &user.DefaultInfo{
var (
tim = &user.DefaultInfo{
Name: "tim@k8s.io",
Groups: []string{"humans", "developers"},
}
attrs := map[string]authorizer.Attributes{
attrs = map[string]authorizer.Attributes{
"namespaced": &authorizer.AttributesRecord{
User: tim,
Verb: "get",
@@ -75,7 +75,7 @@ func TestChecker(t *testing.T) {
},
}

rules := map[string]audit.PolicyRule{
rules = map[string]audit.PolicyRule{
"default": {
Level: audit.LevelMetadata,
},
@@ -151,65 +151,165 @@ func TestChecker(t *testing.T) {
},
},
}
)

test := func(req string, expLevel audit.Level, expOmitStages []audit.Stage, ruleNames ...string) {
policy := audit.Policy{}
for _, rule := range ruleNames {
require.Contains(t, rules, rule)
policy.Rules = append(policy.Rules, rules[rule])
func test(t *testing.T, req string, expLevel audit.Level, policyStages, expOmitStages []audit.Stage, ruleNames ...string) {
policy := audit.Policy{OmitStages: policyStages}
for _, rule := range ruleNames {
require.Contains(t, rules, rule)
policy.Rules = append(policy.Rules, rules[rule])
}
require.Contains(t, attrs, req)
actualLevel, actualOmitStages := NewChecker(&policy).LevelAndStages(attrs[req])
assert.Equal(t, expLevel, actualLevel, "request:%s rules:%s", req, strings.Join(ruleNames, ","))
assert.True(t, stageEqual(expOmitStages, actualOmitStages), "request:%s rules:%s, expected stages: %v, actual stages: %v",
req, strings.Join(ruleNames, ","), expOmitStages, actualOmitStages)
}

func testAuditLevel(t *testing.T, stages []audit.Stage) {
test(t, "namespaced", audit.LevelMetadata, stages, stages, "default")
test(t, "namespaced", audit.LevelNone, stages, stages, "create")
test(t, "namespaced", audit.LevelMetadata, stages, stages, "tims")
test(t, "namespaced", audit.LevelMetadata, stages, stages, "humans")
test(t, "namespaced", audit.LevelNone, stages, stages, "serviceAccounts")
test(t, "namespaced", audit.LevelRequestResponse, stages, stages, "getPods")
test(t, "namespaced", audit.LevelNone, stages, stages, "getClusterRoles")
test(t, "namespaced", audit.LevelNone, stages, stages, "getLogs")
test(t, "namespaced", audit.LevelNone, stages, stages, "getMetrics")
test(t, "namespaced", audit.LevelMetadata, stages, stages, "getMetrics", "serviceAccounts", "default")
test(t, "namespaced", audit.LevelRequestResponse, stages, stages, "getMetrics", "getPods", "default")
test(t, "namespaced", audit.LevelRequestResponse, stages, stages, "getPodLogs", "getPods")

test(t, "cluster", audit.LevelMetadata, stages, stages, "default")
test(t, "cluster", audit.LevelNone, stages, stages, "create")
test(t, "cluster", audit.LevelMetadata, stages, stages, "tims")
test(t, "cluster", audit.LevelMetadata, stages, stages, "humans")
test(t, "cluster", audit.LevelNone, stages, stages, "serviceAccounts")
test(t, "cluster", audit.LevelNone, stages, stages, "getPods")
test(t, "cluster", audit.LevelRequestResponse, stages, stages, "getClusterRoles")
test(t, "cluster", audit.LevelRequest, stages, stages, "clusterRoleEdit", "getClusterRoles")
test(t, "cluster", audit.LevelNone, stages, stages, "getLogs")
test(t, "cluster", audit.LevelNone, stages, stages, "getMetrics")
test(t, "cluster", audit.LevelMetadata, stages, stages, "getMetrics", "serviceAccounts", "default")
test(t, "cluster", audit.LevelRequestResponse, stages, stages, "getMetrics", "getClusterRoles", "default")
test(t, "cluster", audit.LevelNone, stages, stages, "getPodLogs", "getPods")

test(t, "nonResource", audit.LevelMetadata, stages, stages, "default")
test(t, "nonResource", audit.LevelNone, stages, stages, "create")
test(t, "nonResource", audit.LevelMetadata, stages, stages, "tims")
test(t, "nonResource", audit.LevelMetadata, stages, stages, "humans")
test(t, "nonResource", audit.LevelNone, stages, stages, "serviceAccounts")
test(t, "nonResource", audit.LevelNone, stages, stages, "getPods")
test(t, "nonResource", audit.LevelNone, stages, stages, "getClusterRoles")
test(t, "nonResource", audit.LevelRequestResponse, stages, stages, "getLogs")
test(t, "nonResource", audit.LevelNone, stages, stages, "getMetrics")
test(t, "nonResource", audit.LevelMetadata, stages, stages, "getMetrics", "serviceAccounts", "default")
test(t, "nonResource", audit.LevelRequestResponse, stages, stages, "getLogs", "getClusterRoles", "default")
test(t, "nonResource", audit.LevelNone, stages, stages, "getPodLogs", "getPods")

test(t, "subresource", audit.LevelRequest, stages, stages, "getPodLogs", "getPods")

}

func TestChecker(t *testing.T) {
testAuditLevel(t, nil)

// test omitStages per rule
test(t, "namespaced", audit.LevelRequest, nil, []audit.Stage{audit.StageRequestReceived}, "omit RequestReceived", "getPods", "default")
|
||||
test(t, "namespaced", audit.LevelRequest, nil, []audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted, audit.StageResponseComplete}, "only audit panic", "getPods", "default")
|
||||
test(t, "cluster", audit.LevelRequest, nil, []audit.Stage{audit.StageRequestReceived}, "omit RequestReceived", "getPods", "default")
|
||||
test(t, "cluster", audit.LevelRequest, nil, []audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted, audit.StageResponseComplete}, "only audit panic", "getPods", "default")
|
||||
test(t, "nonResource", audit.LevelRequest, nil, []audit.Stage{audit.StageRequestReceived}, "omit RequestReceived", "getPods", "default")
|
||||
test(t, "nonResource", audit.LevelRequest, nil, []audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted, audit.StageResponseComplete}, "only audit panic", "getPods", "default")
|
||||
}
|
||||
|
||||
func TestCheckerPolicyOmitStages(t *testing.T) {
|
||||
policyStages := []audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted}
|
||||
testAuditLevel(t, policyStages)
|
||||
|
||||
// test omitStages policy wide
|
||||
test(t, "namespaced", audit.LevelRequest, policyStages, []audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted}, "omit RequestReceived", "getPods", "default")
|
||||
test(t, "namespaced", audit.LevelRequest, policyStages, []audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted, audit.StageResponseComplete}, "only audit panic", "getPods", "default")
|
||||
test(t, "cluster", audit.LevelRequest, policyStages, []audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted}, "omit RequestReceived", "getPods", "default")
|
||||
test(t, "cluster", audit.LevelRequest, policyStages, []audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted, audit.StageResponseComplete}, "only audit panic", "getPods", "default")
|
||||
test(t, "nonResource", audit.LevelMetadata, policyStages, []audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted}, "default", "omit RequestReceived", "getPods")
|
||||
test(t, "nonResource", audit.LevelRequest, policyStages, []audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted, audit.StageResponseComplete}, "only audit panic", "getPods", "default")
|
||||
}
|
||||
|
||||
// stageEqual returns true if s1 and s2 are supersets of each other
func stageEqual(s1, s2 []audit.Stage) bool {
m1 := make(map[audit.Stage]bool)
m2 := make(map[audit.Stage]bool)
for _, s := range s1 {
m1[s] = true
}
for _, s := range s2 {
m2[s] = true
}
if len(m1) != len(m2) {
return false
}
for key, value := range m1 {
if m2[key] != value {
return false
}
require.Contains(t, attrs, req)
actualLevel, actualOmitStages := NewChecker(&policy).LevelAndStages(attrs[req])
assert.Equal(t, expLevel, actualLevel, "request:%s rules:%s", req, strings.Join(ruleNames, ","))
assert.Equal(t, expOmitStages, actualOmitStages, "request:%s rules:%s", req, strings.Join(ruleNames, ","))
}
return true
}

func TestUnionStages(t *testing.T) {
var testCases = []struct {
s1, s2, exp []audit.Stage
}{
{
[]audit.Stage{},
[]audit.Stage{},
[]audit.Stage{},
},
{
[]audit.Stage{audit.StageRequestReceived},
[]audit.Stage{},
[]audit.Stage{audit.StageRequestReceived},
},
{
[]audit.Stage{audit.StageRequestReceived},
[]audit.Stage{audit.StageRequestReceived},
[]audit.Stage{audit.StageRequestReceived},
},
{
[]audit.Stage{audit.StageRequestReceived},
[]audit.Stage{audit.StageResponseStarted},
[]audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted},
},
{
[]audit.Stage{audit.StageRequestReceived, audit.StageRequestReceived},
[]audit.Stage{audit.StageRequestReceived, audit.StageRequestReceived},
[]audit.Stage{audit.StageRequestReceived},
},
{
[]audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted},
[]audit.Stage{audit.StagePanic, audit.StageRequestReceived},
[]audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted, audit.StagePanic},
},
{
nil,
[]audit.Stage{audit.StageRequestReceived},
[]audit.Stage{audit.StageRequestReceived},
},
}

test("namespaced", audit.LevelMetadata, nil, "default")
test("namespaced", audit.LevelNone, nil, "create")
test("namespaced", audit.LevelMetadata, nil, "tims")
test("namespaced", audit.LevelMetadata, nil, "humans")
test("namespaced", audit.LevelNone, nil, "serviceAccounts")
test("namespaced", audit.LevelRequestResponse, nil, "getPods")
test("namespaced", audit.LevelNone, nil, "getClusterRoles")
test("namespaced", audit.LevelNone, nil, "getLogs")
test("namespaced", audit.LevelNone, nil, "getMetrics")
test("namespaced", audit.LevelMetadata, nil, "getMetrics", "serviceAccounts", "default")
test("namespaced", audit.LevelRequestResponse, nil, "getMetrics", "getPods", "default")
test("namespaced", audit.LevelRequestResponse, nil, "getPodLogs", "getPods")
test("namespaced", audit.LevelRequest, []audit.Stage{audit.StageRequestReceived}, "omit RequestReceived", "getPods", "default")
test("namespaced", audit.LevelRequest, []audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted, audit.StageResponseComplete}, "only audit panic", "getPods", "default")

test("cluster", audit.LevelMetadata, nil, "default")
test("cluster", audit.LevelNone, nil, "create")
test("cluster", audit.LevelMetadata, nil, "tims")
test("cluster", audit.LevelMetadata, nil, "humans")
test("cluster", audit.LevelNone, nil, "serviceAccounts")
test("cluster", audit.LevelNone, nil, "getPods")
test("cluster", audit.LevelRequestResponse, nil, "getClusterRoles")
test("cluster", audit.LevelRequest, nil, "clusterRoleEdit", "getClusterRoles")
test("cluster", audit.LevelNone, nil, "getLogs")
test("cluster", audit.LevelNone, nil, "getMetrics")
test("cluster", audit.LevelMetadata, nil, "getMetrics", "serviceAccounts", "default")
test("cluster", audit.LevelRequestResponse, nil, "getMetrics", "getClusterRoles", "default")
test("cluster", audit.LevelNone, nil, "getPodLogs", "getPods")
test("cluster", audit.LevelRequest, []audit.Stage{audit.StageRequestReceived}, "omit RequestReceived", "getPods", "default")
test("cluster", audit.LevelRequest, []audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted, audit.StageResponseComplete}, "only audit panic", "getPods", "default")

test("nonResource", audit.LevelMetadata, nil, "default")
test("nonResource", audit.LevelNone, nil, "create")
test("nonResource", audit.LevelMetadata, nil, "tims")
test("nonResource", audit.LevelMetadata, nil, "humans")
test("nonResource", audit.LevelNone, nil, "serviceAccounts")
test("nonResource", audit.LevelNone, nil, "getPods")
test("nonResource", audit.LevelNone, nil, "getClusterRoles")
test("nonResource", audit.LevelRequestResponse, nil, "getLogs")
test("nonResource", audit.LevelNone, nil, "getMetrics")
test("nonResource", audit.LevelMetadata, nil, "getMetrics", "serviceAccounts", "default")
test("nonResource", audit.LevelRequestResponse, nil, "getLogs", "getClusterRoles", "default")
test("nonResource", audit.LevelNone, nil, "getPodLogs", "getPods")
test("nonResource", audit.LevelRequest, []audit.Stage{audit.StageRequestReceived}, "omit RequestReceived", "getPods", "default")
test("nonResource", audit.LevelRequest, []audit.Stage{audit.StageRequestReceived, audit.StageResponseStarted, audit.StageResponseComplete}, "only audit panic", "getPods", "default")

test("subresource", audit.LevelRequest, nil, "getPodLogs", "getPods")
test("subresource", audit.LevelRequest, nil, "getPods", "getPodLogs")
for _, tc := range testCases {
result := unionStages(tc.s1, tc.s2)
assert.Len(t, result, len(tc.exp))
for _, expStage := range tc.exp {
ok := false
for _, resultStage := range result {
if resultStage == expStage {
ok = true
break
}
}
assert.True(t, ok)
}
}
}
|
@@ -30,6 +30,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission/initializer:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission/metrics:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission/plugin/initialization:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission/plugin/webhook/mutating:go_default_library",
|
@@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/admission/initializer"
admissionmetrics "k8s.io/apiserver/pkg/admission/metrics"
"k8s.io/apiserver/pkg/admission/plugin/initialization"
"k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle"
mutatingwebhook "k8s.io/apiserver/pkg/admission/plugin/webhook/mutating"
@@ -109,12 +110,12 @@ func (a *AdmissionOptions) ApplyTo(
pluginInitializers = append(pluginInitializers, genericInitializer)
initializersChain = append(initializersChain, pluginInitializers...)

admissionChain, err := a.Plugins.NewFromPlugins(pluginNames, pluginsConfigProvider, initializersChain)
admissionChain, err := a.Plugins.NewFromPlugins(pluginNames, pluginsConfigProvider, initializersChain, admissionmetrics.WithControllerMetrics)
if err != nil {
return err
}

c.AdmissionControl = admissionChain
c.AdmissionControl = admissionmetrics.WithStepMetrics(admissionChain)
return nil
}

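Both changed lines apply a decorator: the assembled admission chain is wrapped so each call is also observed as a metric. A generic, self-contained sketch of that pattern with hypothetical types (not the apiserver's real admission or metrics API):

package main

import "fmt"

// admitter is a stand-in for an admission plugin or chain (hypothetical).
type admitter interface {
	Admit(resource string) error
}

type admitterFunc func(string) error

func (f admitterFunc) Admit(resource string) error { return f(resource) }

// withMetrics wraps any admitter so every call is reported to observe before returning.
func withMetrics(inner admitter, observe func(resource string, err error)) admitter {
	return admitterFunc(func(resource string) error {
		err := inner.Admit(resource)
		observe(resource, err)
		return err
	})
}

func main() {
	calls := 0
	base := admitterFunc(func(string) error { return nil })
	wrapped := withMetrics(base, func(string, error) { calls++ })
	_ = wrapped.Admit("pods")
	fmt.Println("observed admissions:", calls) // 1
}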
|
@@ -834,6 +834,10 @@
"ImportPath": "k8s.io/apiserver/pkg/admission/initializer",
"Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
},
{
"ImportPath": "k8s.io/apiserver/pkg/admission/metrics",
"Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
},
{
"ImportPath": "k8s.io/apiserver/pkg/admission/plugin/initialization",
"Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
|
@@ -830,6 +830,10 @@
"ImportPath": "k8s.io/apiserver/pkg/admission/initializer",
"Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
},
{
"ImportPath": "k8s.io/apiserver/pkg/admission/metrics",
"Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
},
{
"ImportPath": "k8s.io/apiserver/pkg/admission/plugin/initialization",
"Rev": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
|
@@ -97,8 +97,9 @@ func setupProviderConfig() error {

cloudConfig.Provider = gceCloud

// Arbitrarily pick one of the zones we have nodes in
if cloudConfig.Zone == "" && framework.TestContext.CloudConfig.MultiZone {
zones, err := gceCloud.GetAllZones()
zones, err := gceCloud.GetAllZonesFromCloudProvider()
if err != nil {
return err
}
|
@@ -14,7 +14,9 @@ go_library(
"//test/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/google.golang.org/api/compute/v1:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
|
@@ -19,10 +19,13 @@ package multicluster
import (
	"fmt"
	"math"
	"strconv"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	compute "google.golang.org/api/compute/v1"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/intstr"

@@ -61,8 +64,128 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
	It("should schedule pods in the same zones as statically provisioned PVs [sig-storage]", func() {
		PodsUseStaticPVsOrFail(f, (2*zoneCount)+1, image)
	})

	It("should only be allowed to provision PDs in zones where nodes exist", func() {
		OnlyAllowNodeZones(f, zoneCount, image)
	})
})

// OnlyAllowNodeZones tests that GetAllCurrentZones returns only zones with Nodes
func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) {
	gceCloud, err := framework.GetGCECloud()
	Expect(err).NotTo(HaveOccurred())

	// Get all the zones that the nodes are in
	expectedZones, err := gceCloud.GetAllZonesFromCloudProvider()
	Expect(err).NotTo(HaveOccurred())
	framework.Logf("Expected zones: %v\n", expectedZones)

	// Get all the zones in this current region
	region := gceCloud.Region()
	allZonesInRegion, err := gceCloud.ListZonesInRegion(region)
	Expect(err).NotTo(HaveOccurred())

	var extraZone string
	for _, zone := range allZonesInRegion {
		if !expectedZones.Has(zone.Name) {
			extraZone = zone.Name
			break
		}
	}
	Expect(extraZone).NotTo(Equal(""), fmt.Sprintf("No extra zones available in region %s", region))

	By(fmt.Sprintf("starting a compute instance in unused zone: %v\n", extraZone))
	project := framework.TestContext.CloudConfig.ProjectID
	zone := extraZone
	myuuid := string(uuid.NewUUID())
	name := "compute-" + myuuid
	imageURL := "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20140606"

	rb := &compute.Instance{
		MachineType: "zones/" + zone + "/machineTypes/f1-micro",
		Disks: []*compute.AttachedDisk{
			{
				AutoDelete: true,
				Boot:       true,
				Type:       "PERSISTENT",
				InitializeParams: &compute.AttachedDiskInitializeParams{
					DiskName:    "my-root-pd-" + myuuid,
					SourceImage: imageURL,
				},
			},
		},
		NetworkInterfaces: []*compute.NetworkInterface{
			{
				AccessConfigs: []*compute.AccessConfig{
					{
						Type: "ONE_TO_ONE_NAT",
						Name: "External NAT",
					},
				},
				Network: "/global/networks/default",
			},
		},
		Name: name,
	}

	err = gceCloud.InsertInstance(project, zone, rb)
	Expect(err).NotTo(HaveOccurred())

	defer func() {
		// Teardown of the compute instance
		framework.Logf("Deleting compute resource: %v", name)
		resp, err := gceCloud.DeleteInstance(project, zone, name)
		Expect(err).NotTo(HaveOccurred())
		framework.Logf("Compute deletion response: %v\n", resp)
	}()

	By("Creating zoneCount+1 PVCs and making sure PDs are only provisioned in zones with nodes")
	// Create some (zoneCount+1) PVCs with names of form "pvc-x" where x is 1...zoneCount+1
	// This will exploit ChooseZoneForVolume in pkg/volume/util.go to provision them in all the zones it "sees"
	var pvcList []*v1.PersistentVolumeClaim
	c := f.ClientSet
	ns := f.Namespace.Name

	for index := 1; index <= zoneCount+1; index++ {
		pvc := newNamedDefaultClaim(ns, index)
		pvc, err = framework.CreatePVC(c, ns, pvc)
		Expect(err).NotTo(HaveOccurred())
		pvcList = append(pvcList, pvc)

		// Defer the cleanup
		defer func() {
			framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
			err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil)
			if err != nil {
				framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
			}
		}()
	}

	// Wait for all claims bound
	for _, claim := range pvcList {
		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
		Expect(err).NotTo(HaveOccurred())
	}

	pvZones := sets.NewString()
	By("Checking that PDs have been provisioned in only the expected zones")
	for _, claim := range pvcList {
		// Get a new copy of the claim to have all fields populated
		claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred())

		// Get the related PV
		pv, err := c.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred())

		pvZone, ok := pv.ObjectMeta.Labels[kubeletapis.LabelZoneFailureDomain]
		Expect(ok).To(BeTrue(), "PV has no LabelZone to be found")
		pvZones.Insert(pvZone)
	}
	Expect(pvZones.Equal(expectedZones)).To(BeTrue(), fmt.Sprintf("PDs provisioned in unwanted zones. We want zones: %v, got: %v", expectedZones, pvZones))
}

// Check that the pods comprising a service get spread evenly across available zones
func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
	// First create the service

@@ -320,3 +443,24 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
		Expect(err).NotTo(HaveOccurred())
	}
}

func newNamedDefaultClaim(ns string, index int) *v1.PersistentVolumeClaim {
	claim := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pvc-" + strconv.Itoa(index),
			Namespace: ns,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
				},
			},
		},
	}

	return &claim
}

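Editor's note: the new test above boils down to a set comparison — the zones that received persistent disks must exactly match the set of zones that contain nodes, even though an extra zone exists in the region. The sketch below shows that comparison in isolation with a plain map-backed string set; names such as zonesEqual are illustrative only and not part of the commit.

package main

import "fmt"

// newSet builds a simple string set from its arguments.
func newSet(items ...string) map[string]bool {
	s := make(map[string]bool, len(items))
	for _, it := range items {
		s[it] = true
	}
	return s
}

// zonesEqual reports whether two zone sets contain exactly the same members.
func zonesEqual(a, b map[string]bool) bool {
	if len(a) != len(b) {
		return false
	}
	for zone := range a {
		if !b[zone] {
			return false
		}
	}
	return true
}

func main() {
	nodeZones := newSet("us-central1-a", "us-central1-b")
	pvZones := newSet("us-central1-a", "us-central1-b")
	fmt.Println("PDs only in node zones:", zonesEqual(nodeZones, pvZones)) // true
}
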
@@ -495,8 +495,8 @@ var _ = SIGDescribe("Dynamic Provisioning", func() {
|
||||
gceCloud, err := framework.GetGCECloud()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Get all k8s managed zones
|
||||
managedZones, err = gceCloud.GetAllZones()
|
||||
// Get all k8s managed zones (same as zones with nodes in them for test)
|
||||
managedZones, err = gceCloud.GetAllZonesFromCloudProvider()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Get a list of all zones in the project
|
||||
|
@@ -119,42 +119,6 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru
	})
})

// LocalStorageAllocatableEviction tests that the node responds to node disk pressure by evicting only responsible pods.
// Node disk pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved.
var _ = framework.KubeDescribe("LocalStorageAllocatableEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
	f := framework.NewDefaultFramework("localstorageallocatable-eviction-test")
	pressureTimeout := 10 * time.Minute
	expectedNodeCondition := v1.NodeDiskPressure
	Context(fmt.Sprintf(testContextFmt, expectedNodeCondition), func() {
		// Set up --kube-reserved for scratch storage
		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
			diskConsumed := uint64(200000000) // At least 200 Mb for pods to consume
			summary := eventuallyGetSummary()
			availableBytes := *(summary.Node.Fs.AvailableBytes)
			initialConfig.KubeReserved = map[string]string{
				string(v1.ResourceEphemeralStorage): fmt.Sprintf("%d", availableBytes-diskConsumed),
			}
			initialConfig.EnforceNodeAllocatable = []string{cm.NodeAllocatableEnforcementKey}
			initialConfig.CgroupsPerQOS = true
			initialConfig.FeatureGates[string(features.LocalStorageCapacityIsolation)] = true
			// set evictionHard to be very small, so that only the allocatable eviction threshold triggers
			initialConfig.EvictionHard = map[string]string{"nodefs.available": "1"}
			initialConfig.EvictionMinimumReclaim = map[string]string{}
			framework.Logf("KubeReserved: %+v", initialConfig.KubeReserved)
		})
		runEvictionTest(f, pressureTimeout, expectedNodeCondition, logDiskMetrics, []podEvictSpec{
			{
				evictionPriority: 1,
				pod:              diskConsumingPod("container-disk-hog", 10000, nil, v1.ResourceRequirements{}),
			},
			{
				evictionPriority: 0,
				pod:              innocentPod(),
			},
		})
	})
})

// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
// Disk pressure is induced by running pods which consume disk space.
var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {