diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go b/staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go index 13766b73cda..3a014f49794 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go @@ -53,7 +53,8 @@ type AnnotateOptions struct { // Common user flags overwrite bool local bool - dryrun bool + dryRunStrategy cmdutil.DryRunStrategy + dryRunVerifier *resource.DryRunVerifier all bool resourceVersion string selector string @@ -164,11 +165,21 @@ func (o *AnnotateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ } o.outputFormat = cmdutil.GetFlagString(cmd, "output") - o.dryrun = cmdutil.GetClientSideDryRun(cmd) - - if o.dryrun { - o.PrintFlags.Complete("%s (dry run)") + o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.dryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -266,12 +277,18 @@ func (o AnnotateOptions) RunAnnotate() error { var outputObj runtime.Object obj := info.Object - if o.dryrun || o.local { + if o.dryRunStrategy == cmdutil.DryRunClient || o.local { if err := o.updateAnnotations(obj); err != nil { return err } outputObj = obj } else { + mapping := info.ResourceMapping() + if o.dryRunStrategy == cmdutil.DryRunServer { + if err := o.dryRunVerifier.HasSupport(mapping.GroupVersionKind); err != nil { + return err + } + } name, namespace := info.Name, info.Namespace if len(o.resourceVersion) != 0 { @@ -303,12 +320,13 @@ func (o AnnotateOptions) RunAnnotate() error { klog.V(2).Infof("couldn't compute patch: %v", err) } - mapping := info.ResourceMapping() client, err := o.unstructuredClientForMapping(mapping) if err != nil { return err } - helper := resource.NewHelper(client, mapping) + helper := resource. + NewHelper(client, mapping). 
+ DryRun(o.dryRunStrategy == cmdutil.DryRunServer) if createdPatch { outputObj, err = helper.Patch(namespace, name, types.MergePatchType, patchBytes, nil) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/BUILD b/staging/src/k8s.io/kubectl/pkg/cmd/apply/BUILD index bcf72e692e1..6f9d4dfe7d8 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/BUILD +++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/BUILD @@ -30,7 +30,6 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/resource:go_default_library", - "//staging/src/k8s.io/client-go/discovery:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/kubectl/pkg/cmd/delete:go_default_library", "//staging/src/k8s.io/kubectl/pkg/cmd/util:go_default_library", diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go index 0deb5a8e724..82d00b01345 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go @@ -32,7 +32,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/printers" "k8s.io/cli-runtime/pkg/resource" - "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/klog" "k8s.io/kubectl/pkg/cmd/delete" @@ -60,8 +59,8 @@ type ApplyOptions struct { ForceConflicts bool FieldManager string Selector string - DryRun bool - ServerDryRun bool + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.DryRunVerifier Prune bool PruneResources []pruneResource cmdBaseName string @@ -70,12 +69,11 @@ type ApplyOptions struct { OpenAPIPatch bool PruneWhitelist []string - Validator validation.Schema - Builder *resource.Builder - Mapper meta.RESTMapper - DynamicClient dynamic.Interface - DiscoveryClient discovery.DiscoveryInterface - OpenAPISchema openapi.Resources + Validator validation.Schema + Builder *resource.Builder + Mapper meta.RESTMapper + DynamicClient dynamic.Interface + OpenAPISchema openapi.Resources Namespace string EnforceNamespace bool @@ -192,7 +190,7 @@ func NewCmdApply(baseName string, f cmdutil.Factory, ioStreams genericclioptions cmd.Flags().BoolVar(&o.All, "all", o.All, "Select all resources in the namespace of the specified resource types.") cmd.Flags().StringArrayVar(&o.PruneWhitelist, "prune-whitelist", o.PruneWhitelist, "Overwrite the default whitelist with <group/version/kind> for --prune") cmd.Flags().BoolVar(&o.OpenAPIPatch, "openapi-patch", o.OpenAPIPatch, "If true, use openapi to calculate diff when the openapi presents and the resource can be found in the openapi spec. 
Otherwise, fall back to use baked-in types.") - cmd.Flags().BoolVar(&o.ServerDryRun, "server-dry-run", o.ServerDryRun, "If true, request will be sent to server with dry-run flag, which means the modifications won't be persisted.") + cmd.Flags().Bool("server-dry-run", false, "If true, request will be sent to server with dry-run flag, which means the modifications won't be persisted.") cmd.Flags().MarkDeprecated("server-dry-run", "--server-dry-run is deprecated and can be replaced with --dry-run=server.") cmdutil.AddDryRunFlag(cmd) cmdutil.AddServerSideApplyFlags(cmd) @@ -210,39 +208,42 @@ func (o *ApplyOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { var err error o.ServerSideApply = cmdutil.GetServerSideApplyFlag(cmd) o.ForceConflicts = cmdutil.GetForceConflictsFlag(cmd) - o.FieldManager = cmdutil.GetFieldManagerFlag(cmd) - o.DryRun = cmdutil.GetClientSideDryRun(cmd) + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } o.DynamicClient, err = f.DynamicClient() if err != nil { return err } - - o.DiscoveryClient, err = f.ToDiscoveryClient() + discoveryClient, err := f.ToDiscoveryClient() if err != nil { return err } + o.DryRunVerifier = resource.NewDryRunVerifier(o.DynamicClient, discoveryClient) + o.FieldManager = cmdutil.GetFieldManagerFlag(cmd) if o.ForceConflicts && !o.ServerSideApply { return fmt.Errorf("--force-conflicts only works with --server-side") } - if o.DryRun && o.ServerSideApply { - return fmt.Errorf("--dry-run doesn't work with --server-side (did you mean --server-dry-run instead?)") + if o.DryRunStrategy == cmdutil.DryRunClient && o.ServerSideApply { + return fmt.Errorf("--dry-run=client doesn't work with --server-side (did you mean --dry-run=server instead?)") } - if o.DryRun && o.ServerDryRun { - return fmt.Errorf("--dry-run and --server-dry-run can't be used together") + var deprecatedServerDryRunFlag = cmdutil.GetFlagBool(cmd, "server-dry-run") + if o.DryRunStrategy == cmdutil.DryRunClient && deprecatedServerDryRunFlag { + return fmt.Errorf("--dry-run=client and --server-dry-run can't be used together (did you mean --dry-run=server instead?)") + } + + if o.DryRunStrategy == cmdutil.DryRunNone && deprecatedServerDryRunFlag { + o.DryRunStrategy = cmdutil.DryRunServer } // allow for a success message operation to be specified at print time o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") - } - if o.ServerDryRun { - o.PrintFlags.Complete("%s (server dry run)") - } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) return o.PrintFlags.ToPrinter() } @@ -397,11 +398,11 @@ func (o *ApplyOptions) Run() error { } helper := resource.NewHelper(info.Client, info.Mapping) - if o.ServerDryRun { - if err := resource.VerifyDryRun(info.Mapping.GroupVersionKind, o.DynamicClient, o.DiscoveryClient); err != nil { + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { return err } - helper.DryRun(o.ServerDryRun) + helper.DryRun(true) } obj, err := helper.Patch( info.Namespace, @@ -471,14 +472,14 @@ See http://k8s.io/docs/reference/using-api/api-concepts/#conflicts`, err) return cmdutil.AddSourceToErr("creating", info.Source, err) } - if !o.DryRun { + if o.DryRunStrategy != cmdutil.DryRunClient { // Then create the resource and skip the three-way merge helper := resource.NewHelper(info.Client, info.Mapping) - if 
o.ServerDryRun { - if err := resource.VerifyDryRun(info.Mapping.GroupVersionKind, o.DynamicClient, o.DiscoveryClient); err != nil { + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { return cmdutil.AddSourceToErr("creating", info.Source, err) } - helper.DryRun(o.ServerDryRun) + helper.DryRun(true) } obj, err := helper.Create(info.Namespace, true, info.Object) if err != nil { @@ -509,7 +510,7 @@ See http://k8s.io/docs/reference/using-api/api-concepts/#conflicts`, err) return err } - if !o.DryRun { + if o.DryRunStrategy != cmdutil.DryRunClient { metadata, _ := meta.Accessor(info.Object) annotationMap := metadata.GetAnnotations() if _, ok := annotationMap[corev1.LastAppliedConfigAnnotation]; !ok { diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go index 7c6eefa42fe..f761f80032e 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go @@ -49,7 +49,8 @@ type SetLastAppliedOptions struct { infoList []*resource.Info namespace string enforceNamespace bool - dryRun bool + dryRunStrategy cmdutil.DryRunStrategy + dryRunVerifier *resource.DryRunVerifier shortOutput bool output string patchBufferList []PatchBuffer @@ -118,11 +119,23 @@ func NewCmdApplySetLastApplied(f cmdutil.Factory, ioStreams genericclioptions.IO // Complete populates dry-run and output flag options. func (o *SetLastAppliedOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { - o.dryRun = cmdutil.GetClientSideDryRun(cmd) + var err error + o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.dryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) o.output = cmdutil.GetFlagString(cmd, "output") o.shortOutput = o.output == "name" - var err error o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() if err != nil { return err @@ -130,13 +143,7 @@ func (o *SetLastAppliedOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) o.builder = f.NewBuilder() o.unstructuredClientForMapping = f.UnstructuredClientForMapping - if o.dryRun { - // TODO(juanvallejo): This can be cleaned up even further by creating - // a PrintFlags struct that binds the --dry-run flag, and whose - // ToPrinter method returns a printer that understands how to print - // this success message. - o.PrintFlags.Complete("%s (dry run)") - } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -199,13 +206,20 @@ func (o *SetLastAppliedOptions) RunSetLastApplied() error { info := o.infoList[i] finalObj := info.Object - if !o.dryRun { + if o.dryRunStrategy != cmdutil.DryRunClient { mapping := info.ResourceMapping() client, err := o.unstructuredClientForMapping(mapping) if err != nil { return err } - helper := resource.NewHelper(client, mapping) + if o.dryRunStrategy == cmdutil.DryRunServer { + if err := o.dryRunVerifier.HasSupport(mapping.GroupVersionKind); err != nil { + return err + } + } + helper := resource. + NewHelper(client, mapping). 
+ DryRun(o.dryRunStrategy == cmdutil.DryRunServer) finalObj, err = helper.Patch(info.Namespace, info.Name, patch.PatchType, patch.Patch, nil) if err != nil { return err diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/patcher.go b/staging/src/k8s.io/kubectl/pkg/cmd/apply/patcher.go index c2d44d06256..9d00c982a98 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/patcher.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/patcher.go @@ -81,11 +81,11 @@ func newPatcher(o *ApplyOptions, info *resource.Info) (*Patcher, error) { } helper := resource.NewHelper(info.Client, info.Mapping) - if o.ServerDryRun { - if err := resource.VerifyDryRun(info.Mapping.GroupVersionKind, o.DynamicClient, o.DiscoveryClient); err != nil { + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { return nil, err } - helper.DryRun(o.ServerDryRun) + helper.DryRun(true) } return &Patcher{ Mapping: info.Mapping, @@ -97,7 +97,7 @@ func newPatcher(o *ApplyOptions, info *resource.Info) (*Patcher, error) { Cascade: o.DeleteOptions.Cascade, Timeout: o.DeleteOptions.Timeout, GracePeriod: o.DeleteOptions.GracePeriod, - ServerDryRun: o.ServerDryRun, + ServerDryRun: o.DryRunStrategy == cmdutil.DryRunServer, OpenapiSchema: openapiSchema, Retries: maxPatchRetry, }, nil diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/apply/prune.go b/staging/src/k8s.io/kubectl/pkg/cmd/apply/prune.go index eb118b5046d..86598818a54 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/apply/prune.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/apply/prune.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/cli-runtime/pkg/printers" "k8s.io/client-go/dynamic" + cmdutil "k8s.io/kubectl/pkg/cmd/util" ) type pruner struct { @@ -39,10 +40,9 @@ type pruner struct { labelSelector string fieldSelector string - cascade bool - serverDryRun bool - dryRun bool - gracePeriod int + cascade bool + dryRunStrategy cmdutil.DryRunStrategy + gracePeriod int toPrinter func(string) (printers.ResourcePrinter, error) @@ -58,10 +58,9 @@ func newPruner(o *ApplyOptions) pruner { visitedUids: o.VisitedUids, visitedNamespaces: o.VisitedNamespaces, - cascade: o.DeleteOptions.Cascade, - dryRun: o.DryRun, - serverDryRun: o.ServerDryRun, - gracePeriod: o.DeleteOptions.GracePeriod, + cascade: o.DeleteOptions.Cascade, + dryRunStrategy: o.DryRunStrategy, + gracePeriod: o.DeleteOptions.GracePeriod, toPrinter: o.ToPrinter, @@ -126,7 +125,7 @@ func (p *pruner) prune(namespace string, mapping *meta.RESTMapping) error { continue } name := metadata.GetName() - if !p.dryRun { + if p.dryRunStrategy != cmdutil.DryRunClient { if err := p.delete(namespace, name, mapping); err != nil { return err } @@ -142,7 +141,7 @@ func (p *pruner) prune(namespace string, mapping *meta.RESTMapping) error { } func (p *pruner) delete(namespace, name string, mapping *meta.RESTMapping) error { - return runDelete(namespace, name, mapping, p.dynamicClient, p.cascade, p.gracePeriod, p.serverDryRun) + return runDelete(namespace, name, mapping, p.dynamicClient, p.cascade, p.gracePeriod, p.dryRunStrategy == cmdutil.DryRunServer) } func runDelete(namespace, name string, mapping *meta.RESTMapping, c dynamic.Interface, cascade bool, gracePeriod int, serverDryRun bool) error { diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go b/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go index c9ff127acdf..50d48c7387c 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go +++ 
b/staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go @@ -75,7 +75,8 @@ type AutoscaleOptions struct { args []string enforceNamespace bool namespace string - dryRun bool + dryRunStrategy cmdutil.DryRunStrategy + dryRunVerifier *resource.DryRunVerifier builder *resource.Builder generatorFunc func(string, *meta.RESTMapping) (generate.StructuredGenerator, error) @@ -136,13 +137,21 @@ func NewCmdAutoscale(f cmdutil.Factory, ioStreams genericclioptions.IOStreams) * // Complete verifies command line arguments and loads data from the command environment func (o *AutoscaleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { var err error - o.dryRun = cmdutil.GetClientSideDryRun(cmd) - o.createAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) - o.builder = f.NewBuilder() + o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } discoveryClient, err := f.ToDiscoveryClient() if err != nil { return err } + o.dryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) + o.createAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + o.builder = f.NewBuilder() o.scaleKindResolver = scale.NewDiscoveryScaleKindResolver(discoveryClient) o.args = args o.RecordFlags.Complete(cmd) @@ -183,9 +192,7 @@ func (o *AutoscaleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation - if o.dryRun { - o.PrintFlags.Complete("%s (dry run)") - } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy) return o.PrintFlags.ToPrinter() } @@ -250,7 +257,7 @@ func (o *AutoscaleOptions) Run() error { klog.V(4).Infof("error recording current command: %v", err) } - if o.dryRun { + if o.dryRunStrategy == cmdutil.DryRunClient { count++ printer, err := o.ToPrinter("created") @@ -264,7 +271,14 @@ func (o *AutoscaleOptions) Run() error { return err } - actualHPA, err := o.HPAClient.HorizontalPodAutoscalers(o.namespace).Create(context.TODO(), hpa, metav1.CreateOptions{}) + createOptions := metav1.CreateOptions{} + if o.dryRunStrategy == cmdutil.DryRunServer { + if err := o.dryRunVerifier.HasSupport(hpa.GroupVersionKind()); err != nil { + return err + } + createOptions.DryRun = []string{metav1.DryRunAll} + } + actualHPA, err := o.HPAClient.HorizontalPodAutoscalers(o.namespace).Create(context.TODO(), hpa, createOptions) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/BUILD b/staging/src/k8s.io/kubectl/pkg/cmd/create/BUILD index 1795f2c888a..127d3e47b15 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/BUILD +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/BUILD @@ -96,6 +96,7 @@ go_test( "//staging/src/k8s.io/client-go/rest:go_default_library", "//staging/src/k8s.io/client-go/rest/fake:go_default_library", "//staging/src/k8s.io/kubectl/pkg/cmd/testing:go_default_library", + "//staging/src/k8s.io/kubectl/pkg/cmd/util:go_default_library", "//staging/src/k8s.io/kubectl/pkg/generate/versioned:go_default_library", "//staging/src/k8s.io/kubectl/pkg/scheme:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create.go index fea4f363aa1..2ed639cdaf6 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create.go +++ 
b/staging/src/k8s.io/kubectl/pkg/cmd/create/create.go @@ -50,7 +50,8 @@ type CreateOptions struct { PrintFlags *genericclioptions.PrintFlags RecordFlags *genericclioptions.RecordFlags - DryRun bool + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.DryRunVerifier FilenameOptions resource.FilenameOptions Selector string @@ -191,11 +192,21 @@ func (o *CreateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { return err } - o.DryRun = cmdutil.GetClientSideDryRun(cmd) - - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.DryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) + printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -260,10 +271,20 @@ func (o *CreateOptions) RunCreate(f cmdutil.Factory, cmd *cobra.Command) error { klog.V(4).Infof("error recording current command: %v", err) } - if !o.DryRun { - if err := createAndRefresh(info); err != nil { + if o.DryRunStrategy != cmdutil.DryRunClient { + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + return cmdutil.AddSourceToErr("creating", info.Source, err) + } + } + obj, err := resource. + NewHelper(info.Client, info.Mapping). + DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + Create(info.Namespace, true, info.Object) + if err != nil { return cmdutil.AddSourceToErr("creating", info.Source, err) } + info.Refresh(obj, true) } count++ @@ -297,16 +318,6 @@ func RunEditOnCreate(f cmdutil.Factory, printFlags *genericclioptions.PrintFlags return editOptions.Run() } -// createAndRefresh creates an object from input info and refreshes info with that object -func createAndRefresh(info *resource.Info) error { - obj, err := resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, true, info.Object) - if err != nil { - return err - } - info.Refresh(obj, true) - return nil -} - // NameFromCommandArgs is a utility function for commands that assume the first argument is a resource name func NameFromCommandArgs(cmd *cobra.Command, args []string) (string, error) { argsLen := cmd.ArgsLenAtDash() @@ -328,9 +339,9 @@ type CreateSubcommandOptions struct { Name string // StructuredGenerator is the resource generator for the object being created StructuredGenerator generate.StructuredGenerator - // DryRun is true if the command should be simulated but not run against the server - DryRun bool - CreateAnnotation bool + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.DryRunVerifier + CreateAnnotation bool Namespace string EnforceNamespace bool @@ -360,12 +371,22 @@ func (o *CreateSubcommandOptions) Complete(f cmdutil.Factory, cmd *cobra.Command o.Name = name o.StructuredGenerator = generator - o.DryRun = cmdutil.GetClientSideDryRun(cmd) + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.DryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) - if o.DryRun { - 
o.PrintFlags.Complete("%s (dry run)") - } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -399,7 +420,7 @@ func (o *CreateSubcommandOptions) Run() error { if err != nil { return err } - if !o.DryRun { + if o.DryRunStrategy != cmdutil.DryRunClient { // create subcommands have compiled knowledge of things they create, so type them directly gvks, _, err := scheme.Scheme.ObjectKinds(obj) if err != nil { @@ -423,7 +444,14 @@ func (o *CreateSubcommandOptions) Run() error { if mapping.Scope.Name() == meta.RESTScopeNameRoot { o.Namespace = "" } - actualObject, err := o.DynamicClient.Resource(mapping.Resource).Namespace(o.Namespace).Create(asUnstructured, metav1.CreateOptions{}) + createOptions := metav1.CreateOptions{} + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(mapping.GroupVersionKind); err != nil { + return err + } + createOptions.DryRun = []string{metav1.DryRunAll} + } + actualObject, err := o.DynamicClient.Resource(mapping.Resource).Namespace(o.Namespace).Create(asUnstructured, createOptions) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go index 901af7e61b4..06111b85649 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go @@ -200,8 +200,15 @@ func (c *CreateClusterRoleOptions) RunCreateRole() error { } // Create ClusterRole. - if !c.DryRun { - clusterRole, err = c.Client.ClusterRoles().Create(context.TODO(), clusterRole, metav1.CreateOptions{}) + if c.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if c.DryRunStrategy == cmdutil.DryRunServer { + if err := c.DryRunVerifier.HasSupport(clusterRole.GroupVersionKind()); err != nil { + return err + } + createOptions.DryRun = []string{metav1.DryRunAll} + } + clusterRole, err = c.Client.ClusterRoles().Create(context.TODO(), clusterRole, createOptions) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go index e360a7367aa..f349f0e1e29 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go @@ -41,10 +41,10 @@ var ( cronjobExample = templates.Examples(` # Create a cronjob - kubectl create cronjob my-job --image=busybox + kubectl create cronjob my-job --image=busybox # Create a cronjob with command - kubectl create cronjob my-job --image=busybox -- date + kubectl create cronjob my-job --image=busybox -- date # Create a cronjob with schedule kubectl create cronjob test-job --image=busybox --schedule="*/1 * * * *"`) @@ -61,11 +61,12 @@ type CreateCronJobOptions struct { Command []string Restart string - Namespace string - Client batchv1beta1client.BatchV1beta1Interface - DryRun bool - Builder *resource.Builder - Cmd *cobra.Command + Namespace string + Client batchv1beta1client.BatchV1beta1Interface + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.DryRunVerifier + Builder *resource.Builder + Cmd *cobra.Command genericclioptions.IOStreams } @@ -134,10 +135,20 @@ func (o *CreateCronJobOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, a o.Builder = f.NewBuilder() o.Cmd = cmd - o.DryRun = cmdutil.GetClientSideDryRun(cmd) - if o.DryRun { - 
o.PrintFlags.Complete("%s (dry run)") + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.DryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -163,9 +174,16 @@ func (o *CreateCronJobOptions) Run() error { var cronjob *batchv1beta1.CronJob cronjob = o.createCronJob() - if !o.DryRun { + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(cronjob.GroupVersionKind()); err != nil { + return err + } + createOptions.DryRun = []string{metav1.DryRunAll} + } var err error - cronjob, err = o.Client.CronJobs(o.Namespace).Create(context.TODO(), cronjob, metav1.CreateOptions{}) + cronjob, err = o.Client.CronJobs(o.Namespace).Create(context.TODO(), cronjob, createOptions) if err != nil { return fmt.Errorf("failed to create cronjob: %v", err) } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment_test.go index d107c78e835..3dc1c7c07fc 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment_test.go @@ -28,6 +28,7 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/rest/fake" cmdtesting "k8s.io/kubectl/pkg/cmd/testing" + cmdutil "k8s.io/kubectl/pkg/cmd/util" generateversioned "k8s.io/kubectl/pkg/generate/versioned" "k8s.io/kubectl/pkg/scheme" ) @@ -139,9 +140,9 @@ func TestCreateDeploymentNoImage(t *testing.T) { cmd.Flags().Set("output", "name") options := &DeploymentOpts{ CreateSubcommandOptions: &CreateSubcommandOptions{ - PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), - DryRun: true, - IOStreams: ioStreams, + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + DryRunStrategy: cmdutil.DryRunClient, + IOStreams: ioStreams, }, } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go index 83e640e45fc..a295b6a0976 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go @@ -63,11 +63,12 @@ type CreateJobOptions struct { From string Command []string - Namespace string - Client batchv1client.BatchV1Interface - DryRun bool - Builder *resource.Builder - Cmd *cobra.Command + Namespace string + Client batchv1client.BatchV1Interface + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.DryRunVerifier + Builder *resource.Builder + Cmd *cobra.Command genericclioptions.IOStreams } @@ -133,10 +134,20 @@ func (o *CreateJobOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args o.Builder = f.NewBuilder() o.Cmd = cmd - o.DryRun = cmdutil.GetClientSideDryRun(cmd) - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.DryRunVerifier = 
resource.NewDryRunVerifier(dynamicClient, discoveryClient) + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -191,9 +202,16 @@ func (o *CreateJobOptions) Run() error { job = o.createJobFromCronJob(cronJob) } - if !o.DryRun { + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(job.GroupVersionKind()); err != nil { + return err + } + createOptions.DryRun = []string{metav1.DryRunAll} + } var err error - job, err = o.Client.Jobs(o.Namespace).Create(context.TODO(), job, metav1.CreateOptions{}) + job, err = o.Client.Jobs(o.Namespace).Create(context.TODO(), job, createOptions) if err != nil { return fmt.Errorf("failed to create job: %v", err) } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go index cd07caf31e4..0771b106622 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/resource" clientgorbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/scheme" @@ -126,12 +127,13 @@ type CreateRoleOptions struct { Resources []ResourceOptions ResourceNames []string - DryRun bool - OutputFormat string - Namespace string - Client clientgorbacv1.RbacV1Interface - Mapper meta.RESTMapper - PrintObj func(obj runtime.Object) error + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.DryRunVerifier + OutputFormat string + Namespace string + Client clientgorbacv1.RbacV1Interface + Mapper meta.RESTMapper + PrintObj func(obj runtime.Object) error genericclioptions.IOStreams } @@ -236,12 +238,22 @@ func (o *CreateRoleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args return err } - o.DryRun = cmdutil.GetClientSideDryRun(cmd) + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.DryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) o.OutputFormat = cmdutil.GetFlagString(cmd, "output") - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") - } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -341,8 +353,15 @@ func (o *CreateRoleOptions) RunCreateRole() error { role.Rules = rules // Create role. 
- if !o.DryRun { - role, err = o.Client.Roles(o.Namespace).Create(context.TODO(), role, metav1.CreateOptions{}) + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(role.GroupVersionKind()); err != nil { + return err + } + createOptions.DryRun = []string{metav1.DryRunAll} + } + role, err = o.Client.Roles(o.Namespace).Create(context.TODO(), role, createOptions) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go index 5712fff5a93..04a7062e238 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go @@ -211,7 +211,19 @@ func (o *DrainCmdOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ return cmdutil.UsageErrorf(cmd, "error: cannot specify both a node name and a --selector option") } - o.drainer.DryRun = cmdutil.GetClientSideDryRun(cmd) + o.drainer.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.drainer.DryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) if o.drainer.Client, err = f.KubernetesClientSet(); err != nil { return err @@ -232,9 +244,7 @@ func (o *DrainCmdOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ o.ToPrinter = func(operation string) (printers.ResourcePrinterFunc, error) { o.PrintFlags.NamePrintFlags.Operation = operation - if o.drainer.DryRun { - o.PrintFlags.Complete("%s (dry run)") - } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.drainer.DryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { @@ -325,7 +335,7 @@ func (o *DrainCmdOptions) deleteOrEvictPodsSimple(nodeInfo *resource.Info) error if warnings := list.Warnings(); warnings != "" { fmt.Fprintf(o.ErrOut, "WARNING: %s\n", warnings) } - if o.drainer.DryRun { + if o.drainer.DryRunStrategy == cmdutil.DryRunClient { for _, pod := range list.Pods() { fmt.Fprintf(o.Out, "evicting pod %s/%s (dry run)\n", pod.Namespace, pod.Name) } @@ -381,8 +391,14 @@ func (o *DrainCmdOptions) RunCordonOrUncordon(desired bool) error { } printObj(nodeInfo.Object, o.Out) } else { - if !o.drainer.DryRun { - err, patchErr := c.PatchOrReplace(o.drainer.Client) + if o.drainer.DryRunStrategy != cmdutil.DryRunClient { + if o.drainer.DryRunStrategy == cmdutil.DryRunServer { + if err := o.drainer.DryRunVerifier.HasSupport(gvk); err != nil { + printError(err) + continue + } + } + err, patchErr := c.PatchOrReplace(o.drainer.Client, o.drainer.DryRunStrategy == cmdutil.DryRunServer) if patchErr != nil { printError(patchErr) } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/expose/BUILD b/staging/src/k8s.io/kubectl/pkg/cmd/expose/BUILD index 43906da8567..d02875b038c 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/expose/BUILD +++ b/staging/src/k8s.io/kubectl/pkg/cmd/expose/BUILD @@ -8,7 +8,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -16,7 +15,6 @@ go_library( "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/printers:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/resource:go_default_library", - "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/kubectl/pkg/cmd/util:go_default_library", "//staging/src/k8s.io/kubectl/pkg/generate:go_default_library", "//staging/src/k8s.io/kubectl/pkg/generate/versioned:go_default_library", diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go b/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go index 942d8b0f8d9..a5bb4787448 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go @@ -24,7 +24,6 @@ import ( "k8s.io/klog" "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme" "k8s.io/apimachinery/pkg/runtime" @@ -32,7 +31,6 @@ import ( "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/printers" "k8s.io/cli-runtime/pkg/resource" - "k8s.io/client-go/dynamic" cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/generate" generateversioned "k8s.io/kubectl/pkg/generate/versioned" @@ -89,7 +87,8 @@ type ExposeServiceOptions struct { PrintFlags *genericclioptions.PrintFlags PrintObj printers.ResourcePrinterFunc - DryRun bool + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.DryRunVerifier EnforceNamespace bool Generators func(string) map[string]generate.Generator @@ -101,8 +100,8 @@ type ExposeServiceOptions struct { Namespace string Mapper meta.RESTMapper - DynamicClient dynamic.Interface - Builder *resource.Builder + Builder *resource.Builder + ClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) Recorder genericclioptions.Recorder genericclioptions.IOStreams @@ -167,11 +166,22 @@ func NewCmdExposeService(f cmdutil.Factory, streams genericclioptions.IOStreams) } func (o *ExposeServiceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { - o.DryRun = cmdutil.GetClientSideDryRun(cmd) - - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") + var err error + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.DryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -184,13 +194,9 @@ func (o *ExposeServiceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) e return err } - o.DynamicClient, err = f.DynamicClient() - if err != nil { - return err - } - o.Generators = generateversioned.GeneratorFn o.Builder = f.NewBuilder() + o.ClientForMapping = f.ClientForMapping o.CanBeExposed = polymorphichelpers.CanBeExposedFn o.MapBasedSelectorForObject = polymorphichelpers.MapBasedSelectorForObjectFn o.ProtocolsForObject = polymorphichelpers.ProtocolsForObjectFn @@ -325,7 +331,7 @@ func (o *ExposeServiceOptions) RunExpose(cmd *cobra.Command, args []string) erro klog.V(4).Infof("error recording current command: %v", err) } - if o.DryRun { + if o.DryRunStrategy == 
cmdutil.DryRunClient { return o.PrintObj(object, o.Out) } if err := util.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), object, scheme.DefaultJSONEncoder()); err != nil { @@ -344,8 +350,20 @@ func (o *ExposeServiceOptions) RunExpose(cmd *cobra.Command, args []string) erro if err != nil { return err } + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(objMapping.GroupVersionKind); err != nil { + return err + } + } // Serialize the object with the annotation applied. - actualObject, err := o.DynamicClient.Resource(objMapping.Resource).Namespace(o.Namespace).Create(asUnstructured, metav1.CreateOptions{}) + client, err := o.ClientForMapping(objMapping) + if err != nil { + return err + } + actualObject, err := resource. + NewHelper(client, objMapping). + DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + Create(o.Namespace, false, asUnstructured) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose_test.go index e75260b57b2..46a5b02bcb7 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose_test.go @@ -153,7 +153,7 @@ func TestRunExposeService(t *testing.T) { Selector: map[string]string{"app": "go"}, }, }, - flags: map[string]string{"selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test", "type": "LoadBalancer", "dry-run": "true"}, + flags: map[string]string{"selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test", "type": "LoadBalancer", "dry-run": "client"}, output: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "", Labels: map[string]string{"svc": "test"}}, Spec: corev1.ServiceSpec{ @@ -184,7 +184,7 @@ func TestRunExposeService(t *testing.T) { Selector: map[string]string{"app": "go"}, }, }, - flags: map[string]string{"selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test", "type": "LoadBalancer", "session-affinity": "ClientIP", "dry-run": "true"}, + flags: map[string]string{"selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test", "type": "LoadBalancer", "session-affinity": "ClientIP", "dry-run": "client"}, output: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "", Labels: map[string]string{"svc": "test"}}, Spec: corev1.ServiceSpec{ @@ -216,7 +216,7 @@ func TestRunExposeService(t *testing.T) { Selector: map[string]string{"app": "go"}, }, }, - flags: map[string]string{"selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test", "cluster-ip": "10.10.10.10", "dry-run": "true"}, + flags: map[string]string{"selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test", "cluster-ip": "10.10.10.10", "dry-run": "client"}, output: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "", Labels: map[string]string{"svc": "test"}}, Spec: corev1.ServiceSpec{ @@ -248,7 +248,7 @@ func TestRunExposeService(t *testing.T) { Selector: map[string]string{"app": "go"}, }, }, - flags: map[string]string{"selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test", "cluster-ip": "None", "dry-run": "true"}, + flags: map[string]string{"selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test", "cluster-ip": "None", "dry-run": 
"client"}, output: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "", Labels: map[string]string{"svc": "test"}}, Spec: corev1.ServiceSpec{ @@ -280,7 +280,7 @@ func TestRunExposeService(t *testing.T) { Selector: map[string]string{"app": "go"}, }, }, - flags: map[string]string{"selector": "func=stream", "name": "foo", "labels": "svc=test", "cluster-ip": "None", "dry-run": "true"}, + flags: map[string]string{"selector": "func=stream", "name": "foo", "labels": "svc=test", "cluster-ip": "None", "dry-run": "client"}, output: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "", Labels: map[string]string{"svc": "test"}}, Spec: corev1.ServiceSpec{ @@ -306,7 +306,7 @@ func TestRunExposeService(t *testing.T) { Selector: map[string]string{"app": "go"}, }, }, - flags: map[string]string{"filename": "../../../testdata/redis-master-service.yaml", "selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test", "dry-run": "true"}, + flags: map[string]string{"filename": "../../../testdata/redis-master-service.yaml", "selector": "func=stream", "protocol": "UDP", "port": "14", "name": "foo", "labels": "svc=test", "dry-run": "client"}, output: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{"svc": "test"}}, Spec: corev1.ServiceSpec{ @@ -335,7 +335,7 @@ func TestRunExposeService(t *testing.T) { }, flags: map[string]string{"selector": "svc=frompod", "port": "90", "labels": "svc=frompod", "generator": "service/v2"}, output: &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{Name: "a-name-that-is-toooo-big-for-a-service-because-it-can-only-handle-63-characters", Namespace: "", Labels: map[string]string{"svc": "frompod"}}, + ObjectMeta: metav1.ObjectMeta{Name: "a-name-that-is-toooo-big-for-a-service-because-it-can-only-handle-63-characters"[:63], Namespace: "", Labels: map[string]string{"svc": "frompod"}}, Spec: corev1.ServiceSpec{ Ports: []corev1.ServicePort{ { @@ -375,7 +375,7 @@ func TestRunExposeService(t *testing.T) { }, }, }, - flags: map[string]string{"selector": "svc=fromfoo", "generator": "service/v2", "name": "fromfoo", "dry-run": "true"}, + flags: map[string]string{"selector": "svc=fromfoo", "generator": "service/v2", "name": "fromfoo", "dry-run": "client"}, output: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "fromfoo", Namespace: "", Labels: map[string]string{"svc": "multiport"}}, Spec: corev1.ServiceSpec{ @@ -433,7 +433,7 @@ func TestRunExposeService(t *testing.T) { }, }, }, - flags: map[string]string{"selector": "svc=fromfoo", "generator": "service/v2", "name": "fromfoo", "dry-run": "true"}, + flags: map[string]string{"selector": "svc=fromfoo", "generator": "service/v2", "name": "fromfoo", "dry-run": "client"}, output: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "fromfoo", Namespace: "", Labels: map[string]string{"svc": "multiport"}}, Spec: corev1.ServiceSpec{ @@ -544,7 +544,7 @@ func TestRunExposeService(t *testing.T) { Selector: map[string]string{"app": "go"}, }, }, - flags: map[string]string{"selector": "func=stream", "protocol": "SCTP", "port": "14", "name": "foo", "labels": "svc=test", "cluster-ip": "10.10.10.10", "dry-run": "true"}, + flags: map[string]string{"selector": "func=stream", "protocol": "SCTP", "port": "14", "name": "foo", "labels": "svc=test", "cluster-ip": "10.10.10.10", "dry-run": "client"}, output: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "", Labels: map[string]string{"svc": "test"}}, Spec: corev1.ServiceSpec{ @@ -576,7 
+576,7 @@ func TestRunExposeService(t *testing.T) { Selector: map[string]string{"app": "go"}, }, }, - flags: map[string]string{"selector": "func=stream", "protocol": "SCTP", "port": "14", "name": "foo", "labels": "svc=test", "cluster-ip": "None", "dry-run": "true"}, + flags: map[string]string{"selector": "func=stream", "protocol": "SCTP", "port": "14", "name": "foo", "labels": "svc=test", "cluster-ip": "None", "dry-run": "client"}, output: &corev1.Service{ ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "", Labels: map[string]string{"svc": "test"}}, Spec: corev1.ServiceSpec{ @@ -611,6 +611,8 @@ func TestRunExposeService(t *testing.T) { switch p, m := req.URL.Path, req.Method; { case p == test.calls[m] && m == "GET": return &http.Response{StatusCode: test.status, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, test.input)}, nil + case p == test.calls[m] && m == "POST": + return &http.Response{StatusCode: test.status, Header: cmdtesting.DefaultHeader(), Body: cmdtesting.ObjBody(codec, test.output)}, nil default: t.Fatalf("unexpected request: %#v\n%#v", req.URL, req) return nil, nil diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/label/label.go b/staging/src/k8s.io/kubectl/pkg/cmd/label/label.go index 883a5fb1f28..5af6b5245c8 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/label/label.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/label/label.go @@ -56,7 +56,7 @@ type LabelOptions struct { overwrite bool list bool local bool - dryrun bool + dryRunStrategy cmdutil.DryRunStrategy all bool resourceVersion string selector string @@ -74,6 +74,7 @@ type LabelOptions struct { enforceNamespace bool builder *resource.Builder unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) + dryRunVerifier *resource.DryRunVerifier // Common shared fields genericclioptions.IOStreams @@ -164,14 +165,23 @@ func (o *LabelOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st } o.outputFormat = cmdutil.GetFlagString(cmd, "output") - o.dryrun = cmdutil.GetClientSideDryRun(cmd) + o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.dryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy) o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation - if o.dryrun { - o.PrintFlags.Complete("%s (dry run)") - } - return o.PrintFlags.ToPrinter() } @@ -267,7 +277,7 @@ func (o *LabelOptions) RunLabel() error { if err != nil { return err } - if o.dryrun || o.local || o.list { + if o.dryRunStrategy == cmdutil.DryRunClient || o.local || o.list { err = labelFunc(obj, o.overwrite, o.resourceVersion, o.newLabels, o.removeLabels) if err != nil { return err @@ -311,11 +321,17 @@ func (o *LabelOptions) RunLabel() error { } mapping := info.ResourceMapping() + if o.dryRunStrategy == cmdutil.DryRunServer { + if err := o.dryRunVerifier.HasSupport(mapping.GroupVersionKind); err != nil { + return err + } + } client, err := o.unstructuredClientForMapping(mapping) if err != nil { return err } - helper := resource.NewHelper(client, mapping) + helper := resource.NewHelper(client, mapping). 
+ DryRun(o.dryRunStrategy == cmdutil.DryRunServer) if createdPatch { outputObj, err = helper.Patch(namespace, name, types.MergePatchType, patchBytes, nil) diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go b/staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go index 8616a85d3c7..9b5fd9f5c7b 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go @@ -60,7 +60,8 @@ type PatchOptions struct { namespace string enforceNamespace bool - dryRun bool + dryRunStrategy cmdutil.DryRunStrategy + dryRunVerifier *resource.DryRunVerifier outputFormat string args []string builder *resource.Builder @@ -139,13 +140,14 @@ func (o *PatchOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st } o.outputFormat = cmdutil.GetFlagString(cmd, "output") - o.dryRun = cmdutil.GetClientSideDryRun(cmd) + o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy) o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation - if o.dryRun { - o.PrintFlags.Complete("%s (dry run)") - } return o.PrintFlags.ToPrinter() } @@ -157,6 +159,15 @@ func (o *PatchOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []st o.args = args o.builder = f.NewBuilder() o.unstructuredClientForMapping = f.UnstructuredClientForMapping + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.dryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) return nil } @@ -210,14 +221,21 @@ func (o *PatchOptions) RunPatch() error { count++ name, namespace := info.Name, info.Namespace - if !o.Local && !o.dryRun { + if !o.Local && o.dryRunStrategy != cmdutil.DryRunClient { mapping := info.ResourceMapping() + if o.dryRunStrategy == cmdutil.DryRunServer { + if err := o.dryRunVerifier.HasSupport(mapping.GroupVersionKind); err != nil { + return err + } + } client, err := o.unstructuredClientForMapping(mapping) if err != nil { return err } - helper := resource.NewHelper(client, mapping) + helper := resource. + NewHelper(client, mapping). 
+ DryRun(o.dryRunStrategy == cmdutil.DryRunServer) patchedObj, err := helper.Patch(namespace, name, patchType, patchBytes, nil) if err != nil { return err diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go index 899240fe255..eae7a1ef037 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/rollingupdate/rollingupdate.go @@ -196,7 +196,7 @@ func (o *RollingUpdateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, a if len(args) > 0 { o.OldName = args[0] } - o.DryRun = cmdutil.GetClientSideDryRun(cmd) + o.DryRun = getClientSideDryRun(cmd) o.OutputFormat = cmdutil.GetFlagString(cmd, "output") o.KeepOldName = len(args) == 1 o.ShouldValidate = cmdutil.GetFlagBool(cmd, "validate") @@ -467,3 +467,14 @@ func findNewName(args []string, oldRc *corev1.ReplicationController) string { } return "" } + +func getClientSideDryRun(cmd *cobra.Command) bool { + dryRunStrategy, err := cmdutil.GetDryRunStrategy(cmd) + if err != nil { + klog.Fatalf("error accessing --dry-run flag for command %s: %v", cmd.Name(), err) + } + if dryRunStrategy == cmdutil.DryRunServer { + klog.Fatalf("--dry-run=server for command %s is not supported yet", cmd.Name()) + } + return dryRunStrategy == cmdutil.DryRunClient +} diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go b/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go index 802d6e161c4..6b42a69a052 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go @@ -39,7 +39,8 @@ type UndoOptions struct { Builder func() *resource.Builder ToRevision int64 - DryRun bool + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.DryRunVerifier Resources []string Namespace string EnforceNamespace bool @@ -104,18 +105,28 @@ func NewCmdRolloutUndo(f cmdutil.Factory, streams genericclioptions.IOStreams) * // Complete completes al the required options func (o *UndoOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { o.Resources = args - o.DryRun = cmdutil.GetClientSideDryRun(cmd) - var err error + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.DryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) + if o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace(); err != nil { return err } o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { o.PrintFlags.NamePrintFlags.Operation = operation - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") - } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) return o.PrintFlags.ToPrinter() } @@ -156,7 +167,12 @@ func (o *UndoOptions) RunUndo() error { return err } - result, err := rollbacker.Rollback(info.Object, nil, o.ToRevision, o.DryRun) + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + return err + } + } + result, err := rollbacker.Rollback(info.Object, nil, o.ToRevision, o.DryRunStrategy) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/run/BUILD b/staging/src/k8s.io/kubectl/pkg/cmd/run/BUILD index 6a70f3ca3ad..0e6f5f5b4b9 100644 --- 
a/staging/src/k8s.io/kubectl/pkg/cmd/run/BUILD +++ b/staging/src/k8s.io/kubectl/pkg/cmd/run/BUILD @@ -18,7 +18,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/genericclioptions:go_default_library", "//staging/src/k8s.io/cli-runtime/pkg/resource:go_default_library", - "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", "//staging/src/k8s.io/client-go/tools/cache:go_default_library", diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/run/run.go b/staging/src/k8s.io/kubectl/pkg/cmd/run/run.go index 721b21aa0a9..b7a49a4c4db 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/run/run.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/run/run.go @@ -36,7 +36,6 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/cli-runtime/pkg/resource" - "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" @@ -74,7 +73,7 @@ var ( kubectl run hazelcast --image=hazelcast/hazelcast --labels="app=hazelcast,env=prod" # Dry run. Print the corresponding API objects without creating them. - kubectl run nginx --image=nginx --dry-run + kubectl run nginx --image=nginx --dry-run=client # Start a nginx pod, but overload the spec with a partial set of values parsed from JSON. kubectl run nginx --image=nginx --overrides='{ "apiVersion": "v1", "spec": { ... } }' @@ -107,13 +106,12 @@ type RunOptions struct { DeleteFlags *delete.DeleteFlags DeleteOptions *delete.DeleteOptions - DryRun bool + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.DryRunVerifier PrintObj func(runtime.Object) error Recorder genericclioptions.Recorder - DynamicClient dynamic.Interface - ArgsLenAtDash int Attach bool Expose bool @@ -147,7 +145,7 @@ func NewCmdRun(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Co o := NewRunOptions(streams) cmd := &cobra.Command{ - Use: "run NAME --image=image [--env=\"key=value\"] [--port=port] [--dry-run=bool] [--overrides=inline-json] [--command] -- [COMMAND] [args...]", + Use: "run NAME --image=image [--env=\"key=value\"] [--port=port] [--dry-run=server|client] [--overrides=inline-json] [--command] -- [COMMAND] [args...]", DisableFlagsInUseLine: true, Short: i18n.T("Run a particular image on the cluster"), Long: runLong, @@ -211,22 +209,27 @@ func (o *RunOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { return err } - o.DynamicClient, err = f.DynamicClient() + o.ArgsLenAtDash = cmd.ArgsLenAtDash() + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) if err != nil { return err } - - o.ArgsLenAtDash = cmd.ArgsLenAtDash() - o.DryRun = cmdutil.GetClientSideDryRun(cmd) + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.DryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) attachFlag := cmd.Flags().Lookup("attach") if !attachFlag.Changed && o.Interactive { o.Attach = true } - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") - } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -235,7 +238,7 @@ func (o *RunOptions) Complete(f cmdutil.Factory, cmd *cobra.Command) error { return printer.PrintObj(obj, o.Out) } - 
deleteOpts := o.DeleteFlags.ToOptions(o.DynamicClient, o.IOStreams) + deleteOpts := o.DeleteFlags.ToOptions(dynamicClient, o.IOStreams) deleteOpts.IgnoreNotFound = true deleteOpts.WaitForDeletion = false deleteOpts.GracePeriod = -1 @@ -295,7 +298,7 @@ func (o *RunOptions) Run(f cmdutil.Factory, cmd *cobra.Command, args []string) e return cmdutil.UsageErrorf(cmd, "--rm should only be used for attached containers") } - if o.Attach && o.DryRun { + if o.Attach && o.DryRunStrategy != cmdutil.DryRunNone { return cmdutil.UsageErrorf(cmd, "--dry-run can't be used with attached containers options (--attach, --stdin, or --tty)") } @@ -657,7 +660,7 @@ func (o *RunOptions) createGeneratedObject(f cmdutil.Factory, cmd *cobra.Command } actualObj := obj - if !o.DryRun { + if o.DryRunStrategy != cmdutil.DryRunClient { if err := util.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), obj, scheme.DefaultJSONEncoder()); err != nil { return nil, err } @@ -665,7 +668,15 @@ func (o *RunOptions) createGeneratedObject(f cmdutil.Factory, cmd *cobra.Command if err != nil { return nil, err } - actualObj, err = resource.NewHelper(client, mapping).Create(namespace, false, obj) + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(mapping.GroupVersionKind); err != nil { + return nil, err + } + } + actualObj, err = resource. + NewHelper(client, mapping). + DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + Create(namespace, false, obj) if err != nil { return nil, err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go index 042c1012879..63463d438bd 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go @@ -390,7 +390,7 @@ func TestGenerateService(t *testing.T) { addRunFlags(cmd, opts) if !test.expectPOST { - opts.DryRun = true + opts.DryRunStrategy = cmdutil.DryRunClient } if len(test.port) > 0 { @@ -464,7 +464,7 @@ func TestRunValidations(t *testing.T) { flags: map[string]string{ "image": "busybox", "attach": "true", - "dry-run": "true", + "dry-run": "client", }, expectedErr: "can't be used with attached containers options", }, @@ -474,7 +474,7 @@ func TestRunValidations(t *testing.T) { flags: map[string]string{ "image": "busybox", "stdin": "true", - "dry-run": "true", + "dry-run": "client", }, expectedErr: "can't be used with attached containers options", }, @@ -485,7 +485,7 @@ func TestRunValidations(t *testing.T) { "image": "busybox", "tty": "true", "stdin": "true", - "dry-run": "true", + "dry-run": "client", }, expectedErr: "can't be used with attached containers options", }, diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go index c51b2a429f0..1e5af2c63d3 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go @@ -119,7 +119,8 @@ type EnvOptions struct { envArgs []string resources []string output string - dryRun bool + dryRunStrategy cmdutil.DryRunStrategy + dryRunVerifier *resource.DryRunVerifier builder func() *resource.Builder updatePodSpecForObject polymorphichelpers.UpdatePodSpecForObjectFunc namespace string @@ -216,15 +217,22 @@ func (o *EnvOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []stri o.updatePodSpecForObject = polymorphichelpers.UpdatePodSpecForObjectFn o.output = cmdutil.GetFlagString(cmd, "output") - o.dryRun = cmdutil.GetClientSideDryRun(cmd) - - if o.dryRun { 
- // TODO(juanvallejo): This can be cleaned up even further by creating - // a PrintFlags struct that binds the --dry-run flag, and whose - // ToPrinter method returns a printer that understands how to print - // this success message. - o.PrintFlags.Complete("%s (dry run)") + var err error + o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.dryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -484,14 +492,24 @@ func (o *EnvOptions) RunEnv() error { continue } - if o.Local || o.dryRun { + if o.Local || o.dryRunStrategy == cmdutil.DryRunClient { if err := o.PrintObj(info.Object, o.Out); err != nil { allErrs = append(allErrs, err) } continue } - actual, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) + if o.dryRunStrategy == cmdutil.DryRunServer { + if err := o.dryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + allErrs = append(allErrs, err) + continue + } + } + + actual, err := resource. + NewHelper(info.Client, info.Mapping). + DryRun(o.dryRunStrategy == cmdutil.DryRunServer). + Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { allErrs = append(allErrs, fmt.Errorf("failed to patch env update to pod template: %v", err)) continue diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go index 335ded07067..634aebf48d2 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go @@ -22,7 +22,7 @@ import ( "github.com/spf13/cobra" "k8s.io/klog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -44,13 +44,14 @@ type SetImageOptions struct { PrintFlags *genericclioptions.PrintFlags RecordFlags *genericclioptions.RecordFlags - Infos []*resource.Info - Selector string - DryRun bool - All bool - Output string - Local bool - ResolveImage ImageResolver + Infos []*resource.Info + Selector string + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.DryRunVerifier + All bool + Output string + Local bool + ResolveImage ImageResolver PrintObj printers.ResourcePrinterFunc Recorder genericclioptions.Recorder @@ -138,13 +139,23 @@ func (o *SetImageOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args [ } o.UpdatePodSpecForObject = polymorphichelpers.UpdatePodSpecForObjectFn - o.DryRun = cmdutil.GetClientSideDryRun(cmd) + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.DryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) o.Output = cmdutil.GetFlagString(cmd, "output") o.ResolveImage = resolveImageFunc - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") - } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -256,15 
+267,23 @@ func (o *SetImageOptions) Run() error { continue } - if o.Local || o.DryRun { + if o.Local || o.DryRunStrategy == cmdutil.DryRunClient { if err := o.PrintObj(info.Object, o.Out); err != nil { allErrs = append(allErrs, err) } continue } + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + return err + } + } // patch the change - actual, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) + actual, err := resource. + NewHelper(info.Client, info.Mapping). + DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { allErrs = append(allErrs, fmt.Errorf("failed to patch image update to pod template: %v", err)) continue diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go index 26d175264fd..c3ad8524576 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go @@ -22,7 +22,7 @@ import ( "github.com/spf13/cobra" "k8s.io/klog" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -74,7 +74,7 @@ type SetResourcesOptions struct { All bool Local bool - DryRun bool + DryRunStrategy cmdutil.DryRunStrategy PrintObj printers.ResourcePrinterFunc Recorder genericclioptions.Recorder @@ -85,6 +85,7 @@ type SetResourcesOptions struct { UpdatePodSpecForObject polymorphichelpers.UpdatePodSpecForObjectFunc Resources []string + DryRunVerifier *resource.DryRunVerifier genericclioptions.IOStreams } @@ -150,11 +151,21 @@ func (o *SetResourcesOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, ar o.UpdatePodSpecForObject = polymorphichelpers.UpdatePodSpecForObjectFn o.Output = cmdutil.GetFlagString(cmd, "output") - o.DryRun = cmdutil.GetClientSideDryRun(cmd) - - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.DryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -270,16 +281,26 @@ func (o *SetResourcesOptions) Run() error { continue } - if o.Local || o.DryRun { + if o.Local || o.DryRunStrategy == cmdutil.DryRunClient { if err := o.PrintObj(info.Object, o.Out); err != nil { allErrs = append(allErrs, err) } continue } - actual, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + allErrs = append(allErrs, fmt.Errorf("failed to patch resources update to pod template %v", err)) + continue + } + } + + actual, err := resource. + NewHelper(info.Client, info.Mapping). + DryRun(o.DryRunStrategy == cmdutil.DryRunServer). 
+ Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { - allErrs = append(allErrs, fmt.Errorf("failed to patch limit update to pod template %v", err)) + allErrs = append(allErrs, fmt.Errorf("failed to patch resources update to pod template %v", err)) continue } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go index 93fb17aa158..94617421080 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go @@ -44,7 +44,8 @@ type SetSelectorOptions struct { ResourceBuilderFlags *genericclioptions.ResourceBuilderFlags PrintFlags *genericclioptions.PrintFlags RecordFlags *genericclioptions.RecordFlags - dryrun bool + dryRunStrategy cmdutil.DryRunStrategy + dryRunVerifier *resource.DryRunVerifier // set by args resources []string @@ -129,7 +130,19 @@ func (o *SetSelectorOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg return err } - o.dryrun = cmdutil.GetClientSideDryRun(cmd) + o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.dryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) o.resources, o.selector, err = getResourcesAndSelector(args) if err != nil { @@ -137,11 +150,9 @@ func (o *SetSelectorOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, arg } o.ResourceFinder = o.ResourceBuilderFlags.ToBuilder(f, o.resources) - o.WriteToServer = !(*o.ResourceBuilderFlags.Local || o.dryrun) + o.WriteToServer = !(*o.ResourceBuilderFlags.Local || o.dryRunStrategy == cmdutil.DryRunClient) - if o.dryrun { - o.PrintFlags.Complete("%s (dry run)") - } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -204,8 +215,16 @@ func (o *SetSelectorOptions) RunSelector() error { if !o.WriteToServer { return o.PrintObj(info.Object, o.Out) } + if o.dryRunStrategy == cmdutil.DryRunServer { + if err := o.dryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + return err + } + } - actual, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) + actual, err := resource. + NewHelper(info.Client, info.Mapping). + DryRun(o.dryRunStrategy == cmdutil.DryRunServer). 
+ Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { return err } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go index be31b354e7f..d5600e239e0 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go @@ -21,9 +21,9 @@ import ( "fmt" "github.com/spf13/cobra" + v1 "k8s.io/api/core/v1" "k8s.io/klog" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" @@ -52,7 +52,7 @@ var ( kubectl set serviceaccount deployment nginx-deployment serviceaccount1 # Print the result (in yaml format) of updated nginx deployment with serviceaccount from local file, without hitting apiserver - kubectl set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-run -o yaml + kubectl set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-run=client -o yaml `)) ) @@ -62,7 +62,8 @@ type SetServiceAccountOptions struct { RecordFlags *genericclioptions.RecordFlags fileNameOptions resource.FilenameOptions - dryRun bool + dryRunStrategy cmdutil.DryRunStrategy + dryRunVerifier *resource.DryRunVerifier shortOutput bool all bool output string @@ -128,13 +129,23 @@ func (o *SetServiceAccountOptions) Complete(f cmdutil.Factory, cmd *cobra.Comman } o.shortOutput = cmdutil.GetFlagString(cmd, "output") == "name" - o.dryRun = cmdutil.GetClientSideDryRun(cmd) + o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.dryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) o.output = cmdutil.GetFlagString(cmd, "output") o.updatePodSpecForObject = polymorphichelpers.UpdatePodSpecForObjectFn - if o.dryRun { - o.PrintFlags.Complete("%s (dry run)") - } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -195,13 +206,22 @@ func (o *SetServiceAccountOptions) Run() error { patchErrs = append(patchErrs, fmt.Errorf("error: %s %v\n", name, patch.Err)) continue } - if o.local || o.dryRun { + if o.local || o.dryRunStrategy == cmdutil.DryRunClient { if err := o.PrintObj(info.Object, o.Out); err != nil { patchErrs = append(patchErrs, err) } continue } - actual, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) + if o.dryRunStrategy == cmdutil.DryRunServer { + if err := o.dryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + patchErrs = append(patchErrs, err) + continue + } + } + actual, err := resource. + NewHelper(info.Client, info.Mapping). + DryRun(o.dryRunStrategy == cmdutil.DryRunServer). 
+ Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { patchErrs = append(patchErrs, fmt.Errorf("failed to patch ServiceAccountName %v", err)) continue diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go index 30b7486ece0..8499c9743a2 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go @@ -65,7 +65,8 @@ type SubjectOptions struct { ContainerSelector string Output string All bool - DryRun bool + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.DryRunVerifier Local bool Users []string @@ -120,11 +121,22 @@ func NewCmdSubject(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobr // Complete completes all required options func (o *SubjectOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { o.Output = cmdutil.GetFlagString(cmd, "output") - o.DryRun = cmdutil.GetClientSideDryRun(cmd) - - if o.DryRun { - o.PrintFlags.Complete("%s (dry run)") + var err error + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err } + dynamicClient, err := f.DynamicClient() + if err != nil { + return err + } + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return err + } + o.DryRunVerifier = resource.NewDryRunVerifier(dynamicClient, discoveryClient) + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) printer, err := o.PrintFlags.ToPrinter() if err != nil { return err @@ -250,14 +262,23 @@ func (o *SubjectOptions) Run(fn updateSubjects) error { continue } - if o.Local || o.DryRun { + if o.Local || o.DryRunStrategy == cmdutil.DryRunClient { if err := o.PrintObj(info.Object, o.Out); err != nil { allErrs = append(allErrs, err) } continue } - actual, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) + if o.DryRunStrategy == cmdutil.DryRunServer { + if err := o.DryRunVerifier.HasSupport(info.Mapping.GroupVersionKind); err != nil { + allErrs = append(allErrs, err) + continue + } + } + actual, err := resource. + NewHelper(info.Client, info.Mapping). + DryRun(o.DryRunStrategy == cmdutil.DryRunServer). 
+ Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patch.Patch, nil) if err != nil { allErrs = append(allErrs, fmt.Errorf("failed to patch subjects to rolebinding: %v", err)) continue diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go index a3e338430fe..88ee7f39d86 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/util/helpers.go @@ -524,19 +524,6 @@ const ( DryRunServer ) -// TODO(julianvmodesto): remove GetClientSideDryRun once we support -// server-side dry-run in all commands -func GetClientSideDryRun(cmd *cobra.Command) bool { - dryRunStrategy, err := GetDryRunStrategy(cmd) - if err != nil { - klog.Fatalf("error accessing --dry-run flag for command %s: %v", cmd.Name(), err) - } - if dryRunStrategy == DryRunServer { - klog.Fatalf("--dry-run=server for command %s is not supported yet", cmd.Name()) - } - return dryRunStrategy == DryRunClient -} - func GetDryRunStrategy(cmd *cobra.Command) (DryRunStrategy, error) { var dryRunFlag = GetFlagString(cmd, "dry-run") b, err := strconv.ParseBool(dryRunFlag) diff --git a/staging/src/k8s.io/kubectl/pkg/drain/BUILD b/staging/src/k8s.io/kubectl/pkg/drain/BUILD index b0eb04a905e..d55cd837cf3 100644 --- a/staging/src/k8s.io/kubectl/pkg/drain/BUILD +++ b/staging/src/k8s.io/kubectl/pkg/drain/BUILD @@ -27,7 +27,9 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/cli-runtime/pkg/resource:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/kubectl/pkg/cmd/util:go_default_library", ], ) diff --git a/staging/src/k8s.io/kubectl/pkg/drain/cordon.go b/staging/src/k8s.io/kubectl/pkg/drain/cordon.go index f7bb7d4d425..cfe2b8b94e1 100644 --- a/staging/src/k8s.io/kubectl/pkg/drain/cordon.go +++ b/staging/src/k8s.io/kubectl/pkg/drain/cordon.go @@ -72,7 +72,7 @@ func (c *CordonHelper) UpdateIfRequired(desired bool) bool { // updating the given node object; it may return error if the object cannot be encoded as // JSON, or if either patch or update calls fail; it will also return a second error // whenever creating a patch has failed -func (c *CordonHelper) PatchOrReplace(clientset kubernetes.Interface) (error, error) { +func (c *CordonHelper) PatchOrReplace(clientset kubernetes.Interface, serverDryRun bool) (error, error) { client := clientset.CoreV1().Nodes() oldData, err := json.Marshal(c.node) @@ -89,9 +89,17 @@ func (c *CordonHelper) PatchOrReplace(clientset kubernetes.Interface) (error, er patchBytes, patchErr := strategicpatch.CreateTwoWayMergePatch(oldData, newData, c.node) if patchErr == nil { - _, err = client.Patch(context.TODO(), c.node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) + patchOptions := metav1.PatchOptions{} + if serverDryRun { + patchOptions.DryRun = []string{metav1.DryRunAll} + } + _, err = client.Patch(context.TODO(), c.node.Name, types.StrategicMergePatchType, patchBytes, patchOptions) } else { - _, err = client.Update(context.TODO(), c.node, metav1.UpdateOptions{}) + updateOptions := metav1.UpdateOptions{} + if serverDryRun { + updateOptions.DryRun = []string{metav1.DryRunAll} + } + _, err = client.Update(context.TODO(), c.node, updateOptions) } return err, patchErr } diff --git 
a/staging/src/k8s.io/kubectl/pkg/drain/default.go b/staging/src/k8s.io/kubectl/pkg/drain/default.go index ec0351b0fff..3df949f0e8a 100644 --- a/staging/src/k8s.io/kubectl/pkg/drain/default.go +++ b/staging/src/k8s.io/kubectl/pkg/drain/default.go @@ -57,7 +57,7 @@ func RunCordonOrUncordon(drainer *Helper, node *corev1.Node, desired bool) error return nil } - err, patchErr := c.PatchOrReplace(drainer.Client) + err, patchErr := c.PatchOrReplace(drainer.Client, false) if patchErr != nil { return patchErr } diff --git a/staging/src/k8s.io/kubectl/pkg/drain/drain.go b/staging/src/k8s.io/kubectl/pkg/drain/drain.go index fefe0b5a546..1d3819f5765 100644 --- a/staging/src/k8s.io/kubectl/pkg/drain/drain.go +++ b/staging/src/k8s.io/kubectl/pkg/drain/drain.go @@ -31,7 +31,9 @@ import ( "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/kubernetes" + cmdutil "k8s.io/kubectl/pkg/cmd/util" ) const ( @@ -66,8 +68,8 @@ type Helper struct { Out io.Writer ErrOut io.Writer - // TODO(justinsb): unnecessary? - DryRun bool + DryRunStrategy cmdutil.DryRunStrategy + DryRunVerifier *resource.DryRunVerifier // OnPodDeletedOrEvicted is called when a pod is evicted/deleted; for printing progress output OnPodDeletedOrEvicted func(pod *corev1.Pod, usingEviction bool) @@ -125,16 +127,29 @@ func (d *Helper) makeDeleteOptions() *metav1.DeleteOptions { gracePeriodSeconds := int64(d.GracePeriodSeconds) deleteOptions.GracePeriodSeconds = &gracePeriodSeconds } + if d.DryRunStrategy == cmdutil.DryRunServer { + deleteOptions.DryRun = []string{metav1.DryRunAll} + } return deleteOptions } // DeletePod will delete the given pod, or return an error if it couldn't func (d *Helper) DeletePod(pod corev1.Pod) error { + if d.DryRunStrategy == cmdutil.DryRunServer { + if err := d.DryRunVerifier.HasSupport(pod.GroupVersionKind()); err != nil { + return err + } + } return d.Client.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, d.makeDeleteOptions()) } // EvictPod will evict the given pod, or return an error if it couldn't func (d *Helper) EvictPod(pod corev1.Pod, policyGroupVersion string) error { + if d.DryRunStrategy == cmdutil.DryRunServer { + if err := d.DryRunVerifier.HasSupport(pod.GroupVersionKind()); err != nil { + return err + } + } eviction := &policyv1beta1.Eviction{ TypeMeta: metav1.TypeMeta{ APIVersion: policyGroupVersion, @@ -237,7 +252,12 @@ func (d *Helper) evictPods(pods []corev1.Pod, policyGroupVersion string, getPodF for _, pod := range pods { go func(pod corev1.Pod, returnCh chan error) { for { - fmt.Fprintf(d.Out, "evicting pod %s/%s\n", pod.Namespace, pod.Name) + switch d.DryRunStrategy { + case cmdutil.DryRunServer: + fmt.Fprintf(d.Out, "evicting pod %s/%s (server dry run)\n", pod.Namespace, pod.Name) + default: + fmt.Fprintf(d.Out, "evicting pod %s/%s\n", pod.Namespace, pod.Name) + } select { case <-ctx.Done(): // return here or we'll leak a goroutine.
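Note: the drain conversion above replaces the old boolean with two exported fields, so library consumers pick a strategy and supply their own verifier. A minimal sketch of how a caller might wire drain.Helper for server-side dry-run; the package, function, and parameter names below are illustrative assumptions rather than part of this patch, and the three clients are assumed to share one rest.Config.

package drainexample

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/cli-runtime/pkg/resource"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
	"k8s.io/kubectl/pkg/drain"
)

// dryRunDeletePod (hypothetical) drives the new Helper fields.
func dryRunDeletePod(cs kubernetes.Interface, dyn dynamic.Interface, disc discovery.DiscoveryInterface, pod corev1.Pod) error {
	h := &drain.Helper{
		Client:         cs,
		DryRunStrategy: cmdutil.DryRunServer,
		// Consulted by DeletePod/EvictPod before any request is sent.
		DryRunVerifier: resource.NewDryRunVerifier(dyn, disc),
		Out:            os.Stdout,
		ErrOut:         os.Stderr,
	}
	// With DryRunStrategy == DryRunServer, makeDeleteOptions attaches
	// metav1.DryRunAll, so the apiserver validates the delete without
	// persisting anything.
	return h.DeletePod(pod)
}

Keeping the verifier on Helper, rather than checking once in the cobra command, keeps the package usable outside kubectl, which is presumably why RunCordonOrUncordon still passes serverDryRun=false above.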
@@ -259,6 +279,10 @@ func (d *Helper) evictPods(pods []corev1.Pod, policyGroupVersion string, getPodF return } } + if d.DryRunStrategy == cmdutil.DryRunServer { + returnCh <- nil + return + } params := waitForDeleteParams{ ctx: ctx, pods: []corev1.Pod{pod}, diff --git a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/BUILD b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/BUILD index bf54cc9f91b..6e950c73321 100644 --- a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/BUILD +++ b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/BUILD @@ -52,6 +52,7 @@ go_library( "//staging/src/k8s.io/client-go/tools/reference:go_default_library", "//staging/src/k8s.io/client-go/tools/watch:go_default_library", "//staging/src/k8s.io/kubectl/pkg/apps:go_default_library", + "//staging/src/k8s.io/kubectl/pkg/cmd/util:go_default_library", "//staging/src/k8s.io/kubectl/pkg/describe/versioned:go_default_library", "//staging/src/k8s.io/kubectl/pkg/scheme:go_default_library", "//staging/src/k8s.io/kubectl/pkg/util/deployment:go_default_library", diff --git a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go index bcead995b9c..ed51da2fcb7 100644 --- a/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go +++ b/staging/src/k8s.io/kubectl/pkg/polymorphichelpers/rollback.go @@ -34,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/client-go/kubernetes" "k8s.io/kubectl/pkg/apps" + cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/scheme" deploymentutil "k8s.io/kubectl/pkg/util/deployment" ) @@ -45,7 +46,7 @@ const ( // Rollbacker provides an interface for resources that can be rolled back. type Rollbacker interface { - Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) + Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRunStrategy cmdutil.DryRunStrategy) (string, error) } type RollbackVisitor struct { @@ -95,7 +96,7 @@ type DeploymentRollbacker struct { c kubernetes.Interface } -func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) { +func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRunStrategy cmdutil.DryRunStrategy) (string, error) { if toRevision < 0 { return "", revisionNotFoundErr(toRevision) } @@ -119,7 +120,7 @@ func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations m if err != nil { return "", err } - if dryRun { + if dryRunStrategy == cmdutil.DryRunClient { return printTemplate(&rsForRevision.Spec.Template) } if deployment.Spec.Paused { @@ -153,8 +154,12 @@ func (r *DeploymentRollbacker) Rollback(obj runtime.Object, updatedAnnotations m return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } + patchOptions := metav1.PatchOptions{} + if dryRunStrategy == cmdutil.DryRunServer { + patchOptions.DryRun = []string{metav1.DryRunAll} + } // Restore revision - if _, err = r.c.AppsV1().Deployments(namespace).Patch(context.TODO(), name, patchType, patch, metav1.PatchOptions{}); err != nil { + if _, err = r.c.AppsV1().Deployments(namespace).Patch(context.TODO(), name, patchType, patch, patchOptions); err != nil { return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } return rollbackSuccess, nil @@ -255,7 +260,7 @@ type DaemonSetRollbacker struct { c kubernetes.Interface } -func 
(r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) { +func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRunStrategy cmdutil.DryRunStrategy) (string, error) { if toRevision < 0 { return "", revisionNotFoundErr(toRevision) } @@ -276,7 +281,7 @@ func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations ma return "", revisionNotFoundErr(toRevision) } - if dryRun { + if dryRunStrategy == cmdutil.DryRunClient { appliedDS, err := applyDaemonSetHistory(ds, toHistory) if err != nil { return "", err @@ -293,8 +298,12 @@ func (r *DaemonSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations ma return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil } + patchOptions := metav1.PatchOptions{} + if dryRunStrategy == cmdutil.DryRunServer { + patchOptions.DryRun = []string{metav1.DryRunAll} + } // Restore revision - if _, err = r.c.AppsV1().DaemonSets(accessor.GetNamespace()).Patch(context.TODO(), accessor.GetName(), types.StrategicMergePatchType, toHistory.Data.Raw, metav1.PatchOptions{}); err != nil { + if _, err = r.c.AppsV1().DaemonSets(accessor.GetNamespace()).Patch(context.TODO(), accessor.GetName(), types.StrategicMergePatchType, toHistory.Data.Raw, patchOptions); err != nil { return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } @@ -342,7 +351,7 @@ type StatefulSetRollbacker struct { } // toRevision is a non-negative integer, with 0 being reserved to indicate rolling back to previous configuration -func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRun bool) (string, error) { +func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations map[string]string, toRevision int64, dryRunStrategy cmdutil.DryRunStrategy) (string, error) { if toRevision < 0 { return "", revisionNotFoundErr(toRevision) } @@ -363,7 +372,7 @@ func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations return "", revisionNotFoundErr(toRevision) } - if dryRun { + if dryRunStrategy == cmdutil.DryRunClient { appliedSS, err := applyRevision(sts, toHistory) if err != nil { return "", err @@ -380,8 +389,12 @@ func (r *StatefulSetRollbacker) Rollback(obj runtime.Object, updatedAnnotations return fmt.Sprintf("%s (current template already matches revision %d)", rollbackSkipped, toRevision), nil } + patchOptions := metav1.PatchOptions{} + if dryRunStrategy == cmdutil.DryRunServer { + patchOptions.DryRun = []string{metav1.DryRunAll} + } // Restore revision - if _, err = r.c.AppsV1().StatefulSets(sts.Namespace).Patch(context.TODO(), sts.Name, types.StrategicMergePatchType, toHistory.Data.Raw, metav1.PatchOptions{}); err != nil { + if _, err = r.c.AppsV1().StatefulSets(sts.Namespace).Patch(context.TODO(), sts.Name, types.StrategicMergePatchType, toHistory.Data.Raw, patchOptions); err != nil { return "", fmt.Errorf("failed restoring revision %d: %v", toRevision, err) } diff --git a/test/cmd/apply.sh b/test/cmd/apply.sh index 2a4d0bbd3cb..0181b8e3c7e 100755 --- a/test/cmd/apply.sh +++ b/test/cmd/apply.sh @@ -37,6 +37,25 @@ run_kubectl_apply_tests() { # Clean up kubectl delete pods test-pod "${kube_flags[@]:?}" + ### set-last-applied + # Pre-Condition: no POD exists + kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' + # Command: create "test-pod" (doesn't 
exist) should create this pod without last-applied annotation + kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]:?}" + # Post-Condition: pod "test-pod" is created + kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label' + # Pre-Condition: pod "test-pod" does not have configuration annotation + ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")" || exit 1 + # Dry-run set-last-applied + kubectl apply set-last-applied --dry-run=client -f hack/testdata/pod.yaml --create-annotation=true "${kube_flags[@]:?}" + kubectl apply set-last-applied --dry-run=server -f hack/testdata/pod.yaml --create-annotation=true "${kube_flags[@]:?}" + ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")" || exit 1 + # Command + kubectl apply set-last-applied -f hack/testdata/pod.yaml --create-annotation=true "${kube_flags[@]:?}" + # Post-Condition: pod "test-pod" has configuration annotation + grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")" + # Clean up + kubectl delete pods test-pod "${kube_flags[@]:?}" ## kubectl apply should be able to clear defaulted fields. # Pre-Condition: no deployment exists @@ -75,18 +94,21 @@ run_kubectl_apply_tests() { # cleanup kubectl delete pods selector-test-pod - ## kubectl apply --server-dry-run + ## kubectl apply --dry-run=server # Pre-Condition: no POD exists kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply dry-run kubectl apply --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]:?}" + kubectl apply --dry-run=true -f hack/testdata/pod.yaml "${kube_flags[@]:?}" + kubectl apply --dry-run=client -f hack/testdata/pod.yaml "${kube_flags[@]:?}" + kubectl apply --dry-run=server -f hack/testdata/pod.yaml "${kube_flags[@]:?}" # No pod exists kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply non dry-run creates the pod kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]:?}" # apply changes - kubectl apply --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}" + kubectl apply --dry-run=server -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}" # Post-Condition: label still has initial value kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label' @@ -117,7 +139,7 @@ run_kubectl_apply_tests() { __EOF__ # Dry-run create the CR - kubectl "${kube_flags[@]:?}" apply --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}" + kubectl "${kube_flags[@]:?}" apply --dry-run=server -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}" # Make sure that the CR doesn't exist ! 
kubectl "${kube_flags[@]:?}" get resource/myobj || exit 1 @@ -273,18 +295,18 @@ run_kubectl_apply_tests() { # Clean up kubectl delete pods test-pod "${kube_flags[@]:?}" - ## kubectl apply --server-dry-run + ## kubectl apply --dry-run=server # Pre-Condition: no POD exists kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply dry-run - kubectl apply --server-side --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]:?}" + kubectl apply --server-side --dry-run=server -f hack/testdata/pod.yaml "${kube_flags[@]:?}" # No pod exists kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" '' # apply non dry-run creates the pod kubectl apply --server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}" # apply changes - kubectl apply --server-side --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}" + kubectl apply --server-side --dry-run=server -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}" # Post-Condition: label still has initial value kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label' @@ -315,7 +337,7 @@ run_kubectl_apply_tests() { __EOF__ # Dry-run create the CR - kubectl "${kube_flags[@]:?}" apply --server-side --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}" + kubectl "${kube_flags[@]:?}" apply --server-side --dry-run=server -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}" # Make sure that the CR doesn't exist ! kubectl "${kube_flags[@]:?}" get resource/myobj || exit 1 diff --git a/test/cmd/apps.sh b/test/cmd/apps.sh index cdb7d208b99..b880dab4938 100755 --- a/test/cmd/apps.sh +++ b/test/cmd/apps.sh @@ -79,7 +79,8 @@ run_daemonset_history_tests() { kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2" kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*" # Rollback to revision 1 with dry-run - should be no-op - kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]:?}" + kubectl rollout undo daemonset --dry-run=client "${kube_flags[@]:?}" + kubectl rollout undo daemonset --dry-run=server "${kube_flags[@]:?}" kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:" kube::test::get_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:" kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2" @@ -212,6 +213,10 @@ run_deployment_tests() { ### Test kubectl create deployment with image and command # Pre-Condition: No deployment exists. kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' + # Dry-run command + kubectl create deployment nginx-with-command --dry-run=client --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity + kubectl create deployment nginx-with-command --dry-run=server --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command kubectl create deployment nginx-with-command --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity # Post-Condition: Deployment "nginx" is created. 
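The apply and rollout assertions above exercise, end to end, the Go flow that every converted command now repeats in its Run path. A condensed sketch follows; patchWithDryRun is an illustrative name (each command inlines this logic rather than calling a shared helper), and only identifiers already shown in this diff are assumed to exist.

package setexample

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/cli-runtime/pkg/resource"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
)

// patchWithDryRun condenses the shared pattern: client dry-run never
// contacts the server, server dry-run is gated on HasSupport, and the
// helper forwards the strategy to the apiserver.
func patchWithDryRun(info *resource.Info, strategy cmdutil.DryRunStrategy, verifier *resource.DryRunVerifier, patchBytes []byte) (runtime.Object, error) {
	if strategy == cmdutil.DryRunClient {
		// Client-side: return the locally mutated object for printing.
		return info.Object, nil
	}
	if strategy == cmdutil.DryRunServer {
		// Fail fast if the apiserver cannot honor dry-run for this GVK.
		if err := verifier.HasSupport(info.Mapping.GroupVersionKind); err != nil {
			return nil, err
		}
	}
	return resource.
		NewHelper(info.Client, info.Mapping).
		DryRun(strategy == cmdutil.DryRunServer).
		Patch(info.Namespace, info.Name, types.StrategicMergePatchType, patchBytes, nil)
}

The asymmetry is what these tests rely on: --dry-run=client needs no capability check because nothing leaves the client, while --dry-run=server must both verify support and flag the request.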
@@ -263,9 +268,15 @@ run_deployment_tests() { ### Auto scale deployment # Pre-condition: no deployment exists kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' + # Pre-condition: no hpa exists + kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \\\"nginx-deployment\\\" }}found{{end}}{{end}}:" ':' # Command kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}" kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:' + # Dry-run autoscale + kubectl-with-retry autoscale deployment nginx-deployment --dry-run=client "${kube_flags[@]:?}" --min=2 --max=3 + kubectl-with-retry autoscale deployment nginx-deployment --dry-run=server "${kube_flags[@]:?}" --min=2 --max=3 + kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \\\"nginx-deployment\\\" }}found{{end}}{{end}}:" ':' # autoscale 2~3 pods, no CPU utilization specified kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]:?}" --min=2 --max=3 kube::test::get_object_assert 'hpa nginx-deployment' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80' @@ -289,7 +300,8 @@ run_deployment_tests() { kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]:?}" kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" # Rollback to revision 1 with dry-run - should be no-op - kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]:?}" | grep "test-cmd" + kubectl rollout undo deployment nginx --dry-run=client "${kube_flags[@]:?}" | grep "test-cmd" + kubectl rollout undo deployment nginx --dry-run=server "${kube_flags[@]:?}" kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" # Rollback to revision 1 kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]:?}" @@ -337,6 +349,11 @@ run_deployment_tests() { kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:' kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:" + # Dry-run set the deployment's image + kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" --dry-run=client "${kube_flags[@]:?}" + kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" --dry-run=server "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:" # Set the deployment's image kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]:?}" kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" @@ -383,6 +400,10 @@ run_deployment_tests() { kube::test::get_object_assert 'deploy nginx-deployment' "{{ (index (index .spec.template.spec.containers 0).env 0).name}}" 'KEY_2' # Assert single value in deployment env kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1' + # Dry-run set env + kubectl set env deployment nginx-deployment --dry-run=client 
--from=configmap/test-set-env-config "${kube_flags[@]:?}" + kubectl set env deployment nginx-deployment --dry-run=server --from=configmap/test-set-env-config "${kube_flags[@]:?}" + kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1' # Set env of deployments by configmap kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]:?}" # Assert all values in deployment env @@ -431,7 +452,8 @@ run_statefulset_history_tests() { kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2" kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*" # Rollback to revision 1 with dry-run - should be no-op - kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]:?}" + kubectl rollout undo statefulset --dry-run=client "${kube_flags[@]:?}" + kubectl rollout undo statefulset --dry-run=server "${kube_flags[@]:?}" kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" kube::test::get_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:" kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2" @@ -610,8 +632,16 @@ run_rs_tests() { kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '2' kubectl set env rs/frontend "${kube_flags[@]:?}" foo=bar kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '3' + kubectl set resources rs/frontend --dry-run=client "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi + kubectl set resources rs/frontend --dry-run=server "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi + kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '3' kubectl set resources rs/frontend "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '4' + kubectl set serviceaccount rs/frontend --dry-run=client "${kube_flags[@]:?}" serviceaccount1 + kubectl set serviceaccount rs/frontend --dry-run=server "${kube_flags[@]:?}" serviceaccount1 + kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '4' + kubectl set serviceaccount rs/frontend "${kube_flags[@]:?}" serviceaccount1 + kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '5' ### Delete replica set with id # Pre-condition: frontend replica set exists diff --git a/test/cmd/batch.sh b/test/cmd/batch.sh index 6f939a19c5e..c16dd30e9e1 100755 --- a/test/cmd/batch.sh +++ b/test/cmd/batch.sh @@ -33,6 +33,12 @@ run_job_tests() { # Post-condition: namespace 'test-jobs' is created. 
kube::test::get_object_assert 'namespaces/test-jobs' "{{$id_field}}" 'test-jobs' + # Pre-condition: cronjob does not exist + kube::test::get_object_assert 'cronjob --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \\\"pi\\\" }}found{{end}}{{end}}:" ':' + # Dry-run create CronJob + kubectl create cronjob pi --dry-run=client --schedule="59 23 31 2 *" --namespace=test-jobs "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}" + kubectl create cronjob pi --dry-run=server --schedule="59 23 31 2 *" --namespace=test-jobs "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}" + kube::test::get_object_assert 'cronjob' "{{range.items}}{{ if eq $id_field \\\"pi\\\" }}found{{end}}{{end}}:" ':' ### Create a cronjob in a specific namespace kubectl create cronjob pi --schedule="59 23 31 2 *" --namespace=test-jobs "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}" # Post-Condition: assertion object exists @@ -47,6 +53,12 @@ run_job_tests() { # Post-condition: The test-job wasn't created actually kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}{{end}}" '' + # Pre-condition: job does not exist + kube::test::get_object_assert 'job --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \\\"test-jobs\\\" }}found{{end}}{{end}}:" ':' + ### Dry-run create a job in a specific namespace + kubectl create job test-job --from=cronjob/pi --namespace=test-jobs --dry-run=client + kubectl create job test-job --from=cronjob/pi --namespace=test-jobs --dry-run=server + kube::test::get_object_assert 'job --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \\\"test-jobs\\\" }}found{{end}}{{end}}:" ':' ### Create a job in a specific namespace kubectl create job test-job --from=cronjob/pi --namespace=test-jobs # Post-Condition: assertion object exists diff --git a/test/cmd/core.sh b/test/cmd/core.sh index 224dc25c826..3b5f30c90b7 100755 --- a/test/cmd/core.sh +++ b/test/cmd/core.sh @@ -40,7 +40,10 @@ run_configmap_tests() { # Pre-condition: configmap test-configmap and test-binary-configmap does not exist kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"test-configmap\\\" }}found{{end}}{{end}}:" ':' kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"test-binary-configmap\\\" }}found{{end}}{{end}}:" ':' - + # Dry-run command + kubectl create configmap test-configmap --dry-run=client --from-literal=key1=value1 --namespace=test-configmaps + kubectl create configmap test-configmap --dry-run=server --from-literal=key1=value1 --namespace=test-configmaps + kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"test-configmap\\\" }}found{{end}}{{end}}:" ':' # Command kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps kubectl create configmap test-binary-configmap --from-file <( head -c 256 /dev/urandom ) --namespace=test-configmaps @@ -217,6 +220,10 @@ run_pod_tests() { ### Create a generic secret # Pre-condition: no SECRET exists kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" '' + # Dry-run command + kubectl create secret generic test-secret --dry-run=client --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod + kubectl create secret generic test-secret --dry-run=server --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod + 
kube::test::get_object_assert 'secrets --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" '' # Command kubectl create secret generic test-secret --from-literal=key-1=value1 --type=test-type --namespace=test-kubectl-describe-pod # Post-condition: secret exists and has expected values @@ -235,6 +242,12 @@ run_pod_tests() { kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" 'test-configmap' ### Create a pod disruption budget with minAvailable + # Pre-condition: pdb does not exist + kube::test::get_object_assert 'pdb --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \\\"test-pdb-1\\\" }}found{{end}}{{end}}:" ':' + # Dry-run command + kubectl create pdb test-pdb-1 --dry-run=client --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod + kubectl create pdb test-pdb-1 --dry-run=server --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod + kube::test::get_object_assert 'pdb --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \\\"test-pdb-1\\\" }}found{{end}}{{end}}:" ':' # Command kubectl create pdb test-pdb-1 --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod # Post-condition: pdb exists and has expected values @@ -272,6 +285,17 @@ run_pod_tests() { kubectl delete pdb/test-pdb-1 pdb/test-pdb-2 pdb/test-pdb-3 pdb/test-pdb-4 --namespace=test-kubectl-describe-pod kubectl delete namespace test-kubectl-describe-pod + ### Priority Class + kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \\\"test-priorityclass\\\" }}found{{end}}{{end}}:" ':' + # Dry-run command + kubectl create priorityclass test-priorityclass --dry-run=client + kubectl create priorityclass test-priorityclass --dry-run=server + kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \\\"test-priorityclass\\\" }}found{{end}}{{end}}:" ':' + # Command + kubectl create priorityclass test-priorityclass + kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \\\"test-priorityclass\\\" }}found{{end}}{{end}}:" 'found:' + kubectl delete priorityclass test-priorityclass + ### Create two PODs # Pre-condition: no POD exists create_and_use_new_namespace @@ -299,6 +323,15 @@ run_pod_tests() { # Post-condition: valid-pod POD is created kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:' + ### Dry-run label the valid-pod POD + # Pre-condition: valid-pod is not labelled + kube::test::get_object_assert 'pod valid-pod' "{{range${labels_field:?}}}{{.}}:{{end}}" 'valid-pod:' + # Command + kubectl label pods valid-pod new-name=new-valid-pod --dry-run=client "${kube_flags[@]}" + kubectl label pods valid-pod new-name=new-valid-pod --dry-run=server "${kube_flags[@]}" + # Post-condition: valid-pod is not labelled + kube::test::get_object_assert 'pod valid-pod' "{{range${labels_field:?}}}{{.}}:{{end}}" 'valid-pod:' + ### Label the valid-pod POD # Pre-condition: valid-pod is not labelled kube::test::get_object_assert 'pod valid-pod' "{{range${labels_field:?}}}{{.}}:{{end}}" 'valid-pod:' @@ -315,6 +348,15 @@ run_pod_tests() { # Post-condition: valid pod contains "emptylabel" with no value kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.emptylabel}}" '' + ### Dry-run annotate the valid-pod POD with empty annotation value + # Pre-condition: valid-pod does not have annotation "emptyannotation" + kube::test::get_object_assert 
'pod valid-pod' "{{${annotations_field:?}.emptyannotation}}" '' + # Command + kubectl annotate pods valid-pod emptyannotation="" --dry-run=client "${kube_flags[@]}" + kubectl annotate pods valid-pod emptyannotation="" --dry-run=server "${kube_flags[@]}" + # Post-condition: valid-pod does not have annotation "emptyannotation" + kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field:?}.emptyannotation}}" '' + ### Annotate the valid-pod POD with empty annotation value # Pre-condition: valid-pod does not have annotation "emptyannotation" kube::test::get_object_assert 'pod valid-pod' "{{${annotations_field:?}.emptyannotation}}" '' @@ -463,6 +505,11 @@ run_pod_tests() { kubectl patch "${kube_flags[@]}" pod valid-pod --type="json" -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"nginx"}]' # Post-condition: valid-pod POD has image nginx kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:' + # Dry-run change image + kubectl patch "${kube_flags[@]}" pod valid-pod --record --dry-run=client -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "not-nginx"}]}}' + kubectl patch "${kube_flags[@]}" pod valid-pod --record --dry-run=server -p='{"spec":{"containers":[{"name": "kubernetes-serve-hostname", "image": "not-nginx"}]}}' + # Post-condition: valid-pod POD has image nginx + kube::test::get_object_assert pods "{{range.items}}{{$image_field}}:{{end}}" 'nginx:' # prove that yaml input works too YAML_PATCH=$'spec:\n containers:\n - name: kubernetes-serve-hostname\n image: changed-with-yaml\n' kubectl patch "${kube_flags[@]}" pod valid-pod -p="${YAML_PATCH}" @@ -689,7 +736,7 @@ run_create_secret_tests() { # check to make sure that replace correctly PUTs to a URL - kubectl create configmap tester-update-cm -o json --dry-run | kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps -f - + kubectl create configmap tester-update-cm -o json --dry-run=client | kubectl create "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps -f - output_message=$(kubectl create configmap tester-update-cm --from-literal=key1=config1 -o json --dry-run | kubectl replace "${kube_flags[@]}" --raw /api/v1/namespaces/default/configmaps/tester-update-cm -f -) # the message should show the body returned which will include a UID not present in the input kube::test::if_has_string "${output_message}" 'uid' @@ -832,6 +879,12 @@ run_service_accounts_tests() { kube::test::get_object_assert 'namespaces/test-service-accounts' "{{$id_field}}" 'test-service-accounts' ### Create a service account in a specific namespace + # Pre-condition: service account does not exist + kube::test::get_object_assert 'serviceaccount --namespace=test-service-accounts' "{{range.items}}{{ if eq $id_field \\\"test-service-account\\\" }}found{{end}}{{end}}:" ':' + # Dry-run command + kubectl create serviceaccount test-service-account --dry-run=client --namespace=test-service-accounts + kubectl create serviceaccount test-service-account --dry-run=server --namespace=test-service-accounts + kube::test::get_object_assert 'serviceaccount --namespace=test-service-accounts' "{{range.items}}{{ if eq $id_field \\\"test-service-account\\\" }}found{{end}}{{end}}:" ':' # Command kubectl create serviceaccount test-service-account --namespace=test-service-accounts # Post-condition: secret exists and has expected values @@ -893,7 +946,8 @@ run_service_tests() { # prove role=master kube::test::get_object_assert 'services redis-master' 
"{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:" # Show dry-run works on running selector - kubectl set selector services redis-master role=padawan --dry-run -o yaml "${kube_flags[@]}" + kubectl set selector services redis-master role=padawan --dry-run=client -o yaml "${kube_flags[@]}" + kubectl set selector services redis-master role=padawan --dry-run=server -o yaml "${kube_flags[@]}" ! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}" || exit 1 kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:" # --resource-version= succeeds @@ -994,6 +1048,10 @@ __EOF__ ### Create an ExternalName service # Pre-condition: Only the default kubernetes service exist kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:' + # Dry-run command + kubectl create service externalname beep-boop --dry-run=client --external-name bar.com + kubectl create service externalname beep-boop --dry-run=server --external-name bar.com + kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:' # Command kubectl create service externalname beep-boop --external-name bar.com # Post-condition: beep-boop service is created @@ -1013,6 +1071,13 @@ __EOF__ ### Create pod and service # Pre-condition: no pod exists kube::test::wait_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" '' + # Pre-condition: Only the default kubernetes services exist + kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:' + # Dry-run command + kubectl run testmetadata --image=nginx --port=80 --expose --dry-run=client --service-overrides='{ "metadata": { "annotations": { "zone-context": "home" } } } ' + kubectl run testmetadata --image=nginx --port=80 --expose --dry-run=server --service-overrides='{ "metadata": { "annotations": { "zone-context": "home" } } } ' + # Check only the default kubernetes services exist + kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" 'kubernetes:' # Command kubectl run testmetadata --image=nginx --port=80 --expose --service-overrides='{ "metadata": { "annotations": { "zone-context": "home" } } } ' # Check result @@ -1305,9 +1370,14 @@ run_namespace_tests() { kube::log::status "Testing kubectl(v1:namespaces)" ### Create a new namespace - # Pre-condition: only the "default" namespace exists - # The Pre-condition doesn't hold anymore after we create and switch namespaces before creating pods with same name in the test. - # kube::test::get_object_assert namespaces "{{range.items}}{{$id_field}}:{{end}}" 'default:' + # Pre-condition: test namespace does not exist + output_message=$(! kubectl get ns/my-namespace 2>&1 "${kube_flags[@]}") + kube::test::if_has_string "${output_message}" ' not found' + # Dry-run command + kubectl create namespace my-namespace --dry-run=client + kubectl create namespace my-namespace --dry-run=server + output_message=$(! kubectl get ns/my-namespace 2>&1 "${kube_flags[@]}") + kube::test::if_has_string "${output_message}" ' not found' # Command kubectl create namespace my-namespace # Post-condition: namespace 'my-namespace' is created. 
@@ -1325,6 +1395,21 @@ run_namespace_tests() {
   kube::test::if_has_string "${output_message}" 'warning: deleting cluster-scoped resources'
   kube::test::if_has_string "${output_message}" 'namespace "my-namespace" deleted'

+  ### Quota
+  kubectl create namespace quotas
+  kube::test::get_object_assert 'namespaces/quotas' "{{$id_field}}" 'quotas'
+  kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \\\"test-quota\\\" }}found{{end}}{{end}}:" ':'
+  # Dry-run command
+  kubectl create quota test-quota --dry-run=client --namespace=quotas
+  kubectl create quota test-quota --dry-run=server --namespace=quotas
+  kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \\\"test-quota\\\" }}found{{end}}{{end}}:" ':'
+  # Command
+  kubectl create quota test-quota --namespace=quotas
+  kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \\\"test-quota\\\" }}found{{end}}{{end}}:" 'found:'
+  # Clean up
+  kubectl delete quota test-quota --namespace=quotas
+  kubectl delete namespace quotas
+
   ######################
   # Pods in Namespaces #
   ######################
diff --git a/test/cmd/create.sh b/test/cmd/create.sh
index 7d0de646eb4..cc393ca2fe4 100755
--- a/test/cmd/create.sh
+++ b/test/cmd/create.sh
@@ -18,6 +18,26 @@ set -o errexit
 set -o nounset
 set -o pipefail

+# Runs tests related to kubectl create --dry-run.
+run_kubectl_create_dry_run_tests() {
+  set -o nounset
+  set -o errexit
+
+  create_and_use_new_namespace
+  kube::log::status "Testing kubectl create dry-run"
+
+  # Pre-Condition: no POD exists
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  # dry-run create
+  kubectl create --dry-run=client -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
+  kubectl create --dry-run=server -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
+  # check no POD exists
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+
+  set +o nounset
+  set +o errexit
+}
+
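Defining run_kubectl_create_dry_run_tests is presumably only half of the change: like the existing run_* suites, it would still have to be invoked from the shared test driver, and that call site is not shown in this patch. Hypothetically, following the convention used for the other suites:

    record_command run_kubectl_create_dry_run_tests   # hypothetical wiring; the actual call site is outside this diff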
 # Runs tests related to kubectl create --filename(-f) --selector(-l).
 run_kubectl_create_filter_tests() {
   set -o nounset
diff --git a/test/cmd/node-management.sh b/test/cmd/node-management.sh
index df96306020b..74db489d047 100755
--- a/test/cmd/node-management.sh
+++ b/test/cmd/node-management.sh
@@ -22,6 +22,7 @@ run_cluster_management_tests() {
   set -o nounset
   set -o errexit

+  create_and_use_new_namespace
   kube::log::status "Testing cluster-management commands"

   kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
@@ -90,13 +91,15 @@ __EOF__
   ### kubectl cordon update with --dry-run does not mark node unschedulable
   # Pre-condition: node is schedulable
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" ''
-  kubectl cordon "127.0.0.1" --dry-run
+  kubectl cordon "127.0.0.1" --dry-run=client
+  kubectl cordon "127.0.0.1" --dry-run=server
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" ''

   ### kubectl drain update with --dry-run does not mark node unschedulable
   # Pre-condition: node is schedulable
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" ''
-  kubectl drain "127.0.0.1" --dry-run
+  kubectl drain "127.0.0.1" --dry-run=client
+  kubectl drain "127.0.0.1" --dry-run=server
   # Post-condition: node still exists, node is still schedulable
   kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" ''
@@ -106,6 +109,11 @@ __EOF__
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" ''
   # Pre-condition: test-pod-1 and test-pod-2 exist
   kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
+  # dry-run command
+  kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=client
+  kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=server
+  kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
+  # command
   kubectl drain "127.0.0.1" --pod-selector 'e in (f)'
   # only "test-pod-1" should have been matched and deleted - test-pod-2 should still exist
   kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.name}}" 'test-pod-2'
@@ -118,7 +126,9 @@ __EOF__
   ### kubectl uncordon update with --dry-run is a no-op
   # Pre-condition: node is already schedulable
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" ''
-  response=$(kubectl uncordon "127.0.0.1" --dry-run)
+  response=$(kubectl uncordon "127.0.0.1" --dry-run=client)
+  kube::test::if_has_string "${response}" 'already uncordoned'
+  response=$(kubectl uncordon "127.0.0.1" --dry-run=server)
   kube::test::if_has_string "${response}" 'already uncordoned'
   # Post-condition: node is still schedulable
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" ''
diff --git a/test/cmd/rbac.sh b/test/cmd/rbac.sh
index 52fb55e3e93..02e304bb791 100755
--- a/test/cmd/rbac.sh
+++ b/test/cmd/rbac.sh
@@ -29,6 +29,14 @@ run_clusterroles_tests() {
   kube::test::get_object_assert clusterroles/cluster-admin "{{.metadata.name}}" 'cluster-admin'
   kube::test::get_object_assert clusterrolebindings/cluster-admin "{{.metadata.name}}" 'cluster-admin'

+  # Pre-condition: no ClusterRole pod-admin exists
+  output_message=$(! kubectl get clusterrole pod-admin 2>&1 "${kube_flags[@]:?}")
+  kube::test::if_has_string "${output_message}" 'clusterroles.rbac.authorization.k8s.io "pod-admin" not found'
+  # Dry-run test `kubectl create clusterrole`
+  kubectl create "${kube_flags[@]:?}" clusterrole pod-admin --dry-run=client --verb=* --resource=pods
+  kubectl create "${kube_flags[@]:?}" clusterrole pod-admin --dry-run=server --verb=* --resource=pods
+  output_message=$(! kubectl get clusterrole pod-admin 2>&1 "${kube_flags[@]:?}")
+  kube::test::if_has_string "${output_message}" 'clusterroles.rbac.authorization.k8s.io "pod-admin" not found'
   # test `kubectl create clusterrole`
   kubectl create "${kube_flags[@]:?}" clusterrole pod-admin --verb=* --resource=pods
   kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
@@ -55,10 +63,21 @@ run_clusterroles_tests() {
   kubectl create "${kube_flags[@]}" clusterrole aggregation-reader --aggregation-rule="foo1=foo2"
   kube::test::get_object_assert clusterrole/aggregation-reader "{{${id_field:?}}}" 'aggregation-reader'

+  # Pre-condition: no ClusterRoleBinding super-admin exists
+  output_message=$(! kubectl get clusterrolebinding super-admin 2>&1 "${kube_flags[@]}")
+  kube::test::if_has_string "${output_message}" 'clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found'
+  # Dry-run test `kubectl create clusterrolebinding`
+  kubectl create "${kube_flags[@]}" clusterrolebinding super-admin --dry-run=client --clusterrole=admin --user=super-admin
+  kubectl create "${kube_flags[@]}" clusterrolebinding super-admin --dry-run=server --clusterrole=admin --user=super-admin
+  output_message=$(! kubectl get clusterrolebinding super-admin 2>&1 "${kube_flags[@]}")
+  kube::test::if_has_string "${output_message}" 'clusterrolebindings.rbac.authorization.k8s.io "super-admin" not found'
   # test `kubectl create clusterrolebinding`
   # test `kubectl set subject clusterrolebinding`
   kubectl create "${kube_flags[@]}" clusterrolebinding super-admin --clusterrole=admin --user=super-admin
   kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
+  kubectl set subject --dry-run=client "${kube_flags[@]}" clusterrolebinding super-admin --user=foo
+  kubectl set subject --dry-run=server "${kube_flags[@]}" clusterrolebinding super-admin --user=foo
+  kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:'
   kubectl set subject "${kube_flags[@]}" clusterrolebinding super-admin --user=foo
   kube::test::get_object_assert clusterrolebinding/super-admin "{{range.subjects}}{{.name}}:{{end}}" 'super-admin:foo:'
   kubectl create "${kube_flags[@]}" clusterrolebinding multi-users --clusterrole=admin --user=user-1 --user=user-2
@@ -86,6 +105,10 @@ run_clusterroles_tests() {

   # test `kubectl create rolebinding`
   # test `kubectl set subject rolebinding`
+  kubectl create "${kube_flags[@]}" rolebinding admin --dry-run=client --clusterrole=admin --user=default-admin
+  kubectl create "${kube_flags[@]}" rolebinding admin --dry-run=server --clusterrole=admin --user=default-admin
+  output_message=$(! kubectl get rolebinding/admin 2>&1 "${kube_flags[@]}")
+  kube::test::if_has_string "${output_message}" ' not found'
   kubectl create "${kube_flags[@]}" rolebinding admin --clusterrole=admin --user=default-admin
   kube::test::get_object_assert rolebinding/admin "{{.roleRef.kind}}" 'ClusterRole'
   kube::test::get_object_assert rolebinding/admin "{{range.subjects}}{{.name}}:{{end}}" 'default-admin:'
@@ -122,6 +145,11 @@ run_role_tests() {
   create_and_use_new_namespace
   kube::log::status "Testing role"

+  # Dry-run create
+  kubectl create "${kube_flags[@]}" role pod-admin --dry-run=client --verb=* --resource=pods
+  kubectl create "${kube_flags[@]}" role pod-admin --dry-run=server --verb=* --resource=pods
+  output_message=$(! kubectl get role/pod-admin 2>&1 "${kube_flags[@]}")
+  kube::test::if_has_string "${output_message}" ' not found'
   # Create Role from command (only resource)
   kubectl create "${kube_flags[@]}" role pod-admin --verb=* --resource=pods
   kube::test::get_object_assert role/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
diff --git a/test/cmd/run.sh b/test/cmd/run.sh
index 1e512b5245f..af0925cdbb4 100755
--- a/test/cmd/run.sh
+++ b/test/cmd/run.sh
@@ -25,6 +25,12 @@ run_kubectl_run_tests() {
   create_and_use_new_namespace
   kube::log::status "Testing kubectl run"

+  # Command with dry-run
+  kubectl run --dry-run=client nginx-extensions "--image=${IMAGE_NGINX}" "${kube_flags[@]:?}"
+  kubectl run --dry-run=server nginx-extensions "--image=${IMAGE_NGINX}" "${kube_flags[@]:?}"
+  # Post-Condition: no Pod exists
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+
   # Pre-Condition: no Pod exists
   kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command
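One detail these suites rely on: the two strategies remain distinguishable in command output, since kubectl suffixes the success message differently per strategy. Roughly (exact wording depends on how the print flags are completed for each strategy):

    kubectl run nginx-extensions "--image=${IMAGE_NGINX}" --dry-run=client   # => pod/nginx-extensions created (dry run)
    kubectl run nginx-extensions "--image=${IMAGE_NGINX}" --dry-run=server   # => pod/nginx-extensions created (server dry run)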