Merge pull request #52440 from juanvallejo/jvallejo/add-dry-run-flag-kubectl-drain

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

add --dry-run option to kubectl drain

**Release note**:

```release-note
Added --dry-run option to `kubectl drain`
```

Adds a `--dry-run` flag to `kubectl <cordon, uncordon, drain>`.

@fabianofranz @kubernetes/sig-cli-misc
This commit is contained in commit 3f1a2e43b4.
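For reviewers who want to try the flag locally, here is a minimal usage sketch. The node name `node-1` is hypothetical; the behavior noted in the comments is what the change below implements (the usual success message, marked as a dry run, with no write sent to the API server):

```sh
# Cordon in dry-run mode: prints the usual success message but does not
# patch the node, so .spec.unschedulable is left untouched.
kubectl cordon node-1 --dry-run

# Drain in dry-run mode: skips the cordon and eviction steps entirely and
# only reports the node as drained.
kubectl drain node-1 --dry-run

# Uncordon in dry-run mode: a no-op; an already schedulable node is
# reported as "already uncordoned".
kubectl uncordon node-1 --dry-run
```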
```diff
@@ -4214,35 +4214,57 @@ run_certificates_tests() {
 }
 
 run_cluster_management_tests() {
   set -o nounset
   set -o errexit
 
   kube::log::status "Testing cluster-management commands"
 
   kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
 
+  ### kubectl cordon update with --dry-run does not mark node unschedulable
+  # Pre-condition: node is schedulable
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+  kubectl cordon "127.0.0.1" --dry-run
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+
+  ### kubectl drain update with --dry-run does not mark node unschedulable
+  # Pre-condition: node is schedulable
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+  kubectl drain "127.0.0.1" --dry-run
+  # Post-condition: node still exists, node is still schedulable
+  kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+
+  ### kubectl uncordon update with --dry-run is a no-op
+  # Pre-condition: node is already schedulable
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+  response=$(kubectl uncordon "127.0.0.1" --dry-run)
+  kube::test::if_has_string "${response}" 'already uncordoned'
+  # Post-condition: node is still schedulable
+  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
+
   ### kubectl drain command fails when both --selector and a node argument are given
   # Pre-condition: node exists and contains label test=label
   kubectl label node "127.0.0.1" "test=label"
   kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
   response=$(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
   kube::test::if_has_string "${response}" 'cannot specify both a node name'
 
   ### kubectl cordon command fails when no arguments are passed
   # Pre-condition: node exists
   response=$(! kubectl cordon 2>&1)
   kube::test::if_has_string "${response}" 'error\: USAGE\: cordon NODE'
 
   ### kubectl cordon selects all nodes with an empty --selector=
   # Pre-condition: node "127.0.0.1" is uncordoned
   kubectl uncordon "127.0.0.1"
   response=$(kubectl cordon --selector=)
   kube::test::if_has_string "${response}" 'node "127.0.0.1" cordoned'
   # Post-condition: node "127.0.0.1" is cordoned
   kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'
 
   set +o nounset
   set +o errexit
 }
 
 run_plugins_tests() {
@@ -4835,12 +4857,12 @@ runTests() {
     record_command run_certificates_tests
   fi
 
   ######################
   # Cluster Management #
   ######################
   if kube::test::if_supports_resource "${nodes}" ; then
     record_command run_cluster_management_tests
   fi
 
   ###########
   # Plugins #
```
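The assertion helper used above, `kube::test::get_object_assert`, is roughly a `kubectl get -o go-template` call followed by a string comparison. As a plain-shell illustration (assuming the single test node `127.0.0.1` that the cmd tests run against), the cordon dry-run case amounts to:

```sh
# Pre-condition: the node is schedulable; an unset .spec.unschedulable
# field renders as "<no value>" in a Go template.
test "$(kubectl get nodes 127.0.0.1 -o go-template='{{.spec.unschedulable}}')" = '<no value>'

# Dry-run cordon: success output only, no patch is sent to the API server.
kubectl cordon 127.0.0.1 --dry-run

# Post-condition: the node is still schedulable.
test "$(kubectl get nodes 127.0.0.1 -o go-template='{{.spec.unschedulable}}')" = '<no value>'
```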
```diff
@@ -55,6 +55,7 @@ type DrainOptions struct {
 	restClient *restclient.RESTClient
 	Factory cmdutil.Factory
 	Force bool
+	DryRun bool
 	GracePeriodSeconds int
 	IgnoreDaemonsets bool
 	Timeout time.Duration
@@ -113,6 +114,7 @@ func NewCmdCordon(f cmdutil.Factory, out io.Writer) *cobra.Command {
 		},
 	}
 	cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter on")
+	cmdutil.AddDryRunFlag(cmd)
 	return cmd
 }
 
@@ -139,6 +141,7 @@ func NewCmdUncordon(f cmdutil.Factory, out io.Writer) *cobra.Command {
 		},
 	}
 	cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter on")
+	cmdutil.AddDryRunFlag(cmd)
 	return cmd
 }
 
@@ -195,6 +198,7 @@ func NewCmdDrain(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command {
 	cmd.Flags().IntVar(&options.GracePeriodSeconds, "grace-period", -1, "Period of time in seconds given to each pod to terminate gracefully. If negative, the default value specified in the pod will be used.")
 	cmd.Flags().DurationVar(&options.Timeout, "timeout", 0, "The length of time to wait before giving up, zero means infinite")
 	cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter on")
+	cmdutil.AddDryRunFlag(cmd)
 	return cmd
 }
 
@@ -214,6 +218,8 @@ func (o *DrainOptions) SetupDrain(cmd *cobra.Command, args []string) error {
 		return cmdutil.UsageErrorf(cmd, fmt.Sprintf("USAGE: %s [flags]", cmd.Use))
 	}
 
+	o.DryRun = cmdutil.GetFlagBool(cmd, "dry-run")
+
 	if o.client, err = o.Factory.ClientSet(); err != nil {
 		return err
 	}
@@ -269,10 +275,13 @@ func (o *DrainOptions) RunDrain() error {
 	var fatal error
 
 	for _, info := range o.nodeInfos {
-		err := o.deleteOrEvictPodsSimple(info)
-		if err == nil {
+		var err error
+		if !o.DryRun {
+			err = o.deleteOrEvictPodsSimple(info)
+		}
+		if err == nil || o.DryRun {
 			drainedNodes.Insert(info.Name)
-			cmdutil.PrintSuccess(o.mapper, false, o.Out, "node", info.Name, false, "drained")
+			cmdutil.PrintSuccess(o.mapper, false, o.Out, "node", info.Name, o.DryRun, "drained")
 		} else {
 			fmt.Fprintf(o.ErrOut, "error: unable to drain node %q, aborting command...\n\n", info.Name)
 			remainingNodes := []string{}
@@ -697,29 +706,31 @@ func (o *DrainOptions) RunCordonOrUncordon(desired bool) error {
 			}
 			unsched := node.Spec.Unschedulable
 			if unsched == desired {
-				cmdutil.PrintSuccess(o.mapper, false, o.Out, nodeInfo.Mapping.Resource, nodeInfo.Name, false, already(desired))
+				cmdutil.PrintSuccess(o.mapper, false, o.Out, nodeInfo.Mapping.Resource, nodeInfo.Name, o.DryRun, already(desired))
 			} else {
-				helper := resource.NewHelper(o.restClient, nodeInfo.Mapping)
-				node.Spec.Unschedulable = desired
-				newData, err := json.Marshal(obj)
-				if err != nil {
-					fmt.Fprintf(o.ErrOut, "error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err)
-					continue
+				if !o.DryRun {
+					helper := resource.NewHelper(o.restClient, nodeInfo.Mapping)
+					node.Spec.Unschedulable = desired
+					newData, err := json.Marshal(obj)
+					if err != nil {
+						fmt.Fprintf(o.ErrOut, "error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err)
+						continue
+					}
+					patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, obj)
+					if err != nil {
+						fmt.Printf("error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err)
+						continue
+					}
+					_, err = helper.Patch(cmdNamespace, nodeInfo.Name, types.StrategicMergePatchType, patchBytes)
+					if err != nil {
+						fmt.Printf("error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err)
+						continue
+					}
 				}
-				patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, obj)
-				if err != nil {
-					fmt.Printf("error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err)
-					continue
-				}
-				_, err = helper.Patch(cmdNamespace, nodeInfo.Name, types.StrategicMergePatchType, patchBytes)
-				if err != nil {
-					fmt.Printf("error: unable to %s node %q: %v", cordonOrUncordon, nodeInfo.Name, err)
-					continue
-				}
-				cmdutil.PrintSuccess(o.mapper, false, o.Out, nodeInfo.Mapping.Resource, nodeInfo.Name, false, changed(desired))
+				cmdutil.PrintSuccess(o.mapper, false, o.Out, nodeInfo.Mapping.Resource, nodeInfo.Name, o.DryRun, changed(desired))
 			}
 		} else {
-			cmdutil.PrintSuccess(o.mapper, false, o.Out, nodeInfo.Mapping.Resource, nodeInfo.Name, false, "skipped")
+			cmdutil.PrintSuccess(o.mapper, false, o.Out, nodeInfo.Mapping.Resource, nodeInfo.Name, o.DryRun, "skipped")
 		}
 	}
 
```