Merge pull request #12551 from eparis/underscore-to-dash

Update code and docs to use - in flag names instead of _
Piotr Szczesniak committed 2015-08-12 07:16:31 +02:00
13 changed files with 15 additions and 15 deletions

View File

@@ -35,7 +35,7 @@ import (
flag "github.com/spf13/pflag"
)
var fuzzIters = flag.Int("fuzz_iters", 20, "How many fuzzing iterations to do.")
var fuzzIters = flag.Int("fuzz-iters", 20, "How many fuzzing iterations to do.")
func fuzzInternalObject(t *testing.T, forVersion string, item runtime.Object, seed int64) runtime.Object {
apitesting.FuzzerFor(t, forVersion, rand.NewSource(seed)).Fuzz(item)
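The same underscore-to-dash rename recurs in the files below. As a minimal, self-contained sketch of how both spellings could keep working during such a transition, assuming spf13/pflag's SetNormalizeFunc hook (the normalize function and values here are illustrative, not part of this commit):

package main

import (
	"fmt"
	"strings"

	flag "github.com/spf13/pflag"
)

// wordSepNormalize maps '_' to '-', so --fuzz_iters and --fuzz-iters resolve
// to the same flag while callers migrate to the dashed spelling.
func wordSepNormalize(f *flag.FlagSet, name string) flag.NormalizedName {
	return flag.NormalizedName(strings.Replace(name, "_", "-", -1))
}

func main() {
	fuzzIters := flag.Int("fuzz-iters", 20, "How many fuzzing iterations to do.")
	flag.CommandLine.SetNormalizeFunc(wordSepNormalize)
	flag.Parse()
	fmt.Println("fuzz-iters =", *fuzzIters)
}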

View File

@@ -65,7 +65,7 @@ type NodeController struct {
// sync node status in this case, but will monitor node status updated from kubelet. If
// it doesn't receive update for this amount of time, it will start posting "NodeReady==
// ConditionUnknown". The amount of time before which NodeController start evicting pods
-// is controlled via flag 'pod_eviction_timeout'.
+// is controlled via flag 'pod-eviction-timeout'.
// Note: be cautious when changing the constant, it must work with nodeStatusUpdateFrequency
// in kubelet. There are several constraints:
// 1. nodeMonitorGracePeriod must be N times more than nodeStatusUpdateFrequency, where
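A small, hypothetical check of the timing constraint described in that comment, using illustrative values rather than the real kubelet/NodeController defaults:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed values for illustration only.
	nodeStatusUpdateFrequency := 10 * time.Second
	n := 5 // require the grace period to cover at least N status updates
	nodeMonitorGracePeriod := 40 * time.Second

	if nodeMonitorGracePeriod < time.Duration(n)*nodeStatusUpdateFrequency {
		fmt.Printf("invalid: nodeMonitorGracePeriod %v < %d * nodeStatusUpdateFrequency (%v)\n",
			nodeMonitorGracePeriod, n, nodeStatusUpdateFrequency)
	}
}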

View File

@@ -30,7 +30,7 @@ import (
flag "github.com/spf13/pflag"
)
var fuzzIters = flag.Int("fuzz_iters", 50, "How many fuzzing iterations to do.")
var fuzzIters = flag.Int("fuzz-iters", 50, "How many fuzzing iterations to do.")
// Test a weird version/kind embedding format.
type MyWeirdCustomEmbeddedVersionKindField struct {

View File

@@ -229,7 +229,7 @@ func ensureDockerInContainer(cadvisor cadvisor.Interface, oomScoreAdj int, manag
}
}
-// Also apply oom_score_adj to processes
+// Also apply oom-score-adj to processes
oomAdjuster := oom.NewOomAdjuster()
if err := oomAdjuster.ApplyOomScoreAdj(pid, oomScoreAdj); err != nil {
errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d", oomScoreAdj, pid))
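For context on what applying an OOM score adjustment amounts to, a minimal Linux-only sketch that writes the value straight to procfs; this illustrates the kernel interface the comment refers to, not the oom.OomAdjuster implementation (the PID and value are made up):

package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
)

// applyOOMScoreAdj writes value to /proc/<pid>/oom_score_adj; needs privileges.
func applyOOMScoreAdj(pid, value int) error {
	path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
	return ioutil.WriteFile(path, []byte(strconv.Itoa(value)), 0644)
}

func main() {
	if err := applyOOMScoreAdj(1234, -900); err != nil {
		fmt.Println("failed to apply oom score adj:", err)
	}
}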

View File

@@ -1759,7 +1759,7 @@ func (kl *Kubelet) admitPods(allPods []*api.Pod, podSyncTypes map[types.UID]Sync
// three channels (file, apiserver, and http) and creates a union of them. For
// any new change seen, will run a sync against desired state and running state. If
// no changes are seen to the configuration, will synchronize the last known desired
-// state every sync_frequency seconds. Never returns.
+// state every sync-frequency seconds. Never returns.
func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) {
glog.Info("Starting kubelet main sync loop.")
for {
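A schematic version of the loop shape that comment describes (react to updates as they arrive, otherwise resync every sync-frequency), using a generic string channel in place of the kubelet's PodUpdate plumbing:

package main

import (
	"fmt"
	"time"
)

// syncLoopSketch applies updates as they arrive and resyncs the last known
// desired state on a fixed period when nothing changes.
func syncLoopSketch(updates <-chan string, syncFrequency time.Duration, sync func()) {
	ticker := time.NewTicker(syncFrequency)
	defer ticker.Stop()
	for {
		select {
		case u, ok := <-updates:
			if !ok {
				return
			}
			fmt.Println("applying update:", u)
			sync()
		case <-ticker.C:
			// No changes seen; resync the last known desired state.
			sync()
		}
	}
}

func main() {
	updates := make(chan string, 1)
	updates <- "pod added"
	close(updates)
	syncLoopSketch(updates, 100*time.Millisecond, func() { fmt.Println("sync") })
}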

View File

@@ -581,7 +581,7 @@ var iptablesHostNodePortChain iptables.Chain = "KUBE-NODEPORT-HOST"
// Ensure that the iptables infrastructure we use is set up. This can safely be called periodically.
func iptablesInit(ipt iptables.Interface) error {
// TODO: There is almost certainly room for optimization here. E.g. If
-// we knew the service_cluster_ip_range CIDR we could fast-track outbound packets not
+// we knew the service-cluster-ip-range CIDR we could fast-track outbound packets not
// destined for a service. There's probably more, help wanted.
// Danger - order of these rules matters here:
@@ -601,7 +601,7 @@ func iptablesInit(ipt iptables.Interface) error {
// the NodePort would take priority (incorrectly).
// This is unlikely (and would only affect outgoing traffic from the cluster to the load balancer, which seems
// doubly-unlikely), but we need to be careful to keep the rules in the right order.
-args := []string{ /* service_cluster_ip_range matching could go here */ }
+args := []string{ /* service-cluster-ip-range matching could go here */ }
args = append(args, "-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules")
if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesContainerPortalChain); err != nil {
return err
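A hypothetical illustration of the TODO in both hunks above: if the service cluster IP range were known, the rule jumping into the ClusterIP portal chain could match only packets destined for that range, fast-tracking everything else past it. The CIDR below is an assumed example, not a value from this commit:

package main

import "fmt"

func main() {
	// Assumed range; in practice it would come from a service-cluster-ip-range setting.
	serviceClusterIPRange := "10.0.0.0/16"

	// Match only traffic destined for the service range before the portal chain,
	// so other outbound packets skip the ClusterIP rules entirely.
	args := []string{"-d", serviceClusterIPRange}
	args = append(args, "-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules")
	fmt.Println(args)
}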