Merge pull request #100671 from Niekvdplas/spelling-mistakes

Fixed several spelling mistakes
Authored by Kubernetes Prow Robot on 2021-04-09 05:19:45 -07:00; committed by GitHub.
26 changed files with 27 additions and 27 deletions


@@ -169,7 +169,7 @@ func (jm *ControllerV2) sync(cronJobKey string) (*time.Duration, error) {
cronJob, err := jm.cronJobLister.CronJobs(ns).Get(name)
switch {
case errors.IsNotFound(err):
-// may be cronjob is deleted, dont need to requeue this key
+// may be cronjob is deleted, don't need to requeue this key
klog.V(4).InfoS("cronjob not found, may be it is deleted", "cronjob", klog.KRef(ns, name), "err", err)
return nil, nil
case err != nil:


@@ -889,7 +889,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
// induce disk pressure!
fakeClock.Step(1 * time.Minute)
summaryProvider.result = summaryStatsMaker("400Mi", "200Gi", podStats)
-// Dont reclaim any disk
+// Don't reclaim any disk
diskGC.summaryAfterGC = summaryStatsMaker("400Mi", "200Gi", podStats)
manager.synchronize(diskInfoProvider, activePodsFunc)


@@ -464,7 +464,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
kubeInformers.Start(wait.NeverStop)
klog.InfoS("Kubelet client is not nil")
} else {
-// we dont have a client to sync!
+// we don't have a client to sync!
nodeIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
nodeLister = corelisters.NewNodeLister(nodeIndexer)
nodeHasSynced = func() bool { return true }


@@ -144,7 +144,7 @@ func (p *Provider) RootFsStats() (*statsapi.FsStats, error) {
}
// Get the root container stats's timestamp, which will be used as the
-// imageFs stats timestamp. Dont force a stats update, as we only want the timestamp.
+// imageFs stats timestamp. Don't force a stats update, as we only want the timestamp.
rootStats, err := getCgroupStats(p.cadvisor, "/", false)
if err != nil {
return nil, fmt.Errorf("failed to get root container stats: %v", err)


@@ -111,7 +111,7 @@ func (p *pvcEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.Scope
}
// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
-// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
func (p *pvcEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
return []corev1.ScopedResourceSelectorRequirement{}, nil
}


@@ -190,7 +190,7 @@ func (p *podEvaluator) MatchingScopes(item runtime.Object, scopeSelectors []core
}
// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
-// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
func (p *podEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
uncoveredScopes := []corev1.ScopedResourceSelectorRequirement{}
for _, selector := range limitedScopes {


@@ -90,7 +90,7 @@ func (p *serviceEvaluator) MatchingScopes(item runtime.Object, scopes []corev1.S
}
// UncoveredQuotaScopes takes the input matched scopes which are limited by configuration and the matched quota scopes.
-// It returns the scopes which are in limited scopes but dont have a corresponding covering quota scope
+// It returns the scopes which are in limited scopes but don't have a corresponding covering quota scope
func (p *serviceEvaluator) UncoveredQuotaScopes(limitedScopes []corev1.ScopedResourceSelectorRequirement, matchedQuotaScopes []corev1.ScopedResourceSelectorRequirement) ([]corev1.ScopedResourceSelectorRequirement, error) {
return []corev1.ScopedResourceSelectorRequirement{}, nil
}


@@ -409,7 +409,7 @@ func (pl *ServiceAffinity) ScoreExtensions() framework.ScoreExtensions {
// addUnsetLabelsToMap backfills missing values with values we find in a map.
func addUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet labels.Set) {
for _, l := range labelsToAdd {
-// if the label is already there, dont overwrite it.
+// if the label is already there, don't overwrite it.
if _, exists := aL[l]; exists {
continue
}


@@ -719,7 +719,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
scache := internalcache.New(10*time.Minute, stop)
-// Design the baseline for the pods, and we will make nodes that dont fit it later.
+// Design the baseline for the pods, and we will make nodes that don't fit it later.
var cpu = int64(4)
var mem = int64(500)
podWithTooBigResourceRequests := podWithResources("bar", "", v1.ResourceList{


@@ -117,7 +117,7 @@ func calculateEmptyDirMemorySize(nodeAllocatableMemory *resource.Quantity, spec
return sizeLimit
}
-// size limit defaults to node allocatable (pods cant consume more memory than all pods)
+// size limit defaults to node allocatable (pods can't consume more memory than all pods)
sizeLimit = nodeAllocatableMemory
zero := resource.MustParse("0")


@@ -477,7 +477,7 @@ func (c *sioClient) WaitForDetachedDevice(token string) error {
go func() {
klog.V(4).Info(log("waiting for volume %s to be unmapped/detached", token))
}()
-// cant find vol id, then ok.
+// can't find vol id, then ok.
if _, ok := devMap[token]; !ok {
return nil
}


@@ -43,7 +43,7 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/vsphere-volume/volumeDevices/
tmpVDir, err := utiltesting.MkTmpdir("vsphereBlockVolume")
if err != nil {
t.Fatalf("cant' make a temp dir: %s", err)
t.Fatalf("can't make a temp dir: %s", err)
}
// deferred clean up
defer os.RemoveAll(tmpVDir)
@@ -80,7 +80,7 @@ func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
func TestGetPodAndPluginMapPaths(t *testing.T) {
tmpVDir, err := utiltesting.MkTmpdir("vsphereBlockVolume")
if err != nil {
t.Fatalf("cant' make a temp dir: %s", err)
t.Fatalf("can't make a temp dir: %s", err)
}
// deferred clean up
defer os.RemoveAll(tmpVDir)