CLE (Coordinated Leader Election) controller and client changes
@@ -28,8 +28,9 @@ import (
    "sort"
    "time"

    "github.com/blang/semver/v4"
    "github.com/spf13/cobra"

    v1 "k8s.io/api/coordination/v1"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
@@ -78,7 +79,9 @@ import (
    kubectrlmgrconfig "k8s.io/kubernetes/pkg/controller/apis/config"
    garbagecollector "k8s.io/kubernetes/pkg/controller/garbagecollector"
    serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
    kubefeatures "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/serviceaccount"
    "k8s.io/utils/clock"
)

func init() {
@@ -289,6 +292,30 @@ func Run(ctx context.Context, c *config.CompletedConfig) error {
                return startSATokenControllerInit(ctx, controllerContext, controllerName)
            }
        }
        ver, err := semver.ParseTolerant(version.Get().String())
        if err != nil {
            return err
        }

        if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CoordinatedLeaderElection) {
            // Start component identity lease management
            leaseCandidate, err := leaderelection.NewCandidate(
                c.Client,
                id,
                "kube-system",
                "kube-controller-manager",
                clock.RealClock{},
                ver.FinalizeVersion(),
                ver.FinalizeVersion(), // TODO: Use compatibility version when it's available
                []v1.CoordinatedLeaseStrategy{"OldestEmulationVersion"},
            )
            if err != nil {
                return err
            }
            healthzHandler.AddHealthChecker(healthz.NewInformerSyncHealthz(leaseCandidate.InformerFactory))

            go leaseCandidate.Run(ctx)
        }

        // Start the main lock
        go leaderElectAndRun(ctx, c, id, electionChecker,
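The NewCandidate call introduced above is new in this change and its arguments are not otherwise explained, so here is an annotated restatement of the pattern both components follow. This is a sketch only: the parameter roles are my reading of the two call sites in this diff (client, identity, namespace, target lease name, clock, binary version, emulation version, preferred strategies), the function name runCandidateLease and the lease name "my-component" are hypothetical, and the released client-go signature may differ from what this commit shows.

    // Sketch of the candidate-lease pattern used above; not the upstream implementation.
    package main

    import (
        "context"

        coordinationv1 "k8s.io/api/coordination/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/leaderelection"
        "k8s.io/utils/clock"
    )

    func runCandidateLease(ctx context.Context, client kubernetes.Interface, identity string) error {
        candidate, err := leaderelection.NewCandidate(
            client,
            identity,       // the same per-process identity used for the main lock ("id" above)
            "kube-system",  // namespace the candidate lease is written to
            "my-component", // name of the lease this process wants to lead (hypothetical)
            clock.RealClock{},
            "1.31.0", // binary version of this build
            "1.31.0", // emulation version; the controller-manager reuses the binary version for now
            []coordinationv1.CoordinatedLeaseStrategy{"OldestEmulationVersion"}, // preferred election strategies
        )
        if err != nil {
            return err
        }
        // Run keeps the candidate lease present and renewed until the context is cancelled;
        // both call sites above start it in a goroutine and add InformerFactory to the health checks.
        go candidate.Run(ctx)
        return nil
    }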
@@ -886,6 +913,7 @@ func leaderElectAndRun(ctx context.Context, c *config.CompletedConfig, lockIdent
        Callbacks:   callbacks,
        WatchDog:    electionChecker,
        Name:        leaseName,
        Coordinated: utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CoordinatedLeaderElection),
    })

    panic("unreachable")
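For context, the Coordinated field set above is the only knob this change adds to the client-side configuration; the rest of the struct is the long-standing client-go leader-election API. Below is a minimal sketch of how a caller fills it in, assuming that existing API, with the lease timings and the "my-component" names purely illustrative.

    package main

    import (
        "context"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/leaderelection"
        "k8s.io/client-go/tools/leaderelection/resourcelock"
    )

    func runWithLease(ctx context.Context, client kubernetes.Interface, identity string, coordinated bool) {
        lock := &resourcelock.LeaseLock{
            LeaseMeta:  metav1.ObjectMeta{Namespace: "kube-system", Name: "my-component"},
            Client:     client.CoordinationV1(),
            LockConfig: resourcelock.ResourceLockConfig{Identity: identity},
        }
        leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
            Lock:          lock,
            LeaseDuration: 15 * time.Second,
            RenewDeadline: 10 * time.Second,
            RetryPeriod:   2 * time.Second,
            Callbacks: leaderelection.LeaderCallbacks{
                OnStartedLeading: func(ctx context.Context) { /* start the component's controllers */ },
                OnStoppedLeading: func() { /* stop work; the real callers exit here */ },
            },
            Name: "my-component",
            // New in this change: when true, the elector defers the choice of leader to the
            // coordinated-leader-election controller instead of racing to acquire the lease itself.
            Coordinated: coordinated,
        })
    }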
@@ -24,8 +24,9 @@ import (
    "os"
    goruntime "runtime"

    "github.com/blang/semver/v4"
    "github.com/spf13/cobra"

    coordinationv1 "k8s.io/api/coordination/v1"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apiserver/pkg/authentication/authenticator"
@@ -56,8 +57,11 @@ import (
    "k8s.io/component-base/version"
    "k8s.io/component-base/version/verflag"
    "k8s.io/klog/v2"
    "k8s.io/utils/clock"

    schedulerserverconfig "k8s.io/kubernetes/cmd/kube-scheduler/app/config"
    "k8s.io/kubernetes/cmd/kube-scheduler/app/options"
    kubefeatures "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/scheduler"
    kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
    "k8s.io/kubernetes/pkg/scheduler/apis/config/latest"
@@ -207,6 +211,34 @@ func Run(ctx context.Context, cc *schedulerserverconfig.CompletedConfig, sched *
    })
    readyzChecks = append(readyzChecks, handlerSyncCheck)

    if cc.LeaderElection != nil && utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CoordinatedLeaderElection) {
        binaryVersion, err := semver.ParseTolerant(utilversion.DefaultComponentGlobalsRegistry.EffectiveVersionFor(utilversion.DefaultKubeComponent).BinaryVersion().String())
        if err != nil {
            return err
        }
        emulationVersion, err := semver.ParseTolerant(utilversion.DefaultComponentGlobalsRegistry.EffectiveVersionFor(utilversion.DefaultKubeComponent).EmulationVersion().String())
        if err != nil {
            return err
        }

        // Start component identity lease management
        leaseCandidate, err := leaderelection.NewCandidate(
            cc.Client,
            cc.LeaderElection.Lock.Identity(),
            "kube-system",
            "kube-scheduler",
            clock.RealClock{},
            binaryVersion.FinalizeVersion(),
            emulationVersion.FinalizeVersion(),
            []coordinationv1.CoordinatedLeaseStrategy{"OldestEmulationVersion"},
        )
        if err != nil {
            return err
        }
        readyzChecks = append(readyzChecks, healthz.NewInformerSyncHealthz(leaseCandidate.InformerFactory))
        go leaseCandidate.Run(ctx)
    }

    // Start up the healthz server.
    if cc.SecureServing != nil {
        handler := buildHandlerChain(newHealthEndpointsAndMetricsHandler(&cc.ComponentConfig, cc.InformerFactory, isLeader, checks, readyzChecks), cc.Authentication.Authenticator, cc.Authorization.Authorizer)
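Unlike the controller-manager, the scheduler derives the two versions from the component globals registry rather than reusing the binary version twice; semver.ParseTolerant and FinalizeVersion only normalize them into plain release strings. A tiny standalone illustration of that normalization, assuming blang/semver v4 behaves as its documentation describes:

    package main

    import (
        "fmt"

        "github.com/blang/semver/v4"
    )

    func main() {
        // ParseTolerant accepts a leading "v" and other loose version forms.
        v, err := semver.ParseTolerant("v1.31.0-alpha.2")
        if err != nil {
            panic(err)
        }
        // FinalizeVersion drops pre-release/build metadata, yielding "1.31.0",
        // the form passed into NewCandidate above.
        fmt.Println(v.FinalizeVersion())
    }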
@@ -245,6 +277,9 @@ func Run(ctx context.Context, cc *schedulerserverconfig.CompletedConfig, sched *
    }
    // If leader election is enabled, runCommand via LeaderElector until done and exit.
    if cc.LeaderElection != nil {
        if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CoordinatedLeaderElection) {
            cc.LeaderElection.Coordinated = true
        }
        cc.LeaderElection.Callbacks = leaderelection.LeaderCallbacks{
            OnStartedLeading: func(ctx context.Context) {
                close(waitingForLeader)
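Both components register their candidate under the "OldestEmulationVersion" strategy. The strategy itself lives in the election controller, not in this diff; my reading of the name is that, among candidates, the one with the oldest emulation version is preferred, so a mixed-version control plane stays led by its most conservative member. A sketch of that comparison under that assumption:

    package main

    import (
        "fmt"

        "github.com/blang/semver/v4"
    )

    // olderEmulation returns whichever of two emulation versions is older.
    // Illustration of the selection rule the strategy name suggests; not the
    // upstream implementation.
    func olderEmulation(a, b string) (string, error) {
        va, err := semver.ParseTolerant(a)
        if err != nil {
            return "", err
        }
        vb, err := semver.ParseTolerant(b)
        if err != nil {
            return "", err
        }
        if vb.LT(va) {
            return b, nil
        }
        return a, nil
    }

    func main() {
        winner, _ := olderEmulation("1.31.0", "1.30.0")
        fmt.Println(winner) // prints 1.30.0
    }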