Fix indentation/spacing in comments to render correctly in godoc

Jordan Liggitt
2022-12-17 17:31:05 -05:00
parent 2ca74f2885
commit 78cb3862f1
59 changed files with 163 additions and 216 deletions
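For background (not part of the commit itself): Go 1.19's gofmt rewrites doc comments, and godoc/pkg.go.dev renders an indented span of comment lines as a preformatted block, so the old hanging-indent continuation lines showed up as stray code blocks and gofmt split them apart with blank comment lines. The rewritten comments below use plain continuation lines, indented reference blocks, or numbered lists instead. A minimal Go sketch of the conventions assumed here (spacing approximate):

```go
// Package example illustrates the doc-comment conventions this commit
// relies on (spacing is approximate).
//
// An indented comment line renders as a preformatted block on pkg.go.dev:
//
//	this line is shown in a fixed-width block
//
// A numbered list keeps a wrapped rule attached to its item when the
// continuation is indented to line up with the item text:
//  1. Empty toleration.effect means to match all taint effects,
//     otherwise taint effect must equal to toleration.effect.
//  2. If toleration.operator is 'Exists', it means to match all taint values.
package example
```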

View File

@@ -274,8 +274,9 @@ type CertificateSigningRequestList struct {
}
// KeyUsage specifies valid usage contexts for keys.
// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
// See:
//
// https://tools.ietf.org/html/rfc5280#section-4.2.1.3
// https://tools.ietf.org/html/rfc5280#section-4.2.1.12
//
// +enum

View File

@@ -124,8 +124,10 @@ message CertificateSigningRequestSpec {
// allowedUsages specifies a set of usage contexts the key will be
// valid for.
// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
// https://tools.ietf.org/html/rfc5280#section-4.2.1.12
// See:
// https://tools.ietf.org/html/rfc5280#section-4.2.1.3
// https://tools.ietf.org/html/rfc5280#section-4.2.1.12
//
// Valid values are:
// "signing",
// "digital signature",

View File

@@ -89,8 +89,10 @@ type CertificateSigningRequestSpec struct {
// allowedUsages specifies a set of usage contexts the key will be
// valid for.
// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
// https://tools.ietf.org/html/rfc5280#section-4.2.1.12
// See:
// https://tools.ietf.org/html/rfc5280#section-4.2.1.3
// https://tools.ietf.org/html/rfc5280#section-4.2.1.12
//
// Valid values are:
// "signing",
// "digital signature",
@@ -229,8 +231,9 @@ type CertificateSigningRequestList struct {
}
// KeyUsages specifies valid usage contexts for keys.
// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
// See:
//
// https://tools.ietf.org/html/rfc5280#section-4.2.1.3
// https://tools.ietf.org/html/rfc5280#section-4.2.1.12
type KeyUsage string

View File

@@ -55,7 +55,7 @@ var map_CertificateSigningRequestSpec = map[string]string{
"request": "Base64-encoded PKCS#10 CSR data",
"signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.",
"expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.",
"usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"",
"usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See:\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.3\n\thttps://tools.ietf.org/html/rfc5280#section-4.2.1.12\n\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"",
"username": "Information about the requesting user. See user.Info interface for details.",
"uid": "UID information about the requesting user. See user.Info interface for details.",
"groups": "Group information about the requesting user. See user.Info interface for details.",

View File

@@ -28,15 +28,13 @@ func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool {
// ToleratesTaint checks if the toleration tolerates the taint.
// The matching follows the rules below:
// (1) Empty toleration.effect means to match all taint effects,
//
// otherwise taint effect must equal to toleration.effect.
//
// (2) If toleration.operator is 'Exists', it means to match all taint values.
// (3) Empty toleration.key means to match all taint keys.
//
// If toleration.key is empty, toleration.operator must be 'Exists';
// this combination means to match all taint values and all taint keys.
// 1. Empty toleration.effect means to match all taint effects,
// otherwise taint effect must equal to toleration.effect.
// 2. If toleration.operator is 'Exists', it means to match all taint values.
// 3. Empty toleration.key means to match all taint keys.
// If toleration.key is empty, toleration.operator must be 'Exists';
// this combination means to match all taint values and all taint keys.
func (t *Toleration) ToleratesTaint(taint *Taint) bool {
if len(t.Effect) > 0 && t.Effect != taint.Effect {
return false
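A hedged usage sketch of the three rules above, using the published core/v1 types (the key and value choices are illustrative):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Operator Exists with an empty effect: rule 1 matches any taint effect
	// and rule 2 matches any taint value, so only the key has to line up.
	tol := corev1.Toleration{Key: "dedicated", Operator: corev1.TolerationOpExists}

	taint := corev1.Taint{Key: "dedicated", Value: "gpu", Effect: corev1.TaintEffectNoSchedule}
	fmt.Println(tol.ToleratesTaint(&taint)) // true

	// A different key is not tolerated unless the toleration key is empty (rule 3).
	other := corev1.Taint{Key: "spot", Effect: corev1.TaintEffectNoExecute}
	fmt.Println(tol.ToleratesTaint(&other)) // false
}
```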

View File

@@ -96,7 +96,7 @@ func getBaseEnv() (*cel.Env, error) {
// CompilationResult for each ValidationRule, or an error. declType is expected to be a CEL DeclType corresponding
// to the structural schema.
// Each CompilationResult may contain:
// / - non-nil Program, nil Error: The program was compiled successfully
// - non-nil Program, nil Error: The program was compiled successfully
// - nil Program, non-nil Error: Compilation resulted in an error
// - nil Program, nil Error: The provided rule was empty so compilation was not attempted
//
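A minimal sketch of consuming the three result states listed above; the local struct only mirrors the Program/Error fields named in the comment and is not the real CompilationResult type:

```go
package main

import "fmt"

// compilationResult stands in for the real type; only the two fields the
// doc comment names are modeled here.
type compilationResult struct {
	Program interface{} // non-nil once a rule compiles
	Error   error       // non-nil when compilation fails
}

func main() {
	results := []compilationResult{
		{Program: struct{}{}},           // compiled successfully
		{Error: fmt.Errorf("bad rule")}, // compilation error
		{},                              // empty rule, compilation not attempted
	}
	for i, r := range results {
		switch {
		case r.Error != nil:
			fmt.Printf("rule %d: compile error: %v\n", i, r.Error)
		case r.Program != nil:
			fmt.Printf("rule %d: compiled\n", i)
		default:
			fmt.Printf("rule %d: empty, skipped\n", i)
		}
	}
}
```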

View File

@@ -69,9 +69,8 @@ func NewDefaultTestServerOptions() *TestServerInstanceOptions {
// and location of the tmpdir are returned.
//
// Note: we return a tear-down func instead of a stop channel because the later will leak temporary
//
// files that because Golang testing's call to os.Exit will not give a stop channel go routine
// enough time to remove temporary files.
// files that because Golang testing's call to os.Exit will not give a stop channel go routine
// enough time to remove temporary files.
func StartTestServer(t Logger, _ *TestServerInstanceOptions, customFlags []string, storageConfig *storagebackend.Config) (result TestServer, err error) {
stopCh := make(chan struct{})
var errCh chan error

View File

@@ -40,8 +40,7 @@ var (
// IsListType returns true if the provided Object has a slice called Items.
// TODO: Replace the code in this check with an interface comparison by
//
// creating and enforcing that lists implement a list accessor.
// creating and enforcing that lists implement a list accessor.
func IsListType(obj runtime.Object) bool {
switch t := obj.(type) {
case runtime.Unstructured:
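A short usage sketch, assuming the helper is the exported meta.IsListType from k8s.io/apimachinery/pkg/api/meta:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
)

func main() {
	// PodList carries an Items slice, so it is treated as a list type.
	fmt.Println(meta.IsListType(&corev1.PodList{})) // true
	// A single Pod is not a list.
	fmt.Println(meta.IsListType(&corev1.Pod{})) // false
}
```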

View File

@@ -149,14 +149,12 @@ type Requirement struct {
// NewRequirement is the constructor for a Requirement.
// If any of these rules is violated, an error is returned:
// (1) The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist.
// (2) If the operator is In or NotIn, the values set must be non-empty.
// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value.
// (4) If the operator is Exists or DoesNotExist, the value set must be empty.
// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer.
// (6) The key is invalid due to its length, or sequence
//
// of characters. See validateLabelKey for more details.
// 1. The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist.
// 2. If the operator is In or NotIn, the values set must be non-empty.
// 3. If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value.
// 4. If the operator is Exists or DoesNotExist, the value set must be empty.
// 5. If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer.
// 6. The key is invalid due to its length, or sequence of characters. See validateLabelKey for more details.
//
// The empty string is a valid value in the input values set.
// Returned error, if not nil, is guaranteed to be an aggregated field.ErrorList
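A sketch of how rules 2 and 4 above surface to callers, assuming the usual labels/selection packages from apimachinery:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// Rule 2: In requires a non-empty value set, so this succeeds.
	req, err := labels.NewRequirement("environment", selection.In, []string{"prod", "staging"})
	fmt.Println(req, err) // environment in (prod,staging) <nil>

	// Rule 4: Exists requires an empty value set, so this returns an error.
	_, err = labels.NewRequirement("environment", selection.Exists, []string{"prod"})
	fmt.Println(err != nil) // true
}
```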
@@ -213,22 +211,15 @@ func (r *Requirement) hasValue(value string) bool {
// Matches returns true if the Requirement matches the input Labels.
// There is a match in the following cases:
// (1) The operator is Exists and Labels has the Requirement's key.
// (2) The operator is In, Labels has the Requirement's key and Labels'
//
// value for that key is in Requirement's value set.
//
// (3) The operator is NotIn, Labels has the Requirement's key and
//
// Labels' value for that key is not in Requirement's value set.
//
// (4) The operator is DoesNotExist or NotIn and Labels does not have the
//
// Requirement's key.
//
// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has
//
// the Requirement's key and the corresponding value satisfies mathematical inequality.
// 1. The operator is Exists and Labels has the Requirement's key.
// 2. The operator is In, Labels has the Requirement's key and Labels'
// value for that key is in Requirement's value set.
// 3. The operator is NotIn, Labels has the Requirement's key and
// Labels' value for that key is not in Requirement's value set.
// 4. The operator is DoesNotExist or NotIn and Labels does not have the
// Requirement's key.
// 5. The operator is GreaterThanOperator or LessThanOperator, and Labels has
// the Requirement's key and the corresponding value satisfies mathematical inequality.
func (r *Requirement) Matches(ls Labels) bool {
switch r.operator {
case selection.In, selection.Equals, selection.DoubleEquals:
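A companion sketch for the match cases above; labels.Set is assumed to implement the Labels interface that Matches expects:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	req, _ := labels.NewRequirement("tier", selection.In, []string{"frontend"})

	fmt.Println(req.Matches(labels.Set{"tier": "frontend"})) // true: case 2
	fmt.Println(req.Matches(labels.Set{"tier": "backend"}))  // false: value outside the set
	fmt.Println(req.Matches(labels.Set{}))                   // false: key absent
}
```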
@@ -872,15 +863,14 @@ func (p *Parser) parseExactValue() (sets.String, error) {
// "x in (foo,,baz),y,z notin ()"
//
// Note:
//
// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the
// VALUEs in its requirement
// (2) Exclusion - " notin " - denotes that the KEY is not equal to any
// of the VALUEs in its requirement or does not exist
// (3) The empty string is a valid VALUE
// (4) A requirement with just a KEY - as in "y" above - denotes that
// the KEY exists and can be any VALUE.
// (5) A requirement with just !KEY requires that the KEY not exist.
// 1. Inclusion - " in " - denotes that the KEY exists and is equal to any of the
// VALUEs in its requirement
// 2. Exclusion - " notin " - denotes that the KEY is not equal to any
// of the VALUEs in its requirement or does not exist
// 3. The empty string is a valid VALUE
// 4. A requirement with just a KEY - as in "y" above - denotes that
// the KEY exists and can be any VALUE.
// 5. A requirement with just !KEY requires that the KEY not exist.
func Parse(selector string, opts ...field.PathOption) (Selector, error) {
parsedSelector, err := parse(selector, field.ToPath(opts...))
if err == nil {
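A usage sketch for the example selector quoted above (package path assumed to be k8s.io/apimachinery/pkg/labels):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	sel, err := labels.Parse("x in (foo,,baz),y,z notin ()")
	if err != nil {
		panic(err)
	}
	// x == "" is allowed because the empty string is a valid VALUE (note 3),
	// "y" alone only requires the key to exist (note 4), and z is absent.
	fmt.Println(sel.Matches(labels.Set{"x": "", "y": "anything"})) // true
}
```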

View File

@@ -191,8 +191,7 @@ func (gv GroupVersion) Identifier() string {
// if none of the options match the group. It prefers a match to group and version over just group.
// TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme.
// TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion)
//
// in fewer places.
// in fewer places.
func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) {
for _, gvk := range kinds {
if gvk.Group == gv.Group && gvk.Version == gv.Version {
@@ -240,8 +239,7 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource {
// GroupVersions can be used to represent a set of desired group versions.
// TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme.
// TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion)
//
// in fewer places.
// in fewer places.
type GroupVersions []GroupVersion
// Identifier implements runtime.GroupVersioner interface.

View File

@@ -118,8 +118,7 @@ func (s *Scheme) Converter() *conversion.Converter {
// API group and version that would never be updated.
//
// TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into
//
// every version with particular schemas. Resolve this method at that point.
// every version with particular schemas. Resolve this method at that point.
func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) {
s.addObservedVersion(version)
s.AddKnownTypes(version, types...)

View File

@@ -259,8 +259,7 @@ func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo {
// invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder().
//
// TODO: make this call exist only in pkg/api, and initialize it with the set of default versions.
//
// All other callers will be forced to request a Codec directly.
// All other callers will be forced to request a Codec directly.
func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec {
return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner)
}

View File

@@ -88,8 +88,7 @@ func toYAML(v interface{}) (string, error) {
// supports JSON merge patch semantics.
//
// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts.
//
// Make sure the unmarshaling of left and right are consistent (e.g. use the same library).
// Make sure the unmarshaling of left and right are consistent (e.g. use the same library).
func HasConflicts(left, right interface{}) (bool, error) {
switch typedLeft := left.(type) {
case map[string]interface{}:
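A brief sketch of the conflict check described above, assuming this is the apimachinery mergepatch package:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/mergepatch"
)

func main() {
	left := map[string]interface{}{"replicas": int64(2), "image": "nginx"}
	right := map[string]interface{}{"replicas": int64(3)}

	// "replicas" is set to different values on both sides, so this is a conflict.
	conflict, err := mergepatch.HasConflicts(left, right)
	fmt.Println(conflict, err) // true <nil>

	// Per the NOTE above, int64(2) vs int(2) would also count as a conflict,
	// so both sides here construct their numbers the same way.
	disjoint, _ := mergepatch.HasConflicts(
		map[string]interface{}{"image": "nginx"},
		map[string]interface{}{"replicas": int64(3)},
	)
	fmt.Println(disjoint) // false: keys present on only one side don't conflict
}
```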

View File

@@ -35,8 +35,7 @@ import (
// - Replies with 304 Not Modified, if If-None-Match header matches hash
//
// hash should be the value of calculateETag on object. If hash is empty, then
//
// the object is simply serialized without E-Tag functionality
// the object is simply serialized without E-Tag functionality
func ServeHTTPWithETag(
object runtime.Object,
hash string,

View File

@@ -38,9 +38,8 @@ import (
// DelegatingAuthorizationOptions provides an easy way for composing API servers to delegate their authorization to
// the root kube API server.
// WARNING: never assume that every authenticated incoming request already does authorization.
//
// The aggregator in the kube API server does this today, but this behaviour is not
// guaranteed in the future.
// The aggregator in the kube API server does this today, but this behaviour is not
// guaranteed in the future.
type DelegatingAuthorizationOptions struct {
// RemoteKubeConfigFile is the file to use to connect to a "normal" kube API server which hosts the
// SubjectAccessReview.authorization.k8s.io endpoint for checking tokens.

View File

@@ -47,8 +47,7 @@ func NewETCDLatencyTracker(delegate clientv3.KV) clientv3.KV {
// tracking function TrackStorageLatency is thread safe.
//
// NOTE: Compact is an asynchronous process and is not associated with
//
// any request, so we will not be tracking its latency.
// any request, so we will not be tracking its latency.
type clientV3KVLatencyTracker struct {
clientv3.KV
}

View File

@@ -108,11 +108,12 @@ const epsilon = 0.0000001
// if possible otherwise returns an error saying why it is impossible.
// `allocs` sums to `requiredSum`.
// For each J in [0, len(classes)):
// (1) `classes[J].lowerBound <= allocs[J] <= classes[J].upperBound` and
// (2) exactly one of the following is true:
// (2a) `allocs[J] == fairProp * classes[J].target`,
// (2b) `allocs[J] == classes[J].lowerBound && classes[J].lowerBound > fairProp * classes[J].target`, or
// (2c) `allocs[J] == classes[J].upperBound && classes[J].upperBound < fairProp * classes[J].target`.
// 1. `classes[J].lowerBound <= allocs[J] <= classes[J].upperBound` and
// 2. exactly one of the following is true:
// 2a. `allocs[J] == fairProp * classes[J].target`,
// 2b. `allocs[J] == classes[J].lowerBound && classes[J].lowerBound > fairProp * classes[J].target`, or
// 2c. `allocs[J] == classes[J].upperBound && classes[J].upperBound < fairProp * classes[J].target`.
//
// Each allocProblemItem is required to have `target >= lowerBound >= 0` and `upperBound >= lowerBound`.
// A target smaller than MinTarget is treated as if it were MinTarget.
func computeConcurrencyAllocation(requiredSum int, classes []allocProblemItem) ([]float64, float64, error) {
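A hedged sketch, not the real solver: for one candidate fair proportion, each class receives fairProp*target clamped into its bounds (rules 2a-2c), and computeConcurrencyAllocation searches for the fairProp whose clamped allocations sum to requiredSum. The struct below only mirrors the field names used in the comment:

```go
package main

import "fmt"

// allocProblemItem here just mirrors the fields named in the doc comment.
type allocProblemItem struct {
	target, lowerBound, upperBound float64
}

// allocationsFor applies rules 1 and 2a-2c for one candidate fairProp.
func allocationsFor(fairProp float64, classes []allocProblemItem) []float64 {
	allocs := make([]float64, len(classes))
	for j, c := range classes {
		a := fairProp * c.target // rule 2a
		if a < c.lowerBound {
			a = c.lowerBound // rule 2b
		}
		if a > c.upperBound {
			a = c.upperBound // rule 2c
		}
		allocs[j] = a
	}
	return allocs
}

func main() {
	classes := []allocProblemItem{
		{target: 10, lowerBound: 1, upperBound: 4},
		{target: 20, lowerBound: 1, upperBound: 30},
	}
	// With fairProp = 0.5 the first class is capped at its upperBound (2c)
	// and the second gets 0.5*20 = 10 (2a), summing to 14.
	fmt.Println(allocationsFor(0.5, classes)) // [4 10]
}
```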

View File

@@ -40,8 +40,7 @@ type waitGroupCounter struct {
}
// compile time assertion that waitGroupCounter meets requirements
//
// of GoRoutineCounter
// of GoRoutineCounter
var _ counter.GoRoutineCounter = (*waitGroupCounter)(nil)
func (wgc *waitGroupCounter) Add(delta int) {
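The var _ assertion above is Go's compile-time interface check idiom; a generic sketch, with a hypothetical Adder interface standing in for counter.GoRoutineCounter:

```go
package main

// Adder is a stand-in interface; the real one is counter.GoRoutineCounter.
type Adder interface {
	Add(delta int)
}

type waitGroupCounter struct{}

func (wgc *waitGroupCounter) Add(delta int) {}

// Assigning a typed nil pointer to a blank interface-typed variable costs
// nothing at runtime but fails to compile if *waitGroupCounter ever stops
// satisfying the interface.
var _ Adder = (*waitGroupCounter)(nil)

func main() {}
```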

View File

@@ -195,9 +195,8 @@ func (w *watchTracker) forgetWatch(identifier *watchIdentifier, index *indexValu
// GetInterestedWatchCount implements WatchTracker interface.
//
// TODO(wojtek-t): As of now, requestInfo for object creation (POST) doesn't
//
// contain the Name field set. Figure out if we can somehow get it for the
// more accurate cost estimation.
// contain the Name field set. Figure out if we can somehow get it for the
// more accurate cost estimation.
//
// TODO(wojtek-t): Figure out how to approach DELETECOLLECTION calls.
func (w *watchTracker) GetInterestedWatchCount(requestInfo *request.RequestInfo) int {

View File

@@ -82,8 +82,7 @@ func (e *events) UpdateWithEventNamespace(event *v1beta1.Event) (*v1beta1.Event,
// It returns the copy of the event that the server returns, or an error.
// The namespace and name of the target event is deduced from the event.
// The namespace must either match this event client's namespace, or this event client must
//
// have been created with the "" namespace.
// have been created with the "" namespace.
func (e *events) PatchWithEventNamespace(event *v1beta1.Event, data []byte) (*v1beta1.Event, error) {
if e.ns != "" && event.Namespace != e.ns {
return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns)

View File

@@ -52,8 +52,7 @@ type Interface interface {
// ClientContentConfig controls how RESTClient communicates with the server.
//
// TODO: ContentConfig will be updated to accept a Negotiator instead of a
//
// NegotiatedSerializer and NegotiatedSerializer will be removed.
// NegotiatedSerializer and NegotiatedSerializer will be removed.
type ClientContentConfig struct {
// AcceptContentTypes specifies the types the client will accept and is optional.
// If not set, ContentType will be used to define the Accept header

View File

@@ -101,8 +101,7 @@ func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions
// It guarantees you to see all events and in the order they happened.
// Due to this guarantee there is no way it can deal with 'Resource version too old error'. It will fail in this case.
// (See `UntilWithSync` if you'd prefer to recover from all the errors including RV too old by re-listing
//
// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.)
// those items. In normal code you should care about being level driven so you'd not care about not seeing all the edges.)
//
// The most frequent usage for Until would be a test where you want to verify exact order of events ("edges").
func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) {
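A hedged usage sketch of Until; the pods list-watch, namespace, and condition are illustrative, and cache.NewListWatchFromClient is the usual client-go helper:

```go
package main

import (
	"context"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForFirstDeletion blocks until a pod in ns is deleted, starting from rv.
// Until sees every event in order, but it fails rather than recovering from
// "resource version too old"; UntilWithSync relists in that case.
func waitForFirstDeletion(ctx context.Context, cs kubernetes.Interface, ns, rv string) (*watch.Event, error) {
	lw := cache.NewListWatchFromClient(cs.CoreV1().RESTClient(), "pods", ns, fields.Everything())
	return watchtools.Until(ctx, rv, lw, func(e watch.Event) (bool, error) {
		return e.Type == watch.Deleted, nil
	})
}

func main() {}
```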

View File

@@ -41,9 +41,9 @@ type FakeOpenAPIServer struct {
// API server.
//
// specsPath - Give a path to some test data organized so that each GroupVersion
// has its own OpenAPI V3 JSON file.
//
// has its own OpenAPI V3 JSON file.
// i.e. apps/v1beta1 is stored in <specsPath>/apps/v1beta1.json
// i.e. apps/v1beta1 is stored in <specsPath>/apps/v1beta1.json
func NewFakeOpenAPIV3Server(specsPath string) (*FakeOpenAPIServer, error) {
mux := &testMux{
counts: map[string]int{},

View File

@@ -60,9 +60,8 @@ type Logger interface {
// and location of the tmpdir are returned.
//
// Note: we return a tear-down func instead of a stop channel because the later will leak temporary
//
// files that because Golang testing's call to os.Exit will not give a stop channel go routine
// enough time to remove temporary files.
// files that because Golang testing's call to os.Exit will not give a stop channel go routine
// enough time to remove temporary files.
func StartTestServer(t Logger, customFlags []string) (result TestServer, err error) {
stopCh := make(chan struct{})
var errCh chan error

View File

@@ -109,8 +109,7 @@ func RewriteGeneratedGogoProtobufFile(name string, extractFn ExtractFunc, option
// as being "optional" (they may be nil on the wire). This allows protobuf to serialize a map or slice and
// properly discriminate between empty and nil (which is not possible in protobuf).
// TODO: move into upstream gogo-protobuf once https://github.com/gogo/protobuf/issues/181
//
// has agreement
// has agreement
func rewriteOptionalMethods(decl ast.Decl, isOptional OptionalFunc) {
switch t := decl.(type) {
case *ast.FuncDecl:

View File

@@ -97,9 +97,8 @@ func (r *lazyMetric) lazyInit(self kubeCollector, fqName string) {
// 2. if the metric is manually disabled via a CLI flag.
//
// Disclaimer: disabling a metric via a CLI flag has higher precedence than
//
// deprecation and will override show-hidden-metrics for the explicitly
// disabled metric.
// deprecation and will override show-hidden-metrics for the explicitly
// disabled metric.
func (r *lazyMetric) preprocessMetric(version semver.Version) {
disabledMetricsLock.RLock()
defer disabledMetricsLock.RUnlock()

View File

@@ -60,8 +60,7 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
// NewLazyMetricWithTimestamp is a helper of NewMetricWithTimestamp.
//
// Warning: the Metric 'm' must be the one created by NewLazyConstMetric(),
//
// otherwise, no stability guarantees would be offered.
// otherwise, no stability guarantees would be offered.
func NewLazyMetricWithTimestamp(t time.Time, m Metric) Metric {
if m == nil {
return nil

View File

@@ -19,8 +19,7 @@ package config
import internal "k8s.io/controller-manager/config"
// DefaultLeaderMigrationConfiguration returns the default LeaderMigrationConfiguration
//
// that is valid for this release of Kubernetes.
// that is valid for this release of Kubernetes.
func DefaultLeaderMigrationConfiguration() *internal.LeaderMigrationConfiguration {
return &internal.LeaderMigrationConfiguration{
LeaderName: "cloud-provider-extraction-migration",

View File

@@ -31,9 +31,8 @@ type LeaderMigrator struct {
}
// NewLeaderMigrator creates a LeaderMigrator with given config for the given component. component
//
// indicates which controller manager is requesting this leader migration, and it should be consistent
// with the component field of ControllerLeaderConfiguration.
// indicates which controller manager is requesting this leader migration, and it should be consistent
// with the component field of ControllerLeaderConfiguration.
func NewLeaderMigrator(config *internal.LeaderMigrationConfiguration, component string) *LeaderMigrator {
migratedControllers := make(map[string]bool)
for _, leader := range config.ControllerLeaders {

View File

@@ -175,8 +175,7 @@ const ServiceAnnotationLoadBalancerSSLNegotiationPolicy = "service.beta.kubernet
// ServiceAnnotationLoadBalancerBEProtocol is the annotation used on the service
// to specify the protocol spoken by the backend (pod) behind a listener.
// If `http` (default) or `https`, an HTTPS listener that terminates the
//
// connection and parses headers is created.
// connection and parses headers is created.
//
// If set to `ssl` or `tcp`, a "raw" SSL listener is used.
// If set to `http` and `aws-load-balancer-ssl-cert` is not used then

View File

@@ -1244,8 +1244,7 @@ func (c *Cloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBala
// syncElbListeners computes a plan to reconcile the desired vs actual state of the listeners on an ELB
// NOTE: there exists an O(nlgn) implementation for this function. However, as the default limit of
//
// listeners per elb is 100, this implementation is reduced from O(m*n) => O(n).
// listeners per elb is 100, this implementation is reduced from O(m*n) => O(n).
func syncElbListeners(loadBalancerName string, listeners []*elb.Listener, listenerDescriptions []*elb.ListenerDescription) ([]*elb.Listener, []*int64) {
foundSet := make(map[int]bool)
removals := []*int64{}