Generate and format files

- Run hack/update-codegen.sh
- Run hack/update-generated-device-plugin.sh
- Run hack/update-generated-protobuf.sh
- Run hack/update-generated-runtime.sh
- Run hack/update-generated-swagger-docs.sh
- Run hack/update-openapi-spec.sh
- Run hack/update-gofmt.sh

Signed-off-by: Davanum Srinivas <davanum@gmail.com>
This commit is contained in:
Davanum Srinivas 2022-07-19 20:54:13 -04:00
parent 4784b58e17
commit a9593d634c
No known key found for this signature in database
GPG Key ID: 80D83A796103BF59
451 changed files with 3281 additions and 2918 deletions

View File

@@ -1217,7 +1217,7 @@
"type": "object" "type": "object"
}, },
"io.k8s.api.apps.v1.StatefulSet": { "io.k8s.api.apps.v1.StatefulSet": {
"description": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "description": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
"properties": { "properties": {
"apiVersion": { "apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
@@ -5247,7 +5247,7 @@
"x-kubernetes-map-type": "atomic" "x-kubernetes-map-type": "atomic"
}, },
"io.k8s.api.core.v1.EndpointSubset": { "io.k8s.api.core.v1.EndpointSubset": {
"description": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]", "description": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]",
"properties": { "properties": {
"addresses": { "addresses": {
"description": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", "description": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.",
@@ -5274,7 +5274,7 @@
"type": "object" "type": "object"
}, },
"io.k8s.api.core.v1.Endpoints": { "io.k8s.api.core.v1.Endpoints": {
"description": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", "description": "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]",
"properties": { "properties": {
"apiVersion": { "apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
@@ -7631,7 +7631,7 @@
"type": "object" "type": "object"
}, },
"io.k8s.api.core.v1.PodIP": { "io.k8s.api.core.v1.PodIP": {
"description": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. Routable at least within the cluster.", "description": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n\n\tIP: An IP address allocated to the pod. Routable at least within the cluster.",
"properties": { "properties": {
"ip": { "ip": {
"description": "ip is an IP address (IPv4 or IPv6) assigned to the pod", "description": "ip is an IP address (IPv4 or IPv6) assigned to the pod",
@@ -10521,7 +10521,7 @@
] ]
}, },
"io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration": { "io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration": {
"description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?",
"properties": { "properties": {
"assuredConcurrencyShares": { "assuredConcurrencyShares": {
"description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.",
@@ -11070,7 +11070,7 @@
] ]
}, },
"io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration": { "io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration": {
"description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?",
"properties": { "properties": {
"assuredConcurrencyShares": { "assuredConcurrencyShares": {
"description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.", "description": "`assuredConcurrencyShares` (ACS) configures the execution limit, which is a limit on the number of requests of this priority level that may be exeucting at a given time. ACS must be a positive number. The server's concurrency limit (SCL) is divided among the concurrency-controlled priority levels in proportion to their assured concurrency shares. This produces the assured concurrency value (ACV) --- the number of requests that may be executing at a time --- for each such priority level:\n\n ACV(l) = ceil( SCL * ACS(l) / ( sum[priority levels k] ACS(k) ) )\n\nbigger numbers of ACS mean more reserved concurrent requests (at the expense of every other PL). This field has a default value of 30.",
@@ -14062,7 +14062,7 @@
"type": "object" "type": "object"
}, },
"io.k8s.apimachinery.pkg.api.resource.Quantity": { "io.k8s.apimachinery.pkg.api.resource.Quantity": {
"description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.",
"type": "string" "type": "string"
}, },
"io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup": { "io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup": {
@@ -15292,7 +15292,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
}, },
"io.k8s.apimachinery.pkg.util.intstr.IntOrString": { "io.k8s.apimachinery.pkg.util.intstr.IntOrString": {

View File

@@ -1621,7 +1621,7 @@
"x-kubernetes-map-type": "atomic" "x-kubernetes-map-type": "atomic"
}, },
"io.k8s.api.core.v1.EndpointSubset": { "io.k8s.api.core.v1.EndpointSubset": {
"description": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]", "description": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]",
"properties": { "properties": {
"addresses": { "addresses": {
"description": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", "description": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.",
@@ -1663,7 +1663,7 @@
"type": "object" "type": "object"
}, },
"io.k8s.api.core.v1.Endpoints": { "io.k8s.api.core.v1.Endpoints": {
"description": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", "description": "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]",
"properties": { "properties": {
"apiVersion": { "apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
@@ -4786,7 +4786,7 @@
"type": "object" "type": "object"
}, },
"io.k8s.api.core.v1.PodIP": { "io.k8s.api.core.v1.PodIP": {
"description": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. Routable at least within the cluster.", "description": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n\n\tIP: An IP address allocated to the pod. Routable at least within the cluster.",
"properties": { "properties": {
"ip": { "ip": {
"description": "ip is an IP address (IPv4 or IPv6) assigned to the pod", "description": "ip is an IP address (IPv4 or IPv6) assigned to the pod",
@@ -7779,7 +7779,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.api.resource.Quantity": { "io.k8s.apimachinery.pkg.api.resource.Quantity": {
"description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.",
"oneOf": [ "oneOf": [
{ {
"type": "string" "type": "string"
@@ -8953,7 +8953,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
}, },
"io.k8s.apimachinery.pkg.util.intstr.IntOrString": { "io.k8s.apimachinery.pkg.util.intstr.IntOrString": {

View File

@ -1547,7 +1547,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -1879,7 +1879,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -915,7 +915,7 @@
"type": "object" "type": "object"
}, },
"io.k8s.api.apps.v1.StatefulSet": { "io.k8s.api.apps.v1.StatefulSet": {
"description": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "description": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
"properties": { "properties": {
"apiVersion": { "apiVersion": {
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
@ -4824,7 +4824,7 @@
"type": "object" "type": "object"
}, },
"io.k8s.apimachinery.pkg.api.resource.Quantity": { "io.k8s.apimachinery.pkg.api.resource.Quantity": {
"description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.",
"oneOf": [ "oneOf": [
{ {
"type": "string" "type": "string"
@ -5946,7 +5946,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
}, },
"io.k8s.apimachinery.pkg.util.intstr.IntOrString": { "io.k8s.apimachinery.pkg.util.intstr.IntOrString": {

View File

@ -1249,7 +1249,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -837,7 +837,7 @@
"type": "object" "type": "object"
}, },
"io.k8s.apimachinery.pkg.api.resource.Quantity": { "io.k8s.apimachinery.pkg.api.resource.Quantity": {
"description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.",
"oneOf": [ "oneOf": [
{ {
"type": "string" "type": "string"
@ -1959,7 +1959,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -828,7 +828,7 @@
"type": "object" "type": "object"
}, },
"io.k8s.apimachinery.pkg.api.resource.Quantity": { "io.k8s.apimachinery.pkg.api.resource.Quantity": {
"description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.",
"oneOf": [ "oneOf": [
{ {
"type": "string" "type": "string"
@ -1950,7 +1950,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -3903,7 +3903,7 @@
"type": "object" "type": "object"
}, },
"io.k8s.apimachinery.pkg.api.resource.Quantity": { "io.k8s.apimachinery.pkg.api.resource.Quantity": {
"description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.",
"oneOf": [ "oneOf": [
{ {
"type": "string" "type": "string"
@ -5025,7 +5025,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
}, },
"io.k8s.apimachinery.pkg.util.intstr.IntOrString": { "io.k8s.apimachinery.pkg.util.intstr.IntOrString": {

View File

@ -1287,7 +1287,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -1181,7 +1181,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -1339,7 +1339,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -1303,7 +1303,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -251,7 +251,7 @@
] ]
}, },
"io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration": { "io.k8s.api.flowcontrol.v1beta1.LimitedPriorityLevelConfiguration": {
"description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?",
"properties": { "properties": {
"assuredConcurrencyShares": { "assuredConcurrencyShares": {
"default": 0, "default": 0,
@ -1749,7 +1749,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -251,7 +251,7 @@
] ]
}, },
"io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration": { "io.k8s.api.flowcontrol.v1beta2.LimitedPriorityLevelConfiguration": {
"description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?",
"properties": { "properties": {
"assuredConcurrencyShares": { "assuredConcurrencyShares": {
"default": 0, "default": 0,
@ -1749,7 +1749,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -1266,7 +1266,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -1983,7 +1983,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
}, },
"io.k8s.apimachinery.pkg.util.intstr.IntOrString": { "io.k8s.apimachinery.pkg.util.intstr.IntOrString": {

View File

@ -174,7 +174,7 @@
"type": "object" "type": "object"
}, },
"io.k8s.apimachinery.pkg.api.resource.Quantity": { "io.k8s.apimachinery.pkg.api.resource.Quantity": {
"description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.",
"oneOf": [ "oneOf": [
{ {
"type": "string" "type": "string"
@ -1239,7 +1239,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -1355,7 +1355,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
}, },
"io.k8s.apimachinery.pkg.util.intstr.IntOrString": { "io.k8s.apimachinery.pkg.util.intstr.IntOrString": {

View File

@ -1616,7 +1616,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -1152,7 +1152,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -1921,7 +1921,7 @@
"type": "object" "type": "object"
}, },
"io.k8s.apimachinery.pkg.api.resource.Quantity": { "io.k8s.apimachinery.pkg.api.resource.Quantity": {
"description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.",
"oneOf": [ "oneOf": [
{ {
"type": "string" "type": "string"
@ -3043,7 +3043,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -113,7 +113,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.api.resource.Quantity": { "io.k8s.apimachinery.pkg.api.resource.Quantity": {
"description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.",
"oneOf": [ "oneOf": [
{ {
"type": "string" "type": "string"
@ -1235,7 +1235,7 @@
] ]
}, },
"io.k8s.apimachinery.pkg.runtime.RawExtension": { "io.k8s.apimachinery.pkg.runtime.RawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
"type": "object" "type": "object"
} }
}, },

View File

@ -94,8 +94,9 @@ func NewDefaultTestServerOptions() *TestServerInstanceOptions {
// and location of the tmpdir are returned. // and location of the tmpdir are returned.
// //
// Note: we return a tear-down func instead of a stop channel because the later will leak temporary // Note: we return a tear-down func instead of a stop channel because the later will leak temporary
// files that because Golang testing's call to os.Exit will not give a stop channel go routine //
// enough time to remove temporary files. // files that because Golang testing's call to os.Exit will not give a stop channel go routine
// enough time to remove temporary files.
func StartTestServer(t Logger, instanceOptions *TestServerInstanceOptions, customFlags []string, storageConfig *storagebackend.Config) (result TestServer, err error) { func StartTestServer(t Logger, instanceOptions *TestServerInstanceOptions, customFlags []string, storageConfig *storagebackend.Config) (result TestServer, err error) {
if instanceOptions == nil { if instanceOptions == nil {
instanceOptions = NewDefaultTestServerOptions() instanceOptions = NewDefaultTestServerOptions()

View File

@ -17,7 +17,6 @@ limitations under the License.
// Package app implements a server that runs a set of active // Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and // components. This includes replication controllers, service endpoints and
// nodes. // nodes.
//
package app package app
import ( import (

View File

@ -17,7 +17,6 @@ limitations under the License.
// Package app implements a server that runs a set of active // Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and // components. This includes replication controllers, service endpoints and
// nodes. // nodes.
//
package app package app
import ( import (

View File

@ -17,7 +17,6 @@ limitations under the License.
// Package app implements a server that runs a set of active // Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and // components. This includes replication controllers, service endpoints and
// nodes. // nodes.
//
package app package app
import ( import (

View File

@ -17,7 +17,6 @@ limitations under the License.
// Package app implements a server that runs a set of active // Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and // components. This includes replication controllers, service endpoints and
// nodes. // nodes.
//
package app package app
import ( import (

View File

@ -17,7 +17,6 @@ limitations under the License.
// Package app implements a server that runs a set of active // Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and // components. This includes replication controllers, service endpoints and
// nodes. // nodes.
//
package app package app
import ( import (
@ -381,7 +380,8 @@ func (c ControllerContext) IsControllerEnabled(name string) bool {
type InitFunc func(ctx context.Context, controllerCtx ControllerContext) (controller controller.Interface, enabled bool, err error) type InitFunc func(ctx context.Context, controllerCtx ControllerContext) (controller controller.Interface, enabled bool, err error)
// ControllerInitializersFunc is used to create a collection of initializers // ControllerInitializersFunc is used to create a collection of initializers
// given the loopMode. //
// given the loopMode.
type ControllerInitializersFunc func(loopMode ControllerLoopMode) (initializers map[string]InitFunc) type ControllerInitializersFunc func(loopMode ControllerLoopMode) (initializers map[string]InitFunc)
var _ ControllerInitializersFunc = NewControllerInitializers var _ ControllerInitializersFunc = NewControllerInitializers
@ -727,7 +727,8 @@ func leaderElectAndRun(c *config.CompletedConfig, lockIdentity string, electionC
} }
// createInitializersFunc creates a initializersFunc that returns all initializer // createInitializersFunc creates a initializersFunc that returns all initializer
// with expected as the result after filtering through filterFunc. //
// with expected as the result after filtering through filterFunc.
func createInitializersFunc(filterFunc leadermigration.FilterFunc, expected leadermigration.FilterResult) ControllerInitializersFunc { func createInitializersFunc(filterFunc leadermigration.FilterFunc, expected leadermigration.FilterResult) ControllerInitializersFunc {
return func(loopMode ControllerLoopMode) map[string]InitFunc { return func(loopMode ControllerLoopMode) map[string]InitFunc {
initializers := make(map[string]InitFunc) initializers := make(map[string]InitFunc)

View File

@ -17,7 +17,6 @@ limitations under the License.
// Package app implements a server that runs a set of active // Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and // components. This includes replication controllers, service endpoints and
// nodes. // nodes.
//
package app package app
import ( import (

View File

@ -17,7 +17,6 @@ limitations under the License.
// Package app implements a server that runs a set of active // Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and // components. This includes replication controllers, service endpoints and
// nodes. // nodes.
//
package app package app
import ( import (

View File

@ -15,7 +15,6 @@ limitations under the License.
*/ */
// Package options provides the flags used for the controller manager. // Package options provides the flags used for the controller manager.
//
package options package options
import ( import (

View File

@ -17,7 +17,6 @@ limitations under the License.
// Package app implements a server that runs a set of active // Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and // components. This includes replication controllers, service endpoints and
// nodes. // nodes.
//
package app package app
import ( import (

View File

@ -58,8 +58,9 @@ type Logger interface {
// and location of the tmpdir are returned. // and location of the tmpdir are returned.
// //
// Note: we return a tear-down func instead of a stop channel because the later will leak temporary // Note: we return a tear-down func instead of a stop channel because the later will leak temporary
// files that because Golang testing's call to os.Exit will not give a stop channel go routine //
// enough time to remove temporary files. // files that because Golang testing's call to os.Exit will not give a stop channel go routine
// enough time to remove temporary files.
func StartTestServer(t Logger, customFlags []string) (result TestServer, err error) { func StartTestServer(t Logger, customFlags []string) (result TestServer, err error) {
stopCh := make(chan struct{}) stopCh := make(chan struct{})
var errCh chan error var errCh chan error

View File

@ -59,8 +59,9 @@ type Logger interface {
// and location of the tmpdir are returned. // and location of the tmpdir are returned.
// //
// Note: we return a tear-down func instead of a stop channel because the later will leak temporary // Note: we return a tear-down func instead of a stop channel because the later will leak temporary
// files that because Golang testing's call to os.Exit will not give a stop channel go routine //
// enough time to remove temporary files. // files that because Golang testing's call to os.Exit will not give a stop channel go routine
// enough time to remove temporary files.
func StartTestServer(t Logger, customFlags []string) (result TestServer, err error) { func StartTestServer(t Logger, customFlags []string) (result TestServer, err error) {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())

View File

@ -25,22 +25,23 @@ limitations under the License.
// This version improves on the v1beta1 format by fixing some minor issues and adding a few new fields. // This version improves on the v1beta1 format by fixing some minor issues and adding a few new fields.
// //
// A list of changes since v1beta1: // A list of changes since v1beta1:
// - "certificateKey" field is added to InitConfiguration and JoinConfiguration. // - "certificateKey" field is added to InitConfiguration and JoinConfiguration.
// - "ignorePreflightErrors" field is added to the NodeRegistrationOptions. // - "ignorePreflightErrors" field is added to the NodeRegistrationOptions.
// - The JSON "omitempty" tag is used in a more places where appropriate. // - The JSON "omitempty" tag is used in a more places where appropriate.
// - The JSON "omitempty" tag of the "taints" field (inside NodeRegistrationOptions) is removed. // - The JSON "omitempty" tag of the "taints" field (inside NodeRegistrationOptions) is removed.
// See the Kubernetes 1.15 changelog for further details. // See the Kubernetes 1.15 changelog for further details.
// //
// Migration from old kubeadm config versions // # Migration from old kubeadm config versions
// //
// Please convert your v1beta1 configuration files to v1beta2 using the "kubeadm config migrate" command of kubeadm v1.15.x // Please convert your v1beta1 configuration files to v1beta2 using the "kubeadm config migrate" command of kubeadm v1.15.x
// (conversion from older releases of kubeadm config files requires older release of kubeadm as well e.g. // (conversion from older releases of kubeadm config files requires older release of kubeadm as well e.g.
//
// kubeadm v1.11 should be used to migrate v1alpha1 to v1alpha2; kubeadm v1.12 should be used to translate v1alpha2 to v1alpha3; // kubeadm v1.11 should be used to migrate v1alpha1 to v1alpha2; kubeadm v1.12 should be used to translate v1alpha2 to v1alpha3;
// kubeadm v1.13 or v1.14 should be used to translate v1alpha3 to v1beta1) // kubeadm v1.13 or v1.14 should be used to translate v1alpha3 to v1beta1)
// //
// Nevertheless, kubeadm v1.15.x will support reading from v1beta1 version of the kubeadm config file format. // Nevertheless, kubeadm v1.15.x will support reading from v1beta1 version of the kubeadm config file format.
// //
// Basics // # Basics
// //
// The preferred way to configure kubeadm is to pass an YAML configuration file with the --config option. Some of the // The preferred way to configure kubeadm is to pass an YAML configuration file with the --config option. Some of the
// configuration options defined in the kubeadm config file are also available as command line flags, but only // configuration options defined in the kubeadm config file are also available as command line flags, but only
@ -50,24 +51,25 @@ limitations under the License.
// //
// kubeadm supports the following configuration types: // kubeadm supports the following configuration types:
// //
// apiVersion: kubeadm.k8s.io/v1beta2 // apiVersion: kubeadm.k8s.io/v1beta2
// kind: InitConfiguration // kind: InitConfiguration
// //
// apiVersion: kubeadm.k8s.io/v1beta2 // apiVersion: kubeadm.k8s.io/v1beta2
// kind: ClusterConfiguration // kind: ClusterConfiguration
// //
// apiVersion: kubelet.config.k8s.io/v1beta1 // apiVersion: kubelet.config.k8s.io/v1beta1
// kind: KubeletConfiguration // kind: KubeletConfiguration
// //
// apiVersion: kubeproxy.config.k8s.io/v1alpha1 // apiVersion: kubeproxy.config.k8s.io/v1alpha1
// kind: KubeProxyConfiguration // kind: KubeProxyConfiguration
// //
// apiVersion: kubeadm.k8s.io/v1beta2 // apiVersion: kubeadm.k8s.io/v1beta2
// kind: JoinConfiguration // kind: JoinConfiguration
// //
// To print the defaults for "init" and "join" actions use the following commands: // To print the defaults for "init" and "join" actions use the following commands:
// kubeadm config print init-defaults //
// kubeadm config print join-defaults // kubeadm config print init-defaults
// kubeadm config print join-defaults
// //
// The list of configuration types that must be included in a configuration file depends by the action you are // The list of configuration types that must be included in a configuration file depends by the action you are
// performing (init or join) and by the configuration options you are going to use (defaults or advanced customization). // performing (init or join) and by the configuration options you are going to use (defaults or advanced customization).
@ -82,18 +84,18 @@ limitations under the License.
// If the user provides a configuration types that is not expected for the action you are performing, kubeadm will // If the user provides a configuration types that is not expected for the action you are performing, kubeadm will
// ignore those types and print a warning. // ignore those types and print a warning.
// //
// Kubeadm init configuration types // # Kubeadm init configuration types
// //
// When executing kubeadm init with the --config option, the following configuration types could be used: // When executing kubeadm init with the --config option, the following configuration types could be used:
// InitConfiguration, ClusterConfiguration, KubeProxyConfiguration, KubeletConfiguration, but only one // InitConfiguration, ClusterConfiguration, KubeProxyConfiguration, KubeletConfiguration, but only one
// between InitConfiguration and ClusterConfiguration is mandatory. // between InitConfiguration and ClusterConfiguration is mandatory.
// //
// apiVersion: kubeadm.k8s.io/v1beta2 // apiVersion: kubeadm.k8s.io/v1beta2
// kind: InitConfiguration // kind: InitConfiguration
// bootstrapTokens: // bootstrapTokens:
// ... // ...
// nodeRegistration: // nodeRegistration:
// ... // ...
// //
// The InitConfiguration type should be used to configure runtime settings, that in case of kubeadm init // The InitConfiguration type should be used to configure runtime settings, that in case of kubeadm init
// are the configuration of the bootstrap token and all the setting which are specific to the node where kubeadm // are the configuration of the bootstrap token and all the setting which are specific to the node where kubeadm
@ -106,18 +108,18 @@ limitations under the License.
// - LocalAPIEndpoint, that represents the endpoint of the instance of the API server to be deployed on this node; // - LocalAPIEndpoint, that represents the endpoint of the instance of the API server to be deployed on this node;
// use it e.g. to customize the API server advertise address. // use it e.g. to customize the API server advertise address.
// //
// apiVersion: kubeadm.k8s.io/v1beta2 // apiVersion: kubeadm.k8s.io/v1beta2
// kind: ClusterConfiguration // kind: ClusterConfiguration
// networking: // networking:
// ... // ...
// etcd: // etcd:
// ... // ...
// apiServer: // apiServer:
// extraArgs: // extraArgs:
// ... // ...
// extraVolumes: // extraVolumes:
// ... // ...
// ... // ...
// //
// The ClusterConfiguration type should be used to configure cluster-wide settings, // The ClusterConfiguration type should be used to configure cluster-wide settings,
// including settings for: // including settings for:
@ -131,9 +133,9 @@ limitations under the License.
// - kube-apiserver, kube-scheduler, kube-controller-manager configurations; use it to customize control-plane // - kube-apiserver, kube-scheduler, kube-controller-manager configurations; use it to customize control-plane
// components by adding customized setting or overriding kubeadm default settings. // components by adding customized setting or overriding kubeadm default settings.
// //
// apiVersion: kubeproxy.config.k8s.io/v1alpha1 // apiVersion: kubeproxy.config.k8s.io/v1alpha1
// kind: KubeProxyConfiguration // kind: KubeProxyConfiguration
// ... // ...
// //
// The KubeProxyConfiguration type should be used to change the configuration passed to kube-proxy instances deployed // The KubeProxyConfiguration type should be used to change the configuration passed to kube-proxy instances deployed
// in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. // in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults.
@ -141,9 +143,9 @@ limitations under the License.
// See https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/ or https://pkg.go.dev/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration // See https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/ or https://pkg.go.dev/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
// for kube proxy official documentation. // for kube proxy official documentation.
// //
// apiVersion: kubelet.config.k8s.io/v1beta1 // apiVersion: kubelet.config.k8s.io/v1beta1
// kind: KubeletConfiguration // kind: KubeletConfiguration
// ... // ...
// //
// The KubeletConfiguration type should be used to change the configurations that will be passed to all kubelet instances // The KubeletConfiguration type should be used to change the configurations that will be passed to all kubelet instances
// deployed in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. // deployed in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults.
@ -154,113 +156,113 @@ limitations under the License.
// Here is a fully populated example of a single YAML file containing multiple // Here is a fully populated example of a single YAML file containing multiple
// configuration types to be used during a `kubeadm init` run. // configuration types to be used during a `kubeadm init` run.
// //
// apiVersion: kubeadm.k8s.io/v1beta2 // apiVersion: kubeadm.k8s.io/v1beta2
// kind: InitConfiguration // kind: InitConfiguration
// bootstrapTokens: // bootstrapTokens:
// - token: "9a08jv.c0izixklcxtmnze7" // - token: "9a08jv.c0izixklcxtmnze7"
// description: "kubeadm bootstrap token" // description: "kubeadm bootstrap token"
// ttl: "24h" // ttl: "24h"
// - token: "783bde.3f89s0fje9f38fhf" // - token: "783bde.3f89s0fje9f38fhf"
// description: "another bootstrap token" // description: "another bootstrap token"
// usages: // usages:
// - authentication // - authentication
// - signing // - signing
// groups: // groups:
// - system:bootstrappers:kubeadm:default-node-token // - system:bootstrappers:kubeadm:default-node-token
// nodeRegistration: // nodeRegistration:
// name: "ec2-10-100-0-1" // name: "ec2-10-100-0-1"
// criSocket: "unix:///var/run/containerd/containerd.sock" // criSocket: "unix:///var/run/containerd/containerd.sock"
// taints: // taints:
// - key: "kubeadmNode" // - key: "kubeadmNode"
// value: "someValue" // value: "someValue"
// effect: "NoSchedule" // effect: "NoSchedule"
// kubeletExtraArgs: // kubeletExtraArgs:
// v: 4 // v: 4
// ignorePreflightErrors: // ignorePreflightErrors:
// - IsPrivilegedUser // - IsPrivilegedUser
// localAPIEndpoint: // localAPIEndpoint:
// advertiseAddress: "10.100.0.1" // advertiseAddress: "10.100.0.1"
// bindPort: 6443 // bindPort: 6443
// certificateKey: "e6a2eb8581237ab72a4f494f30285ec12a9694d750b9785706a83bfcbbbd2204" // certificateKey: "e6a2eb8581237ab72a4f494f30285ec12a9694d750b9785706a83bfcbbbd2204"
// --- // ---
// apiVersion: kubeadm.k8s.io/v1beta2 // apiVersion: kubeadm.k8s.io/v1beta2
// kind: ClusterConfiguration // kind: ClusterConfiguration
// etcd: // etcd:
// # one of local or external // # one of local or external
// local: // local:
// imageRepository: "registry.k8s.io" // imageRepository: "registry.k8s.io"
// imageTag: "3.2.24" // imageTag: "3.2.24"
// dataDir: "/var/lib/etcd" // dataDir: "/var/lib/etcd"
// extraArgs: // extraArgs:
// listen-client-urls: "http://10.100.0.1:2379" // listen-client-urls: "http://10.100.0.1:2379"
// serverCertSANs: // serverCertSANs:
// - "ec2-10-100-0-1.compute-1.amazonaws.com" // - "ec2-10-100-0-1.compute-1.amazonaws.com"
// peerCertSANs: // peerCertSANs:
// - "10.100.0.1" // - "10.100.0.1"
// # external: // # external:
// # endpoints: // # endpoints:
// # - "10.100.0.1:2379" // # - "10.100.0.1:2379"
// # - "10.100.0.2:2379" // # - "10.100.0.2:2379"
// # caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt" // # caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt"
// # certFile: "/etcd/kubernetes/pki/etcd/etcd.crt" // # certFile: "/etcd/kubernetes/pki/etcd/etcd.crt"
// # keyFile: "/etcd/kubernetes/pki/etcd/etcd.key" // # keyFile: "/etcd/kubernetes/pki/etcd/etcd.key"
// networking: // networking:
// serviceSubnet: "10.96.0.0/16" // serviceSubnet: "10.96.0.0/16"
// podSubnet: "10.244.0.0/24" // podSubnet: "10.244.0.0/24"
// dnsDomain: "cluster.local" // dnsDomain: "cluster.local"
// kubernetesVersion: "v1.12.0" // kubernetesVersion: "v1.12.0"
// controlPlaneEndpoint: "10.100.0.1:6443" // controlPlaneEndpoint: "10.100.0.1:6443"
// apiServer: // apiServer:
// extraArgs: // extraArgs:
// authorization-mode: "Node,RBAC" // authorization-mode: "Node,RBAC"
// extraVolumes: // extraVolumes:
// - name: "some-volume" // - name: "some-volume"
// hostPath: "/etc/some-path" // hostPath: "/etc/some-path"
// mountPath: "/etc/some-pod-path" // mountPath: "/etc/some-pod-path"
// readOnly: false // readOnly: false
// pathType: File // pathType: File
// certSANs: // certSANs:
// - "10.100.1.1" // - "10.100.1.1"
// - "ec2-10-100-0-1.compute-1.amazonaws.com" // - "ec2-10-100-0-1.compute-1.amazonaws.com"
// timeoutForControlPlane: 4m0s // timeoutForControlPlane: 4m0s
// controllerManager: // controllerManager:
// extraArgs: // extraArgs:
// "node-cidr-mask-size": "20" // "node-cidr-mask-size": "20"
// extraVolumes: // extraVolumes:
// - name: "some-volume" // - name: "some-volume"
// hostPath: "/etc/some-path" // hostPath: "/etc/some-path"
// mountPath: "/etc/some-pod-path" // mountPath: "/etc/some-pod-path"
// readOnly: false // readOnly: false
// pathType: File // pathType: File
// scheduler: // scheduler:
// extraArgs: // extraArgs:
// address: "10.100.0.1" // address: "10.100.0.1"
// extraVolumes: // extraVolumes:
// - name: "some-volume" // - name: "some-volume"
// hostPath: "/etc/some-path" // hostPath: "/etc/some-path"
// mountPath: "/etc/some-pod-path" // mountPath: "/etc/some-pod-path"
// readOnly: false // readOnly: false
// pathType: File // pathType: File
// certificatesDir: "/etc/kubernetes/pki" // certificatesDir: "/etc/kubernetes/pki"
// imageRepository: "registry.k8s.io" // imageRepository: "registry.k8s.io"
// useHyperKubeImage: false // useHyperKubeImage: false
// clusterName: "example-cluster" // clusterName: "example-cluster"
// --- // ---
// apiVersion: kubelet.config.k8s.io/v1beta1 // apiVersion: kubelet.config.k8s.io/v1beta1
// kind: KubeletConfiguration // kind: KubeletConfiguration
// # kubelet specific options here // # kubelet specific options here
// --- // ---
// apiVersion: kubeproxy.config.k8s.io/v1alpha1 // apiVersion: kubeproxy.config.k8s.io/v1alpha1
// kind: KubeProxyConfiguration // kind: KubeProxyConfiguration
// # kube-proxy specific options here // # kube-proxy specific options here
// //
// Kubeadm join configuration types // # Kubeadm join configuration types
// //
// When executing kubeadm join with the --config option, the JoinConfiguration type should be provided. // When executing kubeadm join with the --config option, the JoinConfiguration type should be provided.
// //
// apiVersion: kubeadm.k8s.io/v1beta2 // apiVersion: kubeadm.k8s.io/v1beta2
// kind: JoinConfiguration // kind: JoinConfiguration
// ... // ...
// //
// The JoinConfiguration type should be used to configure runtime settings, that in case of kubeadm join // The JoinConfiguration type should be used to configure runtime settings, that in case of kubeadm join
// are the discovery method used for accessing the cluster info and all the setting which are specific // are the discovery method used for accessing the cluster info and all the setting which are specific
@ -271,7 +273,6 @@ limitations under the License.
// node only (e.g. the node ip). // node only (e.g. the node ip).
// //
// - APIEndpoint, that represents the endpoint of the instance of the API server to be eventually deployed on this node. // - APIEndpoint, that represents the endpoint of the instance of the API server to be eventually deployed on this node.
//
package v1beta2 // import "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2" package v1beta2 // import "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
//TODO: The BootstrapTokenString object should move out to either k8s.io/client-go or k8s.io/api in the future //TODO: The BootstrapTokenString object should move out to either k8s.io/client-go or k8s.io/api in the future

View File

@ -23,28 +23,28 @@ limitations under the License.
// This version improves on the v1beta2 format by fixing some minor issues and adding a few new fields. // This version improves on the v1beta2 format by fixing some minor issues and adding a few new fields.
// //
// A list of changes since v1beta2: // A list of changes since v1beta2:
// - The deprecated "ClusterConfiguration.useHyperKubeImage" field has been removed. // - The deprecated "ClusterConfiguration.useHyperKubeImage" field has been removed.
// Kubeadm no longer supports the hyperkube image. // Kubeadm no longer supports the hyperkube image.
// - The "ClusterConfiguration.DNS.Type" field has been removed since CoreDNS is the only supported // - The "ClusterConfiguration.DNS.Type" field has been removed since CoreDNS is the only supported
// DNS server type by kubeadm. // DNS server type by kubeadm.
// - Include "datapolicy" tags on the fields that hold secrets. // - Include "datapolicy" tags on the fields that hold secrets.
// This would result in the field values to be omitted when API structures are printed with klog. // This would result in the field values to be omitted when API structures are printed with klog.
// - Add "InitConfiguration.SkipPhases", "JoinConfiguration.SkipPhases" to allow skipping // - Add "InitConfiguration.SkipPhases", "JoinConfiguration.SkipPhases" to allow skipping
// a list of phases during kubeadm init/join command execution. // a list of phases during kubeadm init/join command execution.
// - Add "InitConfiguration.NodeRegistration.ImagePullPolicy" and "JoinConfiguration.NodeRegistration.ImagePullPolicy" // - Add "InitConfiguration.NodeRegistration.ImagePullPolicy" and "JoinConfiguration.NodeRegistration.ImagePullPolicy"
// to allow specifying the images pull policy during kubeadm "init" and "join". The value must be one of "Always", "Never" or // to allow specifying the images pull policy during kubeadm "init" and "join". The value must be one of "Always", "Never" or
// "IfNotPresent". "IfNotPresent" is the default, which has been the existing behavior prior to this addition. // "IfNotPresent". "IfNotPresent" is the default, which has been the existing behavior prior to this addition.
// - Add "InitConfiguration.Patches.Directory", "JoinConfiguration.Patches.Directory" to allow // - Add "InitConfiguration.Patches.Directory", "JoinConfiguration.Patches.Directory" to allow
// the user to configure a directory from which to take patches for components deployed by kubeadm. // the user to configure a directory from which to take patches for components deployed by kubeadm.
// - Move the BootstrapToken* API and related utilities out of the "kubeadm" API group to a new group // - Move the BootstrapToken* API and related utilities out of the "kubeadm" API group to a new group
// "bootstraptoken". The kubeadm API version v1beta3 no longer contains the BootstrapToken* structures. // "bootstraptoken". The kubeadm API version v1beta3 no longer contains the BootstrapToken* structures.
// //
// Migration from old kubeadm config versions // Migration from old kubeadm config versions
// //
// - kubeadm v1.15.x and newer can be used to migrate from v1beta1 to v1beta2. // - kubeadm v1.15.x and newer can be used to migrate from v1beta1 to v1beta2.
// - kubeadm v1.22.x and newer no longer support v1beta1 and older APIs, but can be used to migrate v1beta2 to v1beta3. // - kubeadm v1.22.x and newer no longer support v1beta1 and older APIs, but can be used to migrate v1beta2 to v1beta3.
// //
// Basics // # Basics
// //
// The preferred way to configure kubeadm is to pass an YAML configuration file with the --config option. Some of the // The preferred way to configure kubeadm is to pass an YAML configuration file with the --config option. Some of the
// configuration options defined in the kubeadm config file are also available as command line flags, but only // configuration options defined in the kubeadm config file are also available as command line flags, but only
@ -54,24 +54,25 @@ limitations under the License.
// //
// kubeadm supports the following configuration types: // kubeadm supports the following configuration types:
// //
// apiVersion: kubeadm.k8s.io/v1beta3 // apiVersion: kubeadm.k8s.io/v1beta3
// kind: InitConfiguration // kind: InitConfiguration
// //
// apiVersion: kubeadm.k8s.io/v1beta3 // apiVersion: kubeadm.k8s.io/v1beta3
// kind: ClusterConfiguration // kind: ClusterConfiguration
// //
// apiVersion: kubelet.config.k8s.io/v1beta1 // apiVersion: kubelet.config.k8s.io/v1beta1
// kind: KubeletConfiguration // kind: KubeletConfiguration
// //
// apiVersion: kubeproxy.config.k8s.io/v1alpha1 // apiVersion: kubeproxy.config.k8s.io/v1alpha1
// kind: KubeProxyConfiguration // kind: KubeProxyConfiguration
// //
// apiVersion: kubeadm.k8s.io/v1beta3 // apiVersion: kubeadm.k8s.io/v1beta3
// kind: JoinConfiguration // kind: JoinConfiguration
// //
// To print the defaults for "init" and "join" actions use the following commands: // To print the defaults for "init" and "join" actions use the following commands:
// kubeadm config print init-defaults //
// kubeadm config print join-defaults // kubeadm config print init-defaults
// kubeadm config print join-defaults
// //
// The list of configuration types that must be included in a configuration file depends by the action you are // The list of configuration types that must be included in a configuration file depends by the action you are
// performing (init or join) and by the configuration options you are going to use (defaults or advanced customization). // performing (init or join) and by the configuration options you are going to use (defaults or advanced customization).
@ -86,18 +87,18 @@ limitations under the License.
// If the user provides a configuration types that is not expected for the action you are performing, kubeadm will // If the user provides a configuration types that is not expected for the action you are performing, kubeadm will
// ignore those types and print a warning. // ignore those types and print a warning.
// //
// Kubeadm init configuration types // # Kubeadm init configuration types
// //
// When executing kubeadm init with the --config option, the following configuration types could be used: // When executing kubeadm init with the --config option, the following configuration types could be used:
// InitConfiguration, ClusterConfiguration, KubeProxyConfiguration, KubeletConfiguration, but only one // InitConfiguration, ClusterConfiguration, KubeProxyConfiguration, KubeletConfiguration, but only one
// between InitConfiguration and ClusterConfiguration is mandatory. // between InitConfiguration and ClusterConfiguration is mandatory.
// //
// apiVersion: kubeadm.k8s.io/v1beta3 // apiVersion: kubeadm.k8s.io/v1beta3
// kind: InitConfiguration // kind: InitConfiguration
// bootstrapTokens: // bootstrapTokens:
// ... // ...
// nodeRegistration: // nodeRegistration:
// ... // ...
// //
// The InitConfiguration type should be used to configure runtime settings, that in case of kubeadm init // The InitConfiguration type should be used to configure runtime settings, that in case of kubeadm init
// are the configuration of the bootstrap token and all the setting which are specific to the node where kubeadm // are the configuration of the bootstrap token and all the setting which are specific to the node where kubeadm
@ -110,18 +111,18 @@ limitations under the License.
// - LocalAPIEndpoint, that represents the endpoint of the instance of the API server to be deployed on this node; // - LocalAPIEndpoint, that represents the endpoint of the instance of the API server to be deployed on this node;
// use it e.g. to customize the API server advertise address. // use it e.g. to customize the API server advertise address.
// //
// apiVersion: kubeadm.k8s.io/v1beta3 // apiVersion: kubeadm.k8s.io/v1beta3
// kind: ClusterConfiguration // kind: ClusterConfiguration
// networking: // networking:
// ... // ...
// etcd: // etcd:
// ... // ...
// apiServer: // apiServer:
// extraArgs: // extraArgs:
// ... // ...
// extraVolumes: // extraVolumes:
// ... // ...
// ... // ...
// //
// The ClusterConfiguration type should be used to configure cluster-wide settings, // The ClusterConfiguration type should be used to configure cluster-wide settings,
// including settings for: // including settings for:
@ -135,9 +136,9 @@ limitations under the License.
// - kube-apiserver, kube-scheduler, kube-controller-manager configurations; use it to customize control-plane // - kube-apiserver, kube-scheduler, kube-controller-manager configurations; use it to customize control-plane
// components by adding customized setting or overriding kubeadm default settings. // components by adding customized setting or overriding kubeadm default settings.
// //
// apiVersion: kubeproxy.config.k8s.io/v1alpha1 // apiVersion: kubeproxy.config.k8s.io/v1alpha1
// kind: KubeProxyConfiguration // kind: KubeProxyConfiguration
// ... // ...
// //
// The KubeProxyConfiguration type should be used to change the configuration passed to kube-proxy instances deployed // The KubeProxyConfiguration type should be used to change the configuration passed to kube-proxy instances deployed
// in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. // in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults.
@ -145,9 +146,9 @@ limitations under the License.
// See https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/ or https://pkg.go.dev/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration // See https://kubernetes.io/docs/reference/command-line-tools-reference/kube-proxy/ or https://pkg.go.dev/k8s.io/kube-proxy/config/v1alpha1#KubeProxyConfiguration
// for kube proxy official documentation. // for kube proxy official documentation.
// //
// apiVersion: kubelet.config.k8s.io/v1beta1 // apiVersion: kubelet.config.k8s.io/v1beta1
// kind: KubeletConfiguration // kind: KubeletConfiguration
// ... // ...
// //
// The KubeletConfiguration type should be used to change the configurations that will be passed to all kubelet instances // The KubeletConfiguration type should be used to change the configurations that will be passed to all kubelet instances
// deployed in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults. // deployed in the cluster. If this object is not provided or provided only partially, kubeadm applies defaults.
@ -158,115 +159,115 @@ limitations under the License.
// Here is a fully populated example of a single YAML file containing multiple // Here is a fully populated example of a single YAML file containing multiple
// configuration types to be used during a `kubeadm init` run. // configuration types to be used during a `kubeadm init` run.
// //
// apiVersion: kubeadm.k8s.io/v1beta3 // apiVersion: kubeadm.k8s.io/v1beta3
// kind: InitConfiguration // kind: InitConfiguration
// bootstrapTokens: // bootstrapTokens:
// - token: "9a08jv.c0izixklcxtmnze7" // - token: "9a08jv.c0izixklcxtmnze7"
// description: "kubeadm bootstrap token" // description: "kubeadm bootstrap token"
// ttl: "24h" // ttl: "24h"
// - token: "783bde.3f89s0fje9f38fhf" // - token: "783bde.3f89s0fje9f38fhf"
// description: "another bootstrap token" // description: "another bootstrap token"
// usages: // usages:
// - authentication // - authentication
// - signing // - signing
// groups: // groups:
// - system:bootstrappers:kubeadm:default-node-token // - system:bootstrappers:kubeadm:default-node-token
// nodeRegistration: // nodeRegistration:
// name: "ec2-10-100-0-1" // name: "ec2-10-100-0-1"
// criSocket: "unix:///var/run/containerd/containerd.sock" // criSocket: "unix:///var/run/containerd/containerd.sock"
// taints: // taints:
// - key: "kubeadmNode" // - key: "kubeadmNode"
// value: "someValue" // value: "someValue"
// effect: "NoSchedule" // effect: "NoSchedule"
// kubeletExtraArgs: // kubeletExtraArgs:
// v: 4 // v: 4
// ignorePreflightErrors: // ignorePreflightErrors:
// - IsPrivilegedUser // - IsPrivilegedUser
// imagePullPolicy: "IfNotPresent" // imagePullPolicy: "IfNotPresent"
// localAPIEndpoint: // localAPIEndpoint:
// advertiseAddress: "10.100.0.1" // advertiseAddress: "10.100.0.1"
// bindPort: 6443 // bindPort: 6443
// certificateKey: "e6a2eb8581237ab72a4f494f30285ec12a9694d750b9785706a83bfcbbbd2204" // certificateKey: "e6a2eb8581237ab72a4f494f30285ec12a9694d750b9785706a83bfcbbbd2204"
// skipPhases: // skipPhases:
// - addon/kube-proxy // - addon/kube-proxy
// --- // ---
// apiVersion: kubeadm.k8s.io/v1beta3 // apiVersion: kubeadm.k8s.io/v1beta3
// kind: ClusterConfiguration // kind: ClusterConfiguration
// etcd: // etcd:
// # one of local or external // # one of local or external
// local: // local:
// imageRepository: "registry.k8s.io" // imageRepository: "registry.k8s.io"
// imageTag: "3.2.24" // imageTag: "3.2.24"
// dataDir: "/var/lib/etcd" // dataDir: "/var/lib/etcd"
// extraArgs: // extraArgs:
// listen-client-urls: "http://10.100.0.1:2379" // listen-client-urls: "http://10.100.0.1:2379"
// serverCertSANs: // serverCertSANs:
// - "ec2-10-100-0-1.compute-1.amazonaws.com" // - "ec2-10-100-0-1.compute-1.amazonaws.com"
// peerCertSANs: // peerCertSANs:
// - "10.100.0.1" // - "10.100.0.1"
// # external: // # external:
// # endpoints: // # endpoints:
// # - "10.100.0.1:2379" // # - "10.100.0.1:2379"
// # - "10.100.0.2:2379" // # - "10.100.0.2:2379"
// # caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt" // # caFile: "/etcd/kubernetes/pki/etcd/etcd-ca.crt"
// # certFile: "/etcd/kubernetes/pki/etcd/etcd.crt" // # certFile: "/etcd/kubernetes/pki/etcd/etcd.crt"
// # keyFile: "/etcd/kubernetes/pki/etcd/etcd.key" // # keyFile: "/etcd/kubernetes/pki/etcd/etcd.key"
// networking: // networking:
// serviceSubnet: "10.96.0.0/16" // serviceSubnet: "10.96.0.0/16"
// podSubnet: "10.244.0.0/24" // podSubnet: "10.244.0.0/24"
// dnsDomain: "cluster.local" // dnsDomain: "cluster.local"
// kubernetesVersion: "v1.21.0" // kubernetesVersion: "v1.21.0"
// controlPlaneEndpoint: "10.100.0.1:6443" // controlPlaneEndpoint: "10.100.0.1:6443"
// apiServer: // apiServer:
// extraArgs: // extraArgs:
// authorization-mode: "Node,RBAC" // authorization-mode: "Node,RBAC"
// extraVolumes: // extraVolumes:
// - name: "some-volume" // - name: "some-volume"
// hostPath: "/etc/some-path" // hostPath: "/etc/some-path"
// mountPath: "/etc/some-pod-path" // mountPath: "/etc/some-pod-path"
// readOnly: false // readOnly: false
// pathType: File // pathType: File
// certSANs: // certSANs:
// - "10.100.1.1" // - "10.100.1.1"
// - "ec2-10-100-0-1.compute-1.amazonaws.com" // - "ec2-10-100-0-1.compute-1.amazonaws.com"
// timeoutForControlPlane: 4m0s // timeoutForControlPlane: 4m0s
// controllerManager: // controllerManager:
// extraArgs: // extraArgs:
// "node-cidr-mask-size": "20" // "node-cidr-mask-size": "20"
// extraVolumes: // extraVolumes:
// - name: "some-volume" // - name: "some-volume"
// hostPath: "/etc/some-path" // hostPath: "/etc/some-path"
// mountPath: "/etc/some-pod-path" // mountPath: "/etc/some-pod-path"
// readOnly: false // readOnly: false
// pathType: File // pathType: File
// scheduler: // scheduler:
// extraArgs: // extraArgs:
// address: "10.100.0.1" // address: "10.100.0.1"
// extraVolumes: // extraVolumes:
// - name: "some-volume" // - name: "some-volume"
// hostPath: "/etc/some-path" // hostPath: "/etc/some-path"
// mountPath: "/etc/some-pod-path" // mountPath: "/etc/some-pod-path"
// readOnly: false // readOnly: false
// pathType: File // pathType: File
// certificatesDir: "/etc/kubernetes/pki" // certificatesDir: "/etc/kubernetes/pki"
// imageRepository: "registry.k8s.io" // imageRepository: "registry.k8s.io"
// clusterName: "example-cluster" // clusterName: "example-cluster"
// --- // ---
// apiVersion: kubelet.config.k8s.io/v1beta1 // apiVersion: kubelet.config.k8s.io/v1beta1
// kind: KubeletConfiguration // kind: KubeletConfiguration
// # kubelet specific options here // # kubelet specific options here
// --- // ---
// apiVersion: kubeproxy.config.k8s.io/v1alpha1 // apiVersion: kubeproxy.config.k8s.io/v1alpha1
// kind: KubeProxyConfiguration // kind: KubeProxyConfiguration
// # kube-proxy specific options here // # kube-proxy specific options here
// //
// Kubeadm join configuration types // # Kubeadm join configuration types
// //
// When executing kubeadm join with the --config option, the JoinConfiguration type should be provided. // When executing kubeadm join with the --config option, the JoinConfiguration type should be provided.
// //
// apiVersion: kubeadm.k8s.io/v1beta3 // apiVersion: kubeadm.k8s.io/v1beta3
// kind: JoinConfiguration // kind: JoinConfiguration
// ... // ...
// //
// The JoinConfiguration type should be used to configure runtime settings, that in case of kubeadm join // The JoinConfiguration type should be used to configure runtime settings, that in case of kubeadm join
// are the discovery method used for accessing the cluster info and all the setting which are specific // are the discovery method used for accessing the cluster info and all the setting which are specific
@ -277,7 +278,6 @@ limitations under the License.
// node only (e.g. the node ip). // node only (e.g. the node ip).
// //
// - APIEndpoint, that represents the endpoint of the instance of the API server to be eventually deployed on this node. // - APIEndpoint, that represents the endpoint of the instance of the API server to be eventually deployed on this node.
//
package v1beta3 // import "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3" package v1beta3 // import "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
//TODO: The BootstrapTokenString object should move out to either k8s.io/client-go or k8s.io/api in the future //TODO: The BootstrapTokenString object should move out to either k8s.io/client-go or k8s.io/api in the future

View File

@ -424,7 +424,7 @@ func TestValidateAPIEndpoint(t *testing.T) {
} }
} }
//TODO: Create a separated test for ValidateClusterConfiguration // TODO: Create a separated test for ValidateClusterConfiguration
func TestValidateInitConfiguration(t *testing.T) { func TestValidateInitConfiguration(t *testing.T) {
nodename := "valid-nodename" nodename := "valid-nodename"
var tests = []struct { var tests = []struct {

View File

@ -131,7 +131,8 @@ type initData struct {
// newCmdInit returns "kubeadm init" command. // newCmdInit returns "kubeadm init" command.
// NB. initOptions is exposed as parameter for allowing unit testing of // NB. initOptions is exposed as parameter for allowing unit testing of
// the newInitOptions method, that implements all the command options validation logic //
// the newInitOptions method, that implements all the command options validation logic
func newCmdInit(out io.Writer, initOptions *initOptions) *cobra.Command { func newCmdInit(out io.Writer, initOptions *initOptions) *cobra.Command {
if initOptions == nil { if initOptions == nil {
initOptions = newInitOptions() initOptions = newInitOptions()

View File

@ -157,7 +157,8 @@ type joinData struct {
// newCmdJoin returns "kubeadm join" command. // newCmdJoin returns "kubeadm join" command.
// NB. joinOptions is exposed as parameter for allowing unit testing of // NB. joinOptions is exposed as parameter for allowing unit testing of
// the newJoinData method, that implements all the command options validation logic //
// the newJoinData method, that implements all the command options validation logic
func newCmdJoin(out io.Writer, joinOptions *joinOptions) *cobra.Command { func newCmdJoin(out io.Writer, joinOptions *joinOptions) *cobra.Command {
if joinOptions == nil { if joinOptions == nil {
joinOptions = newJoinOptions() joinOptions = newJoinOptions()

View File

@ -38,13 +38,15 @@ var joinCommandTemplate = template.Must(template.New("join").Parse(`` +
)) ))
// GetJoinWorkerCommand returns the kubeadm join command for a given token and // GetJoinWorkerCommand returns the kubeadm join command for a given token and
// Kubernetes cluster (the current cluster in the kubeconfig file) //
// Kubernetes cluster (the current cluster in the kubeconfig file)
func GetJoinWorkerCommand(kubeConfigFile, token string, skipTokenPrint bool) (string, error) { func GetJoinWorkerCommand(kubeConfigFile, token string, skipTokenPrint bool) (string, error) {
return getJoinCommand(kubeConfigFile, token, "", false, skipTokenPrint, false) return getJoinCommand(kubeConfigFile, token, "", false, skipTokenPrint, false)
} }
// GetJoinControlPlaneCommand returns the kubeadm join command for a given token and // GetJoinControlPlaneCommand returns the kubeadm join command for a given token and
// Kubernetes cluster (the current cluster in the kubeconfig file) //
// Kubernetes cluster (the current cluster in the kubeconfig file)
func GetJoinControlPlaneCommand(kubeConfigFile, token, key string, skipTokenPrint, skipCertificateKeyPrint bool) (string, error) { func GetJoinControlPlaneCommand(kubeConfigFile, token, key string, skipTokenPrint, skipCertificateKeyPrint bool) (string, error) {
return getJoinCommand(kubeConfigFile, token, key, true, skipTokenPrint, skipCertificateKeyPrint) return getJoinCommand(kubeConfigFile, token, key, true, skipTokenPrint, skipCertificateKeyPrint)
} }

View File

@ -76,7 +76,7 @@ func createShortLivedBootstrapToken(client clientset.Interface) (string, error)
return tokens[0].Token.ID, nil return tokens[0].Token.ID, nil
} }
//CreateCertificateKey returns a cryptographically secure random key // CreateCertificateKey returns a cryptographically secure random key
func CreateCertificateKey() (string, error) { func CreateCertificateKey() (string, error) {
randBytes, err := cryptoutil.CreateRandBytes(kubeadmconstants.CertificateKeySize) randBytes, err := cryptoutil.CreateRandBytes(kubeadmconstants.CertificateKeySize)
if err != nil { if err != nil {
@ -85,7 +85,7 @@ func CreateCertificateKey() (string, error) {
return hex.EncodeToString(randBytes), nil return hex.EncodeToString(randBytes), nil
} }
//UploadCerts save certs needs to join a new control-plane on kubeadm-certs sercret. // UploadCerts save certs needs to join a new control-plane on kubeadm-certs sercret.
func UploadCerts(client clientset.Interface, cfg *kubeadmapi.InitConfiguration, key string) error { func UploadCerts(client clientset.Interface, cfg *kubeadmapi.InitConfiguration, key string) error {
fmt.Printf("[upload-certs] Storing the certificates in Secret %q in the %q Namespace\n", kubeadmconstants.KubeadmCertsSecret, metav1.NamespaceSystem) fmt.Printf("[upload-certs] Storing the certificates in Secret %q in the %q Namespace\n", kubeadmconstants.KubeadmCertsSecret, metav1.NamespaceSystem)
decodedKey, err := hex.DecodeString(key) decodedKey, err := hex.DecodeString(key)

View File

@ -71,8 +71,8 @@ func WriteKubeletDynamicEnvFile(cfg *kubeadmapi.ClusterConfiguration, nodeReg *k
return writeKubeletFlagBytesToDisk([]byte(envFileContent), kubeletDir) return writeKubeletFlagBytesToDisk([]byte(envFileContent), kubeletDir)
} }
//buildKubeletArgMapCommon takes a kubeletFlagsOpts object and builds based on that a string-string map with flags // buildKubeletArgMapCommon takes a kubeletFlagsOpts object and builds based on that a string-string map with flags
//that are common to both Linux and Windows // that are common to both Linux and Windows
func buildKubeletArgMapCommon(opts kubeletFlagsOpts) map[string]string { func buildKubeletArgMapCommon(opts kubeletFlagsOpts) map[string]string {
kubeletFlags := map[string]string{} kubeletFlags := map[string]string{}
kubeletFlags["container-runtime-endpoint"] = opts.nodeRegOpts.CRISocket kubeletFlags["container-runtime-endpoint"] = opts.nodeRegOpts.CRISocket

View File

@ -160,7 +160,8 @@ func GetNodeRegistration(kubeconfigFile string, client clientset.Interface, node
// getNodeNameFromKubeletConfig gets the node name from a kubelet config file // getNodeNameFromKubeletConfig gets the node name from a kubelet config file
// TODO: in future we want to switch to a more canonical way for doing this e.g. by having this // TODO: in future we want to switch to a more canonical way for doing this e.g. by having this
// information in the local kubelet config.yaml //
// information in the local kubelet config.yaml
func getNodeNameFromKubeletConfig(fileName string) (string, error) { func getNodeNameFromKubeletConfig(fileName string) (string, error) {
// loads the kubelet.conf file // loads the kubelet.conf file
config, err := clientcmd.LoadFromFile(fileName) config, err := clientcmd.LoadFromFile(fileName)

View File

@ -57,12 +57,13 @@ var (
// servers and then return actual semantic version. // servers and then return actual semantic version.
// //
// Available names on release servers: // Available names on release servers:
// stable (latest stable release) //
// stable-1 (latest stable release in 1.x) // stable (latest stable release)
// stable-1.0 (and similarly 1.1, 1.2, 1.3, ...) // stable-1 (latest stable release in 1.x)
// latest (latest release, including alpha/beta) // stable-1.0 (and similarly 1.1, 1.2, 1.3, ...)
// latest-1 (latest release in 1.x, including alpha/beta) // latest (latest release, including alpha/beta)
// latest-1.0 (and similarly 1.1, 1.2, 1.3, ...) // latest-1 (latest release in 1.x, including alpha/beta)
// latest-1.0 (and similarly 1.1, 1.2, 1.3, ...)
func KubernetesReleaseVersion(version string) (string, error) { func KubernetesReleaseVersion(version string) (string, error) {
return kubernetesReleaseVersion(version, fetchFromURL) return kubernetesReleaseVersion(version, fetchFromURL)
} }

View File

@ -45,9 +45,10 @@ const defaultRootDir = "/var/lib/kubelet"
// KubeletFlags contains configuration flags for the Kubelet. // KubeletFlags contains configuration flags for the Kubelet.
// A configuration field should go in KubeletFlags instead of KubeletConfiguration if any of these are true: // A configuration field should go in KubeletFlags instead of KubeletConfiguration if any of these are true:
// - its value will never, or cannot safely be changed during the lifetime of a node, or // - its value will never, or cannot safely be changed during the lifetime of a node, or
// - its value cannot be safely shared between nodes at the same time (e.g. a hostname); // - its value cannot be safely shared between nodes at the same time (e.g. a hostname);
// KubeletConfiguration is intended to be shared between nodes. // KubeletConfiguration is intended to be shared between nodes.
//
// In general, please try to avoid adding flags or configuration fields, // In general, please try to avoid adding flags or configuration fields,
// we already have a confusingly large amount of them. // we already have a confusingly large amount of them.
type KubeletFlags struct { type KubeletFlags struct {

View File

@ -1075,9 +1075,11 @@ func setContentTypeForClient(cfg *restclient.Config, contentType string) {
} }
// RunKubelet is responsible for setting up and running a kubelet. It is used in three different applications: // RunKubelet is responsible for setting up and running a kubelet. It is used in three different applications:
// 1 Integration tests //
// 2 Kubelet binary // 1 Integration tests
// 3 Standalone 'kubernetes' binary // 2 Kubelet binary
// 3 Standalone 'kubernetes' binary
//
// Eventually, #2 will be replaced with instances of #3 // Eventually, #2 will be replaced with instances of #3
func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencies, runOnce bool) error { func RunKubelet(kubeServer *options.KubeletServer, kubeDeps *kubelet.Dependencies, runOnce bool) error {
hostname, err := nodeutil.GetHostname(kubeServer.HostnameOverride) hostname, err := nodeutil.GetHostname(kubeServer.HostnameOverride)

View File

@ -27,8 +27,9 @@ import (
// StatefulSet represents a set of pods with consistent identities. // StatefulSet represents a set of pods with consistent identities.
// Identities are defined as: // Identities are defined as:
// - Network: A single stable DNS and hostname. // - Network: A single stable DNS and hostname.
// - Storage: As many VolumeClaims as requested. // - Storage: As many VolumeClaims as requested.
//
// The StatefulSet guarantees that a given network identity will always // The StatefulSet guarantees that a given network identity will always
// map to the same storage identity. // map to the same storage identity.
type StatefulSet struct { type StatefulSet struct {

View File

@ -21,16 +21,16 @@ package autoscaling
// It should always be called when converting internal -> external versions, prior // It should always be called when converting internal -> external versions, prior
// to setting any of the custom annotations: // to setting any of the custom annotations:
// //
// annotations, copiedAnnotations := DropRoundTripHorizontalPodAutoscalerAnnotations(externalObj.Annotations) // annotations, copiedAnnotations := DropRoundTripHorizontalPodAutoscalerAnnotations(externalObj.Annotations)
// externalObj.Annotations = annotations // externalObj.Annotations = annotations
// //
// if internal.SomeField != nil { // if internal.SomeField != nil {
// if !copiedAnnotations { // if !copiedAnnotations {
// externalObj.Annotations = DeepCopyStringMap(externalObj.Annotations) // externalObj.Annotations = DeepCopyStringMap(externalObj.Annotations)
// copiedAnnotations = true // copiedAnnotations = true
// } // }
// externalObj.Annotations[...] = json.Marshal(...) // externalObj.Annotations[...] = json.Marshal(...)
// } // }
func DropRoundTripHorizontalPodAutoscalerAnnotations(in map[string]string) (out map[string]string, copied bool) { func DropRoundTripHorizontalPodAutoscalerAnnotations(in map[string]string) (out map[string]string, copied bool) {
_, hasMetricsSpecs := in[MetricSpecsAnnotation] _, hasMetricsSpecs := in[MetricSpecsAnnotation]
_, hasBehaviorSpecs := in[BehaviorSpecsAnnotation] _, hasBehaviorSpecs := in[BehaviorSpecsAnnotation]

View File

@ -193,7 +193,8 @@ type CertificateSigningRequestList struct {
// KeyUsages specifies valid usage contexts for keys. // KeyUsages specifies valid usage contexts for keys.
// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 //
// https://tools.ietf.org/html/rfc5280#section-4.2.1.12
type KeyUsage string type KeyUsage string
const ( const (

View File

@ -3221,8 +3221,9 @@ type PodDNSConfigOption struct {
// PodIP represents the IP address of a pod. // PodIP represents the IP address of a pod.
// IP address information. Each entry includes: // IP address information. Each entry includes:
// IP: An IP address allocated to the pod. Routable at least within //
// the cluster. // IP: An IP address allocated to the pod. Routable at least within
// the cluster.
type PodIP struct { type PodIP struct {
IP string IP string
} }
@ -4035,17 +4036,18 @@ type ServiceAccountList struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Endpoints is a collection of endpoints that implement the actual service. Example: // Endpoints is a collection of endpoints that implement the actual service. Example:
// Name: "mysvc", //
// Subsets: [ // Name: "mysvc",
// { // Subsets: [
// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], // {
// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] // Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
// }, // Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
// { // },
// Addresses: [{"ip": "10.10.3.3"}], // {
// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] // Addresses: [{"ip": "10.10.3.3"}],
// }, // Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
// ] // },
// ]
type Endpoints struct { type Endpoints struct {
metav1.TypeMeta metav1.TypeMeta
// +optional // +optional
@ -4058,13 +4060,16 @@ type Endpoints struct {
// EndpointSubset is a group of addresses with a common set of ports. The // EndpointSubset is a group of addresses with a common set of ports. The
// expanded set of endpoints is the Cartesian product of Addresses x Ports. // expanded set of endpoints is the Cartesian product of Addresses x Ports.
// For example, given: // For example, given:
// { //
// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], // {
// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] // Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
// } // Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
// }
//
// The resulting set of endpoints can be viewed as: // The resulting set of endpoints can be viewed as:
// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], //
// b: [ 10.10.1.1:309, 10.10.2.2:309 ] // a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
type EndpointSubset struct { type EndpointSubset struct {
Addresses []EndpointAddress Addresses []EndpointAddress
NotReadyAddresses []EndpointAddress NotReadyAddresses []EndpointAddress

View File

@ -4083,8 +4083,9 @@ var sysctlContainSlashRegexp = regexp.MustCompile("^" + SysctlContainSlashFmt +
// IsValidSysctlName checks that the given string is a valid sysctl name, // IsValidSysctlName checks that the given string is a valid sysctl name,
// i.e. matches SysctlContainSlashFmt. // i.e. matches SysctlContainSlashFmt.
// More info: // More info:
// https://man7.org/linux/man-pages/man8/sysctl.8.html //
// https://man7.org/linux/man-pages/man5/sysctl.d.5.html // https://man7.org/linux/man-pages/man8/sysctl.8.html
// https://man7.org/linux/man-pages/man5/sysctl.d.5.html
func IsValidSysctlName(name string) bool { func IsValidSysctlName(name string) bool {
if len(name) > SysctlMaxLength { if len(name) > SysctlMaxLength {
return false return false

View File

@ -391,8 +391,8 @@ const (
// LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. // LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits.
// It addresses two issues: // It addresses two issues:
// * How are requests for this priority level limited? // - How are requests for this priority level limited?
// * What should be done with requests that exceed the limit? // - What should be done with requests that exceed the limit?
type LimitedPriorityLevelConfiguration struct { type LimitedPriorityLevelConfiguration struct {
// `assuredConcurrencyShares` (ACS) configures the execution // `assuredConcurrencyShares` (ACS) configures the execution
// limit, which is a limit on the number of requests of this // limit, which is a limit on the number of requests of this

View File

@ -468,10 +468,10 @@ func ValidatePriorityLevelConfigurationCondition(condition *flowcontrol.Priority
} }
// ValidateNonResourceURLPath validates non-resource-url path by following rules: // ValidateNonResourceURLPath validates non-resource-url path by following rules:
// 1. Slash must be the leading character of the path // 1. Slash must be the leading character of the path
// 2. White-space is forbidden in the path // 2. White-space is forbidden in the path
// 3. Continuous/double slash is forbidden in the path // 3. Continuous/double slash is forbidden in the path
// 4. Wildcard "*" should only do suffix glob matching. Note that wildcard also matches slashes. // 4. Wildcard "*" should only do suffix glob matching. Note that wildcard also matches slashes.
func ValidateNonResourceURLPath(path string, fldPath *field.Path) *field.Error { func ValidateNonResourceURLPath(path string, fldPath *field.Path) *field.Error {
if len(path) == 0 { if len(path) == 0 {
return field.Invalid(fldPath, path, "must not be empty") return field.Invalid(fldPath, path, "must not be empty")

View File

@ -408,8 +408,9 @@ var sysctlContainSlashPatternRegexp = regexp.MustCompile("^" + SysctlContainSlas
// IsValidSysctlPattern checks if name is a valid sysctl pattern. // IsValidSysctlPattern checks if name is a valid sysctl pattern.
// i.e. matches sysctlContainSlashPatternRegexp. // i.e. matches sysctlContainSlashPatternRegexp.
// More info: // More info:
// https://man7.org/linux/man-pages/man8/sysctl.8.html //
// https://man7.org/linux/man-pages/man5/sysctl.d.5.html // https://man7.org/linux/man-pages/man8/sysctl.8.html
// https://man7.org/linux/man-pages/man5/sysctl.d.5.html
func IsValidSysctlPattern(name string) bool { func IsValidSysctlPattern(name string) bool {
if len(name) > apivalidation.SysctlMaxLength { if len(name) > apivalidation.SysctlMaxLength {
return false return false

View File

@ -37,17 +37,17 @@ type SigningPolicy interface {
// PermissiveSigningPolicy is the signing policy historically used by the local // PermissiveSigningPolicy is the signing policy historically used by the local
// signer. // signer.
// //
// * It forwards all SANs from the original signing request. // - It forwards all SANs from the original signing request.
// * It sets allowed usages as configured in the policy. // - It sets allowed usages as configured in the policy.
// * It zeros all extensions. // - It zeros all extensions.
// * It sets BasicConstraints to true. // - It sets BasicConstraints to true.
// * It sets IsCA to false. // - It sets IsCA to false.
// * It validates that the signer has not expired. // - It validates that the signer has not expired.
// * It sets NotBefore and NotAfter: // - It sets NotBefore and NotAfter:
// All certificates set NotBefore = Now() - Backdate. // All certificates set NotBefore = Now() - Backdate.
// Long-lived certificates set NotAfter = Now() + TTL - Backdate. // Long-lived certificates set NotAfter = Now() + TTL - Backdate.
// Short-lived certificates set NotAfter = Now() + TTL. // Short-lived certificates set NotAfter = Now() + TTL.
// All certificates truncate NotAfter to the expiration date of the signer. // All certificates truncate NotAfter to the expiration date of the signer.
type PermissiveSigningPolicy struct { type PermissiveSigningPolicy struct {
// TTL is used in certificate NotAfter calculation as described above. // TTL is used in certificate NotAfter calculation as described above.
TTL time.Duration TTL time.Duration

View File

@ -54,8 +54,8 @@ func (m *BaseControllerRefManager) CanAdopt(ctx context.Context) error {
// ClaimObject tries to take ownership of an object for this controller. // ClaimObject tries to take ownership of an object for this controller.
// //
// It will reconcile the following: // It will reconcile the following:
// * Adopt orphans if the match function returns true. // - Adopt orphans if the match function returns true.
// * Release owned objects if the match function returns false. // - Release owned objects if the match function returns false.
// //
// A non-nil error is returned if some form of reconciliation was attempted and // A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation // failed. Usually, controllers should try again later in case reconciliation
@ -143,8 +143,9 @@ type PodControllerRefManager struct {
// If CanAdopt() returns a non-nil error, all adoptions will fail. // If CanAdopt() returns a non-nil error, all adoptions will fail.
// //
// NOTE: Once CanAdopt() is called, it will not be called again by the same // NOTE: Once CanAdopt() is called, it will not be called again by the same
// PodControllerRefManager instance. Create a new instance if it makes //
// sense to check CanAdopt() again (e.g. in a different sync pass). // PodControllerRefManager instance. Create a new instance if it makes
// sense to check CanAdopt() again (e.g. in a different sync pass).
func NewPodControllerRefManager( func NewPodControllerRefManager(
podControl PodControlInterface, podControl PodControlInterface,
controller metav1.Object, controller metav1.Object,
@ -168,8 +169,8 @@ func NewPodControllerRefManager(
// ClaimPods tries to take ownership of a list of Pods. // ClaimPods tries to take ownership of a list of Pods.
// //
// It will reconcile the following: // It will reconcile the following:
// * Adopt orphans if the selector matches. // - Adopt orphans if the selector matches.
// * Release owned objects if the selector no longer matches. // - Release owned objects if the selector no longer matches.
// //
// Optional: If one or more filters are specified, a Pod will only be claimed if // Optional: If one or more filters are specified, a Pod will only be claimed if
// all filters return true. // all filters return true.
@ -283,8 +284,9 @@ type ReplicaSetControllerRefManager struct {
// If CanAdopt() returns a non-nil error, all adoptions will fail. // If CanAdopt() returns a non-nil error, all adoptions will fail.
// //
// NOTE: Once CanAdopt() is called, it will not be called again by the same // NOTE: Once CanAdopt() is called, it will not be called again by the same
// ReplicaSetControllerRefManager instance. Create a new instance if it //
// makes sense to check CanAdopt() again (e.g. in a different sync pass). // ReplicaSetControllerRefManager instance. Create a new instance if it
// makes sense to check CanAdopt() again (e.g. in a different sync pass).
func NewReplicaSetControllerRefManager( func NewReplicaSetControllerRefManager(
rsControl RSControlInterface, rsControl RSControlInterface,
controller metav1.Object, controller metav1.Object,
@ -306,8 +308,8 @@ func NewReplicaSetControllerRefManager(
// ClaimReplicaSets tries to take ownership of a list of ReplicaSets. // ClaimReplicaSets tries to take ownership of a list of ReplicaSets.
// //
// It will reconcile the following: // It will reconcile the following:
// * Adopt orphans if the selector matches. // - Adopt orphans if the selector matches.
// * Release owned objects if the selector no longer matches. // - Release owned objects if the selector no longer matches.
// //
// A non-nil error is returned if some form of reconciliation was attempted and // A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation // failed. Usually, controllers should try again later in case reconciliation
@ -421,8 +423,9 @@ type ControllerRevisionControllerRefManager struct {
// If canAdopt() returns a non-nil error, all adoptions will fail. // If canAdopt() returns a non-nil error, all adoptions will fail.
// //
// NOTE: Once canAdopt() is called, it will not be called again by the same // NOTE: Once canAdopt() is called, it will not be called again by the same
// ControllerRevisionControllerRefManager instance. Create a new instance if it //
// makes sense to check canAdopt() again (e.g. in a different sync pass). // ControllerRevisionControllerRefManager instance. Create a new instance if it
// makes sense to check canAdopt() again (e.g. in a different sync pass).
func NewControllerRevisionControllerRefManager( func NewControllerRevisionControllerRefManager(
crControl ControllerRevisionControlInterface, crControl ControllerRevisionControlInterface,
controller metav1.Object, controller metav1.Object,
@ -444,8 +447,8 @@ func NewControllerRevisionControllerRefManager(
// ClaimControllerRevisions tries to take ownership of a list of ControllerRevisions. // ClaimControllerRevisions tries to take ownership of a list of ControllerRevisions.
// //
// It will reconcile the following: // It will reconcile the following:
// * Adopt orphans if the selector matches. // - Adopt orphans if the selector matches.
// * Release owned objects if the selector no longer matches. // - Release owned objects if the selector no longer matches.
// //
// A non-nil error is returned if some form of reconciliation was attempted and // A non-nil error is returned if some form of reconciliation was attempted and
// failed. Usually, controllers should try again later in case reconciliation // failed. Usually, controllers should try again later in case reconciliation

View File

@ -755,24 +755,24 @@ func (s ActivePods) Less(i, j int) bool {
// length. After sorting, the pods will be ordered as follows, applying each // length. After sorting, the pods will be ordered as follows, applying each
// rule in turn until one matches: // rule in turn until one matches:
// //
// 1. If only one of the pods is assigned to a node, the pod that is not // 1. If only one of the pods is assigned to a node, the pod that is not
// assigned comes before the pod that is. // assigned comes before the pod that is.
// 2. If the pods' phases differ, a pending pod comes before a pod whose phase // 2. If the pods' phases differ, a pending pod comes before a pod whose phase
// is unknown, and a pod whose phase is unknown comes before a running pod. // is unknown, and a pod whose phase is unknown comes before a running pod.
// 3. If exactly one of the pods is ready, the pod that is not ready comes // 3. If exactly one of the pods is ready, the pod that is not ready comes
// before the ready pod. // before the ready pod.
// 4. If controller.kubernetes.io/pod-deletion-cost annotation is set, then // 4. If controller.kubernetes.io/pod-deletion-cost annotation is set, then
// the pod with the lower value will come first. // the pod with the lower value will come first.
// 5. If the pods' ranks differ, the pod with greater rank comes before the pod // 5. If the pods' ranks differ, the pod with greater rank comes before the pod
// with lower rank. // with lower rank.
// 6. If both pods are ready but have not been ready for the same amount of // 6. If both pods are ready but have not been ready for the same amount of
// time, the pod that has been ready for a shorter amount of time comes // time, the pod that has been ready for a shorter amount of time comes
// before the pod that has been ready for longer. // before the pod that has been ready for longer.
// 7. If one pod has a container that has restarted more than any container in // 7. If one pod has a container that has restarted more than any container in
// the other pod, the pod with the container with more restarts comes // the other pod, the pod with the container with more restarts comes
// before the other pod. // before the other pod.
// 8. If the pods' creation times differ, the pod that was created more recently // 8. If the pods' creation times differ, the pod that was created more recently
// comes before the older pod. // comes before the older pod.
// //
// In 6 and 8, times are compared in a logarithmic scale. This allows a level // In 6 and 8, times are compared in a logarithmic scale. This allows a level
// of randomness among equivalent Pods when sorting. If two pods have the same // of randomness among equivalent Pods when sorting. If two pods have the same

View File

@ -58,7 +58,9 @@ func deleteFromActiveList(cj *batchv1.CronJob, uid types.UID) {
} }
// getNextScheduleTime gets the time of next schedule after last scheduled and before now // getNextScheduleTime gets the time of next schedule after last scheduled and before now
// it returns nil if no unmet schedule times. //
// it returns nil if no unmet schedule times.
//
// If there are too many (>100) unstarted times, it will raise a warning and but still return // If there are too many (>100) unstarted times, it will raise a warning and but still return
// the list of missed times. // the list of missed times.
func getNextScheduleTime(cj batchv1.CronJob, now time.Time, schedule cron.Schedule, recorder record.EventRecorder) (*time.Time, error) { func getNextScheduleTime(cj batchv1.CronJob, now time.Time, schedule cron.Schedule, recorder record.EventRecorder) (*time.Time, error) {

View File

@ -1263,10 +1263,10 @@ func (dsc *DaemonSetsController) syncDaemonSet(ctx context.Context, key string)
// NodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a // NodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a
// summary. Returned booleans are: // summary. Returned booleans are:
// * shouldRun: // - shouldRun:
// Returns true when a daemonset should run on the node if a daemonset pod is not already // Returns true when a daemonset should run on the node if a daemonset pod is not already
// running on that node. // running on that node.
// * shouldContinueRunning: // - shouldContinueRunning:
// Returns true when a daemonset should continue running on a node if a daemonset pod is already // Returns true when a daemonset should continue running on a node if a daemonset pod is already
// running on that node. // running on that node.
func NodeShouldRunDaemonPod(node *v1.Node, ds *apps.DaemonSet) (bool, bool) { func NodeShouldRunDaemonPod(node *v1.Node, ds *apps.DaemonSet) (bool, bool) {

View File

@ -106,10 +106,10 @@ func (dc *DeploymentController) checkPausedConditions(ctx context.Context, d *ap
// //
// rsList should come from getReplicaSetsForDeployment(d). // rsList should come from getReplicaSetsForDeployment(d).
// //
// 1. Get all old RSes this deployment targets, and calculate the max revision number among them (maxOldV). // 1. Get all old RSes this deployment targets, and calculate the max revision number among them (maxOldV).
// 2. Get new RS this deployment targets (whose pod template matches deployment's), and update new RS's revision number to (maxOldV + 1), // 2. Get new RS this deployment targets (whose pod template matches deployment's), and update new RS's revision number to (maxOldV + 1),
// only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop. // only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop.
// 3. Copy new RS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop. // 3. Copy new RS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop.
// //
// Note that currently the deployment controller is using caches to avoid querying the server for reads. // Note that currently the deployment controller is using caches to avoid querying the server for reads.
// This may lead to stale reads of replica sets, thus incorrect deployment status. // This may lead to stale reads of replica sets, thus incorrect deployment status.

View File

@ -302,7 +302,8 @@ var annotationsToSkip = map[string]bool{
// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key // skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key
// TODO: How to decide which annotations should / should not be copied? // TODO: How to decide which annotations should / should not be copied?
// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615 //
// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615
func skipCopyAnnotation(key string) bool { func skipCopyAnnotation(key string) bool {
return annotationsToSkip[key] return annotationsToSkip[key]
} }
@ -595,9 +596,9 @@ func ListPods(deployment *apps.Deployment, rsList []*apps.ReplicaSet, getPodList
// EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash] // EqualIgnoreHash returns true if two given podTemplateSpec are equal, ignoring the diff in value of Labels[pod-template-hash]
// We ignore pod-template-hash because: // We ignore pod-template-hash because:
// 1. The hash result would be different upon podTemplateSpec API changes // 1. The hash result would be different upon podTemplateSpec API changes
// (e.g. the addition of a new field will cause the hash code to change) // (e.g. the addition of a new field will cause the hash code to change)
// 2. The deployment template won't have hash labels // 2. The deployment template won't have hash labels
func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool { func EqualIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool {
t1Copy := template1.DeepCopy() t1Copy := template1.DeepCopy()
t2Copy := template2.DeepCopy() t2Copy := template2.DeepCopy()

View File

@ -1029,7 +1029,7 @@ func TestMaxUnavailable(t *testing.T) {
} }
} }
//Set of simple tests for annotation related util functions // Set of simple tests for annotation related util functions
func TestAnnotationUtils(t *testing.T) { func TestAnnotationUtils(t *testing.T) {
//Setup //Setup

View File

@ -269,7 +269,7 @@ func updatePodOwnerToRs(t *testing.T, pod *v1.Pod, rs *apps.ReplicaSet) {
pod.OwnerReferences = append(pod.OwnerReferences, controllerReference) pod.OwnerReferences = append(pod.OwnerReferences, controllerReference)
} }
// pod, podName := newPod(t, name) // pod, podName := newPod(t, name)
func updatePodOwnerToSs(t *testing.T, pod *v1.Pod, ss *apps.StatefulSet) { func updatePodOwnerToSs(t *testing.T, pod *v1.Pod, ss *apps.StatefulSet) {
var controllerReference metav1.OwnerReference var controllerReference metav1.OwnerReference
var trueVar = true var trueVar = true

View File

@ -389,13 +389,13 @@ func (r *reconciler) finalize(
// the list of desired endpoints and returns lists of slices to create, update, // the list of desired endpoints and returns lists of slices to create, update,
// and delete. It also checks that the slices mirror the parent services labels. // and delete. It also checks that the slices mirror the parent services labels.
// The logic is split up into several main steps: // The logic is split up into several main steps:
// 1. Iterate through existing slices, delete endpoints that are no longer // 1. Iterate through existing slices, delete endpoints that are no longer
// desired and update matching endpoints that have changed. It also checks // desired and update matching endpoints that have changed. It also checks
// if the slices have the labels of the parent services, and updates them if not. // if the slices have the labels of the parent services, and updates them if not.
// 2. Iterate through slices that have been modified in 1 and fill them up with // 2. Iterate through slices that have been modified in 1 and fill them up with
// any remaining desired endpoints. // any remaining desired endpoints.
// 3. If there still desired endpoints left, try to fit them into a previously // 3. If there still desired endpoints left, try to fit them into a previously
// unchanged slice and/or create new ones. // unchanged slice and/or create new ones.
func (r *reconciler) reconcileByPortMapping( func (r *reconciler) reconcileByPortMapping(
service *corev1.Service, service *corev1.Service,
existingSlices []*discovery.EndpointSlice, existingSlices []*discovery.EndpointSlice,

View File

@ -47,11 +47,11 @@ func (si *SliceInfo) getTotalReadyEndpoints() int {
// getAllocatedHintsByZone sums up the allocated hints we currently have in // getAllocatedHintsByZone sums up the allocated hints we currently have in
// unchanged slices and marks slices for update as necessary. A slice needs to // unchanged slices and marks slices for update as necessary. A slice needs to
// be updated if any of the following are true: // be updated if any of the following are true:
// - It has an endpoint without zone hints // - It has an endpoint without zone hints
// - It has an endpoint hint for a zone that no longer needs any // - It has an endpoint hint for a zone that no longer needs any
// - It has endpoint hints that would make the minimum allocations necessary // - It has endpoint hints that would make the minimum allocations necessary
// impossible with changes to slices that are already being updated or // impossible with changes to slices that are already being updated or
// created. // created.
func (si *SliceInfo) getAllocatedHintsByZone(allocations map[string]Allocation) EndpointZoneInfo { func (si *SliceInfo) getAllocatedHintsByZone(allocations map[string]Allocation) EndpointZoneInfo {
allocatedHintsByZone := EndpointZoneInfo{} allocatedHintsByZone := EndpointZoneInfo{}

View File

@ -981,11 +981,12 @@ func (jm *Controller) removeTrackingFinalizersFromAllPods(ctx context.Context, p
} }
// trackJobStatusAndRemoveFinalizers does: // trackJobStatusAndRemoveFinalizers does:
// 1. Add finished Pods to .status.uncountedTerminatedPods // 1. Add finished Pods to .status.uncountedTerminatedPods
// 2. Remove the finalizers from the Pods if they completed or were removed // 2. Remove the finalizers from the Pods if they completed or were removed
// or the job was removed. // or the job was removed.
// 3. Increment job counters for pods that no longer have a finalizer. // 3. Increment job counters for pods that no longer have a finalizer.
// 4. Add Complete condition if satisfied with current counters. // 4. Add Complete condition if satisfied with current counters.
//
// It does this up to a limited number of Pods so that the size of .status // It does this up to a limited number of Pods so that the size of .status
// doesn't grow too much and this sync doesn't starve other Jobs. // doesn't grow too much and this sync doesn't starve other Jobs.
func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job *batch.Job, pods []*v1.Pod, succeededIndexes orderedIntervals, uncounted uncountedTerminatedPods, expectedRmFinalizers sets.String, finishedCond *batch.JobCondition, needsFlush bool) error { func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job *batch.Job, pods []*v1.Pod, succeededIndexes orderedIntervals, uncounted uncountedTerminatedPods, expectedRmFinalizers sets.String, finishedCond *batch.JobCondition, needsFlush bool) error {
@ -1076,12 +1077,13 @@ func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job
} }
// flushUncountedAndRemoveFinalizers does: // flushUncountedAndRemoveFinalizers does:
// 1. flush the Job status that might include new uncounted Pod UIDs. // 1. flush the Job status that might include new uncounted Pod UIDs.
// 2. perform the removal of finalizers from Pods which are in the uncounted // 2. perform the removal of finalizers from Pods which are in the uncounted
// lists. // lists.
// 3. update the counters based on the Pods for which it successfully removed // 3. update the counters based on the Pods for which it successfully removed
// the finalizers. // the finalizers.
// 4. (if not all removals succeeded) flush Job status again. // 4. (if not all removals succeeded) flush Job status again.
//
// Returns whether there are pending changes in the Job status that need to be // Returns whether there are pending changes in the Job status that need to be
// flushed in subsequent calls. // flushed in subsequent calls.
func (jm *Controller) flushUncountedAndRemoveFinalizers(ctx context.Context, job *batch.Job, podsToRemoveFinalizer []*v1.Pod, uidsWithFinalizer sets.String, oldCounters *batch.JobStatus, needsFlush bool) (*batch.Job, bool, error) { func (jm *Controller) flushUncountedAndRemoveFinalizers(ctx context.Context, job *batch.Job, podsToRemoveFinalizer []*v1.Pod, uidsWithFinalizer sets.String, oldCounters *batch.JobStatus, needsFlush bool) (*batch.Job, bool, error) {

View File

@ -81,10 +81,11 @@ type namespacedResourcesDeleter struct {
// Delete deletes all resources in the given namespace. // Delete deletes all resources in the given namespace.
// Before deleting resources: // Before deleting resources:
// * It ensures that deletion timestamp is set on the // - It ensures that deletion timestamp is set on the
// namespace (does nothing if deletion timestamp is missing). // namespace (does nothing if deletion timestamp is missing).
// * Verifies that the namespace is in the "terminating" phase // - Verifies that the namespace is in the "terminating" phase
// (updates the namespace phase if it is not yet marked terminating) // (updates the namespace phase if it is not yet marked terminating)
//
// After deleting the resources: // After deleting the resources:
// * It removes finalizer token from the given namespace. // * It removes finalizer token from the given namespace.
// //
@ -339,9 +340,10 @@ func (d *namespacedResourcesDeleter) deleteCollection(gvr schema.GroupVersionRes
// listCollection will list the items in the specified namespace // listCollection will list the items in the specified namespace
// it returns the following: // it returns the following:
// the list of items in the collection (if found) //
// a boolean if the operation is supported // the list of items in the collection (if found)
// an error if the operation is supported but could not be completed. // a boolean if the operation is supported
// an error if the operation is supported but could not be completed.
func (d *namespacedResourcesDeleter) listCollection(gvr schema.GroupVersionResource, namespace string) (*metav1.PartialObjectMetadataList, bool, error) { func (d *namespacedResourcesDeleter) listCollection(gvr schema.GroupVersionResource, namespace string) (*metav1.PartialObjectMetadataList, bool, error) {
klog.V(5).Infof("namespace controller - listCollection - namespace: %s, gvr: %v", namespace, gvr) klog.V(5).Infof("namespace controller - listCollection - namespace: %s, gvr: %v", namespace, gvr)

View File

@ -17,14 +17,14 @@ limitations under the License.
// Package ipam provides different allocators for assigning IP ranges to nodes. // Package ipam provides different allocators for assigning IP ranges to nodes.
// We currently support several kinds of IPAM allocators (these are denoted by // We currently support several kinds of IPAM allocators (these are denoted by
// the CIDRAllocatorType): // the CIDRAllocatorType):
// - RangeAllocator is an allocator that assigns PodCIDRs to nodes and works // - RangeAllocator is an allocator that assigns PodCIDRs to nodes and works
// in conjunction with the RouteController to configure the network to get // in conjunction with the RouteController to configure the network to get
// connectivity. // connectivity.
// - CloudAllocator is an allocator that synchronizes PodCIDRs from IP // - CloudAllocator is an allocator that synchronizes PodCIDRs from IP
// ranges assignments from the underlying cloud platform. // ranges assignments from the underlying cloud platform.
// - (Alpha only) IPAMFromCluster is an allocator that has the similar // - (Alpha only) IPAMFromCluster is an allocator that has the similar
// functionality as the RangeAllocator but also synchronizes cluster-managed // functionality as the RangeAllocator but also synchronizes cluster-managed
// ranges into the cloud platform. // ranges into the cloud platform.
// - (Alpha only) IPAMFromCloud is the same as CloudAllocator (synchronizes // - (Alpha only) IPAMFromCloud is the same as CloudAllocator (synchronizes
// from cloud into the cluster.) // from cloud into the cluster.)
package ipam package ipam

View File

@ -133,9 +133,9 @@ const (
// labelReconcileInfo lists Node labels to reconcile, and how to reconcile them. // labelReconcileInfo lists Node labels to reconcile, and how to reconcile them.
// primaryKey and secondaryKey are keys of labels to reconcile. // primaryKey and secondaryKey are keys of labels to reconcile.
// - If both keys exist, but their values don't match. Use the value from the // - If both keys exist, but their values don't match. Use the value from the
// primaryKey as the source of truth to reconcile. // primaryKey as the source of truth to reconcile.
// - If ensureSecondaryExists is true, and the secondaryKey does not // - If ensureSecondaryExists is true, and the secondaryKey does not
// exist, secondaryKey will be added with the value of the primaryKey. // exist, secondaryKey will be added with the value of the primaryKey.
var labelReconcileInfo = []struct { var labelReconcileInfo = []struct {
primaryKey string primaryKey string
secondaryKey string secondaryKey string
@ -1375,9 +1375,9 @@ func (nc *Controller) setLimiterInZone(zone string, zoneSize int, state ZoneStat
} }
// classifyNodes classifies the allNodes to three categories: // classifyNodes classifies the allNodes to three categories:
// 1. added: the nodes that in 'allNodes', but not in 'knownNodeSet' // 1. added: the nodes that in 'allNodes', but not in 'knownNodeSet'
// 2. deleted: the nodes that in 'knownNodeSet', but not in 'allNodes' // 2. deleted: the nodes that in 'knownNodeSet', but not in 'allNodes'
// 3. newZoneRepresentatives: the nodes that in both 'knownNodeSet' and 'allNodes', but no zone states // 3. newZoneRepresentatives: the nodes that in both 'knownNodeSet' and 'allNodes', but no zone states
func (nc *Controller) classifyNodes(allNodes []*v1.Node) (added, deleted, newZoneRepresentatives []*v1.Node) { func (nc *Controller) classifyNodes(allNodes []*v1.Node) (added, deleted, newZoneRepresentatives []*v1.Node) {
for i := range allNodes { for i := range allNodes {
if _, has := nc.knownNodeSet[allNodes[i].Name]; !has { if _, has := nc.knownNodeSet[allNodes[i].Name]; !has {
@ -1464,10 +1464,10 @@ func (nc *Controller) cancelPodEviction(node *v1.Node) bool {
} }
// evictPods: // evictPods:
// - adds node to evictor queue if the node is not marked as evicted. // - adds node to evictor queue if the node is not marked as evicted.
// Returns false if the node name was already enqueued. // Returns false if the node name was already enqueued.
// - deletes pods immediately if node is already marked as evicted. // - deletes pods immediately if node is already marked as evicted.
// Returns false, because the node wasn't added to the queue. // Returns false, because the node wasn't added to the queue.
func (nc *Controller) evictPods(ctx context.Context, node *v1.Node, pods []*v1.Pod) (bool, error) { func (nc *Controller) evictPods(ctx context.Context, node *v1.Node, pods []*v1.Pod) (bool, error) {
status, ok := nc.nodeEvictionMap.getStatus(node.Name) status, ok := nc.nodeEvictionMap.getStatus(node.Name)
if ok && status == evicted { if ok && status == evicted {

View File

@ -3088,18 +3088,23 @@ func generateScalingRules(pods, podsPeriod, percent, percentPeriod, stabilizatio
} }
// generateEventsUniformDistribution generates events that uniformly spread in the time window // generateEventsUniformDistribution generates events that uniformly spread in the time window
// time.Now()-periodSeconds ; time.Now() //
// time.Now()-periodSeconds ; time.Now()
//
// It split the time window into several segments (by the number of events) and put the event in the center of the segment // It split the time window into several segments (by the number of events) and put the event in the center of the segment
// it is needed if you want to create events for several policies (to check how "outdated" flag is set). // it is needed if you want to create events for several policies (to check how "outdated" flag is set).
// E.g. generateEventsUniformDistribution([]int{1,2,3,4}, 120) will spread events uniformly for the last 120 seconds: // E.g. generateEventsUniformDistribution([]int{1,2,3,4}, 120) will spread events uniformly for the last 120 seconds:
// //
// 1 2 3 4 // 1 2 3 4
//
// ----------------------------------------------- // -----------------------------------------------
// ^ ^ ^ ^ ^ //
// ^ ^ ^ ^ ^
//
// -120s -90s -60s -30s now() // -120s -90s -60s -30s now()
// And we can safely have two different stabilizationWindows: // And we can safely have two different stabilizationWindows:
// - 60s (guaranteed to have last half of events) // - 60s (guaranteed to have last half of events)
// - 120s (guaranteed to have all events) // - 120s (guaranteed to have all events)
func generateEventsUniformDistribution(rawEvents []int, periodSeconds int) []timestampedScaleEvent { func generateEventsUniformDistribution(rawEvents []int, periodSeconds int) []timestampedScaleEvent {
events := make([]timestampedScaleEvent, len(rawEvents)) events := make([]timestampedScaleEvent, len(rawEvents))
segmentDuration := float64(periodSeconds) / float64(len(rawEvents)) segmentDuration := float64(periodSeconds) / float64(len(rawEvents))

View File

@ -290,7 +290,8 @@ func (ssc *StatefulSetController) deletePod(obj interface{}) {
// It also reconciles ControllerRef by adopting/orphaning. // It also reconciles ControllerRef by adopting/orphaning.
// //
// NOTE: Returned Pods are pointers to objects from the cache. // NOTE: Returned Pods are pointers to objects from the cache.
// If you need to modify one, you need to copy it first. //
// If you need to modify one, you need to copy it first.
func (ssc *StatefulSetController) getPodsForStatefulSet(ctx context.Context, set *apps.StatefulSet, selector labels.Selector) ([]*v1.Pod, error) { func (ssc *StatefulSetController) getPodsForStatefulSet(ctx context.Context, set *apps.StatefulSet, selector labels.Selector) ([]*v1.Pod, error) {
// List all pods to include the pods that don't match the selector anymore but // List all pods to include the pods that don't match the selector anymore but
// has a ControllerRef pointing to this StatefulSet. // has a ControllerRef pointing to this StatefulSet.

View File

@ -79,7 +79,7 @@ func getParentName(pod *v1.Pod) string {
return parent return parent
} }
// getOrdinal gets pod's ordinal. If pod has no ordinal, -1 is returned. // getOrdinal gets pod's ordinal. If pod has no ordinal, -1 is returned.
func getOrdinal(pod *v1.Pod) int { func getOrdinal(pod *v1.Pod) int {
_, ordinal := getParentNameAndOrdinal(pod) _, ordinal := getParentNameAndOrdinal(pod)
return ordinal return ordinal

View File

@ -81,11 +81,11 @@ func (est *EndpointSliceTracker) ShouldSync(endpointSlice *discovery.EndpointSli
} }
// StaleSlices returns true if any of the following are true: // StaleSlices returns true if any of the following are true:
// 1. One or more of the provided EndpointSlices have older generations than the // 1. One or more of the provided EndpointSlices have older generations than the
// corresponding tracked ones. // corresponding tracked ones.
// 2. The tracker is expecting one or more of the provided EndpointSlices to be // 2. The tracker is expecting one or more of the provided EndpointSlices to be
// deleted. (EndpointSlices that have already been marked for deletion are ignored here.) // deleted. (EndpointSlices that have already been marked for deletion are ignored here.)
// 3. The tracker is tracking EndpointSlices that have not been provided. // 3. The tracker is tracking EndpointSlices that have not been provided.
func (est *EndpointSliceTracker) StaleSlices(service *v1.Service, endpointSlices []*discovery.EndpointSlice) bool { func (est *EndpointSliceTracker) StaleSlices(service *v1.Service, endpointSlices []*discovery.EndpointSlice) bool {
est.lock.Lock() est.lock.Lock()
defer est.lock.Unlock() defer est.lock.Unlock()

View File

@ -693,8 +693,9 @@ func (adc *attachDetachController) processVolumesInUse(
// For each VA object, this function checks if its present in the ASW. // For each VA object, this function checks if its present in the ASW.
// If not, adds the volume to ASW as an "uncertain" attachment. // If not, adds the volume to ASW as an "uncertain" attachment.
// In the reconciler, the logic checks if the volume is present in the DSW; // In the reconciler, the logic checks if the volume is present in the DSW;
// if yes, the reconciler will attempt attach on the volume; //
// if not (could be a dangling attachment), the reconciler will detach this volume. // if yes, the reconciler will attempt attach on the volume;
// if not (could be a dangling attachment), the reconciler will detach this volume.
func (adc *attachDetachController) processVolumeAttachments() error { func (adc *attachDetachController) processVolumeAttachments() error {
vas, err := adc.volumeAttachmentLister.List(labels.Everything()) vas, err := adc.volumeAttachmentLister.List(labels.Everything())
if err != nil { if err != nil {

View File

@ -92,10 +92,11 @@ type attachDetachStateCollector struct {
} }
// volumeCount is a map of maps used as a counter, e.g.: // volumeCount is a map of maps used as a counter, e.g.:
// node 172.168.1.100.ec2.internal has 10 EBS and 3 glusterfs PVC in use: //
// {"172.168.1.100.ec2.internal": {"aws-ebs": 10, "glusterfs": 3}} // node 172.168.1.100.ec2.internal has 10 EBS and 3 glusterfs PVC in use:
// state actual_state_of_world contains a total of 10 EBS volumes: // {"172.168.1.100.ec2.internal": {"aws-ebs": 10, "glusterfs": 3}}
// {"actual_state_of_world": {"aws-ebs": 10}} // state actual_state_of_world contains a total of 10 EBS volumes:
// {"actual_state_of_world": {"aws-ebs": 10}}
type volumeCount map[string]map[string]int64 type volumeCount map[string]map[string]int64
func (v volumeCount) add(typeKey, counterKey string) { func (v volumeCount) add(typeKey, counterKey string) {

View File

@ -48,9 +48,13 @@ type DesiredStateOfWorldPopulator interface {
// NewDesiredStateOfWorldPopulator returns a new instance of DesiredStateOfWorldPopulator. // NewDesiredStateOfWorldPopulator returns a new instance of DesiredStateOfWorldPopulator.
// loopSleepDuration - the amount of time the populator loop sleeps between // loopSleepDuration - the amount of time the populator loop sleeps between
// successive executions //
// successive executions
//
// podManager - the kubelet podManager that is the source of truth for the pods // podManager - the kubelet podManager that is the source of truth for the pods
// that exist on this host //
// that exist on this host
//
// desiredStateOfWorld - the cache to populate // desiredStateOfWorld - the cache to populate
func NewDesiredStateOfWorldPopulator( func NewDesiredStateOfWorldPopulator(
loopSleepDuration time.Duration, loopSleepDuration time.Duration,

View File

@ -26,10 +26,10 @@ import (
) )
// Test single call to syncClaim and syncVolume methods. // Test single call to syncClaim and syncVolume methods.
// 1. Fill in the controller with initial data // 1. Fill in the controller with initial data
// 2. Call the tested function (syncClaim/syncVolume) via // 2. Call the tested function (syncClaim/syncVolume) via
// controllerTest.testCall *once*. // controllerTest.testCall *once*.
// 3. Compare resulting volumes and claims with expected volumes and claims. // 3. Compare resulting volumes and claims with expected volumes and claims.
func TestSync(t *testing.T) { func TestSync(t *testing.T) {
labels := map[string]string{ labels := map[string]string{
"foo": "true", "foo": "true",
@ -842,17 +842,18 @@ func TestSyncBlockVolume(t *testing.T) {
// Test multiple calls to syncClaim/syncVolume and periodic sync of all // Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern: // volume/claims. The test follows this pattern:
// 0. Load the controller with initial data. // 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync() // 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, // 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" // call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything. // events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call // 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). // syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of // 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again) // "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set // 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences. // of volumes/claims with expected claims/volumes and report differences.
//
// Some limit of calls in enforced to prevent endless loops. // Some limit of calls in enforced to prevent endless loops.
func TestMultiSync(t *testing.T) { func TestMultiSync(t *testing.T) {
tests := []controllerTest{ tests := []controllerTest{

View File

@ -209,17 +209,18 @@ func TestDeleteSync(t *testing.T) {
// Test multiple calls to syncClaim/syncVolume and periodic sync of all // Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern: // volume/claims. The test follows this pattern:
// 0. Load the controller with initial data. // 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync() // 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, // 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" // call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything. // events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call // 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). // syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of // 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again) // "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set // 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences. // of volumes/claims with expected claims/volumes and report differences.
//
// Some limit of calls in enforced to prevent endless loops. // Some limit of calls in enforced to prevent endless loops.
func TestDeleteMultiSync(t *testing.T) { func TestDeleteMultiSync(t *testing.T) {
tests := []controllerTest{ tests := []controllerTest{

View File

@ -71,9 +71,10 @@ func init() {
// function to call as the actual test. Available functions are: // function to call as the actual test. Available functions are:
// - testSyncClaim - calls syncClaim on the first claim in initialClaims. // - testSyncClaim - calls syncClaim on the first claim in initialClaims.
// - testSyncClaimError - calls syncClaim on the first claim in initialClaims // - testSyncClaimError - calls syncClaim on the first claim in initialClaims
// and expects an error to be returned. // and expects an error to be returned.
// - testSyncVolume - calls syncVolume on the first volume in initialVolumes. // - testSyncVolume - calls syncVolume on the first volume in initialVolumes.
// - any custom function for specialized tests. // - any custom function for specialized tests.
//
// The test then contains list of volumes/claims that are expected at the end // The test then contains list of volumes/claims that are expected at the end
// of the test and list of generated events. // of the test and list of generated events.
type controllerTest struct { type controllerTest struct {
@ -602,10 +603,10 @@ var (
) )
// wrapTestWithPluginCalls returns a testCall that: // wrapTestWithPluginCalls returns a testCall that:
// - configures controller with a volume plugin that implements recycler, // - configures controller with a volume plugin that implements recycler,
// deleter and provisioner. The plugin returns provided errors when a volume // deleter and provisioner. The plugin returns provided errors when a volume
// is deleted, recycled or provisioned. // is deleted, recycled or provisioned.
// - calls given testCall // - calls given testCall
func wrapTestWithPluginCalls(expectedRecycleCalls, expectedDeleteCalls []error, expectedProvisionCalls []provisionCall, toWrap testCall) testCall { func wrapTestWithPluginCalls(expectedRecycleCalls, expectedDeleteCalls []error, expectedProvisionCalls []provisionCall, toWrap testCall) testCall {
return func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error { return func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
plugin := &mockVolumePlugin{ plugin := &mockVolumePlugin{
@ -619,9 +620,9 @@ func wrapTestWithPluginCalls(expectedRecycleCalls, expectedDeleteCalls []error,
} }
// wrapTestWithReclaimCalls returns a testCall that: // wrapTestWithReclaimCalls returns a testCall that:
// - configures controller with recycler or deleter which will return provided // - configures controller with recycler or deleter which will return provided
// errors when a volume is deleted or recycled // errors when a volume is deleted or recycled
// - calls given testCall // - calls given testCall
func wrapTestWithReclaimCalls(operation operationType, expectedOperationCalls []error, toWrap testCall) testCall { func wrapTestWithReclaimCalls(operation operationType, expectedOperationCalls []error, toWrap testCall) testCall {
if operation == operationDelete { if operation == operationDelete {
return wrapTestWithPluginCalls(nil, expectedOperationCalls, nil, toWrap) return wrapTestWithPluginCalls(nil, expectedOperationCalls, nil, toWrap)
@ -631,9 +632,9 @@ func wrapTestWithReclaimCalls(operation operationType, expectedOperationCalls []
} }
// wrapTestWithProvisionCalls returns a testCall that: // wrapTestWithProvisionCalls returns a testCall that:
// - configures controller with a provisioner which will return provided errors // - configures controller with a provisioner which will return provided errors
// when a claim is provisioned // when a claim is provisioned
// - calls given testCall // - calls given testCall
func wrapTestWithProvisionCalls(expectedProvisionCalls []provisionCall, toWrap testCall) testCall { func wrapTestWithProvisionCalls(expectedProvisionCalls []provisionCall, toWrap testCall) testCall {
return wrapTestWithPluginCalls(nil, nil, expectedProvisionCalls, toWrap) return wrapTestWithPluginCalls(nil, nil, expectedProvisionCalls, toWrap)
} }
@ -664,11 +665,11 @@ func wrapTestWithCSIMigrationProvisionCalls(toWrap testCall) testCall {
} }
// wrapTestWithInjectedOperation returns a testCall that: // wrapTestWithInjectedOperation returns a testCall that:
// - starts the controller and lets it run original testCall until // - starts the controller and lets it run original testCall until
// scheduleOperation() call. It blocks the controller there and calls the // scheduleOperation() call. It blocks the controller there and calls the
// injected function to simulate that something is happening when the // injected function to simulate that something is happening when the
// controller waits for the operation lock. Controller is then resumed and we // controller waits for the operation lock. Controller is then resumed and we
// check how it behaves. // check how it behaves.
func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor)) testCall { func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor)) testCall {
return func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error { return func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
@ -716,10 +717,10 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *pvtesting.Vo
// Test single call to syncClaim and syncVolume methods. // Test single call to syncClaim and syncVolume methods.
// For all tests: // For all tests:
// 1. Fill in the controller with initial data // 1. Fill in the controller with initial data
// 2. Call the tested function (syncClaim/syncVolume) via // 2. Call the tested function (syncClaim/syncVolume) via
// controllerTest.testCall *once*. // controllerTest.testCall *once*.
// 3. Compare resulting volumes and claims with expected volumes and claims. // 3. Compare resulting volumes and claims with expected volumes and claims.
func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) { func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) {
doit := func(t *testing.T, test controllerTest) { doit := func(t *testing.T, test controllerTest) {
// Initialize the controller // Initialize the controller
@ -783,17 +784,18 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag
// Test multiple calls to syncClaim/syncVolume and periodic sync of all // Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. For all tests, the test follows this pattern: // volume/claims. For all tests, the test follows this pattern:
// 0. Load the controller with initial data. // 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync() // 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, // 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" // call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything. // events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call // 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). // syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of // 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again) // "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set // 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences. // of volumes/claims with expected claims/volumes and report differences.
//
// Some limit of calls in enforced to prevent endless loops. // Some limit of calls in enforced to prevent endless loops.
func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) { func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) {
run := func(t *testing.T, test controllerTest) { run := func(t *testing.T, test controllerTest) {

View File

@ -127,23 +127,23 @@ func (pvIndex *persistentVolumeOrderedIndex) findBestMatchForClaim(claim *v1.Per
// A request for RWO could be satisfied by both sets of indexed volumes, so // A request for RWO could be satisfied by both sets of indexed volumes, so
// allPossibleMatchingAccessModes returns: // allPossibleMatchingAccessModes returns:
// //
// [][]v1.PersistentVolumeAccessMode { // [][]v1.PersistentVolumeAccessMode {
// []v1.PersistentVolumeAccessMode { // []v1.PersistentVolumeAccessMode {
// v1.ReadWriteOnce, v1.ReadOnlyMany, // v1.ReadWriteOnce, v1.ReadOnlyMany,
// }, // },
// []v1.PersistentVolumeAccessMode { // []v1.PersistentVolumeAccessMode {
// v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany, // v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany,
// }, // },
// } // }
// //
// A request for RWX can be satisfied by only one set of indexed volumes, so // A request for RWX can be satisfied by only one set of indexed volumes, so
// the return is: // the return is:
// //
// [][]v1.PersistentVolumeAccessMode { // [][]v1.PersistentVolumeAccessMode {
// []v1.PersistentVolumeAccessMode { // []v1.PersistentVolumeAccessMode {
// v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany, // v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany,
// }, // },
// } // }
// //
// This func returns modes with ascending levels of modes to give the user // This func returns modes with ascending levels of modes to give the user
// what is closest to what they actually asked for. // what is closest to what they actually asked for.

View File

@ -559,17 +559,18 @@ func TestProvisionSync(t *testing.T) {
// Test multiple calls to syncClaim/syncVolume and periodic sync of all // Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern: // volume/claims. The test follows this pattern:
// 0. Load the controller with initial data. // 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync() // 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, // 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" // call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything. // events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call // 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). // syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of // 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again) // "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set // 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences. // of volumes/claims with expected claims/volumes and report differences.
//
// Some limit of calls in enforced to prevent endless loops. // Some limit of calls in enforced to prevent endless loops.
func TestProvisionMultiSync(t *testing.T) { func TestProvisionMultiSync(t *testing.T) {
tests := []controllerTest{ tests := []controllerTest{

View File

@ -756,9 +756,10 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume *
// updateClaimStatus saves new claim.Status to API server. // updateClaimStatus saves new claim.Status to API server.
// Parameters: // Parameters:
// claim - claim to update //
// phase - phase to set // claim - claim to update
// volume - volume which Capacity is set into claim.Status.Capacity // phase - phase to set
// volume - volume which Capacity is set into claim.Status.Capacity
func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) { func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) {
klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s", claimToClaimKey(claim), phase) klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s", claimToClaimKey(claim), phase)
@ -840,10 +841,11 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo
// given event on the claim. It saves the status and emits the event only when // given event on the claim. It saves the status and emits the event only when
// the status has actually changed from the version saved in API server. // the status has actually changed from the version saved in API server.
// Parameters: // Parameters:
// claim - claim to update //
// phase - phase to set // claim - claim to update
// volume - volume which Capacity is set into claim.Status.Capacity // phase - phase to set
// eventtype, reason, message - event to send, see EventRecorder.Event() // volume - volume which Capacity is set into claim.Status.Capacity
// eventtype, reason, message - event to send, see EventRecorder.Event()
func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume, eventtype, reason, message string) (*v1.PersistentVolumeClaim, error) { func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume, eventtype, reason, message string) (*v1.PersistentVolumeClaim, error) {
klog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: set phase %s", claimToClaimKey(claim), phase) klog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: set phase %s", claimToClaimKey(claim), phase)
if claim.Status.Phase == phase { if claim.Status.Phase == phase {

View File

@ -236,17 +236,18 @@ func TestRecycleSync(t *testing.T) {
// Test multiple calls to syncClaim/syncVolume and periodic sync of all // Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern: // volume/claims. The test follows this pattern:
// 0. Load the controller with initial data. // 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync() // 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls, // 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed" // call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything. // events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call // 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync"). // syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of // 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again) // "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set // 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences. // of volumes/claims with expected claims/volumes and report differences.
//
// Some limit of calls in enforced to prevent endless loops. // Some limit of calls in enforced to prevent endless loops.
func TestRecycleMultiSync(t *testing.T) { func TestRecycleMultiSync(t *testing.T) {
tests := []controllerTest{ tests := []controllerTest{

View File

@ -41,21 +41,21 @@ var ErrVersionConflict = errors.New("VersionError")
// VolumeReactor is a core.Reactor that simulates etcd and API server. It // VolumeReactor is a core.Reactor that simulates etcd and API server. It
// stores: // stores:
// - Latest version of claims volumes saved by the controller. // - Latest version of claims volumes saved by the controller.
// - Queue of all saves (to simulate "volume/claim updated" events). This queue // - Queue of all saves (to simulate "volume/claim updated" events). This queue
// contains all intermediate state of an object - e.g. a claim.VolumeName // contains all intermediate state of an object - e.g. a claim.VolumeName
// is updated first and claim.Phase second. This queue will then contain both // is updated first and claim.Phase second. This queue will then contain both
// updates as separate entries. // updates as separate entries.
// - Number of changes since the last call to VolumeReactor.syncAll(). // - Number of changes since the last call to VolumeReactor.syncAll().
// - Optionally, volume and claim fake watchers which should be the same ones // - Optionally, volume and claim fake watchers which should be the same ones
// used by the controller. Any time an event function like deleteVolumeEvent // used by the controller. Any time an event function like deleteVolumeEvent
// is called to simulate an event, the reactor's stores are updated and the // is called to simulate an event, the reactor's stores are updated and the
// controller is sent the event via the fake watcher. // controller is sent the event via the fake watcher.
// - Optionally, list of error that should be returned by reactor, simulating // - Optionally, list of error that should be returned by reactor, simulating
// etcd / API server failures. These errors are evaluated in order and every // etcd / API server failures. These errors are evaluated in order and every
// error is returned only once. I.e. when the reactor finds matching // error is returned only once. I.e. when the reactor finds matching
// ReactorError, it return appropriate error and removes the ReactorError from // ReactorError, it return appropriate error and removes the ReactorError from
// the list. // the list.
type VolumeReactor struct { type VolumeReactor struct {
volumes map[string]*v1.PersistentVolume volumes map[string]*v1.PersistentVolume
claims map[string]*v1.PersistentVolumeClaim claims map[string]*v1.PersistentVolumeClaim

View File

@ -326,7 +326,8 @@ func (c *Config) Complete() CompletedConfig {
// New returns a new instance of Master from the given config. // New returns a new instance of Master from the given config.
// Certain config fields will be set to a default value if unset. // Certain config fields will be set to a default value if unset.
// Certain config fields must be specified, including: // Certain config fields must be specified, including:
// KubeletClientConfig //
// KubeletClientConfig
func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) (*Instance, error) { func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) (*Instance, error) {
if reflect.DeepEqual(c.ExtraConfig.KubeletClientConfig, kubeletclient.KubeletClientConfig{}) { if reflect.DeepEqual(c.ExtraConfig.KubeletClientConfig, kubeletclient.KubeletClientConfig{}) {
return nil, fmt.Errorf("Master.New() called with empty config.KubeletClientConfig") return nil, fmt.Errorf("Master.New() called with empty config.KubeletClientConfig")

View File

@ -53,12 +53,12 @@ func NewMasterCountEndpointReconciler(masterCount int, epAdapter EndpointsAdapte
// understand the requirements and the body of this function. // understand the requirements and the body of this function.
// //
// Requirements: // Requirements:
// * All apiservers MUST use the same ports for their {rw, ro} services. // - All apiservers MUST use the same ports for their {rw, ro} services.
// * All apiservers MUST use ReconcileEndpoints and only ReconcileEndpoints to manage the // - All apiservers MUST use ReconcileEndpoints and only ReconcileEndpoints to manage the
// endpoints for their {rw, ro} services. // endpoints for their {rw, ro} services.
// * All apiservers MUST know and agree on the number of apiservers expected // - All apiservers MUST know and agree on the number of apiservers expected
// to be running (c.masterCount). // to be running (c.masterCount).
// * ReconcileEndpoints is called periodically from all apiservers. // - ReconcileEndpoints is called periodically from all apiservers.
func (r *masterCountEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []corev1.EndpointPort, reconcilePorts bool) error { func (r *masterCountEndpointReconciler) ReconcileEndpoints(serviceName string, ip net.IP, endpointPorts []corev1.EndpointPort, reconcilePorts bool) error {
r.reconcilingLock.Lock() r.reconcilingLock.Lock()
defer r.reconcilingLock.Unlock() defer r.reconcilingLock.Unlock()
@ -187,10 +187,10 @@ func (r *masterCountEndpointReconciler) Destroy() {
// Determine if the endpoint is in the format ReconcileEndpoints expects. // Determine if the endpoint is in the format ReconcileEndpoints expects.
// //
// Return values: // Return values:
// * formatCorrect is true if exactly one subset is found. // - formatCorrect is true if exactly one subset is found.
// * ipCorrect is true when current master's IP is found and the number // - ipCorrect is true when current master's IP is found and the number
// of addresses is less than or equal to the master count. // of addresses is less than or equal to the master count.
// * portsCorrect is true when endpoint ports exactly match provided ports. // - portsCorrect is true when endpoint ports exactly match provided ports.
// portsCorrect is only evaluated when reconcilePorts is set to true. // portsCorrect is only evaluated when reconcilePorts is set to true.
func checkEndpointSubsetFormat(e *corev1.Endpoints, ip string, ports []corev1.EndpointPort, count int, reconcilePorts bool) (formatCorrect bool, ipCorrect bool, portsCorrect bool) { func checkEndpointSubsetFormat(e *corev1.Endpoints, ip string, ports []corev1.EndpointPort, count int, reconcilePorts bool) (formatCorrect bool, ipCorrect bool, portsCorrect bool) {
if len(e.Subsets) != 1 { if len(e.Subsets) != 1 {

View File

@ -262,9 +262,9 @@ func (r *leaseEndpointReconciler) doReconcile(serviceName string, endpointPorts
// format ReconcileEndpoints expects when the controller is using leases. // format ReconcileEndpoints expects when the controller is using leases.
// //
// Return values: // Return values:
// * formatCorrect is true if exactly one subset is found. // - formatCorrect is true if exactly one subset is found.
// * ipsCorrect when the addresses in the endpoints match the expected addresses list // - ipsCorrect when the addresses in the endpoints match the expected addresses list
// * portsCorrect is true when endpoint ports exactly match provided ports. // - portsCorrect is true when endpoint ports exactly match provided ports.
// portsCorrect is only evaluated when reconcilePorts is set to true. // portsCorrect is only evaluated when reconcilePorts is set to true.
func checkEndpointSubsetFormatWithLease(e *corev1.Endpoints, expectedIPs []string, ports []corev1.EndpointPort, reconcilePorts bool) (formatCorrect bool, ipsCorrect bool, portsCorrect bool) { func checkEndpointSubsetFormatWithLease(e *corev1.Endpoints, expectedIPs []string, ports []corev1.EndpointPort, reconcilePorts bool) (formatCorrect bool, ipsCorrect bool, portsCorrect bool) {
if len(e.Subsets) != 1 { if len(e.Subsets) != 1 {

View File

@ -84,12 +84,12 @@ func GetPreferredDockercfgPath() string {
return preferredPath return preferredPath
} }
//DefaultDockercfgPaths returns default search paths of .dockercfg // DefaultDockercfgPaths returns default search paths of .dockercfg
func DefaultDockercfgPaths() []string { func DefaultDockercfgPaths() []string {
return []string{GetPreferredDockercfgPath(), workingDirPath, homeDirPath, rootDirPath} return []string{GetPreferredDockercfgPath(), workingDirPath, homeDirPath, rootDirPath}
} }
//DefaultDockerConfigJSONPaths returns default search paths of .docker/config.json // DefaultDockerConfigJSONPaths returns default search paths of .docker/config.json
func DefaultDockerConfigJSONPaths() []string { func DefaultDockerConfigJSONPaths() []string {
return []string{GetPreferredDockercfgPath(), workingDirPath, homeJSONDirPath, rootJSONDirPath} return []string{GetPreferredDockercfgPath(), workingDirPath, homeJSONDirPath, rootJSONDirPath}
} }
@ -156,7 +156,7 @@ func ReadDockerConfigJSONFile(searchPaths []string) (cfg DockerConfig, err error
} }
//ReadSpecificDockerConfigJSONFile attempts to read docker configJSON from a given file path. // ReadSpecificDockerConfigJSONFile attempts to read docker configJSON from a given file path.
func ReadSpecificDockerConfigJSONFile(filePath string) (cfg DockerConfig, err error) { func ReadSpecificDockerConfigJSONFile(filePath string) (cfg DockerConfig, err error) {
var contents []byte var contents []byte

View File

@ -110,8 +110,9 @@ type DockerConfigURLKeyProvider struct {
} }
// ContainerRegistryProvider is a DockerConfigProvider that provides a dockercfg with: // ContainerRegistryProvider is a DockerConfigProvider that provides a dockercfg with:
// Username: "_token" //
// Password: "{access token from metadata}" // Username: "_token"
// Password: "{access token from metadata}"
type ContainerRegistryProvider struct { type ContainerRegistryProvider struct {
MetadataProvider MetadataProvider
} }

View File

@ -31,9 +31,9 @@ import (
// reverse index across the registry endpoints. A registry endpoint is made // reverse index across the registry endpoints. A registry endpoint is made
// up of a host (e.g. registry.example.com), but it may also contain a path // up of a host (e.g. registry.example.com), but it may also contain a path
// (e.g. registry.example.com/foo) This index is important for two reasons: // (e.g. registry.example.com/foo) This index is important for two reasons:
// - registry endpoints may overlap, and when this happens we must find the // - registry endpoints may overlap, and when this happens we must find the
// most specific match for a given image // most specific match for a given image
// - iterating a map does not yield predictable results // - iterating a map does not yield predictable results
type DockerKeyring interface { type DockerKeyring interface {
Lookup(image string) ([]AuthConfig, bool) Lookup(image string) ([]AuthConfig, bool)
} }
@ -197,8 +197,9 @@ func URLsMatchStr(glob string, target string) (bool, error) {
// glob wild cards in the host name. // glob wild cards in the host name.
// //
// Examples: // Examples:
// globURL=*.docker.io, targetURL=blah.docker.io => match //
// globURL=*.docker.io, targetURL=not.right.io => no match // globURL=*.docker.io, targetURL=blah.docker.io => match
// globURL=*.docker.io, targetURL=not.right.io => no match
// //
// Note that we don't support wildcards in ports and paths yet. // Note that we don't support wildcards in ports and paths yet.
func URLsMatch(globURL *url.URL, targetURL *url.URL) (bool, error) { func URLsMatch(globURL *url.URL, targetURL *url.URL) (bool, error) {

View File

@ -367,7 +367,7 @@ type execPlugin struct {
// ExecPlugin executes the plugin binary with arguments and environment variables specified in CredentialProviderConfig: // ExecPlugin executes the plugin binary with arguments and environment variables specified in CredentialProviderConfig:
// //
// $ ENV_NAME=ENV_VALUE <plugin-name> args[0] args[1] <<<request // $ ENV_NAME=ENV_VALUE <plugin-name> args[0] args[1] <<<request
// //
// The plugin is expected to receive the CredentialProviderRequest API via stdin from the kubelet and // The plugin is expected to receive the CredentialProviderRequest API via stdin from the kubelet and
// return CredentialProviderResponse via stdout. // return CredentialProviderResponse via stdout.

View File

@ -32,9 +32,10 @@ var providers = make(map[string]DockerConfigProvider)
// RegisterCredentialProvider is called by provider implementations on // RegisterCredentialProvider is called by provider implementations on
// initialization to register themselves, like so: // initialization to register themselves, like so:
// func init() { //
// RegisterCredentialProvider("name", &myProvider{...}) // func init() {
// } // RegisterCredentialProvider("name", &myProvider{...})
// }
func RegisterCredentialProvider(name string, provider DockerConfigProvider) { func RegisterCredentialProvider(name string, provider DockerConfigProvider) {
providersMutex.Lock() providersMutex.Lock()
defer providersMutex.Unlock() defer providersMutex.Unlock()

View File

@ -3799,7 +3799,7 @@ func schema_k8sio_api_apps_v1_StatefulSet(ref common.ReferenceCallback) common.O
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", Description: "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"kind": { "kind": {
@ -4910,7 +4910,7 @@ func schema_k8sio_api_apps_v1beta1_StatefulSet(ref common.ReferenceCallback) com
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", Description: "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"kind": { "kind": {
@ -6567,7 +6567,7 @@ func schema_k8sio_api_apps_v1beta2_StatefulSet(ref common.ReferenceCallback) com
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", Description: "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"kind": { "kind": {
@ -16490,7 +16490,7 @@ func schema_k8sio_api_core_v1_EndpointSubset(ref common.ReferenceCallback) commo
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]", Description: "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"addresses": { "addresses": {
@ -16547,7 +16547,7 @@ func schema_k8sio_api_core_v1_Endpoints(ref common.ReferenceCallback) common.Ope
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", Description: "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"kind": { "kind": {
@ -21397,7 +21397,7 @@ func schema_k8sio_api_core_v1_PodIP(ref common.ReferenceCallback) common.OpenAPI
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. Routable at least within the cluster.", Description: "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n\n\tIP: An IP address allocated to the pod. Routable at least within the cluster.",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"ip": { "ip": {
@ -30322,7 +30322,7 @@ func schema_k8sio_api_flowcontrol_v1alpha1_LimitedPriorityLevelConfiguration(ref
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", Description: "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"assuredConcurrencyShares": { "assuredConcurrencyShares": {
@ -31307,7 +31307,7 @@ func schema_k8sio_api_flowcontrol_v1beta1_LimitedPriorityLevelConfiguration(ref
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", Description: "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"assuredConcurrencyShares": { "assuredConcurrencyShares": {
@ -32292,7 +32292,7 @@ func schema_k8sio_api_flowcontrol_v1beta2_LimitedPriorityLevelConfiguration(ref
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n * How are requests for this priority level limited?\n * What should be done with requests that exceed the limit?", Description: "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"assuredConcurrencyShares": { "assuredConcurrencyShares": {
@ -43721,7 +43721,7 @@ func schema_apimachinery_pkg_api_resource_Quantity(ref common.ReferenceCallback)
return common.EmbedOpenAPIDefinitionIntoV2Extension(common.OpenAPIDefinition{ return common.EmbedOpenAPIDefinitionIntoV2Extension(common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", Description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.",
OneOf: common.GenerateOpenAPIV3OneOfSchema(resource.Quantity{}.OpenAPIV3OneOfTypes()), OneOf: common.GenerateOpenAPIV3OneOfSchema(resource.Quantity{}.OpenAPIV3OneOfTypes()),
Format: resource.Quantity{}.OpenAPISchemaFormat(), Format: resource.Quantity{}.OpenAPISchemaFormat(),
}, },
@ -43729,7 +43729,7 @@ func schema_apimachinery_pkg_api_resource_Quantity(ref common.ReferenceCallback)
}, common.OpenAPIDefinition{ }, common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n (Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", Description: "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` <quantity> ::= <signedNumber><suffix>\n\n\t(Note that <suffix> may be empty, from the \"\" case in <decimalSI>.)\n\n<digit> ::= 0 | 1 | ... | 9 <digits> ::= <digit> | <digit><digits> <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits> <sign> ::= \"+\" | \"-\" <signedNumber> ::= <number> | <sign><number> <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI> <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n<decimalSI> ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n<decimalExponent> ::= \"e\" <signedNumber> | \"E\" <signedNumber> ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". 
This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.",
Type: resource.Quantity{}.OpenAPISchemaType(), Type: resource.Quantity{}.OpenAPISchemaType(),
Format: resource.Quantity{}.OpenAPISchemaFormat(), Format: resource.Quantity{}.OpenAPISchemaFormat(),
}, },
@ -46088,7 +46088,7 @@ func schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref common.ReferenceCall
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. 
You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
Type: []string{"object"}, Type: []string{"object"},
}, },
}, },
@ -46099,7 +46099,7 @@ func schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref common.ReferenceCallback
return common.OpenAPIDefinition{ return common.OpenAPIDefinition{
Schema: spec.Schema{ Schema: spec.Schema{
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, like this: type MyAwesomeAPIObject struct {\n runtime.TypeMeta `json:\",inline\"`\n ... // other fields\n} func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.", Description: "TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, like this:\n\n\ttype MyAwesomeAPIObject struct {\n\t runtime.TypeMeta `json:\",inline\"`\n\t ... // other fields\n\t}\n\nfunc (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.",
Type: []string{"object"}, Type: []string{"object"},
Properties: map[string]spec.Schema{ Properties: map[string]spec.Schema{
"apiVersion": { "apiVersion": {

View File

@ -43,12 +43,13 @@ type AdmissionOptions struct {
// NewAdmissionOptions creates a new instance of AdmissionOptions // NewAdmissionOptions creates a new instance of AdmissionOptions
// Note: // Note:
// In addition it calls RegisterAllAdmissionPlugins to register
// all kube-apiserver admission plugins.
// //
// Provides the list of RecommendedPluginOrder that holds sane values // In addition it calls RegisterAllAdmissionPlugins to register
// that can be used by servers that don't care about admission chain. // all kube-apiserver admission plugins.
// Servers that do care can overwrite/append that field after creation. //
// Provides the list of RecommendedPluginOrder that holds sane values
// that can be used by servers that don't care about admission chain.
// Servers that do care can overwrite/append that field after creation.
func NewAdmissionOptions() *AdmissionOptions { func NewAdmissionOptions() *AdmissionOptions {
options := genericoptions.NewAdmissionOptions() options := genericoptions.NewAdmissionOptions()
// register all admission plugins // register all admission plugins

View File

@ -861,7 +861,6 @@ func getContainer(pid int) (string, error) {
// //
// The reason of leaving kernel threads at root cgroup is that we don't want to tie the // The reason of leaving kernel threads at root cgroup is that we don't want to tie the
// execution of these threads with to-be defined /system quota and create priority inversions. // execution of these threads with to-be defined /system quota and create priority inversions.
//
func ensureSystemCgroups(rootCgroupPath string, manager cgroups.Manager) error { func ensureSystemCgroups(rootCgroupPath string, manager cgroups.Manager) error {
// Move non-kernel PIDs to the system container. // Move non-kernel PIDs to the system container.
// Only keep errors on latest attempt. // Only keep errors on latest attempt.

View File

@ -501,35 +501,35 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C
// At a high-level this algorithm can be summarized as: // At a high-level this algorithm can be summarized as:
// //
// For each NUMA single node: // For each NUMA single node:
// * If all requested CPUs can be allocated from this NUMA node; // - If all requested CPUs can be allocated from this NUMA node;
// --> Do the allocation by running takeByTopologyNUMAPacked() over the // --> Do the allocation by running takeByTopologyNUMAPacked() over the
// available CPUs in that NUMA node and return // available CPUs in that NUMA node and return
// //
// Otherwise, for each pair of NUMA nodes: // Otherwise, for each pair of NUMA nodes:
// * If the set of requested CPUs (modulo 2) can be evenly split across // - If the set of requested CPUs (modulo 2) can be evenly split across
// the 2 NUMA nodes; AND // the 2 NUMA nodes; AND
// * Any remaining CPUs (after the modulo operation) can be striped across // - Any remaining CPUs (after the modulo operation) can be striped across
// some subset of the NUMA nodes; // some subset of the NUMA nodes;
// --> Do the allocation by running takeByTopologyNUMAPacked() over the // --> Do the allocation by running takeByTopologyNUMAPacked() over the
// available CPUs in both NUMA nodes and return // available CPUs in both NUMA nodes and return
// //
// Otherwise, for each 3-tuple of NUMA nodes: // Otherwise, for each 3-tuple of NUMA nodes:
// * If the set of requested CPUs (modulo 3) can be evenly distributed // - If the set of requested CPUs (modulo 3) can be evenly distributed
// across the 3 NUMA nodes; AND // across the 3 NUMA nodes; AND
// * Any remaining CPUs (after the modulo operation) can be striped across // - Any remaining CPUs (after the modulo operation) can be striped across
// some subset of the NUMA nodes; // some subset of the NUMA nodes;
// --> Do the allocation by running takeByTopologyNUMAPacked() over the // --> Do the allocation by running takeByTopologyNUMAPacked() over the
// available CPUs in all three NUMA nodes and return // available CPUs in all three NUMA nodes and return
// //
// ... // ...
// //
// Otherwise, for the set of all NUMA nodes: // Otherwise, for the set of all NUMA nodes:
// * If the set of requested CPUs (modulo NUM_NUMA_NODES) can be evenly // - If the set of requested CPUs (modulo NUM_NUMA_NODES) can be evenly
// distributed across all NUMA nodes; AND // distributed across all NUMA nodes; AND
// * Any remaining CPUs (after the modulo operation) can be striped across // - Any remaining CPUs (after the modulo operation) can be striped across
// some subset of the NUMA nodes; // some subset of the NUMA nodes;
// --> Do the allocation by running takeByTopologyNUMAPacked() over the // --> Do the allocation by running takeByTopologyNUMAPacked() over the
// available CPUs in all NUMA nodes and return // available CPUs in all NUMA nodes and return
// //
// If none of the above conditions can be met, then resort back to a // If none of the above conditions can be met, then resort back to a
// best-effort fit of packing CPUs into NUMA nodes by calling // best-effort fit of packing CPUs into NUMA nodes by calling

View File

@ -65,24 +65,24 @@ func (e SMTAlignmentError) Type() string {
// //
// The static policy maintains the following sets of logical CPUs: // The static policy maintains the following sets of logical CPUs:
// //
// - SHARED: Burstable, BestEffort, and non-integral Guaranteed containers // - SHARED: Burstable, BestEffort, and non-integral Guaranteed containers
// run here. Initially this contains all CPU IDs on the system. As // run here. Initially this contains all CPU IDs on the system. As
// exclusive allocations are created and destroyed, this CPU set shrinks // exclusive allocations are created and destroyed, this CPU set shrinks
// and grows, accordingly. This is stored in the state as the default // and grows, accordingly. This is stored in the state as the default
// CPU set. // CPU set.
// //
// - RESERVED: A subset of the shared pool which is not exclusively // - RESERVED: A subset of the shared pool which is not exclusively
// allocatable. The membership of this pool is static for the lifetime of // allocatable. The membership of this pool is static for the lifetime of
// the Kubelet. The size of the reserved pool is // the Kubelet. The size of the reserved pool is
// ceil(systemreserved.cpu + kubereserved.cpu). // ceil(systemreserved.cpu + kubereserved.cpu).
// Reserved CPUs are taken topologically starting with lowest-indexed // Reserved CPUs are taken topologically starting with lowest-indexed
// physical core, as reported by cAdvisor. // physical core, as reported by cAdvisor.
// //
// - ASSIGNABLE: Equal to SHARED - RESERVED. Exclusive CPUs are allocated // - ASSIGNABLE: Equal to SHARED - RESERVED. Exclusive CPUs are allocated
// from this pool. // from this pool.
// //
// - EXCLUSIVE ALLOCATIONS: CPU sets assigned exclusively to one container. // - EXCLUSIVE ALLOCATIONS: CPU sets assigned exclusively to one container.
// These are stored as explicit assignments in the state. // These are stored as explicit assignments in the state.
// //
// When an exclusive allocation is made, the static policy also updates the // When an exclusive allocation is made, the static policy also updates the
// default cpuset in the state abstraction. The CPU manager's periodic // default cpuset in the state abstraction. The CPU manager's periodic

View File

@ -40,7 +40,7 @@ const (
defaultNodeAllocatableCgroupName = "kubepods" defaultNodeAllocatableCgroupName = "kubepods"
) )
//createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true // createNodeAllocatableCgroups creates Node Allocatable Cgroup when CgroupsPerQOS flag is specified as true
func (cm *containerManagerImpl) createNodeAllocatableCgroups() error { func (cm *containerManagerImpl) createNodeAllocatableCgroups() error {
nodeAllocatable := cm.internalCapacity nodeAllocatable := cm.internalCapacity
// Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable. // Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable.

View File

@ -27,7 +27,7 @@ type fakeManager struct {
hint *TopologyHint hint *TopologyHint
} }
//NewFakeManager returns an instance of FakeManager // NewFakeManager returns an instance of FakeManager
func NewFakeManager() Manager { func NewFakeManager() Manager {
klog.InfoS("NewFakeManager") klog.InfoS("NewFakeManager")
return &fakeManager{} return &fakeManager{}

Some files were not shown because too many files have changed in this diff Show More