Bump to latest SMD to pick up performance optimizations

Joe Betz 2020-02-06 15:10:25 -08:00
parent 9617322727
commit d9faaca647
56 changed files with 1949 additions and 688 deletions

go.mod

@ -574,7 +574,7 @@ replace (
mvdan.cc/unparam => mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34
rsc.io/pdf => rsc.io/pdf v0.1.1
sigs.k8s.io/kustomize => sigs.k8s.io/kustomize v2.0.3+incompatible
sigs.k8s.io/structured-merge-diff/v3 => sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd
sigs.k8s.io/structured-merge-diff/v3 => sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c
sigs.k8s.io/yaml => sigs.k8s.io/yaml v1.2.0
sourcegraph.com/sqs/pbtypes => sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4
vbom.ml/util => vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc

go.sum

@ -621,8 +621,8 @@ mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskX
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd h1:RtLwJkS5lCLrbjdIqQdKiwZ6LrlGuLVdIcVhkUfsaIA=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=


@ -100,6 +100,8 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -463,8 +463,8 @@ k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8E
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874 h1:0KsuGbLhWdIxv5DA1OnbFz5hI/Co9kuxMfMUa5YsAHY=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd h1:RtLwJkS5lCLrbjdIqQdKiwZ6LrlGuLVdIcVhkUfsaIA=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -33,6 +33,7 @@ require (
gopkg.in/yaml.v2 v2.2.8
k8s.io/klog v1.0.0
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c
sigs.k8s.io/yaml v1.2.0
)


@ -118,6 +118,8 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -634,6 +634,11 @@ func (q Quantity) MarshalJSON() ([]byte, error) {
return result, nil
}
// ToUnstructured implements the value.UnstructuredConverter interface.
func (q Quantity) ToUnstructured() interface{} {
return q.String()
}
// UnmarshalJSON implements the json.Unmarshaller interface.
// TODO: Remove support for leading/trailing whitespace
func (q *Quantity) UnmarshalJSON(value []byte) error {

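These ToUnstructured implementations give the structured-merge-diff value package a direct way to obtain a field's unstructured form, avoiding the JSON marshal/unmarshal round trip used previously. A minimal sketch of the new behavior for Quantity (resource.MustParse is an existing helper in the same package; the value is illustrative):

q := resource.MustParse("500m")
u := q.ToUnstructured() // interface{} holding the string "500m"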

@ -49,6 +49,11 @@ func (d Duration) MarshalJSON() ([]byte, error) {
return json.Marshal(d.Duration.String())
}
// ToUnstructured implements the value.UnstructuredConverter interface.
func (d Duration) ToUnstructured() interface{} {
return d.Duration.String()
}
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//


@ -153,6 +153,16 @@ func (t Time) MarshalJSON() ([]byte, error) {
return buf, nil
}
// ToUnstructured implements the value.UnstructuredConverter interface.
func (t Time) ToUnstructured() interface{} {
if t.IsZero() {
return nil
}
buf := make([]byte, 0, len(time.RFC3339))
buf = t.UTC().AppendFormat(buf, time.RFC3339)
return string(buf)
}
// OpenAPISchemaType is used by the kube-openapi generator when constructing
// the OpenAPI spec of this type.
//

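Note the zero-value special case above: a zero Time converts to nil rather than an empty string, matching the null that Time's MarshalJSON emits for the zero value. Illustrative sketch:

t := metav1.NewTime(time.Date(2020, 2, 6, 15, 10, 25, 0, time.UTC))
t.ToUnstructured()               // "2020-02-06T15:10:25Z"
(metav1.Time{}).ToUnstructured() // nil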

@ -72,6 +72,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/sigs.k8s.io/structured-merge-diff/v3/value:go_default_library",
],
)


@ -17,7 +17,6 @@ limitations under the License.
package runtime
import (
"bytes"
encodingjson "encoding/json"
"fmt"
"math"
@ -32,6 +31,7 @@ import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/util/json"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"sigs.k8s.io/structured-merge-diff/v3/value"
"k8s.io/klog"
)
@ -68,8 +68,6 @@ func newFieldsCache() *fieldsCache {
}
var (
marshalerType = reflect.TypeOf(new(encodingjson.Marshaler)).Elem()
unmarshalerType = reflect.TypeOf(new(encodingjson.Unmarshaler)).Elem()
mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{})
stringType = reflect.TypeOf(string(""))
fieldCache = newFieldsCache()
@ -205,13 +203,9 @@ func fromUnstructured(sv, dv reflect.Value) error {
}
// Check if the object has a custom JSON marshaller/unmarshaller.
if reflect.PtrTo(dt).Implements(unmarshalerType) {
data, err := json.Marshal(sv.Interface())
if err != nil {
return fmt.Errorf("error encoding %s to json: %v", st.String(), err)
}
unmarshaler := dv.Addr().Interface().(encodingjson.Unmarshaler)
return unmarshaler.UnmarshalJSON(data)
entry := value.TypeReflectEntryOf(dv.Type())
if entry.CanConvertFromUnstructured() {
return entry.FromUnstructured(sv, dv)
}
switch dt.Kind() {
@ -481,94 +475,19 @@ func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error {
return json.Unmarshal(data, u)
}
var (
nullBytes = []byte("null")
trueBytes = []byte("true")
falseBytes = []byte("false")
)
func getMarshaler(v reflect.Value) (encodingjson.Marshaler, bool) {
// Check value receivers if v is not a pointer and pointer receivers if v is a pointer
if v.Type().Implements(marshalerType) {
return v.Interface().(encodingjson.Marshaler), true
}
// Check pointer receivers if v is not a pointer
if v.Kind() != reflect.Ptr && v.CanAddr() {
v = v.Addr()
if v.Type().Implements(marshalerType) {
return v.Interface().(encodingjson.Marshaler), true
}
}
return nil, false
}
func toUnstructured(sv, dv reflect.Value) error {
// Check if the object has a custom JSON marshaller/unmarshaller.
if marshaler, ok := getMarshaler(sv); ok {
if sv.Kind() == reflect.Ptr && sv.IsNil() {
// We're done - we don't need to store anything.
return nil
}
data, err := marshaler.MarshalJSON()
// Check if the object has a custom string converter.
entry := value.TypeReflectEntryOf(sv.Type())
if entry.CanConvertToUnstructured() {
v, err := entry.ToUnstructured(sv)
if err != nil {
return err
}
switch {
case len(data) == 0:
return fmt.Errorf("error decoding from json: empty value")
case bytes.Equal(data, nullBytes):
// We're done - we don't need to store anything.
case bytes.Equal(data, trueBytes):
dv.Set(reflect.ValueOf(true))
case bytes.Equal(data, falseBytes):
dv.Set(reflect.ValueOf(false))
case data[0] == '"':
var result string
err := json.Unmarshal(data, &result)
if err != nil {
return fmt.Errorf("error decoding string from json: %v", err)
if v != nil {
dv.Set(reflect.ValueOf(v))
}
dv.Set(reflect.ValueOf(result))
case data[0] == '{':
result := make(map[string]interface{})
err := json.Unmarshal(data, &result)
if err != nil {
return fmt.Errorf("error decoding object from json: %v", err)
}
dv.Set(reflect.ValueOf(result))
case data[0] == '[':
result := make([]interface{}, 0)
err := json.Unmarshal(data, &result)
if err != nil {
return fmt.Errorf("error decoding array from json: %v", err)
}
dv.Set(reflect.ValueOf(result))
default:
var (
resultInt int64
resultFloat float64
err error
)
if err = json.Unmarshal(data, &resultInt); err == nil {
dv.Set(reflect.ValueOf(resultInt))
} else if err = json.Unmarshal(data, &resultFloat); err == nil {
dv.Set(reflect.ValueOf(resultFloat))
} else {
return fmt.Errorf("error decoding number from json: %v", err)
}
}
return nil
}
st := sv.Type()
switch st.Kind() {
case reflect.String:

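The net effect of this rewrite: instead of detecting json.Marshaler implementations via reflection and round-tripping every custom type through JSON bytes, toUnstructured now consults the value package's cached type information and calls the type's own converter. A type opts in by implementing the value.UnstructuredConverter interface referenced in the ToUnstructured comments above; a hedged sketch for a hypothetical type:

// Celsius is a hypothetical example type, not part of this commit.
type Celsius float64

// ToUnstructured implements value.UnstructuredConverter, letting the
// reflect cache convert Celsius without a JSON round trip.
func (c Celsius) ToUnstructured() interface{} {
	return float64(c)
}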

@ -51,7 +51,7 @@ require (
k8s.io/klog v1.0.0
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c
sigs.k8s.io/yaml v1.2.0
)


@ -372,8 +372,8 @@ k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8E
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874 h1:0KsuGbLhWdIxv5DA1OnbFz5hI/Co9kuxMfMUa5YsAHY=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd h1:RtLwJkS5lCLrbjdIqQdKiwZ6LrlGuLVdIcVhkUfsaIA=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -67,9 +67,13 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal:go_default_library",
"//vendor/k8s.io/kube-openapi/pkg/util/proto:go_default_library",
"//vendor/k8s.io/kube-openapi/pkg/util/proto/testing:go_default_library",
"//vendor/sigs.k8s.io/structured-merge-diff/v3/fieldpath:go_default_library",
"//vendor/sigs.k8s.io/structured-merge-diff/v3/merge:go_default_library",
"//vendor/sigs.k8s.io/structured-merge-diff/v3/typed:go_default_library",
"//vendor/sigs.k8s.io/yaml:go_default_library",
],
)


@ -33,9 +33,15 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager"
"k8s.io/apiserver/pkg/endpoints/handlers/fieldmanager/internal"
"k8s.io/kube-openapi/pkg/util/proto"
prototesting "k8s.io/kube-openapi/pkg/util/proto/testing"
"sigs.k8s.io/structured-merge-diff/v3/fieldpath"
"sigs.k8s.io/structured-merge-diff/v3/merge"
"sigs.k8s.io/structured-merge-diff/v3/typed"
"sigs.k8s.io/yaml"
)
@ -45,9 +51,17 @@ var fakeSchema = prototesting.Fake{
"api", "openapi-spec", "swagger.json"),
}
type fakeObjectConvertor struct{}
type fakeObjectConvertor struct {
converter merge.Converter
apiVersion fieldpath.APIVersion
}
func (c *fakeObjectConvertor) Convert(in, out, context interface{}) error {
if typedValue, ok := in.(*typed.TypedValue); ok {
var err error
out, err = c.converter.Convert(typedValue, c.apiVersion)
return err
}
out = in
return nil
}
@ -71,18 +85,14 @@ type TestFieldManager struct {
}
func NewTestFieldManager(gvk schema.GroupVersionKind) TestFieldManager {
d, err := fakeSchema.OpenAPISchema()
if err != nil {
panic(err)
}
m, err := proto.NewOpenAPIData(d)
if err != nil {
panic(err)
}
m := NewFakeOpenAPIModels()
tc := NewFakeTypeConverter(m)
converter := internal.NewVersionConverter(tc, &fakeObjectConvertor{}, gvk.GroupVersion())
apiVersion := fieldpath.APIVersion(gvk.GroupVersion().String())
f, err := fieldmanager.NewStructuredMergeManager(
m,
&fakeObjectConvertor{},
&fakeObjectConvertor{converter, apiVersion},
&fakeObjectDefaulter{},
gvk.GroupVersion(),
gvk.GroupVersion(),
@ -102,6 +112,26 @@ func NewTestFieldManager(gvk schema.GroupVersionKind) TestFieldManager {
}
}
func NewFakeTypeConverter(m proto.Models) internal.TypeConverter {
tc, err := internal.NewTypeConverter(m, false)
if err != nil {
panic(fmt.Sprintf("Failed to build TypeConverter: %v", err))
}
return tc
}
func NewFakeOpenAPIModels() proto.Models {
d, err := fakeSchema.OpenAPISchema()
if err != nil {
panic(err)
}
m, err := proto.NewOpenAPIData(d)
if err != nil {
panic(err)
}
return m
}
func (f *TestFieldManager) Reset() {
f.liveObj = f.emptyObj.DeepCopyObject()
}
@ -393,16 +423,24 @@ func BenchmarkNewObject(b *testing.B) {
obj: getObjectBytes("endpoints.yaml"),
},
}
scheme := runtime.NewScheme()
if err := corev1.AddToScheme(scheme); err != nil {
b.Fatalf("Failed to add to scheme: %v", err)
}
for _, test := range tests {
b.Run(test.gvk.Kind, func(b *testing.B) {
f := NewTestFieldManager(test.gvk)
newObj := &unstructured.Unstructured{Object: map[string]interface{}{}}
if err := yaml.Unmarshal(test.obj, &newObj.Object); err != nil {
decoder := serializer.NewCodecFactory(scheme).UniversalDecoder(test.gvk.GroupVersion())
newObj, err := runtime.Decode(decoder, test.obj)
if err != nil {
b.Fatalf("Failed to parse yaml object: %v", err)
}
newObj.SetManagedFields([]metav1.ManagedFieldsEntry{
objMeta, err := meta.Accessor(newObj)
if err != nil {
b.Fatalf("Failed to get object meta: %v", err)
}
objMeta.SetManagedFields([]metav1.ManagedFieldsEntry{
{
Manager: "default",
Operation: "Update",
@ -463,6 +501,152 @@ func BenchmarkNewObject(b *testing.B) {
}
}
func toUnstructured(b *testing.B, o runtime.Object) *unstructured.Unstructured {
u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(o)
if err != nil {
b.Fatalf("Failed to unmarshal to json: %v", err)
}
return &unstructured.Unstructured{Object: u}
}
func BenchmarkConvertObjectToTyped(b *testing.B) {
tests := []struct {
gvk schema.GroupVersionKind
obj []byte
}{
{
gvk: schema.FromAPIVersionAndKind("v1", "Pod"),
obj: getObjectBytes("pod.yaml"),
},
{
gvk: schema.FromAPIVersionAndKind("v1", "Node"),
obj: getObjectBytes("node.yaml"),
},
{
gvk: schema.FromAPIVersionAndKind("v1", "Endpoints"),
obj: getObjectBytes("endpoints.yaml"),
},
}
scheme := runtime.NewScheme()
if err := corev1.AddToScheme(scheme); err != nil {
b.Fatalf("Failed to add to scheme: %v", err)
}
for _, test := range tests {
b.Run(test.gvk.Kind, func(b *testing.B) {
decoder := serializer.NewCodecFactory(scheme).UniversalDecoder(test.gvk.GroupVersion())
m := NewFakeOpenAPIModels()
typeConverter := NewFakeTypeConverter(m)
structured, err := runtime.Decode(decoder, test.obj)
if err != nil {
b.Fatalf("Failed to parse yaml object: %v", err)
}
b.Run("structured", func(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, err := typeConverter.ObjectToTyped(structured)
if err != nil {
b.Errorf("Error in ObjectToTyped: %v", err)
}
}
})
})
unstructured := toUnstructured(b, structured)
b.Run("unstructured", func(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, err := typeConverter.ObjectToTyped(unstructured)
if err != nil {
b.Errorf("Error in ObjectToTyped: %v", err)
}
}
})
})
})
}
}
func BenchmarkCompare(b *testing.B) {
tests := []struct {
gvk schema.GroupVersionKind
obj []byte
}{
{
gvk: schema.FromAPIVersionAndKind("v1", "Pod"),
obj: getObjectBytes("pod.yaml"),
},
{
gvk: schema.FromAPIVersionAndKind("v1", "Node"),
obj: getObjectBytes("node.yaml"),
},
{
gvk: schema.FromAPIVersionAndKind("v1", "Endpoints"),
obj: getObjectBytes("endpoints.yaml"),
},
}
scheme := runtime.NewScheme()
if err := corev1.AddToScheme(scheme); err != nil {
b.Fatalf("Failed to add to scheme: %v", err)
}
for _, test := range tests {
b.Run(test.gvk.Kind, func(b *testing.B) {
decoder := serializer.NewCodecFactory(scheme).UniversalDecoder(test.gvk.GroupVersion())
m := NewFakeOpenAPIModels()
typeConverter := NewFakeTypeConverter(m)
structured, err := runtime.Decode(decoder, test.obj)
if err != nil {
b.Fatal(err)
}
tv1, err := typeConverter.ObjectToTyped(structured)
if err != nil {
b.Errorf("Error in ObjectToTyped: %v", err)
}
tv2, err := typeConverter.ObjectToTyped(structured)
if err != nil {
b.Errorf("Error in ObjectToTyped: %v", err)
}
b.Run("structured", func(b *testing.B) {
b.ReportAllocs()
for n := 0; n < b.N; n++ {
_, err = tv1.Compare(tv2)
if err != nil {
b.Errorf("Error in ObjectToTyped: %v", err)
}
}
})
unstructured := toUnstructured(b, structured)
utv1, err := typeConverter.ObjectToTyped(unstructured)
if err != nil {
b.Errorf("Error in ObjectToTyped: %v", err)
}
utv2, err := typeConverter.ObjectToTyped(unstructured)
if err != nil {
b.Errorf("Error in ObjectToTyped: %v", err)
}
b.Run("unstructured", func(b *testing.B) {
b.ReportAllocs()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, err = utv1.Compare(utv2)
if err != nil {
b.Errorf("Error in ObjectToTyped: %v", err)
}
}
})
})
})
}
}
func BenchmarkRepeatedUpdate(b *testing.B) {
f := NewTestFieldManager(schema.FromAPIVersionAndKind("v1", "Pod"))
podBytes := getObjectBytes("pod.yaml")

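These benchmarks cover the structured and unstructured conversion paths side by side. Assuming a standard Go toolchain, they can be run from this package with, for example:

go test -bench=ConvertObjectToTyped -benchmem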

@ -49,11 +49,12 @@ var _ TypeConverter = DeducedTypeConverter{}
// ObjectToTyped converts an object into a TypedValue with a "deduced type".
func (DeducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) {
u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
if err != nil {
return nil, err
switch o := obj.(type) {
case *unstructured.Unstructured:
return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent())
default:
return typed.DeducedParseableType.FromStructured(obj)
}
return typed.DeducedParseableType.FromUnstructured(u)
}
// TypedToObject transforms the typed value into a runtime.Object. That
@ -80,16 +81,17 @@ func NewTypeConverter(models proto.Models, preserveUnknownFields bool) (TypeConv
}
func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) {
u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
if err != nil {
return nil, err
}
gvk := obj.GetObjectKind().GroupVersionKind()
t := c.parser.Type(gvk)
if t == nil {
return nil, newNoCorrespondingTypeError(gvk)
}
return t.FromUnstructured(u)
switch o := obj.(type) {
case *unstructured.Unstructured:
return t.FromUnstructured(o.UnstructuredContent())
default:
return t.FromStructured(obj)
}
}
func (c *typeConverter) TypedToObject(value *typed.TypedValue) (runtime.Object, error) {

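Both converters now branch on the concrete object type: *unstructured.Unstructured keeps the map-based FromUnstructured path, while all other objects take the new FromStructured path, which reads fields through the reflection cache instead of first converting the entire object to map[string]interface{}. A hedged usage sketch (tc is a TypeConverter built as in the tests above; the Pod is illustrative):

pod := &corev1.Pod{}
pod.APIVersion, pod.Kind = "v1", "Pod" // the GVK must be set so the parser can find the type
tv, err := tc.ObjectToTyped(pod)       // dispatches to FromStructured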

@ -248,6 +248,8 @@ k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl
sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -200,6 +200,8 @@ k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8EZSECTGxrQxqM54=
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -183,6 +183,8 @@ k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8EZSECTGxrQxqM54=
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -104,6 +104,8 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -222,6 +222,8 @@ k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8EZSECTGxrQxqM54=
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -162,6 +162,8 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -393,8 +393,8 @@ k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8E
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874 h1:0KsuGbLhWdIxv5DA1OnbFz5hI/Co9kuxMfMUa5YsAHY=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd h1:RtLwJkS5lCLrbjdIqQdKiwZ6LrlGuLVdIcVhkUfsaIA=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -193,6 +193,8 @@ k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8EZSECTGxrQxqM54=
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -193,6 +193,8 @@ k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8EZSECTGxrQxqM54=
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -193,6 +193,8 @@ k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8EZSECTGxrQxqM54=
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -325,6 +325,8 @@ k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl
sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -130,6 +130,8 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -368,7 +368,8 @@ k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8EZSECTGxrQxqM54=
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -212,6 +212,8 @@ k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8EZSECTGxrQxqM54=
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -390,8 +390,8 @@ k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8E
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874 h1:0KsuGbLhWdIxv5DA1OnbFz5hI/Co9kuxMfMUa5YsAHY=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd h1:RtLwJkS5lCLrbjdIqQdKiwZ6LrlGuLVdIcVhkUfsaIA=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -247,6 +247,8 @@ k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl
sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@ -217,6 +217,8 @@ k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc h1:MUttqhwRgupMiA5ps5F3d2/NLkU8EZSECTGxrQxqM54=
k8s.io/utils v0.0.0-20191217005138-9e5e9d854fcc/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c h1:xQP7F7Lntt2dtYmg12WPQHObOrAyPHlMWP1JVSa79GM=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=

vendor/modules.txt

@ -1967,7 +1967,7 @@ sigs.k8s.io/kustomize/pkg/transformers
sigs.k8s.io/kustomize/pkg/transformers/config
sigs.k8s.io/kustomize/pkg/transformers/config/defaultconfig
sigs.k8s.io/kustomize/pkg/types
# sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd => sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200121225636-0fb62c1057dd
# sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c => sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200207200219-5e70324e7c1c
sigs.k8s.io/structured-merge-diff/v3/fieldpath
sigs.k8s.io/structured-merge-diff/v3/merge
sigs.k8s.io/structured-merge-diff/v3/schema


@ -27,6 +27,7 @@ func SetFromValue(v value.Value) *Set {
w := objectWalker{
path: Path{},
value: v,
allocator: value.NewFreelistAllocator(),
do: func(p Path) { s.Insert(p) },
}
@ -37,6 +38,7 @@ func SetFromValue(v value.Value) *Set {
type objectWalker struct {
path Path
value value.Value
allocator value.Allocator
do func(Path)
}
@ -55,11 +57,15 @@ func (w *objectWalker) walk() {
case w.value.IsList():
// If the list were atomic, we'd break here, but we don't have
// a schema, so we can't tell.
list := w.value.AsList()
for i := 0; i < list.Length(); i++ {
l := w.value.AsListUsing(w.allocator)
defer w.allocator.Free(l)
iter := l.RangeUsing(w.allocator)
defer w.allocator.Free(iter)
for iter.Next() {
i, value := iter.Item()
w2 := *w
w2.path = append(w.path, GuessBestListPathElement(i, list.At(i)))
w2.value = list.At(i)
w2.path = append(w.path, w.GuessBestListPathElement(i, value))
w2.value = value
w2.walk()
}
return
@ -67,7 +73,9 @@ func (w *objectWalker) walk() {
// If the map/struct were atomic, we'd break here, but we don't
// have a schema, so we can't tell.
w.value.AsMap().Iterate(func(k string, val value.Value) bool {
m := w.value.AsMapUsing(w.allocator)
defer w.allocator.Free(m)
m.IterateUsing(w.allocator, func(k string, val value.Value) bool {
w2 := *w
w2.path = append(w.path, PathElement{FieldName: &k})
w2.value = val
@ -96,7 +104,7 @@ var AssociativeListCandidateFieldNames = []string{
// referencing by index is acceptable. Currently this is done by checking
// whether item has any of the fields listed in
// AssociativeListCandidateFieldNames which have scalar values.
func GuessBestListPathElement(index int, item value.Value) PathElement {
func (w *objectWalker) GuessBestListPathElement(index int, item value.Value) PathElement {
if !item.IsMap() {
// Non map items could be parts of sets or regular "atomic"
// lists. We won't try to guess whether something should be a
@ -104,9 +112,11 @@ func GuessBestListPathElement(index int, item value.Value) PathElement {
return PathElement{Index: &index}
}
m := item.AsMapUsing(w.allocator)
defer w.allocator.Free(m)
var keys value.FieldList
for _, name := range AssociativeListCandidateFieldNames {
f, ok := item.AsMap().Get(name)
f, ok := m.Get(name)
if !ok {
continue
}


@ -40,7 +40,7 @@ func (s *Set) ToJSONStream(w io.Writer) error {
var r reusableBuilder
stream.WriteObjectStart()
err := s.emitContents_v1(false, stream, &r)
err := s.emitContentsV1(false, stream, &r)
if err != nil {
return err
}
@ -76,7 +76,7 @@ func (r *reusableBuilder) reset() *bytes.Buffer {
return &r.Buffer
}
func (s *Set) emitContents_v1(includeSelf bool, stream *jsoniter.Stream, r *reusableBuilder) error {
func (s *Set) emitContentsV1(includeSelf bool, stream *jsoniter.Stream, r *reusableBuilder) error {
mi, ci := 0, 0
first := true
preWrite := func() {
@ -97,7 +97,7 @@ func (s *Set) emitContents_v1(includeSelf bool, stream *jsoniter.Stream, r *reus
mpe := s.Members.members[mi]
cpe := s.Children.members[ci].pathElement
if mpe.Less(cpe) {
if c := mpe.Compare(cpe); c < 0 {
preWrite()
if err := serializePathElementToWriter(r.reset(), mpe); err != nil {
return err
@ -105,14 +105,14 @@ func (s *Set) emitContents_v1(includeSelf bool, stream *jsoniter.Stream, r *reus
stream.WriteObjectField(r.unsafeString())
stream.WriteEmptyObject()
mi++
} else if cpe.Less(mpe) {
} else if c > 0 {
preWrite()
if err := serializePathElementToWriter(r.reset(), cpe); err != nil {
return err
}
stream.WriteObjectField(r.unsafeString())
stream.WriteObjectStart()
if err := s.Children.members[ci].set.emitContents_v1(false, stream, r); err != nil {
if err := s.Children.members[ci].set.emitContentsV1(false, stream, r); err != nil {
return err
}
stream.WriteObjectEnd()
@ -124,7 +124,7 @@ func (s *Set) emitContents_v1(includeSelf bool, stream *jsoniter.Stream, r *reus
}
stream.WriteObjectField(r.unsafeString())
stream.WriteObjectStart()
if err := s.Children.members[ci].set.emitContents_v1(true, stream, r); err != nil {
if err := s.Children.members[ci].set.emitContentsV1(true, stream, r); err != nil {
return err
}
stream.WriteObjectEnd()
@ -154,7 +154,7 @@ func (s *Set) emitContents_v1(includeSelf bool, stream *jsoniter.Stream, r *reus
}
stream.WriteObjectField(r.unsafeString())
stream.WriteObjectStart()
if err := s.Children.members[ci].set.emitContents_v1(false, stream, r); err != nil {
if err := s.Children.members[ci].set.emitContentsV1(false, stream, r); err != nil {
return err
}
stream.WriteObjectEnd()
@ -169,7 +169,7 @@ func (s *Set) FromJSON(r io.Reader) error {
// The iterator pool is completely useless for memory management, grrr.
iter := jsoniter.Parse(jsoniter.ConfigCompatibleWithStandardLibrary, r, 4096)
found, _ := readIter_v1(iter)
found, _ := readIterV1(iter)
if found == nil {
*s = Set{}
} else {
@ -180,7 +180,7 @@ func (s *Set) FromJSON(r io.Reader) error {
// returns true if this subtree is also (or only) a member of parent; s is nil
// if there are no further children.
func readIter_v1(iter *jsoniter.Iterator) (children *Set, isMember bool) {
func readIterV1(iter *jsoniter.Iterator) (children *Set, isMember bool) {
iter.ReadMapCB(func(iter *jsoniter.Iterator, key string) bool {
if key == "." {
isMember = true
@ -199,7 +199,7 @@ func readIter_v1(iter *jsoniter.Iterator) (children *Set, isMember bool) {
iter.Skip()
return true
}
grandchildren, childIsMember := readIter_v1(iter)
grandchildren, childIsMember := readIterV1(iter)
if childIsMember {
if children == nil {
children = &Set{}

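Besides the lint-driven renames (emitContents_v1 to emitContentsV1, readIter_v1 to readIterV1), the serializer now performs one three-way Compare per member/child pair instead of up to two Less calls. The pattern:

if c := mpe.Compare(cpe); c < 0 {
	// the member sorts first: emit it alone
} else if c > 0 {
	// the child sorts first: emit its subtree
} else {
	// equal: the path element is both a member and a parent of children
}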

@ -154,7 +154,7 @@ func handleAtom(a schema.Atom, tr schema.TypeRef, ah atomHandler) ValidationErro
}
// Returns the list, or an error. Reminder: nil is a valid list and might be returned.
func listValue(val value.Value) (value.List, error) {
func listValue(a value.Allocator, val value.Value) (value.List, error) {
if val.IsNull() {
// Null is a valid list.
return nil, nil
@ -162,11 +162,11 @@ func listValue(val value.Value) (value.List, error) {
if !val.IsList() {
return nil, fmt.Errorf("expected list, got %v", val)
}
return val.AsList(), nil
return val.AsListUsing(a), nil
}
// Returns the map, or an error. Reminder: nil is a valid map and might be returned.
func mapValue(val value.Value) (value.Map, error) {
func mapValue(a value.Allocator, val value.Value) (value.Map, error) {
if val == nil {
return nil, fmt.Errorf("expected map, got nil")
}
@ -177,10 +177,10 @@ func mapValue(val value.Value) (value.Map, error) {
if !val.IsMap() {
return nil, fmt.Errorf("expected map, got %v", val)
}
return val.AsMap(), nil
return val.AsMapUsing(a), nil
}
func keyedAssociativeListItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) {
func keyedAssociativeListItemToPathElement(a value.Allocator, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) {
pe := fieldpath.PathElement{}
if child.IsNull() {
// For now, the keys are required which means that null entries
@ -191,8 +191,10 @@ func keyedAssociativeListItemToPathElement(list *schema.List, index int, child v
return pe, errors.New("associative list with keys may not have non-map elements")
}
keyMap := value.FieldList{}
m := child.AsMapUsing(a)
defer a.Free(m)
for _, fieldName := range list.Keys {
if val, ok := child.AsMap().Get(fieldName); ok {
if val, ok := m.Get(fieldName); ok {
keyMap = append(keyMap, value.Field{Name: fieldName, Value: val})
} else {
return pe, fmt.Errorf("associative list with keys has an element that omits key field %q", fieldName)
@ -223,10 +225,10 @@ func setItemToPathElement(list *schema.List, index int, child value.Value) (fiel
}
}
func listItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) {
func listItemToPathElement(a value.Allocator, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) {
if list.ElementRelationship == schema.Associative {
if len(list.Keys) > 0 {
return keyedAssociativeListItemToPathElement(list, index, child)
return keyedAssociativeListItemToPathElement(a, list, index, child)
}
// If there's no keys, then we must be a set of primitives.


@ -48,6 +48,8 @@ type mergingWalker struct {
// Allocate only as many walkers as needed for the depth by storing them here.
spareWalkers *[]*mergingWalker
allocator value.Allocator
}
// merge rules examine w.lhs and w.rhs (up to one of which may be nil) and
@ -148,18 +150,15 @@ func (w *mergingWalker) finishDescent(w2 *mergingWalker) {
*w.spareWalkers = append(*w.spareWalkers, w2)
}
func (w *mergingWalker) derefMap(prefix string, v value.Value, dest *value.Map) (errs ValidationErrors) {
// taking dest as input so that it can be called as a one-liner with
// append.
func (w *mergingWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) {
if v == nil {
return nil
return nil, nil
}
m, err := mapValue(v)
m, err := mapValue(w.allocator, v)
if err != nil {
return errorf("%v: %v", prefix, err)
return nil, errorf("%v: %v", prefix, err)
}
*dest = m
return nil
return m, nil
}
func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) {
@ -184,7 +183,7 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
if rhs != nil {
for i := 0; i < rhs.Length(); i++ {
child := rhs.At(i)
pe, err := listItemToPathElement(t, i, child)
pe, err := listItemToPathElement(w.allocator, t, i, child)
if err != nil {
errs = append(errs, errorf("rhs: element %v: %v", i, err.Error())...)
// If we can't construct the path element, we can't
@ -205,7 +204,7 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
if lhs != nil {
for i := 0; i < lhs.Length(); i++ {
child := lhs.At(i)
pe, err := listItemToPathElement(t, i, child)
pe, err := listItemToPathElement(w.allocator, t, i, child)
if err != nil {
errs = append(errs, errorf("lhs: element %v: %v", i, err.Error())...)
// If we can't construct the path element, we can't
@ -253,24 +252,26 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err
return errs
}
func (w *mergingWalker) derefList(prefix string, v value.Value, dest *value.List) (errs ValidationErrors) {
// taking dest as input so that it can be called as a one-liner with
// append.
func (w *mergingWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) {
if v == nil {
return nil
return nil, nil
}
l, err := listValue(v)
l, err := listValue(w.allocator, v)
if err != nil {
return errorf("%v: %v", prefix, err)
return nil, errorf("%v: %v", prefix, err)
}
*dest = l
return nil
return l, nil
}
func (w *mergingWalker) doList(t *schema.List) (errs ValidationErrors) {
var lhs, rhs value.List
w.derefList("lhs: ", w.lhs, &lhs)
w.derefList("rhs: ", w.rhs, &rhs)
lhs, _ := w.derefList("lhs: ", w.lhs)
if lhs != nil {
defer w.allocator.Free(lhs)
}
rhs, _ := w.derefList("rhs: ", w.rhs)
if rhs != nil {
defer w.allocator.Free(rhs)
}
// If both lhs and rhs are empty/null, treat it as a
// leaf: this helps preserve the empty/null
@ -311,31 +312,10 @@ func (w *mergingWalker) visitMapItem(t *schema.Map, out map[string]interface{},
func (w *mergingWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) {
out := map[string]interface{}{}
if lhs != nil {
lhs.Iterate(func(key string, val value.Value) bool {
var rval value.Value
if rhs != nil {
if item, ok := rhs.Get(key); ok {
rval = item
defer rval.Recycle()
}
}
errs = append(errs, w.visitMapItem(t, out, key, val, rval)...)
value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool {
errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...)
return true
})
}
if rhs != nil {
rhs.Iterate(func(key string, val value.Value) bool {
if lhs != nil {
if lhs.Has(key) {
return true
}
}
errs = append(errs, w.visitMapItem(t, out, key, nil, val)...)
return true
})
}
if len(out) > 0 {
i := interface{}(out)
w.out = &i
@ -345,14 +325,18 @@ func (w *mergingWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs V
}
func (w *mergingWalker) doMap(t *schema.Map) (errs ValidationErrors) {
var lhs, rhs value.Map
w.derefMap("lhs: ", w.lhs, &lhs)
w.derefMap("rhs: ", w.rhs, &rhs)
lhs, _ := w.derefMap("lhs: ", w.lhs)
if lhs != nil {
defer w.allocator.Free(lhs)
}
rhs, _ := w.derefMap("rhs: ", w.rhs)
if rhs != nil {
defer w.allocator.Free(rhs)
}
// If both lhs and rhs are empty/null, treat it as a
// leaf: this helps preserve the empty/null
// distinction.
emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0)
emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty())
if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf {
w.doLeaf()

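A note on the MapZipUsing call above: it visits the union of keys from both maps in a single pass, handing the callback nil for whichever side lacks a key (inferred from this usage), so the old second loop over rhs with its per-key Has lookups is no longer needed. value.Unordered indicates that visit order does not matter, presumably letting the implementation skip sorting the keys.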

@ -24,6 +24,7 @@ type removingWalker struct {
out interface{}
schema *schema.Schema
toRemove *fieldpath.Set
allocator value.Allocator
}
func removeItemsWithSchema(val value.Value, toRemove *fieldpath.Set, schema *schema.Schema, typeRef schema.TypeRef) value.Value {
@ -31,6 +32,7 @@ func removeItemsWithSchema(val value.Value, toRemove *fieldpath.Set, schema *sch
value: val,
schema: schema,
toRemove: toRemove,
allocator: value.NewFreelistAllocator(),
}
resolveSchema(schema, typeRef, val, w)
return value.NewValueInterface(w.out)
@ -42,18 +44,20 @@ func (w *removingWalker) doScalar(t *schema.Scalar) ValidationErrors {
}
func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) {
l := w.value.AsList()
l := w.value.AsListUsing(w.allocator)
defer w.allocator.Free(l)
// If list is null, empty, or atomic just return
if l == nil || l.Length() == 0 || t.ElementRelationship == schema.Atomic {
return nil
}
var newItems []interface{}
for i := 0; i < l.Length(); i++ {
item := l.At(i)
iter := l.RangeUsing(w.allocator)
defer w.allocator.Free(iter)
for iter.Next() {
i, item := iter.Item()
// Ignore error because we have already validated this list
pe, _ := listItemToPathElement(t, i, item)
pe, _ := listItemToPathElement(w.allocator, t, i, item)
path, _ := fieldpath.MakePath(pe)
if w.toRemove.Has(path) {
continue
@ -70,10 +74,12 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) {
}
func (w *removingWalker) doMap(t *schema.Map) ValidationErrors {
m := w.value.AsMap()
m := w.value.AsMapUsing(w.allocator)
if m != nil {
defer w.allocator.Free(m)
}
// If map is null, empty, or atomic just return
if m == nil || m.Length() == 0 || t.ElementRelationship == schema.Atomic {
if m == nil || m.Empty() || t.ElementRelationship == schema.Atomic {
return nil
}


@ -34,6 +34,7 @@ func (tv TypedValue) toFieldSetWalker() *toFieldSetWalker {
v.schema = tv.schema
v.typeRef = tv.typeRef
v.set = &fieldpath.Set{}
v.allocator = value.NewFreelistAllocator()
return v
}
@ -55,6 +56,7 @@ type toFieldSetWalker struct {
// Allocate only as many walkers as needed for the depth by storing them here.
spareWalkers *[]*toFieldSetWalker
allocator value.Allocator
}
func (v *toFieldSetWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef) *toFieldSetWalker {
@ -94,7 +96,7 @@ func (v *toFieldSetWalker) doScalar(t *schema.Scalar) ValidationErrors {
func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) {
for i := 0; i < list.Length(); i++ {
child := list.At(i)
pe, _ := listItemToPathElement(t, i, child)
pe, _ := listItemToPathElement(v.allocator, t, i, child)
v2 := v.prepareDescent(pe, t.ElementType)
v2.value = child
errs = append(errs, v2.toFieldSet()...)
@ -106,8 +108,10 @@ func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs
}
func (v *toFieldSetWalker) doList(t *schema.List) (errs ValidationErrors) {
list, _ := listValue(v.value)
list, _ := listValue(v.allocator, v.value)
if list != nil {
defer v.allocator.Free(list)
}
if t.ElementRelationship == schema.Atomic {
v.set.Insert(v.path)
return nil
@ -143,8 +147,10 @@ func (v *toFieldSetWalker) visitMapItems(t *schema.Map, m value.Map) (errs Valid
}
func (v *toFieldSetWalker) doMap(t *schema.Map) (errs ValidationErrors) {
m, _ := mapValue(v.value)
m, _ := mapValue(v.allocator, v.value)
if m != nil {
defer v.allocator.Free(m)
}
if t.ElementRelationship == schema.Atomic {
v.set.Insert(v.path)
return nil


@ -238,6 +238,9 @@ func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error)
mw.typeRef = lhs.typeRef
mw.rule = rule
mw.postItemHook = postRule
if mw.allocator == nil {
mw.allocator = value.NewFreelistAllocator()
}
errs := mw.merge(nil)
if len(errs) > 0 {


@ -33,6 +33,9 @@ func (tv TypedValue) walker() *validatingObjectWalker {
v.value = tv.value
v.schema = tv.schema
v.typeRef = tv.typeRef
if v.allocator == nil {
v.allocator = value.NewFreelistAllocator()
}
return v
}
@ -49,6 +52,7 @@ type validatingObjectWalker struct {
// Allocate only as many walkers as needed for the depth by storing them here.
spareWalkers *[]*validatingObjectWalker
allocator value.Allocator
}
func (v *validatingObjectWalker) prepareDescent(tr schema.TypeRef) *validatingObjectWalker {
@ -112,13 +116,14 @@ func (v *validatingObjectWalker) doScalar(t *schema.Scalar) ValidationErrors {
func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) {
observedKeys := fieldpath.MakePathElementSet(list.Length())
for i := 0; i < list.Length(); i++ {
child := list.At(i)
child := list.AtUsing(v.allocator, i)
defer v.allocator.Free(child)
var pe fieldpath.PathElement
if t.ElementRelationship != schema.Associative {
pe.Index = &i
} else {
var err error
pe, err = listItemToPathElement(t, i, child)
pe, err = listItemToPathElement(v.allocator, t, i, child)
if err != nil {
errs = append(errs, errorf("element %v: %v", i, err.Error())...)
// If we can't construct the path element, we can't
@ -140,7 +145,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List)
}
func (v *validatingObjectWalker) doList(t *schema.List) (errs ValidationErrors) {
list, err := listValue(v.value)
list, err := listValue(v.allocator, v.value)
if err != nil {
return errorf(err.Error())
}
@ -149,13 +154,14 @@ func (v *validatingObjectWalker) doList(t *schema.List) (errs ValidationErrors)
return nil
}
defer v.allocator.Free(list)
errs = v.visitListItems(t, list)
return errs
}
func (v *validatingObjectWalker) visitMapItems(t *schema.Map, m value.Map) (errs ValidationErrors) {
m.Iterate(func(key string, val value.Value) bool {
m.IterateUsing(v.allocator, func(key string, val value.Value) bool {
pe := fieldpath.PathElement{FieldName: &key}
tr := t.ElementType
if sf, ok := t.FindField(key); ok {
@ -175,15 +181,14 @@ func (v *validatingObjectWalker) visitMapItems(t *schema.Map, m value.Map) (errs
}
func (v *validatingObjectWalker) doMap(t *schema.Map) (errs ValidationErrors) {
m, err := mapValue(v.value)
m, err := mapValue(v.allocator, v.value)
if err != nil {
return errorf(err.Error())
}
if m == nil {
return nil
}
defer v.allocator.Free(m)
errs = v.visitMapItems(t, m)
return errs


@ -3,6 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"allocator.go",
"doc.go",
"fields.go",
"jsontagutil.go",
@ -12,6 +13,7 @@ go_library(
"map.go",
"mapreflect.go",
"mapunstructured.go",
"reflectcache.go",
"scalar.go",
"structreflect.go",
"value.go",

View File

@ -0,0 +1,203 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package value
// Allocator provides a value object allocation strategy.
// Value objects can be allocated by passing an allocator to the "Using"
// receiver functions on the value interfaces, e.g. Map.ZipUsing(allocator, ...).
// Value objects returned from "Using" functions should be given back to the allocator
// once no longer needed by calling Allocator.Free(Value).
type Allocator interface {
// Free gives the allocator back any value objects returned by the "Using"
// receiver functions on the value interfaces.
// interface{} may be any of: Value, Map, List or Range.
Free(interface{})
// The unexported functions are for "Using" receiver functions of the value types
// to request what they need from the allocator.
allocValueUnstructured() *valueUnstructured
allocListUnstructuredRange() *listUnstructuredRange
allocValueReflect() *valueReflect
allocMapReflect() *mapReflect
allocStructReflect() *structReflect
allocListReflect() *listReflect
allocListReflectRange() *listReflectRange
}
// HeapAllocator simply allocates objects to the heap. It is the default
// allocator used by receiver functions on the value interfaces that do not accept
// an allocator and should be used whenever allocating objects that will not
// be given back to an allocator by calling Allocator.Free(Value).
var HeapAllocator = &heapAllocator{}
type heapAllocator struct{}
func (p *heapAllocator) allocValueUnstructured() *valueUnstructured {
return &valueUnstructured{}
}
func (p *heapAllocator) allocListUnstructuredRange() *listUnstructuredRange {
return &listUnstructuredRange{vv: &valueUnstructured{}}
}
func (p *heapAllocator) allocValueReflect() *valueReflect {
return &valueReflect{}
}
func (p *heapAllocator) allocStructReflect() *structReflect {
return &structReflect{}
}
func (p *heapAllocator) allocMapReflect() *mapReflect {
return &mapReflect{}
}
func (p *heapAllocator) allocListReflect() *listReflect {
return &listReflect{}
}
func (p *heapAllocator) allocListReflectRange() *listReflectRange {
return &listReflectRange{vr: &valueReflect{}}
}
func (p *heapAllocator) Free(_ interface{}) {}
// NewFreelistAllocator creates a freelist-based allocator.
// This allocator provides fast allocation and freeing of short lived value objects.
//
// The freelists are bounded in size by freelistMaxSize. If more value objects than this are
// allocated at once, the excess will be returned to the heap for garbage collection when freed.
//
// This allocator is not safe for concurrent use and must not be shared between goroutines.
//
// This allocator works well for traversal of value data trees. Typical usage is to acquire
// a freelist allocator at the beginning of the traversal and use it throughout
// for all temporary value access.
func NewFreelistAllocator() Allocator {
return &freelistAllocator{
valueUnstructured: &freelist{new: func() interface{} {
return &valueUnstructured{}
}},
listUnstructuredRange: &freelist{new: func() interface{} {
return &listUnstructuredRange{vv: &valueUnstructured{}}
}},
valueReflect: &freelist{new: func() interface{} {
return &valueReflect{}
}},
mapReflect: &freelist{new: func() interface{} {
return &mapReflect{}
}},
structReflect: &freelist{new: func() interface{} {
return &structReflect{}
}},
listReflect: &freelist{new: func() interface{} {
return &listReflect{}
}},
listReflectRange: &freelist{new: func() interface{} {
return &listReflectRange{vr: &valueReflect{}}
}},
}
}
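To make the intended usage concrete, here is a minimal sketch of a traversal, assuming the package is imported as sigs.k8s.io/structured-merge-diff/v3/value: one freelist allocator is acquired up front and every temporary value is handed back to it.

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	v := value.NewValueInterface([]interface{}{"a", "b", "c"})
	// Acquire one freelist allocator for the whole traversal.
	a := value.NewFreelistAllocator()
	list := v.AsListUsing(a)
	defer a.Free(list)
	for i := 0; i < list.Length(); i++ {
		item := list.AtUsing(a, i)
		fmt.Println(value.ToString(item))
		a.Free(item) // return the temporary so the next AtUsing can reuse it
	}
}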
// Bound memory usage of freelists. This prevents the processing of very large lists from leaking memory.
// This limit is large enough for endpoints objects containing 1000 IP address entries. Freed objects
// that don't fit into the freelist are orphaned on the heap to be garbage collected.
const freelistMaxSize = 1000
type freelistAllocator struct {
valueUnstructured *freelist
listUnstructuredRange *freelist
valueReflect *freelist
mapReflect *freelist
structReflect *freelist
listReflect *freelist
listReflectRange *freelist
}
type freelist struct {
list []interface{}
new func() interface{}
}
func (f *freelist) allocate() interface{} {
var w2 interface{}
if n := len(f.list); n > 0 {
w2, f.list = f.list[n-1], f.list[:n-1]
} else {
w2 = f.new()
}
return w2
}
func (f *freelist) free(v interface{}) {
if len(f.list) < freelistMaxSize {
f.list = append(f.list, v)
}
}
func (w *freelistAllocator) Free(value interface{}) {
switch v := value.(type) {
case *valueUnstructured:
v.Value = nil // don't hold references to unstructured objects
w.valueUnstructured.free(v)
case *listUnstructuredRange:
v.vv.Value = nil // don't hold references to unstructured objects
w.listUnstructuredRange.free(v)
case *valueReflect:
v.ParentMapKey = nil
v.ParentMap = nil
w.valueReflect.free(v)
case *mapReflect:
w.mapReflect.free(v)
case *structReflect:
w.structReflect.free(v)
case *listReflect:
w.listReflect.free(v)
case *listReflectRange:
v.vr.ParentMapKey = nil
v.vr.ParentMap = nil
w.listReflectRange.free(v)
}
}
func (w *freelistAllocator) allocValueUnstructured() *valueUnstructured {
return w.valueUnstructured.allocate().(*valueUnstructured)
}
func (w *freelistAllocator) allocListUnstructuredRange() *listUnstructuredRange {
return w.listUnstructuredRange.allocate().(*listUnstructuredRange)
}
func (w *freelistAllocator) allocValueReflect() *valueReflect {
return w.valueReflect.allocate().(*valueReflect)
}
func (w *freelistAllocator) allocStructReflect() *structReflect {
return w.structReflect.allocate().(*structReflect)
}
func (w *freelistAllocator) allocMapReflect() *mapReflect {
return w.mapReflect.allocate().(*mapReflect)
}
func (w *freelistAllocator) allocListReflect() *listReflect {
return w.listReflect.allocate().(*listReflect)
}
func (w *freelistAllocator) allocListReflectRange() *listReflectRange {
return w.listReflectRange.allocate().(*listReflectRange)
}

View File

@ -27,7 +27,7 @@ type Field struct {
Value Value
}
// FieldList is a list of key-value pairs. Each field is expectUpdated to
// FieldList is a list of key-value pairs. Each field is expected to
// have a different name.
type FieldList []Field

View File

@ -23,24 +23,74 @@ type List interface {
// At returns the item at the given position in the list. It will
// panic if the index is out of range.
At(int) Value
// AtUsing uses the provided allocator and returns the item at the given
// position in the list. It will panic if the index is out of range.
// The returned Value should be given back to the Allocator when no longer needed
// by calling Allocator.Free(Value).
AtUsing(Allocator, int) Value
// Range returns a ListRange for iterating over the items in the list.
Range() ListRange
// RangeUsing uses the provided allocator and returns a ListRange for
// iterating over the items in the list.
// The returned Range should be given back to the Allocator when no longer needed
// by calling Allocator.Free(Value).
RangeUsing(Allocator) ListRange
// Equals compares the two lists, and returns true if they are the same, false otherwise.
// Implementations can use ListEquals as a general implementation for this method.
Equals(List) bool
// EqualsUsing uses the provided allocator and compares the two lists, and returns true if
// they are the same, false otherwise. Implementations can use ListEqualsUsing as a general
// implementation for this method.
EqualsUsing(Allocator, List) bool
}
// ListRange represents a single iteration across the items of a list.
type ListRange interface {
// Next increments to the next item in the range, if there is one, and returns true, or returns false if there are no more items.
Next() bool
// Item returns the index and value of the current item in the range, or panics if there is no current item.
// For efficiency, Item may reuse the values returned by previous Item calls. Callers should be careful to
// avoid holding pointers to the value returned by Item() that escape the iteration loop, since they become
// invalid once either Item() or Allocator.Free() is called.
Item() (index int, value Value)
}
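Because Item may reuse the Value it returns, a safe iteration copies data out of each item rather than retaining the Value itself. A minimal sketch, assuming value.List and value.Allocator come from this package:

package example

import "sigs.k8s.io/structured-merge-diff/v3/value"

// collect copies every item out of the list as unstructured data; the Values
// returned by Item are never retained past the loop iteration.
func collect(a value.Allocator, list value.List) []interface{} {
	r := list.RangeUsing(a)
	defer a.Free(r)
	var out []interface{}
	for r.Next() {
		_, item := r.Item()
		// item is invalidated by the next Next()/Item()/Free call,
		// so extract what is needed immediately.
		out = append(out, item.Unstructured())
	}
	return out
}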
var EmptyRange = &emptyRange{}
type emptyRange struct{}
func (_ *emptyRange) Next() bool {
return false
}
func (_ *emptyRange) Item() (index int, value Value) {
panic("Item called on empty ListRange")
}
// ListEquals compares two lists lexically.
// WARN: This is a naive implementation; calling lhs.Equals(rhs) is typically the most efficient.
func ListEquals(lhs, rhs List) bool {
return ListEqualsUsing(HeapAllocator, lhs, rhs)
}
// ListEqualsUsing uses the provided allocator and compares two lists lexically.
// WARN: This is a naive implementation; calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient.
func ListEqualsUsing(a Allocator, lhs, rhs List) bool {
if lhs.Length() != rhs.Length() {
return false
}
for i := 0; i < lhs.Length(); i++ {
lv := lhs.At(i)
rv := rhs.At(i)
if !Equals(lv, rv) {
lv.Recycle()
rv.Recycle()
lhsRange := lhs.RangeUsing(a)
defer a.Free(lhsRange)
rhsRange := rhs.RangeUsing(a)
defer a.Free(rhsRange)
for lhsRange.Next() && rhsRange.Next() {
_, lv := lhsRange.Item()
_, rv := rhsRange.Item()
if !EqualsUsing(a, lv, rv) {
return false
}
lv.Recycle()
rv.Recycle()
}
return true
}
@ -53,28 +103,37 @@ func ListLess(lhs, rhs List) bool {
// ListCompare compares two lists lexically. The result will be 0 if lhs==rhs, -1
// if lhs < rhs, and +1 if lhs > rhs.
func ListCompare(lhs, rhs List) int {
i := 0
return ListCompareUsing(HeapAllocator, lhs, rhs)
}
// ListCompareUsing uses the provided allocator and compares two lists lexically. The result will be 0 if lhs==rhs, -1
// if lhs < rhs, and +1 if lhs > rhs.
func ListCompareUsing(a Allocator, lhs, rhs List) int {
lhsRange := lhs.RangeUsing(a)
defer a.Free(lhsRange)
rhsRange := rhs.RangeUsing(a)
defer a.Free(rhsRange)
for {
if i >= lhs.Length() && i >= rhs.Length() {
lhsOk := lhsRange.Next()
rhsOk := rhsRange.Next()
if !lhsOk && !rhsOk {
// Lists are the same length and all items are equal.
return 0
}
if i >= lhs.Length() {
if !lhsOk {
// LHS is shorter.
return -1
}
if i >= rhs.Length() {
if !rhsOk {
// RHS is shorter.
return 1
}
lv := lhs.At(i)
rv := rhs.At(i)
if c := Compare(lv, rv); c != 0 {
_, lv := lhsRange.Item()
_, rv := rhsRange.Item()
if c := CompareUsing(a, lv, rv); c != 0 {
return c
}
lv.Recycle()
rv.Recycle()
// The items are equal; continue.
i++
}
}
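For instance, a hedged sketch of the ordering semantics: a list that runs out of items first sorts lower, and otherwise the first unequal pair of items decides.

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	shorter := value.NewValueInterface([]interface{}{int64(1), int64(2)}).AsList()
	longer := value.NewValueInterface([]interface{}{int64(1), int64(2), int64(3)}).AsList()
	fmt.Println(value.ListCompare(shorter, longer)) // -1: lhs is a strict prefix
	fmt.Println(value.ListCompare(longer, shorter)) // +1
	fmt.Println(value.ListCompare(longer, longer))  // 0
}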

View File

@ -16,7 +16,9 @@ limitations under the License.
package value
import "reflect"
import (
"reflect"
)
type listReflect struct {
Value reflect.Value
@ -29,7 +31,12 @@ func (r listReflect) Length() int {
func (r listReflect) At(i int) Value {
val := r.Value
return mustWrapValueReflect(val.Index(i))
return mustWrapValueReflect(val.Index(i), nil, nil)
}
func (r listReflect) AtUsing(a Allocator, i int) Value {
val := r.Value
return a.allocValueReflect().mustReuse(val.Index(i), nil, nil, nil)
}
func (r listReflect) Unstructured() interface{} {
@ -40,3 +47,52 @@ func (r listReflect) Unstructured() interface{} {
}
return result
}
func (r listReflect) Range() ListRange {
return r.RangeUsing(HeapAllocator)
}
func (r listReflect) RangeUsing(a Allocator) ListRange {
length := r.Value.Len()
if length == 0 {
return EmptyRange
}
rr := a.allocListReflectRange()
rr.list = r.Value
rr.i = -1
rr.entry = TypeReflectEntryOf(r.Value.Type().Elem())
return rr
}
func (r listReflect) Equals(other List) bool {
return r.EqualsUsing(HeapAllocator, other)
}
func (r listReflect) EqualsUsing(a Allocator, other List) bool {
if otherReflectList, ok := other.(*listReflect); ok {
return reflect.DeepEqual(r.Value.Interface(), otherReflectList.Value.Interface())
}
return ListEqualsUsing(a, &r, other)
}
type listReflectRange struct {
list reflect.Value
vr *valueReflect
i int
entry *TypeReflectCacheEntry
}
func (r *listReflectRange) Next() bool {
r.i += 1
return r.i < r.list.Len()
}
func (r *listReflectRange) Item() (index int, value Value) {
if r.i < 0 {
panic("Item() called before first calling Next()")
}
if r.i >= r.list.Len() {
panic("Item() called on ListRange with no more items")
}
v := r.list.Index(r.i)
return r.i, r.vr.mustReuse(v, r.entry, nil, nil)
}

View File

@ -25,3 +25,50 @@ func (l listUnstructured) Length() int {
func (l listUnstructured) At(i int) Value {
return NewValueInterface(l[i])
}
func (l listUnstructured) AtUsing(a Allocator, i int) Value {
return a.allocValueUnstructured().reuse(l[i])
}
func (l listUnstructured) Equals(other List) bool {
return l.EqualsUsing(HeapAllocator, other)
}
func (l listUnstructured) EqualsUsing(a Allocator, other List) bool {
return ListEqualsUsing(a, &l, other)
}
func (l listUnstructured) Range() ListRange {
return l.RangeUsing(HeapAllocator)
}
func (l listUnstructured) RangeUsing(a Allocator) ListRange {
if len(l) == 0 {
return EmptyRange
}
r := a.allocListUnstructuredRange()
r.list = l
r.i = -1
return r
}
type listUnstructuredRange struct {
list listUnstructured
vv *valueUnstructured
i int
}
func (r *listUnstructuredRange) Next() bool {
r.i += 1
return r.i < len(r.list)
}
func (r *listUnstructuredRange) Item() (index int, value Value) {
if r.i < 0 {
panic("Item() called before first calling Next()")
}
if r.i >= len(r.list) {
panic("Item() called on ListRange with no more items")
}
return r.i, r.vv.reuse(r.list[r.i])
}

View File

@ -18,7 +18,6 @@ package value
import (
"sort"
"strings"
)
// Map represents a map or Go structure.
@ -27,6 +26,11 @@ type Map interface {
Set(key string, val Value)
// Get returns the value for the given key, if present, or (nil, false) otherwise.
Get(key string) (Value, bool)
// GetUsing uses the provided allocator and returns the value for the given key,
// if present, or (nil, false) otherwise.
// The returned Value should be given back to the Allocator when no longer needed
// by calling Allocator.Free(Value).
GetUsing(a Allocator, key string) (Value, bool)
// Has returns true if the key is present, or false otherwise.
Has(key string) bool
// Delete removes the key from the map.
@ -34,12 +38,162 @@ type Map interface {
// Equals compares the two maps, and returns true if they are the same, false otherwise.
// Implementations can use MapEquals as a general implementation for this method.
Equals(other Map) bool
// EqualsUsing uses the provided allocator and compares the two maps, and returns true if
// they are the same, false otherwise. Implementations can use MapEqualsUsing as a general
// implementation for this method.
EqualsUsing(a Allocator, other Map) bool
// Iterate runs the given function for each key/value in the
// map. Returning false in the closure prematurely stops the
// iteration.
Iterate(func(key string, value Value) bool) bool
// IterateUsing uses the provided allocator and runs the given function for each key/value
// in the map. Returning false in the closure prematurely stops the iteration.
IterateUsing(Allocator, func(key string, value Value) bool) bool
// Length returns the number of items in the map.
Length() int
// Empty returns true if the map is empty.
Empty() bool
// Zip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called
// with the values from both maps, otherwise it is called with the value of the map that contains the key and nil
// for the map that does not contain the key. Returning false in the closure prematurely stops the iteration.
Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool
// ZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps
// contain a value for a given key, fn is called with the values from both maps, otherwise it is called with
// the value of the map that contains the key and nil for the map that does not contain the key. Returning
// false in the closure prematurely stops the iteration.
ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool
}
// MapTraverseOrder defines the map traversal ordering available.
type MapTraverseOrder int
const (
// Unordered indicates that the map traversal has no ordering requirement.
Unordered = iota
// LexicalKeyOrder indicates that the map traversal is ordered by key, lexically.
LexicalKeyOrder
)
// MapZip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called
// with the values from both maps, otherwise it is called with the value of the map that contains the key and nil
// for the other map. Returning false in the closure prematurely stops the iteration.
func MapZip(lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
return MapZipUsing(HeapAllocator, lhs, rhs, order, fn)
}
// MapZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps
// contain a value for a given key, fn is called with the values from both maps, otherwise it is called with
// the value of the map that contains the key and nil for the other map. Returning false in the closure
// prematurely stops the iteration.
func MapZipUsing(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
if lhs != nil {
return lhs.ZipUsing(a, rhs, order, fn)
}
if rhs != nil {
return rhs.ZipUsing(a, lhs, order, func(key string, rhs, lhs Value) bool { // arg positions of lhs and rhs deliberately swapped
return fn(key, lhs, rhs)
})
}
return true
}
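A minimal sketch of zipping two unstructured maps (import path assumed as above): keys present on only one side arrive with nil for the other side.

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	lhs := value.NewValueInterface(map[string]interface{}{"a": int64(1), "b": int64(2)}).AsMap()
	rhs := value.NewValueInterface(map[string]interface{}{"b": int64(3), "c": int64(4)}).AsMap()
	value.MapZip(lhs, rhs, value.LexicalKeyOrder, func(key string, l, r value.Value) bool {
		switch {
		case l == nil:
			fmt.Println(key, "only in rhs")
		case r == nil:
			fmt.Println(key, "only in lhs")
		case !value.Equals(l, r):
			fmt.Println(key, "differs")
		}
		return true // returning false would stop the iteration early
	})
}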
// defaultMapZip provides a default implementation of Zip for implementations that do not need to provide
// their own optimized implementation.
func defaultMapZip(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
switch order {
case Unordered:
return unorderedMapZip(a, lhs, rhs, fn)
case LexicalKeyOrder:
return lexicalKeyOrderedMapZip(a, lhs, rhs, fn)
default:
panic("Unsupported map order")
}
}
func unorderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool {
if (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) {
return true
}
if lhs != nil {
ok := lhs.IterateUsing(a, func(key string, lhsValue Value) bool {
var rhsValue Value
if rhs != nil {
if item, ok := rhs.GetUsing(a, key); ok {
rhsValue = item
defer a.Free(rhsValue)
}
}
return fn(key, lhsValue, rhsValue)
})
if !ok {
return false
}
}
if rhs != nil {
return rhs.IterateUsing(a, func(key string, rhsValue Value) bool {
if lhs == nil || !lhs.Has(key) {
return fn(key, nil, rhsValue)
}
return true
})
}
return true
}
func lexicalKeyOrderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool {
var lhsLength, rhsLength int
var orderedLength int // rough estimate of length of union of map keys
if lhs != nil {
lhsLength = lhs.Length()
orderedLength = lhsLength
}
if rhs != nil {
rhsLength = rhs.Length()
if rhsLength > orderedLength {
orderedLength = rhsLength
}
}
if lhsLength == 0 && rhsLength == 0 {
return true
}
ordered := make([]string, 0, orderedLength)
if lhs != nil {
lhs.IterateUsing(a, func(key string, _ Value) bool {
ordered = append(ordered, key)
return true
})
}
if rhs != nil {
rhs.IterateUsing(a, func(key string, _ Value) bool {
if lhs == nil || !lhs.Has(key) {
ordered = append(ordered, key)
}
return true
})
}
sort.Strings(ordered)
for _, key := range ordered {
var litem, ritem Value
if lhs != nil {
litem, _ = lhs.GetUsing(a, key)
}
if rhs != nil {
ritem, _ = rhs.GetUsing(a, key)
}
ok := fn(key, litem, ritem)
if litem != nil {
a.Free(litem)
}
if ritem != nil {
a.Free(ritem)
}
if !ok {
return false
}
}
return true
}
// MapLess compares two maps lexically.
@ -49,65 +203,68 @@ func MapLess(lhs, rhs Map) bool {
// MapCompare compares two maps lexically.
func MapCompare(lhs, rhs Map) int {
lorder := make([]string, 0, lhs.Length())
lhs.Iterate(func(key string, _ Value) bool {
lorder = append(lorder, key)
return true
})
sort.Strings(lorder)
rorder := make([]string, 0, rhs.Length())
rhs.Iterate(func(key string, _ Value) bool {
rorder = append(rorder, key)
return true
})
sort.Strings(rorder)
return MapCompareUsing(HeapAllocator, lhs, rhs)
}
i := 0
for {
if i >= len(lorder) && i >= len(rorder) {
// Maps are the same length and all items are equal.
// MapCompareUsing uses the provided allocator and compares two maps lexically.
func MapCompareUsing(a Allocator, lhs, rhs Map) int {
c := 0
var llength, rlength int
if lhs != nil {
llength = lhs.Length()
}
if rhs != nil {
rlength = rhs.Length()
}
if llength == 0 && rlength == 0 {
return 0
}
if i >= len(lorder) {
// LHS is shorter.
return -1
i := 0
MapZipUsing(a, lhs, rhs, LexicalKeyOrder, func(key string, lhs, rhs Value) bool {
switch {
case i == llength:
c = -1
case i == rlength:
c = 1
case lhs == nil:
c = 1
case rhs == nil:
c = -1
default:
c = CompareUsing(a, lhs, rhs)
}
if i >= len(rorder) {
// RHS is shorter.
return 1
}
if c := strings.Compare(lorder[i], rorder[i]); c != 0 {
return c
}
litem, _ := lhs.Get(lorder[i])
ritem, _ := rhs.Get(rorder[i])
if c := Compare(litem, ritem); c != 0 {
return c
}
litem.Recycle()
ritem.Recycle()
// The items are equal; continue.
i++
}
return c == 0
})
return c
}
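For example, a hedged sketch: when all shared keys compare equal, the map that runs out of keys first in lexical order sorts lower.

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	lhs := value.NewValueInterface(map[string]interface{}{"a": int64(1)}).AsMap()
	rhs := value.NewValueInterface(map[string]interface{}{"a": int64(1), "b": int64(2)}).AsMap()
	fmt.Println(value.MapCompare(lhs, rhs)) // -1: lhs exhausts its keys first
}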
// MapEquals returns true if lhs == rhs, false otherwise. This function
// acts on generic types and should not be used by callers, but can help
// implement Map.Equals.
// WARN: This is a naive implementation; calling lhs.Equals(rhs) is typically the most efficient.
func MapEquals(lhs, rhs Map) bool {
return MapEqualsUsing(HeapAllocator, lhs, rhs)
}
// MapEqualsUsing uses the provided allocator and returns true if lhs == rhs,
// false otherwise. This function acts on generic types and should not be used
// by callers, but can help implement Map.Equals.
// WARN: This is a naive implementation; calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient.
func MapEqualsUsing(a Allocator, lhs, rhs Map) bool {
if lhs == nil && rhs == nil {
return true
}
if lhs == nil || rhs == nil {
return false
}
if lhs.Length() != rhs.Length() {
return false
}
return lhs.Iterate(func(k string, v Value) bool {
vo, ok := rhs.Get(k)
if !ok {
return MapZipUsing(a, lhs, rhs, Unordered, func(key string, lhs, rhs Value) bool {
if lhs == nil || rhs == nil {
return false
}
if !Equals(v, vo) {
vo.Recycle()
return false
}
vo.Recycle()
return true
return EqualsUsing(a, lhs, rhs)
})
}

View File

@ -16,7 +16,9 @@ limitations under the License.
package value
import "reflect"
import (
"reflect"
)
type mapReflect struct {
valueReflect
@ -27,13 +29,27 @@ func (r mapReflect) Length() int {
return val.Len()
}
func (r mapReflect) Empty() bool {
val := r.Value
return val.Len() == 0
}
func (r mapReflect) Get(key string) (Value, bool) {
mapKey := r.toMapKey(key)
val := r.Value.MapIndex(mapKey)
if !val.IsValid() {
return r.GetUsing(HeapAllocator, key)
}
func (r mapReflect) GetUsing(a Allocator, key string) (Value, bool) {
k, v, ok := r.get(key)
if !ok {
return nil, false
}
return mustWrapValueReflectMapItem(&r.Value, &mapKey, val), val != reflect.Value{}
return a.allocValueReflect().mustReuse(v, nil, &r.Value, &k), true
}
func (r mapReflect) get(k string) (key, value reflect.Value, ok bool) {
mapKey := r.toMapKey(k)
val := r.Value.MapIndex(mapKey)
return mapKey, val, val.IsValid() && val != reflect.Value{}
}
func (r mapReflect) Has(key string) bool {
@ -61,21 +77,29 @@ func (r mapReflect) toMapKey(key string) reflect.Value {
}
func (r mapReflect) Iterate(fn func(string, Value) bool) bool {
return eachMapEntry(r.Value, func(s string, value reflect.Value) bool {
mapVal := mustWrapValueReflect(value)
defer mapVal.Recycle()
return fn(s, mapVal)
return r.IterateUsing(HeapAllocator, fn)
}
func (r mapReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool {
if r.Value.Len() == 0 {
return true
}
v := a.allocValueReflect()
defer a.Free(v)
return eachMapEntry(r.Value, func(e *TypeReflectCacheEntry, key reflect.Value, value reflect.Value) bool {
return fn(key.String(), v.mustReuse(value, e, &r.Value, &key))
})
}
func eachMapEntry(val reflect.Value, fn func(string, reflect.Value) bool) bool {
func eachMapEntry(val reflect.Value, fn func(*TypeReflectCacheEntry, reflect.Value, reflect.Value) bool) bool {
iter := val.MapRange()
entry := TypeReflectEntryOf(val.Type().Elem())
for iter.Next() {
next := iter.Value()
if !next.IsValid() {
continue
}
if !fn(iter.Key().String(), next) {
if !fn(entry, iter.Key(), next) {
return false
}
}
@ -92,16 +116,94 @@ func (r mapReflect) Unstructured() interface{} {
}
func (r mapReflect) Equals(m Map) bool {
if r.Length() != m.Length() {
return r.EqualsUsing(HeapAllocator, m)
}
func (r mapReflect) EqualsUsing(a Allocator, m Map) bool {
lhsLength := r.Length()
rhsLength := m.Length()
if lhsLength != rhsLength {
return false
}
// TODO: Optimize to avoid Iterate looping here by using r.Value.MapRange or similar if it improves performance.
if lhsLength == 0 {
return true
}
vr := a.allocValueReflect()
defer a.Free(vr)
entry := TypeReflectEntryOf(r.Value.Type().Elem())
return m.Iterate(func(key string, value Value) bool {
lhsVal, ok := r.Get(key)
_, lhsVal, ok := r.get(key)
if !ok {
return false
}
return Equals(lhsVal, value)
return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value)
})
}
func (r mapReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
return r.ZipUsing(HeapAllocator, other, order, fn)
}
func (r mapReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
if otherMapReflect, ok := other.(*mapReflect); ok && order == Unordered {
return r.unorderedReflectZip(a, otherMapReflect, fn)
}
return defaultMapZip(a, &r, other, order, fn)
}
// unorderedReflectZip provides an optimized unordered zip for mapReflect types.
func (r mapReflect) unorderedReflectZip(a Allocator, other *mapReflect, fn func(key string, lhs, rhs Value) bool) bool {
if r.Empty() && (other == nil || other.Empty()) {
return true
}
lhs := r.Value
lhsEntry := TypeReflectEntryOf(lhs.Type().Elem())
// map lookup via reflection is expensive enough that it is better to keep track of visited keys
visited := map[string]struct{}{}
vlhs, vrhs := a.allocValueReflect(), a.allocValueReflect()
defer a.Free(vlhs)
defer a.Free(vrhs)
if other != nil {
rhs := other.Value
rhsEntry := TypeReflectEntryOf(rhs.Type().Elem())
iter := rhs.MapRange()
for iter.Next() {
key := iter.Key()
keyString := key.String()
next := iter.Value()
if !next.IsValid() {
continue
}
rhsVal := vrhs.mustReuse(next, rhsEntry, &rhs, &key)
visited[keyString] = struct{}{}
var lhsVal Value
if _, v, ok := r.get(keyString); ok {
lhsVal = vlhs.mustReuse(v, lhsEntry, &lhs, &key)
}
if !fn(keyString, lhsVal, rhsVal) {
return false
}
}
}
iter := lhs.MapRange()
for iter.Next() {
key := iter.Key()
if _, ok := visited[key.String()]; ok {
continue
}
next := iter.Value()
if !next.IsValid() {
continue
}
if !fn(key.String(), vlhs.mustReuse(next, lhsEntry, &lhs, &key), nil) {
return false
}
}
return true
}

View File

@ -23,10 +23,14 @@ func (m mapUnstructuredInterface) Set(key string, val Value) {
}
func (m mapUnstructuredInterface) Get(key string) (Value, bool) {
return m.GetUsing(HeapAllocator, key)
}
func (m mapUnstructuredInterface) GetUsing(a Allocator, key string) (Value, bool) {
if v, ok := m[key]; !ok {
return nil, false
} else {
return NewValueInterface(v), true
return a.allocValueUnstructured().reuse(v), true
}
}
@ -40,16 +44,22 @@ func (m mapUnstructuredInterface) Delete(key string) {
}
func (m mapUnstructuredInterface) Iterate(fn func(key string, value Value) bool) bool {
return m.IterateUsing(HeapAllocator, fn)
}
func (m mapUnstructuredInterface) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool {
if len(m) == 0 {
return true
}
vv := a.allocValueUnstructured()
defer a.Free(vv)
for k, v := range m {
if ks, ok := k.(string); !ok {
continue
} else {
vv := NewValueInterface(v)
if !fn(ks, vv) {
vv.Recycle()
if !fn(ks, vv.reuse(v)) {
return false
}
vv.Recycle()
}
}
return true
@ -59,29 +69,40 @@ func (m mapUnstructuredInterface) Length() int {
return len(m)
}
func (m mapUnstructuredInterface) Empty() bool {
return len(m) == 0
}
func (m mapUnstructuredInterface) Equals(other Map) bool {
if m.Length() != other.Length() {
return m.EqualsUsing(HeapAllocator, other)
}
func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool {
lhsLength := m.Length()
rhsLength := other.Length()
if lhsLength != rhsLength {
return false
}
for k, v := range m {
ks, ok := k.(string)
if !ok {
return false
}
vo, ok := other.Get(ks)
if !ok {
return false
}
vv := NewValueInterface(v)
if !Equals(vv, vo) {
vv.Recycle()
vo.Recycle()
return false
}
vo.Recycle()
vv.Recycle()
}
if lhsLength == 0 {
return true
}
vv := a.allocValueUnstructured()
defer a.Free(vv)
return other.Iterate(func(key string, value Value) bool {
lhsVal, ok := m[key]
if !ok {
return false
}
return Equals(vv.reuse(lhsVal), value)
})
}
func (m mapUnstructuredInterface) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
return m.ZipUsing(HeapAllocator, other, order, fn)
}
func (m mapUnstructuredInterface) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
return defaultMapZip(a, m, other, order, fn)
}
type mapUnstructuredString map[string]interface{}
@ -91,10 +112,13 @@ func (m mapUnstructuredString) Set(key string, val Value) {
}
func (m mapUnstructuredString) Get(key string) (Value, bool) {
return m.GetUsing(HeapAllocator, key)
}
func (m mapUnstructuredString) GetUsing(a Allocator, key string) (Value, bool) {
if v, ok := m[key]; !ok {
return nil, false
} else {
return NewValueInterface(v), true
return a.allocValueUnstructured().reuse(v), true
}
}
@ -108,13 +132,19 @@ func (m mapUnstructuredString) Delete(key string) {
}
func (m mapUnstructuredString) Iterate(fn func(key string, value Value) bool) bool {
return m.IterateUsing(HeapAllocator, fn)
}
func (m mapUnstructuredString) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool {
if len(m) == 0 {
return true
}
vv := a.allocValueUnstructured()
defer a.Free(vv)
for k, v := range m {
vv := NewValueInterface(v)
if !fn(k, vv) {
vv.Recycle()
if !fn(k, vv.reuse(v)) {
return false
}
vv.Recycle()
}
return true
}
@ -124,22 +154,37 @@ func (m mapUnstructuredString) Length() int {
}
func (m mapUnstructuredString) Equals(other Map) bool {
if m.Length() != other.Length() {
return m.EqualsUsing(HeapAllocator, other)
}
func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool {
lhsLength := m.Length()
rhsLength := other.Length()
if lhsLength != rhsLength {
return false
}
for k, v := range m {
vo, ok := other.Get(k)
if lhsLength == 0 {
return true
}
vv := a.allocValueUnstructured()
defer a.Free(vv)
return other.Iterate(func(key string, value Value) bool {
lhsVal, ok := m[key]
if !ok {
return false
}
vv := NewValueInterface(v)
if !Equals(vv, vo) {
vo.Recycle()
vv.Recycle()
return false
}
vo.Recycle()
vv.Recycle()
}
return true
return Equals(vv.reuse(lhsVal), value)
})
}
func (m mapUnstructuredString) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
return m.ZipUsing(HeapAllocator, other, order, fn)
}
func (m mapUnstructuredString) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
return defaultMapZip(a, m, other, order, fn)
}
func (m mapUnstructuredString) Empty() bool {
return len(m) == 0
}

View File

@ -0,0 +1,463 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package value
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"sort"
"sync"
"sync/atomic"
)
// UnstructuredConverter defines how a type can be converted directly to unstructured.
// Types that implement json.Marshaler may also optionally implement this interface to provide a more
// direct and more efficient conversion. All types that choose to implement this interface must still
// implement this same conversion via json.Marshaler.
type UnstructuredConverter interface {
json.Marshaler // require that json.Marshaler is implemented
// ToUnstructured returns the unstructured representation.
ToUnstructured() interface{}
}
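A hedged sketch of a type opting into this fast path; Quantity and its string field are hypothetical, and ToUnstructured must agree exactly with what decoding MarshalJSON's output would produce.

package example

import "encoding/json"

// Quantity is a hypothetical scalar type used only for illustration.
type Quantity struct {
	s string
}

// MarshalJSON is still required: UnstructuredConverter embeds json.Marshaler.
func (q Quantity) MarshalJSON() ([]byte, error) {
	return json.Marshal(q.s)
}

// ToUnstructured returns the same representation that round-tripping
// MarshalJSON would, without the JSON encode/decode cost.
func (q Quantity) ToUnstructured() interface{} {
	return q.s
}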
// TypeReflectCacheEntry keeps data gathered using reflection about how a type is converted to/from unstructured.
type TypeReflectCacheEntry struct {
isJsonMarshaler bool
ptrIsJsonMarshaler bool
isJsonUnmarshaler bool
ptrIsJsonUnmarshaler bool
isStringConvertable bool
ptrIsStringConvertable bool
structFields map[string]*FieldCacheEntry
orderedStructFields []*FieldCacheEntry
}
// FieldCacheEntry keeps data gathered using reflection about how the field of a struct is converted to/from
// unstructured.
type FieldCacheEntry struct {
// JsonName is the name of the field according to the json tags on the struct field.
JsonName string
// isOmitEmpty is true if the field has the json 'omitempty' tag.
isOmitEmpty bool
// fieldPath is a list of field indices (see FieldByIndex) to lookup the value of
// a field in a reflect.Value struct. The field indices in the list form a path used
// to traverse through intermediary 'inline' fields.
fieldPath [][]int
fieldType reflect.Type
TypeEntry *TypeReflectCacheEntry
}
func (f *FieldCacheEntry) CanOmit(fieldVal reflect.Value) bool {
return f.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal))
}
// GetFrom returns the field identified by this FieldCacheEntry from the provided struct.
func (f *FieldCacheEntry) GetFrom(structVal reflect.Value) reflect.Value {
// field might be nested within 'inline' structs
for _, elem := range f.fieldPath {
structVal = structVal.FieldByIndex(elem)
}
return structVal
}
var marshalerType = reflect.TypeOf(new(json.Marshaler)).Elem()
var unmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem()
var unstructuredConvertableType = reflect.TypeOf(new(UnstructuredConverter)).Elem()
var defaultReflectCache = newReflectCache()
// TypeReflectEntryOf returns the TypeReflectCacheEntry of the provided reflect.Type.
func TypeReflectEntryOf(t reflect.Type) *TypeReflectCacheEntry {
cm := defaultReflectCache.get()
if record, ok := cm[t]; ok {
return record
}
updates := reflectCacheMap{}
result := typeReflectEntryOf(cm, t, updates)
if len(updates) > 0 {
defaultReflectCache.update(updates)
}
return result
}
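A minimal usage sketch (the pod type here is hypothetical): the first call computes and caches the entry, and later calls are lock-free map lookups.

package main

import (
	"fmt"
	"reflect"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

type pod struct {
	Name   string            `json:"name"`
	Labels map[string]string `json:"labels,omitempty"`
}

func main() {
	entry := value.TypeReflectEntryOf(reflect.TypeOf(pod{}))
	for _, f := range entry.OrderedFields() {
		fmt.Println(f.JsonName) // "labels", then "name" (sorted by JSON name)
	}
}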
// typeReflectEntryOf returns the TypeReflectCacheEntry for the provided reflect.Type, recording in updates
// all entries needed to add it, and the types its fields transitively depend on, to the cache.
func typeReflectEntryOf(cm reflectCacheMap, t reflect.Type, updates reflectCacheMap) *TypeReflectCacheEntry {
if record, ok := cm[t]; ok {
return record
}
if record, ok := updates[t]; ok {
return record
}
typeEntry := &TypeReflectCacheEntry{
isJsonMarshaler: t.Implements(marshalerType),
ptrIsJsonMarshaler: reflect.PtrTo(t).Implements(marshalerType),
isJsonUnmarshaler: reflect.PtrTo(t).Implements(unmarshalerType),
isStringConvertable: t.Implements(unstructuredConvertableType),
ptrIsStringConvertable: reflect.PtrTo(t).Implements(unstructuredConvertableType),
}
if t.Kind() == reflect.Struct {
fieldEntries := map[string]*FieldCacheEntry{}
buildStructCacheEntry(t, fieldEntries, nil)
typeEntry.structFields = fieldEntries
sortedByJsonName := make([]*FieldCacheEntry, len(fieldEntries))
i := 0
for _, entry := range fieldEntries {
sortedByJsonName[i] = entry
i++
}
sort.Slice(sortedByJsonName, func(i, j int) bool {
return sortedByJsonName[i].JsonName < sortedByJsonName[j].JsonName
})
typeEntry.orderedStructFields = sortedByJsonName
}
// cyclic type references are allowed, so we must add the typeEntry to the updates map before resolving
// the field.typeEntry references, or creating them if they are not already in the cache
updates[t] = typeEntry
for _, field := range typeEntry.structFields {
if field.TypeEntry == nil {
field.TypeEntry = typeReflectEntryOf(cm, field.fieldType, updates)
}
}
return typeEntry
}
func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fieldPath [][]int) {
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
jsonName, omit, isInline, isOmitempty := lookupJsonTags(field)
if omit {
continue
}
if isInline {
buildStructCacheEntry(field.Type, infos, append(fieldPath, field.Index))
continue
}
info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type}
infos[jsonName] = info
}
}
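A hedged sketch of the struct tags this walk reacts to; the types are hypothetical, "inline" follows the Kubernetes json-tag convention rather than encoding/json, and "-" is assumed to be dropped by lookupJsonTags as in encoding/json.

package example

// Metadata's fields are flattened into the parent's field set via "inline".
type Metadata struct {
	Name string `json:"name"`
}

type Object struct {
	Metadata `json:",inline"`
	// omitempty: skipped during iteration when the value is empty.
	Spec string `json:"spec,omitempty"`
	// "-": assumed to be excluded from the field entries entirely.
	Hidden string `json:"-"`
}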
// Fields returns a map of JSON field name to FieldCacheEntry for structs, or nil for non-structs.
func (e TypeReflectCacheEntry) Fields() map[string]*FieldCacheEntry {
return e.structFields
}
// OrderedFields returns the struct's FieldCacheEntries sorted by JSON field name, or nil for non-structs.
func (e TypeReflectCacheEntry) OrderedFields() []*FieldCacheEntry {
return e.orderedStructFields
}
// CanConvertToUnstructured returns true if this TypeReflectCacheEntry can convert values of its type to unstructured.
func (e TypeReflectCacheEntry) CanConvertToUnstructured() bool {
return e.isJsonMarshaler || e.ptrIsJsonMarshaler || e.isStringConvertable || e.ptrIsStringConvertable
}
// ToUnstructured converts the provided value to unstructured and returns it.
func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, error) {
// This is based on https://github.com/kubernetes/kubernetes/blob/82c9e5c814eb7acc6cc0a090c057294d0667ad66/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L505
// and is intended to replace it.
// Check if the object has a custom unstructured converter and use it if available, since it is much more
// efficient than round-tripping through json.
if converter, ok := e.getUnstructuredConverter(sv); ok {
return converter.ToUnstructured(), nil
}
// Check if the object has a custom JSON marshaller/unmarshaller.
if marshaler, ok := e.getJsonMarshaler(sv); ok {
if sv.Kind() == reflect.Ptr && sv.IsNil() {
// We're done - we don't need to store anything.
return nil, nil
}
data, err := marshaler.MarshalJSON()
if err != nil {
return nil, err
}
switch {
case len(data) == 0:
return nil, fmt.Errorf("error decoding from json: empty value")
case bytes.Equal(data, nullBytes):
// We're done - we don't need to store anything.
return nil, nil
case bytes.Equal(data, trueBytes):
return true, nil
case bytes.Equal(data, falseBytes):
return false, nil
case data[0] == '"':
var result string
err := unmarshal(data, &result)
if err != nil {
return nil, fmt.Errorf("error decoding string from json: %v", err)
}
return result, nil
case data[0] == '{':
result := make(map[string]interface{})
err := unmarshal(data, &result)
if err != nil {
return nil, fmt.Errorf("error decoding object from json: %v", err)
}
return result, nil
case data[0] == '[':
result := make([]interface{}, 0)
err := unmarshal(data, &result)
if err != nil {
return nil, fmt.Errorf("error decoding array from json: %v", err)
}
return result, nil
default:
var (
resultInt int64
resultFloat float64
err error
)
if err = unmarshal(data, &resultInt); err == nil {
return resultInt, nil
} else if err = unmarshal(data, &resultFloat); err == nil {
return resultFloat, nil
} else {
return nil, fmt.Errorf("error decoding number from json: %v", err)
}
}
}
return nil, fmt.Errorf("provided type cannot be converted: %v", sv.Type())
}
// CanConvertFromUnstructured returns true if this TypeReflectCacheEntry can convert objects of the type from unstructured.
func (e TypeReflectCacheEntry) CanConvertFromUnstructured() bool {
return e.isJsonUnmarshaler
}
// FromUnstructured converts the provided source value from unstructured into the provided destination value.
func (e TypeReflectCacheEntry) FromUnstructured(sv, dv reflect.Value) error {
// TODO: this could be made much more efficient using direct conversions like
// UnstructuredConverter.ToUnstructured provides.
st := dv.Type()
data, err := json.Marshal(sv.Interface())
if err != nil {
return fmt.Errorf("error encoding %s to json: %v", st.String(), err)
}
if unmarshaler, ok := e.getJsonUnmarshaler(dv); ok {
return unmarshaler.UnmarshalJSON(data)
}
return fmt.Errorf("unable to unmarshal %v into %v", sv.Type(), dv.Type())
}
var (
nullBytes = []byte("null")
trueBytes = []byte("true")
falseBytes = []byte("false")
)
func (e TypeReflectCacheEntry) getJsonMarshaler(v reflect.Value) (json.Marshaler, bool) {
if e.isJsonMarshaler {
return v.Interface().(json.Marshaler), true
}
if e.ptrIsJsonMarshaler {
// Check pointer receivers if v is not a pointer
if v.Kind() != reflect.Ptr && v.CanAddr() {
v = v.Addr()
return v.Interface().(json.Marshaler), true
}
}
return nil, false
}
func (e TypeReflectCacheEntry) getJsonUnmarshaler(v reflect.Value) (json.Unmarshaler, bool) {
if !e.isJsonUnmarshaler {
return nil, false
}
return v.Addr().Interface().(json.Unmarshaler), true
}
func (e TypeReflectCacheEntry) getUnstructuredConverter(v reflect.Value) (UnstructuredConverter, bool) {
if e.isStringConvertable {
return v.Interface().(UnstructuredConverter), true
}
if e.ptrIsStringConvertable {
// Check pointer receivers if v is not a pointer
if v.CanAddr() {
v = v.Addr()
return v.Interface().(UnstructuredConverter), true
}
}
return nil, false
}
type typeReflectCache struct {
// use an atomic and copy-on-write since there are a fixed (typically very small) number of structs compiled into any
// go program using this cache
value atomic.Value
// mu is held by writers when performing load/modify/store operations on the cache, readers do not need to hold a
// read-lock since the atomic value is always read-only
mu sync.Mutex
}
func newReflectCache() *typeReflectCache {
cache := &typeReflectCache{}
cache.value.Store(make(reflectCacheMap))
return cache
}
type reflectCacheMap map[reflect.Type]*TypeReflectCacheEntry
// get returns the reflectCacheMap.
func (c *typeReflectCache) get() reflectCacheMap {
return c.value.Load().(reflectCacheMap)
}
// update merges the provided updates into the cache.
func (c *typeReflectCache) update(updates reflectCacheMap) {
c.mu.Lock()
defer c.mu.Unlock()
currentCacheMap := c.value.Load().(reflectCacheMap)
hasNewEntries := false
for t := range updates {
if _, ok := currentCacheMap[t]; !ok {
hasNewEntries = true
break
}
}
if !hasNewEntries {
// Bail if the updates have been set while waiting for lock acquisition.
// This is safe since setting entries is idempotent.
return
}
newCacheMap := make(reflectCacheMap, len(currentCacheMap)+len(updates))
for k, v := range currentCacheMap {
newCacheMap[k] = v
}
for t, update := range updates {
newCacheMap[t] = update
}
c.value.Store(newCacheMap)
}
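The same copy-on-write shape, reduced to a generic sketch with hypothetical names: readers do an atomic load with no lock, while writers serialize on a mutex, copy the current map, and publish a new version.

package example

import (
	"sync"
	"sync/atomic"
)

type cowCache struct {
	mu sync.Mutex
	v  atomic.Value // holds a read-only map[string]int
}

func newCowCache() *cowCache {
	c := &cowCache{}
	c.v.Store(map[string]int{})
	return c
}

// get is lock-free: the stored map is never mutated after being published.
func (c *cowCache) get(k string) (int, bool) {
	val, ok := c.v.Load().(map[string]int)[k]
	return val, ok
}

// set copies the current map, adds the entry, and atomically publishes it.
func (c *cowCache) set(k string, val int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	cur := c.v.Load().(map[string]int)
	if existing, ok := cur[k]; ok && existing == val {
		return // already present; setting entries is idempotent
	}
	next := make(map[string]int, len(cur)+1)
	for key, v := range cur {
		next[key] = v
	}
	next[k] = val
	c.v.Store(next)
}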
// The json unmarshal logic below is from k8s.io/apimachinery/pkg/util/json
// to handle number conversions as expected by Kubernetes
// limit recursive depth to prevent stack overflow errors
const maxDepth = 10000
// unmarshal unmarshals the given data.
// If v is a *map[string]interface{} or *[]interface{}, numbers are converted to int64 or float64
// rather than being left as the default float64.
func unmarshal(data []byte, v interface{}) error {
switch v := v.(type) {
case *map[string]interface{}:
// Build a decoder from the given data
decoder := json.NewDecoder(bytes.NewBuffer(data))
// Preserve numbers, rather than casting to float64 automatically
decoder.UseNumber()
// Run the decode
if err := decoder.Decode(v); err != nil {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return convertMapNumbers(*v, 0)
case *[]interface{}:
// Build a decoder from the given data
decoder := json.NewDecoder(bytes.NewBuffer(data))
// Preserve numbers, rather than casting to float64 automatically
decoder.UseNumber()
// Run the decode
if err := decoder.Decode(v); err != nil {
return err
}
// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
return convertSliceNumbers(*v, 0)
default:
return json.Unmarshal(data, v)
}
}
// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
// values which are map[string]interface{} or []interface{} are recursively visited
func convertMapNumbers(m map[string]interface{}, depth int) error {
if depth > maxDepth {
return fmt.Errorf("exceeded max depth of %d", maxDepth)
}
var err error
for k, v := range m {
switch v := v.(type) {
case json.Number:
m[k], err = convertNumber(v)
case map[string]interface{}:
err = convertMapNumbers(v, depth+1)
case []interface{}:
err = convertSliceNumbers(v, depth+1)
}
if err != nil {
return err
}
}
return nil
}
// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64.
// values which are map[string]interface{} or []interface{} are recursively visited
func convertSliceNumbers(s []interface{}, depth int) error {
if depth > maxDepth {
return fmt.Errorf("exceeded max depth of %d", maxDepth)
}
var err error
for i, v := range s {
switch v := v.(type) {
case json.Number:
s[i], err = convertNumber(v)
case map[string]interface{}:
err = convertMapNumbers(v, depth+1)
case []interface{}:
err = convertSliceNumbers(v, depth+1)
}
if err != nil {
return err
}
}
return nil
}
// convertNumber converts a json.Number to an int64 or float64, or returns an error
func convertNumber(n json.Number) (interface{}, error) {
// Attempt to convert to an int64 first
if i, err := n.Int64(); err == nil {
return i, nil
}
// Return a float64 (default json.Decode() behavior)
// An overflow will return an error
return n.Float64()
}

View File

@ -19,162 +19,66 @@ package value
import (
"fmt"
"reflect"
"sync"
"sync/atomic"
)
// reflectStructCache keeps track of json tag related data for structs and fields to speed up reflection.
// TODO: This overlaps in functionality with the fieldCache in
// https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L57 but
// is more efficient at lookup by json field name. The logic should be consolidated. Only one copy of the cache needs
// to be kept for each running process.
var (
reflectStructCache = newStructCache()
)
type structCache struct {
// use an atomic and copy-on-write since there are a fixed (typically very small) number of structs compiled into any
// go program using this cache
value atomic.Value
// mu is held by writers when performing load/modify/store operations on the cache, readers do not need to hold a
// read-lock since the atomic value is always read-only
mu sync.Mutex
}
type structCacheMap map[reflect.Type]structCacheEntry
// structCacheEntry contains information about each struct field, keyed by json field name, that is expensive to
// compute using reflection.
type structCacheEntry map[string]*fieldCacheEntry
// Get returns true and fieldCacheEntry for the given type if the type is in the cache. Otherwise Get returns false.
func (c *structCache) Get(t reflect.Type) (map[string]*fieldCacheEntry, bool) {
entry, ok := c.value.Load().(structCacheMap)[t]
return entry, ok
}
// Set sets the fieldCacheEntry for the given type via a copy-on-write update to the struct cache.
func (c *structCache) Set(t reflect.Type, m map[string]*fieldCacheEntry) {
c.mu.Lock()
defer c.mu.Unlock()
currentCacheMap := c.value.Load().(structCacheMap)
if _, ok := currentCacheMap[t]; ok {
// Bail if the entry has been set while waiting for lock acquisition.
// This is safe since setting entries is idempotent.
return
}
newCacheMap := make(structCacheMap, len(currentCacheMap)+1)
for k, v := range currentCacheMap {
newCacheMap[k] = v
}
newCacheMap[t] = m
c.value.Store(newCacheMap)
}
func newStructCache() *structCache {
cache := &structCache{}
cache.value.Store(make(structCacheMap))
return cache
}
type fieldCacheEntry struct {
// isOmitEmpty is true if the field has the json 'omitempty' tag.
isOmitEmpty bool
// fieldPath is the field indices (see FieldByIndex) to lookup the value of
// a field in a reflect.Value struct. A path of field indices is used
// to support traversing to a field field in struct fields that have the 'inline'
// json tag.
fieldPath [][]int
}
func (f *fieldCacheEntry) getFieldFromStruct(structVal reflect.Value) reflect.Value {
// field might be field within 'inline' structs
for _, elem := range f.fieldPath {
structVal = structVal.FieldByIndex(elem)
}
return structVal
}
func getStructCacheEntry(t reflect.Type) structCacheEntry {
if hints, ok := reflectStructCache.Get(t); ok {
return hints
}
hints := map[string]*fieldCacheEntry{}
buildStructCacheEntry(t, hints, nil)
reflectStructCache.Set(t, hints)
return hints
}
func buildStructCacheEntry(t reflect.Type, infos map[string]*fieldCacheEntry, fieldPath [][]int) {
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
jsonName, omit, isInline, isOmitempty := lookupJsonTags(field)
if omit {
continue
}
if isInline {
buildStructCacheEntry(field.Type, infos, append(fieldPath, field.Index))
continue
}
info := &fieldCacheEntry{isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index)}
infos[jsonName] = info
}
}
type structReflect struct {
valueReflect
}
func (r structReflect) Length() int {
i := 0
eachStructField(r.Value, func(s string, value reflect.Value) bool {
eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool {
i++
return true
})
return i
}
func (r structReflect) Empty() bool {
return eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool {
return false // exit early if the struct is non-empty
})
}
func (r structReflect) Get(key string) (Value, bool) {
if val, ok, _ := r.findJsonNameField(key); ok {
return mustWrapValueReflect(val), true
return r.GetUsing(HeapAllocator, key)
}
func (r structReflect) GetUsing(a Allocator, key string) (Value, bool) {
if val, ok := r.findJsonNameField(key); ok {
return a.allocValueReflect().mustReuse(val, nil, nil, nil), true
}
return nil, false
}
func (r structReflect) Has(key string) bool {
_, ok, _ := r.findJsonNameField(key)
_, ok := r.findJsonNameField(key)
return ok
}
func (r structReflect) Set(key string, val Value) {
fieldEntry, ok := getStructCacheEntry(r.Value.Type())[key]
fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key]
if !ok {
panic(fmt.Sprintf("key %s may not be set on struct %T: field does not exist", key, r.Value.Interface()))
}
oldVal := fieldEntry.getFieldFromStruct(r.Value)
oldVal := fieldEntry.GetFrom(r.Value)
newVal := reflect.ValueOf(val.Unstructured())
r.update(fieldEntry, key, oldVal, newVal)
}
func (r structReflect) Delete(key string) {
fieldEntry, ok := getStructCacheEntry(r.Value.Type())[key]
fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key]
if !ok {
panic(fmt.Sprintf("key %s may not be deleted on struct %T: field does not exist", key, r.Value.Interface()))
}
oldVal := fieldEntry.getFieldFromStruct(r.Value)
oldVal := fieldEntry.GetFrom(r.Value)
if oldVal.Kind() != reflect.Ptr && !fieldEntry.isOmitEmpty {
panic(fmt.Sprintf("key %s may not be deleted on struct: %T: value is neither a pointer nor an omitempty field", key, r.Value.Interface()))
}
r.update(fieldEntry, key, oldVal, reflect.Zero(oldVal.Type()))
}
func (r structReflect) update(fieldEntry *fieldCacheEntry, key string, oldVal, newVal reflect.Value) {
func (r structReflect) update(fieldEntry *FieldCacheEntry, key string, oldVal, newVal reflect.Value) {
if oldVal.CanSet() {
oldVal.Set(newVal)
return
@ -187,7 +91,7 @@ func (r structReflect) update(fieldEntry *fieldCacheEntry, key string, oldVal, n
panic("ParentMapKey must not be nil if ParentMap is not nil")
}
replacement := reflect.New(r.Value.Type()).Elem()
fieldEntry.getFieldFromStruct(replacement).Set(newVal)
fieldEntry.GetFrom(replacement).Set(newVal)
r.ParentMap.SetMapIndex(*r.ParentMapKey, replacement)
return
}
@ -198,21 +102,25 @@ func (r structReflect) update(fieldEntry *fieldCacheEntry, key string, oldVal, n
}
func (r structReflect) Iterate(fn func(string, Value) bool) bool {
return eachStructField(r.Value, func(s string, value reflect.Value) bool {
v := mustWrapValueReflect(value)
defer v.Recycle()
return fn(s, v)
return r.IterateUsing(HeapAllocator, fn)
}
func (r structReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool {
vr := a.allocValueReflect()
defer a.Free(vr)
return eachStructField(r.Value, func(e *TypeReflectCacheEntry, s string, value reflect.Value) bool {
return fn(s, vr.mustReuse(value, e, nil, nil))
})
}
func eachStructField(structVal reflect.Value, fn func(string, reflect.Value) bool) bool {
for jsonName, fieldCacheEntry := range getStructCacheEntry(structVal.Type()) {
fieldVal := fieldCacheEntry.getFieldFromStruct(structVal)
if fieldCacheEntry.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal)) {
func eachStructField(structVal reflect.Value, fn func(*TypeReflectCacheEntry, string, reflect.Value) bool) bool {
for _, fieldCacheEntry := range TypeReflectEntryOf(structVal.Type()).OrderedFields() {
fieldVal := fieldCacheEntry.GetFrom(structVal)
if fieldCacheEntry.CanOmit(fieldVal) {
// omit it
continue
}
ok := fn(jsonName, fieldVal)
ok := fn(fieldCacheEntry.TypeEntry, fieldCacheEntry.JsonName, fieldVal)
if !ok {
return false
}
@ -231,39 +139,70 @@ func (r structReflect) Unstructured() interface{} {
}
func (r structReflect) Equals(m Map) bool {
if rhsStruct, ok := m.(structReflect); ok {
return reflect.DeepEqual(r.Value.Interface(), rhsStruct.Value.Interface())
}
if r.Length() != m.Length() {
return false
}
structCacheEntry := getStructCacheEntry(r.Value.Type())
return r.EqualsUsing(HeapAllocator, m)
}
return m.Iterate(func(s string, value Value) bool {
fieldCacheEntry, ok := structCacheEntry[s]
if !ok {
return false
}
lhsVal := fieldCacheEntry.getFieldFromStruct(r.Value)
return Equals(mustWrapValueReflect(lhsVal), value)
})
func (r structReflect) EqualsUsing(a Allocator, m Map) bool {
// MapEquals uses zip and is fairly efficient for structReflect
return MapEqualsUsing(a, &r, m)
}
func (r structReflect) findJsonNameFieldAndNotEmpty(jsonName string) (reflect.Value, bool) {
structCacheEntry, ok := getStructCacheEntry(r.Value.Type())[jsonName]
structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName]
if !ok {
return reflect.Value{}, false
}
fieldVal := structCacheEntry.getFieldFromStruct(r.Value)
omit := structCacheEntry.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal))
return fieldVal, !omit
fieldVal := structCacheEntry.GetFrom(r.Value)
return fieldVal, !structCacheEntry.CanOmit(fieldVal)
}
func (r structReflect) findJsonNameField(jsonName string) (val reflect.Value, ok bool, omitEmpty bool) {
structCacheEntry, ok := getStructCacheEntry(r.Value.Type())[jsonName]
func (r structReflect) findJsonNameField(jsonName string) (val reflect.Value, ok bool) {
structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName]
if !ok {
return reflect.Value{}, false, false
return reflect.Value{}, false
}
fieldVal := structCacheEntry.getFieldFromStruct(r.Value)
return fieldVal, true, structCacheEntry.isOmitEmpty
fieldVal := structCacheEntry.GetFrom(r.Value)
return fieldVal, !structCacheEntry.CanOmit(fieldVal)
}
func (r structReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
return r.ZipUsing(HeapAllocator, other, order, fn)
}
func (r structReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool {
if otherStruct, ok := other.(*structReflect); ok && r.Value.Type() == otherStruct.Value.Type() {
lhsvr, rhsvr := a.allocValueReflect(), a.allocValueReflect()
defer a.Free(lhsvr)
defer a.Free(rhsvr)
return r.structZip(otherStruct, lhsvr, rhsvr, fn)
}
return defaultMapZip(a, &r, other, order, fn)
}
// structZip provides an optimized zip for structReflect types. The zip is always lexical key ordered since there is
// no additional cost to ordering the zip for structured types.
func (r structReflect) structZip(other *structReflect, lhsvr, rhsvr *valueReflect, fn func(key string, lhs, rhs Value) bool) bool {
lhsVal := r.Value
rhsVal := other.Value
for _, fieldCacheEntry := range TypeReflectEntryOf(lhsVal.Type()).OrderedFields() {
lhsFieldVal := fieldCacheEntry.GetFrom(lhsVal)
rhsFieldVal := fieldCacheEntry.GetFrom(rhsVal)
lhsOmit := fieldCacheEntry.CanOmit(lhsFieldVal)
rhsOmit := fieldCacheEntry.CanOmit(rhsFieldVal)
if lhsOmit && rhsOmit {
continue
}
var lhsVal, rhsVal Value
if !lhsOmit {
lhsVal = lhsvr.mustReuse(lhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil)
}
if !rhsOmit {
rhsVal = rhsvr.mustReuse(rhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil)
}
if !fn(fieldCacheEntry.JsonName, lhsVal, rhsVal) {
return false
}
}
return true
}
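A sketch of how the zip above surfaces omitted fields to callers, assuming Map exposes the Zip method shown here and that value.LexicalKeyOrder is the exported MapTraverseOrder constant the comment refers to:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

// gadget is a hypothetical type; Count carries omitempty.
type gadget struct {
	Name  string `json:"name"`
	Count int    `json:"count,omitempty"`
}

func main() {
	lhs, _ := value.NewValueReflect(&gadget{Name: "a", Count: 1})
	rhs, _ := value.NewValueReflect(&gadget{Name: "b"})

	lhs.AsMap().Zip(rhs.AsMap(), value.LexicalKeyOrder,
		func(key string, l, r value.Value) bool {
			// For "count", r is nil: the zero omitempty field was
			// omitted on the right-hand side.
			fmt.Println(key, l != nil, r != nil)
			return true // continue zipping
		})
}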

View File

@ -55,9 +55,15 @@ type Value interface {
// AsMap converts the Value into a Map (or panic if the type
// doesn't allow it).
AsMap() Map
// AsMapUsing uses the provided allocator and converts the Value
// into a Map (or panic if the type doesn't allow it).
AsMapUsing(Allocator) Map
// AsList converts the Value into a List (or panic if the type
// doesn't allow it).
AsList() List
// AsListUsing uses the provided allocator and converts the Value
// into a List (or panic if the type doesn't allow it).
AsListUsing(Allocator) List
// AsBool converts the Value into a bool (or panic if the type
// doesn't allow it).
AsBool() bool
@ -71,10 +77,6 @@ type Value interface {
// doesn't allow it).
AsString() string
// Recycle returns a value of this type that is no longer needed. The
// value shouldn't be used after this call.
Recycle()
// Unstructured converts the Value into an Unstructured interface{}.
Unstructured() interface{}
}
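The new *Using accessors exist so hot paths can hand intermediate wrappers back to an Allocator instead of leaving them for the garbage collector. A minimal sketch of the intended discipline; HeapAllocator (visible in this diff) presumably makes Free a no-op, while a pooling allocator would reuse the memory:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	v := value.NewValueInterface(map[string]interface{}{"a": int64(1)})

	a := value.HeapAllocator
	m := v.AsMapUsing(a) // allocator-aware variant of AsMap
	defer a.Free(m)      // return the wrapper once done with it

	fmt.Println(m.Length()) // 1
}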
@ -128,6 +130,11 @@ func ToYAML(v Value) ([]byte, error) {
// Equals returns true iff the two values are equal.
func Equals(lhs, rhs Value) bool {
return EqualsUsing(HeapAllocator, lhs, rhs)
}
// EqualsUsing uses the provided allocator and returns true iff the two values are equal.
func EqualsUsing(a Allocator, lhs, rhs Value) bool {
if lhs.IsFloat() || rhs.IsFloat() {
var lf float64
if lhs.IsFloat() {
@ -173,7 +180,11 @@ func Equals(lhs, rhs Value) bool {
}
if lhs.IsList() {
if rhs.IsList() {
return ListEquals(lhs.AsList(), rhs.AsList())
lhsList := lhs.AsListUsing(a)
defer a.Free(lhsList)
rhsList := rhs.AsListUsing(a)
defer a.Free(rhsList)
return lhsList.EqualsUsing(a, rhsList)
}
return false
} else if rhs.IsList() {
@ -181,7 +192,11 @@ func Equals(lhs, rhs Value) bool {
}
if lhs.IsMap() {
if rhs.IsMap() {
return lhs.AsMap().Equals(rhs.AsMap())
lhsList := lhs.AsMapUsing(a)
defer a.Free(lhsList)
rhsList := rhs.AsMapUsing(a)
defer a.Free(rhsList)
return lhsList.EqualsUsing(a, rhsList)
}
return false
} else if rhs.IsMap() {
@ -215,8 +230,9 @@ func ToString(v Value) string {
return fmt.Sprintf("%v", v.AsBool())
case v.IsList():
strs := []string{}
for i := 0; i < v.AsList().Length(); i++ {
strs = append(strs, ToString(v.AsList().At(i)))
list := v.AsList()
for i := 0; i < list.Length(); i++ {
strs = append(strs, ToString(list.At(i)))
}
return "[" + strings.Join(strs, ",") + "]"
case v.IsMap():
@ -241,6 +257,14 @@ func Less(lhs, rhs Value) bool {
// sorted, even if they are of different types). The result will be 0 if
// lhs==rhs, -1 if lhs < rhs, and +1 if lhs > rhs.
func Compare(lhs, rhs Value) int {
return CompareUsing(HeapAllocator, lhs, rhs)
}
// CompareUsing uses the provided allocator and provides a total
// ordering for Value (so that they can be sorted, even if they
// are of different types). The result will be 0 if lhs==rhs, -1
// if lhs < rhs, and +1 if lhs > rhs.
func CompareUsing(a Allocator, lhs, rhs Value) int {
if lhs.IsFloat() {
if !rhs.IsFloat() {
// Extra: compare floats and ints numerically.
@ -289,7 +313,11 @@ func Compare(lhs, rhs Value) int {
if !rhs.IsList() {
return -1
}
return ListCompare(lhs.AsList(), rhs.AsList())
lhsList := lhs.AsListUsing(a)
defer a.Free(lhsList)
rhsList := rhs.AsListUsing(a)
defer a.Free(rhsList)
return ListCompareUsing(a, lhsList, rhsList)
} else if rhs.IsList() {
return 1
}
@ -297,7 +325,11 @@ func Compare(lhs, rhs Value) int {
if !rhs.IsMap() {
return -1
}
return MapCompare(lhs.AsMap(), rhs.AsMap())
lhsMap := lhs.AsMapUsing(a)
defer a.Free(lhsMap)
rhsMap := rhs.AsMapUsing(a)
defer a.Free(rhsMap)
return MapCompareUsing(a, lhsMap, rhsMap)
} else if rhs.IsMap() {
return 1
}
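Since Compare defines a total ordering across types, mixed Values sort deterministically. A short sketch using the package-level helpers:

package main

import (
	"fmt"
	"sort"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	vals := []value.Value{
		value.NewValueInterface("b"),
		value.NewValueInterface(int64(2)),
		value.NewValueInterface(1.5),
	}
	// Less is built on Compare, so ints and floats compare numerically
	// and different types still order consistently.
	sort.Slice(vals, func(i, j int) bool { return value.Less(vals[i], vals[j]) })
	for _, v := range vals {
		fmt.Println(value.ToString(v))
	}
}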

View File

@ -17,20 +17,11 @@ limitations under the License.
package value
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"reflect"
"sync"
)
var reflectPool = sync.Pool{
New: func() interface{} {
return &valueReflect{}
},
}
// NewValueReflect creates a Value backed by an "interface{}" type,
// typically a structured object in the Kubernetes world, exposed via reflection.
// The provided "interface{}" value must be a pointer so that the value can be modified via reflection.
@ -45,33 +36,57 @@ func NewValueReflect(value interface{}) (Value, error) {
// The root value to reflect on must be a pointer so that map.Set() and map.Delete() operations are possible.
return nil, fmt.Errorf("value provided to NewValueReflect must be a pointer")
}
return wrapValueReflect(nil, nil, v)
return wrapValueReflect(v, nil, nil)
}
func wrapValueReflect(parentMap, parentMapKey *reflect.Value, value reflect.Value) (Value, error) {
// TODO: conversion of json.Marshaler interface types is expensive. This can be mostly optimized away by
// introducing conversion functions that do not require going through JSON and using those here.
if marshaler, ok := getMarshaler(value); ok {
return toUnstructured(marshaler, value)
}
value = dereference(value)
val := reflectPool.Get().(*valueReflect)
val.Value = value
val.ParentMap = parentMap
val.ParentMapKey = parentMapKey
return Value(val), nil
// wrapValueReflect wraps the provided reflect.Value as a Value. If the parent in the data tree is a map, parentMap
// and parentMapKey must be provided so that the returned value may be set and deleted.
func wrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) (Value, error) {
val := HeapAllocator.allocValueReflect()
return val.reuse(value, nil, parentMap, parentMapKey)
}
func mustWrapValueReflect(value reflect.Value) Value {
v, err := wrapValueReflect(nil, nil, value)
// mustWrapValueReflect wraps the provided reflect.Value as a Value, and panics if there is an error. If the parent in the data
// tree is a map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted.
func mustWrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) Value {
v, err := wrapValueReflect(value, parentMap, parentMapKey)
if err != nil {
panic(err)
}
return v
}
func mustWrapValueReflectMapItem(parentMap, parentMapKey *reflect.Value, value reflect.Value) Value {
v, err := wrapValueReflect(parentMap, parentMapKey, value)
// the value interface doesn't care about the type for value.IsNull, so we can use a constant
var nilType = reflect.TypeOf(&struct{}{})
// reuse replaces the value of the valueReflect. If the parent in the data tree is a map, parentMap and parentMapKey
// must be provided so that the returned value may be set and deleted.
func (r *valueReflect) reuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) (Value, error) {
if cacheEntry == nil {
cacheEntry = TypeReflectEntryOf(value.Type())
}
if cacheEntry.CanConvertToUnstructured() {
u, err := cacheEntry.ToUnstructured(value)
if err != nil {
return nil, err
}
if u == nil {
value = reflect.Zero(nilType)
} else {
value = reflect.ValueOf(u)
}
}
r.Value = dereference(value)
r.ParentMap = parentMap
r.ParentMapKey = parentMapKey
r.kind = kind(r.Value)
return r, nil
}
// mustReuse replaces the value of the valueReflect and panics if there is an error. If the parent in the data tree is a
// map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted.
func (r *valueReflect) mustReuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) Value {
v, err := r.reuse(value, cacheEntry, parentMap, parentMapKey)
if err != nil {
panic(err)
}
@ -90,53 +105,91 @@ type valueReflect struct {
ParentMap *reflect.Value
ParentMapKey *reflect.Value
Value reflect.Value
kind reflectType
}
func (r valueReflect) IsMap() bool {
return r.isKind(reflect.Map, reflect.Struct)
return r.kind == mapType || r.kind == structMapType
}
func (r valueReflect) IsList() bool {
typ := r.Value.Type()
return typ.Kind() == reflect.Slice && !(typ.Elem().Kind() == reflect.Uint8)
return r.kind == listType
}
func (r valueReflect) IsBool() bool {
return r.isKind(reflect.Bool)
return r.kind == boolType
}
func (r valueReflect) IsInt() bool {
// Uint64 deliberately excluded, see valueUnstructured.Int.
return r.isKind(reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8)
return r.kind == intType || r.kind == uintType
}
func (r valueReflect) IsFloat() bool {
return r.isKind(reflect.Float64, reflect.Float32)
return r.kind == floatType
}
func (r valueReflect) IsString() bool {
kind := r.Value.Kind()
if kind == reflect.String {
return true
}
if kind == reflect.Slice && r.Value.Type().Elem().Kind() == reflect.Uint8 {
return true
}
return false
return r.kind == stringType || r.kind == byteStringType
}
func (r valueReflect) IsNull() bool {
return safeIsNil(r.Value)
return r.kind == nullType
}
func (r valueReflect) isKind(kinds ...reflect.Kind) bool {
kind := r.Value.Kind()
for _, k := range kinds {
if kind == k {
return true
type reflectType = int
const (
mapType = iota
structMapType
listType
intType
uintType
floatType
stringType
byteStringType
boolType
nullType
)
func kind(v reflect.Value) reflectType {
typ := v.Type()
rk := typ.Kind()
switch rk {
case reflect.Map:
if v.IsNil() {
return nullType
}
return mapType
case reflect.Struct:
return structMapType
case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8:
return intType
case reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8:
// Uint64 deliberately excluded, see valueUnstructured.Int.
return uintType
case reflect.Float64, reflect.Float32:
return floatType
case reflect.String:
return stringType
case reflect.Bool:
return boolType
case reflect.Slice:
if v.IsNil() {
return nullType
}
elemKind := typ.Elem().Kind()
if elemKind == reflect.Uint8 {
return byteStringType
}
return listType
case reflect.Chan, reflect.Func, reflect.Ptr, reflect.UnsafePointer, reflect.Interface:
if v.IsNil() {
return nullType
}
panic(fmt.Sprintf("unsupported type: %v", v.Type()))
default:
panic(fmt.Sprintf("unsupported type: %v", v.Type()))
}
return false
}
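Precomputing kind at wrap time turns every Is* call into a single integer comparison instead of a reflect.Kind switch. The classification is observable from outside; a sketch, assuming Map's Get(key) (Value, bool) accessor and a hypothetical blob type:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

// blob is a hypothetical type, used only for illustration.
type blob struct {
	Data []byte   `json:"data"`
	Tags []string `json:"tags"`
}

func main() {
	v, err := value.NewValueReflect(&blob{Data: []byte("hi")})
	if err != nil {
		panic(err)
	}
	m := v.AsMap()

	data, _ := m.Get("data")
	fmt.Println(data.IsString(), data.AsString()) // true aGk= ([]byte renders as base64)

	tags, _ := m.Get("tags")
	fmt.Println(tags.IsNull()) // true: a nil slice classifies as null
}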
// TODO: find a cleaner way to avoid panics from reflect.Value.IsNil()
@ -150,24 +203,33 @@ func safeIsNil(v reflect.Value) bool {
}
func (r valueReflect) AsMap() Map {
val := r.Value
switch val.Kind() {
case reflect.Struct:
return structReflect{r}
case reflect.Map:
return mapReflect{r}
return r.AsMapUsing(HeapAllocator)
}
func (r valueReflect) AsMapUsing(a Allocator) Map {
switch r.kind {
case structMapType:
v := a.allocStructReflect()
v.valueReflect = r
return v
case mapType:
v := a.allocMapReflect()
v.valueReflect = r
return v
default:
panic("value is not a map or struct")
}
}
func (r *valueReflect) Recycle() {
reflectPool.Put(r)
func (r valueReflect) AsList() List {
return r.AsListUsing(HeapAllocator)
}
func (r valueReflect) AsList() List {
func (r valueReflect) AsListUsing(a Allocator) List {
if r.IsList() {
return listReflect{r.Value}
v := a.allocListReflect()
v.Value = r.Value
return v
}
panic("value is not a list")
}
@ -180,10 +242,10 @@ func (r valueReflect) AsBool() bool {
}
func (r valueReflect) AsInt() int64 {
if r.isKind(reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8) {
if r.kind == intType {
return r.Value.Int()
}
if r.isKind(reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8) {
if r.kind == uintType {
return int64(r.Value.Uint())
}
@ -198,11 +260,10 @@ func (r valueReflect) AsFloat() float64 {
}
func (r valueReflect) AsString() string {
kind := r.Value.Kind()
if kind == reflect.String {
switch r.kind {
case stringType:
return r.Value.String()
}
if kind == reflect.Slice && r.Value.Type().Elem().Kind() == reflect.Uint8 {
case byteStringType:
return base64.StdEncoding.EncodeToString(r.Value.Bytes())
}
panic("value is not a string")
@ -216,9 +277,9 @@ func (r valueReflect) Unstructured() interface{} {
case val.Kind() == reflect.Struct:
return structReflect{r}.Unstructured()
case val.Kind() == reflect.Map:
return mapReflect{r}.Unstructured()
return mapReflect{valueReflect: r}.Unstructured()
case r.IsList():
return listReflect{Value: r.Value}.Unstructured()
return listReflect{r.Value}.Unstructured()
case r.IsString():
return r.AsString()
case r.IsInt():
@ -231,89 +292,3 @@ func (r valueReflect) Unstructured() interface{} {
panic(fmt.Sprintf("value of type %s is not a supported by value reflector", val.Type()))
}
}
// The below getMarshaler and toUnstructured functions are based on
// https://github.com/kubernetes/kubernetes/blob/40df9f82d0572a123f5ad13f48312978a2ff5877/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L509
// and should somehow be consolidated with it
var marshalerType = reflect.TypeOf(new(json.Marshaler)).Elem()
func getMarshaler(v reflect.Value) (json.Marshaler, bool) {
// Check value receivers if v is not a pointer and pointer receivers if v is a pointer
if v.Type().Implements(marshalerType) {
return v.Interface().(json.Marshaler), true
}
// Check pointer receivers if v is not a pointer
if v.Kind() != reflect.Ptr && v.CanAddr() {
v = v.Addr()
if v.Type().Implements(marshalerType) {
return v.Interface().(json.Marshaler), true
}
}
return nil, false
}
var (
nullBytes = []byte("null")
trueBytes = []byte("true")
falseBytes = []byte("false")
)
func toUnstructured(marshaler json.Marshaler, sv reflect.Value) (Value, error) {
data, err := marshaler.MarshalJSON()
if err != nil {
return nil, err
}
switch {
case len(data) == 0:
return nil, fmt.Errorf("error decoding from json: empty value")
case bytes.Equal(data, nullBytes):
// We're done - we don't need to store anything.
return NewValueInterface(nil), nil
case bytes.Equal(data, trueBytes):
return NewValueInterface(true), nil
case bytes.Equal(data, falseBytes):
return NewValueInterface(false), nil
case data[0] == '"':
var result string
err := json.Unmarshal(data, &result)
if err != nil {
return nil, fmt.Errorf("error decoding string from json: %v", err)
}
return NewValueInterface(result), nil
case data[0] == '{':
result := make(map[string]interface{})
err := json.Unmarshal(data, &result)
if err != nil {
return nil, fmt.Errorf("error decoding object from json: %v", err)
}
return NewValueInterface(result), nil
case data[0] == '[':
result := make([]interface{}, 0)
err := json.Unmarshal(data, &result)
if err != nil {
return nil, fmt.Errorf("error decoding array from json: %v", err)
}
return NewValueInterface(result), nil
default:
var (
resultInt int64
resultFloat float64
err error
)
if err = json.Unmarshal(data, &resultInt); err == nil {
return NewValueInterface(resultInt), nil
}
if err = json.Unmarshal(data, &resultFloat); err == nil {
return NewValueInterface(resultFloat), nil
}
return nil, fmt.Errorf("error decoding number from json: %v", err)
}
}
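Types implementing json.Marshaler are therefore converted by round-tripping through their JSON form (a job this commit moves from the inline helpers above into the cached TypeReflectCacheEntry.ToUnstructured path). A sketch of the observable effect, with a hypothetical quantity-like type:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

// quantity is a hypothetical json.Marshaler, standing in for API types
// that marshal themselves to a JSON string.
type quantity struct{ s string }

func (q quantity) MarshalJSON() ([]byte, error) {
	return []byte(`"` + q.s + `"`), nil
}

// spec is a hypothetical wrapper struct.
type spec struct {
	Limit quantity `json:"limit"`
}

func main() {
	v, err := value.NewValueReflect(&spec{Limit: quantity{"500m"}})
	if err != nil {
		panic(err)
	}
	limit, _ := v.AsMap().Get("limit")
	// The marshaler's JSON string surfaces as a plain string Value.
	fmt.Println(limit.IsString(), limit.AsString()) // true 500m
}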

View File

@ -18,29 +18,26 @@ package value
import (
"fmt"
"sync"
)
var viPool = sync.Pool{
New: func() interface{} {
return &valueUnstructured{}
},
}
// NewValueInterface creates a Value backed by an "interface{}" type,
// typically an unstructured object in the Kubernetes world.
// interface{} must be one of: map[string]interface{}, map[interface{}]interface{}, []interface{}, int types, float types,
// string or boolean. Nested interface{} must also be one of these types.
func NewValueInterface(v interface{}) Value {
vi := viPool.Get().(*valueUnstructured)
vi.Value = v
return Value(vi)
return Value(HeapAllocator.allocValueUnstructured().reuse(v))
}
type valueUnstructured struct {
Value interface{}
}
// reuse replaces the value of the valueUnstructured.
func (vi *valueUnstructured) reuse(value interface{}) Value {
vi.Value = value
return vi
}
func (v valueUnstructured) IsMap() bool {
if _, ok := v.Value.(map[string]interface{}); ok {
return true
@ -52,6 +49,10 @@ func (v valueUnstructured) IsMap() bool {
}
func (v valueUnstructured) AsMap() Map {
return v.AsMapUsing(HeapAllocator)
}
func (v valueUnstructured) AsMapUsing(_ Allocator) Map {
if v.Value == nil {
panic("invalid nil")
}
@ -73,6 +74,10 @@ func (v valueUnstructured) IsList() bool {
}
func (v valueUnstructured) AsList() List {
return v.AsListUsing(HeapAllocator)
}
func (v valueUnstructured) AsListUsing(_ Allocator) List {
return listUnstructured(v.Value.([]interface{}))
}
@ -168,10 +173,6 @@ func (v valueUnstructured) IsNull() bool {
return v.Value == nil
}
func (v *valueUnstructured) Recycle() {
viPool.Put(v)
}
func (v valueUnstructured) Unstructured() interface{} {
return v.Value
}
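Unstructured is the inverse of NewValueInterface, so unstructured data round-trips without copying; a final sketch:

package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v3/value"
)

func main() {
	in := map[string]interface{}{"replicas": int64(3)}
	v := value.NewValueInterface(in)
	out := v.Unstructured() // hands back the underlying interface{}
	fmt.Println(value.Equals(v, value.NewValueInterface(out))) // true
}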