Update Kubernetes to 14b32888de6403aa38aedc69086c5a3aff7a4ace

Signed-off-by: Lantao Liu <lantaol@google.com>
Lantao Liu 2017-09-20 01:31:26 +00:00
parent 3647ff5976
commit 5af5a04b6f
108 changed files with 13708 additions and 132253 deletions

vendor.conf

@@ -22,11 +22,13 @@ github.com/godbus/dbus 97646858c46433e4afb3432ad28c12e968efa298
 github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8
 github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
 github.com/golang/protobuf 5a0f697c9ed9d68fef0116532c6e05cfeae00e55
+github.com/google/btree 7d79101e329e5a3adf994758c578dab82b90c017
 github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
 github.com/go-openapi/jsonpointer 46af16f9f7b149af66e5d1bd010e3574dc06de98
 github.com/go-openapi/jsonreference 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
 github.com/go-openapi/spec 6aced65f8501fe1217321abf0749d354824ba2ff
 github.com/go-openapi/swag 1d0bd113de87027671077d3c71eb3ac5d7dbba72
+github.com/gregjones/httpcache 787624de3eb7bd915c329cba748687a3b22666a6
 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
 github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
 github.com/juju/ratelimit 5b9ff866471762aa2ab2dced63c9fb6f53921342
@@ -38,6 +40,7 @@ github.com/opencontainers/runc 593914b8bd5448a93f7c3e4902a03408b6d5c0ce
 github.com/opencontainers/runtime-spec v1.0.0
 github.com/opencontainers/runtime-tools 6073aff4ac61897f75895123f7e24135204a404d
 github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206
+github.com/peterbourgon/diskv v2.0.1
 github.com/pkg/errors v0.8.0
 github.com/pmezard/go-difflib v1.0.0
 github.com/PuerkitoBio/purell v1.0.0
@@ -46,7 +49,6 @@ github.com/sirupsen/logrus v1.0.0
 github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7
 github.com/stretchr/testify v1.1.4
 github.com/syndtr/gocapability e7cb7fa329f456b3855136a2642b197bad7366ba
-github.com/ugorji/go ded73eae5db7e7a0ef6f55aace87a2873c5d2b74
 golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
 golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
 golang.org/x/sys 7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce
@@ -55,10 +57,10 @@ google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 google.golang.org/grpc v1.3.0
 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77
-k8s.io/api f30e293246921de7f4ee46bb65b8762b2f890fc4
-k8s.io/apimachinery b166f81f5c4c88402ae23a0d0944c6ad08bffd3b
-k8s.io/apiserver b2a8ad67a002d27c8945573abb80b4be543f2a1f
-k8s.io/client-go db8228460e2de17f5d3a9a453f61dde0ba86545a
-k8s.io/kube-openapi 2fbf05e337e56c983d9df1220b9e67cf132a1669
-k8s.io/kubernetes 11a836078d0c78a4253a77a3ff6f4a555c4121f9
-k8s.io/utils 1f5ba483856f60b34bb29864d4129a8065d1c83b
+k8s.io/api 0b011bb8613011ab0909c6a759d9d811dc21a156
+k8s.io/apimachinery 6011652d09716d30e3d09cc9438b161efd59338d
+k8s.io/apiserver 19667a1afc13cc13930c40a20f2c12bbdcaaa246
+k8s.io/client-go c6f8cf2c47d21d55fa0df928291b2580544886c8
+k8s.io/kube-openapi abfc5fbe1cf87ee697db107fdfd24c32fe4397a8
+k8s.io/kubernetes 14b32888de6403aa38aedc69086c5a3aff7a4ace
+k8s.io/utils 4fe312863be2155a7b68acd2aff1c9221b24e68c

vendor/github.com/google/btree/LICENSE generated vendored Normal file (202 additions)

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/google/btree/README.md generated vendored Normal file (12 additions)

@@ -0,0 +1,12 @@
# BTree implementation for Go
![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)
This package provides an in-memory B-Tree implementation for Go, useful as
an ordered, mutable data structure.
The API is based on the wonderful
http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
act as a drop-in replacement for gollrb trees.
See http://godoc.org/github.com/google/btree for documentation.
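Not part of the upstream README: a minimal usage sketch, assuming only the API vendored in btree.go below (New, ReplaceOrInsert, AscendRange, and the package's built-in btree.Int item):
```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2) // degree 2: each node holds 1-3 items (a 2-3-4 tree)
	for i := btree.Int(0); i < 10; i++ {
		tr.ReplaceOrInsert(i)
	}
	fmt.Println(tr.Len(), tr.Min(), tr.Max()) // 10 0 9
	// Walk items in [2, 5) in order; returning false would stop the walk early.
	tr.AscendRange(btree.Int(2), btree.Int(5), func(i btree.Item) bool {
		fmt.Println(i) // 2, then 3, then 4
		return true
	})
}
```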

vendor/github.com/google/btree/btree.go generated vendored Normal file (649 additions)

@@ -0,0 +1,649 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package btree implements in-memory B-Trees of arbitrary degree.
//
// btree implements an in-memory B-Tree for use as an ordered data structure.
// It is not meant for persistent storage solutions.
//
// It has a flatter structure than an equivalent red-black or other binary tree,
// which in some cases yields better memory usage and/or performance.
// See some discussion on the matter here:
// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
// Note, though, that this project is in no way related to the C++ B-Tree
// implementation written about there.
//
// Within this tree, each node contains a slice of items and a (possibly nil)
// slice of children. For basic numeric values or raw structs, this can cause
// efficiency differences when compared to equivalent C++ template code that
// stores values in arrays within the node:
// * Due to the overhead of storing values as interfaces (each
// value needs to be stored as the value itself, then 2 words for the
// interface pointing to that value and its type), resulting in higher
// memory use.
// * Since interfaces can point to values anywhere in memory, values are
// most likely not stored in contiguous blocks, resulting in a higher
// number of cache misses.
// These issues don't tend to matter, though, when working with strings or other
// heap-allocated structures, since C++-equivalent structures also must store
// pointers and also distribute their values across the heap.
//
// This implementation is designed to be a drop-in replacement to gollrb.LLRB
// trees, (http://github.com/petar/gollrb), an excellent and probably the most
// widely used ordered tree implementation in the Go ecosystem currently.
// Its functions, therefore, exactly mirror those of
// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
// support storing multiple equivalent values or backwards iteration.
package btree
import (
"fmt"
"io"
"sort"
"strings"
)
// Item represents a single object in the tree.
type Item interface {
// Less tests whether the current item is less than the given argument.
//
// This must provide a strict weak ordering.
// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
// hold one of either a or b in the tree).
Less(than Item) bool
}
const (
DefaultFreeListSize = 32
)
// FreeList represents a free list of btree nodes. By default each
// BTree has its own FreeList, but multiple BTrees can share the same
// FreeList.
// Two Btrees using the same freelist are not safe for concurrent write access.
type FreeList struct {
freelist []*node
}
// NewFreeList creates a new free list.
// size is the maximum size of the returned free list.
func NewFreeList(size int) *FreeList {
return &FreeList{freelist: make([]*node, 0, size)}
}
func (f *FreeList) newNode() (n *node) {
index := len(f.freelist) - 1
if index < 0 {
return new(node)
}
f.freelist, n = f.freelist[:index], f.freelist[index]
return
}
func (f *FreeList) freeNode(n *node) {
if len(f.freelist) < cap(f.freelist) {
f.freelist = append(f.freelist, n)
}
}
// ItemIterator allows callers of Ascend* to iterate in-order over portions of
// the tree. When this function returns false, iteration will stop and the
// associated Ascend* function will immediately return.
type ItemIterator func(i Item) bool
// New creates a new B-Tree with the given degree.
//
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
// and 2-4 children).
func New(degree int) *BTree {
return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
}
// NewWithFreeList creates a new B-Tree that uses the given node free list.
func NewWithFreeList(degree int, f *FreeList) *BTree {
if degree <= 1 {
panic("bad degree")
}
return &BTree{
degree: degree,
freelist: f,
}
}
// items stores items in a node.
type items []Item
// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *items) insertAt(index int, item Item) {
*s = append(*s, nil)
if index < len(*s) {
copy((*s)[index+1:], (*s)[index:])
}
(*s)[index] = item
}
// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *items) removeAt(index int) Item {
item := (*s)[index]
(*s)[index] = nil
copy((*s)[index:], (*s)[index+1:])
*s = (*s)[:len(*s)-1]
return item
}
// pop removes and returns the last element in the list.
func (s *items) pop() (out Item) {
index := len(*s) - 1
out = (*s)[index]
(*s)[index] = nil
*s = (*s)[:index]
return
}
// find returns the index where the given item should be inserted into this
// list. 'found' is true if the item already exists in the list at the given
// index.
func (s items) find(item Item) (index int, found bool) {
i := sort.Search(len(s), func(i int) bool {
return item.Less(s[i])
})
if i > 0 && !s[i-1].Less(item) {
return i - 1, true
}
return i, false
}
// children stores child nodes in a node.
type children []*node
// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *children) insertAt(index int, n *node) {
*s = append(*s, nil)
if index < len(*s) {
copy((*s)[index+1:], (*s)[index:])
}
(*s)[index] = n
}
// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *children) removeAt(index int) *node {
n := (*s)[index]
(*s)[index] = nil
copy((*s)[index:], (*s)[index+1:])
*s = (*s)[:len(*s)-1]
return n
}
// pop removes and returns the last element in the list.
func (s *children) pop() (out *node) {
index := len(*s) - 1
out = (*s)[index]
(*s)[index] = nil
*s = (*s)[:index]
return
}
// node is an internal node in a tree.
//
// It must at all times maintain the invariant that either
// * len(children) == 0, len(items) unconstrained
// * len(children) == len(items) + 1
type node struct {
items items
children children
t *BTree
}
// split splits the given node at the given index. The current node shrinks,
// and this function returns the item that existed at that index and a new node
// containing all items/children after it.
func (n *node) split(i int) (Item, *node) {
item := n.items[i]
next := n.t.newNode()
next.items = append(next.items, n.items[i+1:]...)
n.items = n.items[:i]
if len(n.children) > 0 {
next.children = append(next.children, n.children[i+1:]...)
n.children = n.children[:i+1]
}
return item, next
}
// maybeSplitChild checks if a child should be split, and if so splits it.
// Returns whether or not a split occurred.
func (n *node) maybeSplitChild(i, maxItems int) bool {
if len(n.children[i].items) < maxItems {
return false
}
first := n.children[i]
item, second := first.split(maxItems / 2)
n.items.insertAt(i, item)
n.children.insertAt(i+1, second)
return true
}
// insert inserts an item into the subtree rooted at this node, making sure
// no nodes in the subtree exceed maxItems items. Should an equivalent item
// be found/replaced by insert, it will be returned.
func (n *node) insert(item Item, maxItems int) Item {
i, found := n.items.find(item)
if found {
out := n.items[i]
n.items[i] = item
return out
}
if len(n.children) == 0 {
n.items.insertAt(i, item)
return nil
}
if n.maybeSplitChild(i, maxItems) {
inTree := n.items[i]
switch {
case item.Less(inTree):
// no change, we want first split node
case inTree.Less(item):
i++ // we want second split node
default:
out := n.items[i]
n.items[i] = item
return out
}
}
return n.children[i].insert(item, maxItems)
}
// get finds the given key in the subtree and returns it.
func (n *node) get(key Item) Item {
i, found := n.items.find(key)
if found {
return n.items[i]
} else if len(n.children) > 0 {
return n.children[i].get(key)
}
return nil
}
// min returns the first item in the subtree.
func min(n *node) Item {
if n == nil {
return nil
}
for len(n.children) > 0 {
n = n.children[0]
}
if len(n.items) == 0 {
return nil
}
return n.items[0]
}
// max returns the last item in the subtree.
func max(n *node) Item {
if n == nil {
return nil
}
for len(n.children) > 0 {
n = n.children[len(n.children)-1]
}
if len(n.items) == 0 {
return nil
}
return n.items[len(n.items)-1]
}
// toRemove details what item to remove in a node.remove call.
type toRemove int
const (
removeItem toRemove = iota // removes the given item
removeMin // removes smallest item in the subtree
removeMax // removes largest item in the subtree
)
// remove removes an item from the subtree rooted at this node.
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
var i int
var found bool
switch typ {
case removeMax:
if len(n.children) == 0 {
return n.items.pop()
}
i = len(n.items)
case removeMin:
if len(n.children) == 0 {
return n.items.removeAt(0)
}
i = 0
case removeItem:
i, found = n.items.find(item)
if len(n.children) == 0 {
if found {
return n.items.removeAt(i)
}
return nil
}
default:
panic("invalid type")
}
// If we get to here, we have children.
child := n.children[i]
if len(child.items) <= minItems {
return n.growChildAndRemove(i, item, minItems, typ)
}
// Either we had enough items to begin with, or we've done some
// merging/stealing, because we've got enough now and we're ready to return
// stuff.
if found {
// The item exists at index 'i', and the child we've selected can give us a
// predecessor, since if we've gotten here it's got > minItems items in it.
out := n.items[i]
// We use our special-case 'remove' call with typ=maxItem to pull the
// predecessor of item i (the rightmost leaf of our immediate left child)
// and set it into where we pulled the item from.
n.items[i] = child.remove(nil, minItems, removeMax)
return out
}
// Final recursive call. Once we're here, we know that the item isn't in this
// node and that the child is big enough to remove from.
return child.remove(item, minItems, typ)
}
// growChildAndRemove grows child 'i' to make sure it's possible to remove an
// item from it while keeping it at minItems, then calls remove to actually
// remove it.
//
// Most documentation says we have to do two sets of special casing:
// 1) item is in this node
// 2) item is in child
// In both cases, we need to handle the two subcases:
// A) node has enough values that it can spare one
// B) node doesn't have enough values
// For the latter, we have to check:
// a) left sibling has node to spare
// b) right sibling has node to spare
// c) we must merge
// To simplify our code here, we handle cases #1 and #2 the same:
// If a node doesn't have enough items, we make sure it does (using a,b,c).
// We then simply redo our remove call, and the second time (regardless of
// whether we're in case 1 or 2), we'll have enough items and can guarantee
// that we hit case A.
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
child := n.children[i]
if i > 0 && len(n.children[i-1].items) > minItems {
// Steal from left child
stealFrom := n.children[i-1]
stolenItem := stealFrom.items.pop()
child.items.insertAt(0, n.items[i-1])
n.items[i-1] = stolenItem
if len(stealFrom.children) > 0 {
child.children.insertAt(0, stealFrom.children.pop())
}
} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
// steal from right child
stealFrom := n.children[i+1]
stolenItem := stealFrom.items.removeAt(0)
child.items = append(child.items, n.items[i])
n.items[i] = stolenItem
if len(stealFrom.children) > 0 {
child.children = append(child.children, stealFrom.children.removeAt(0))
}
} else {
if i >= len(n.items) {
i--
child = n.children[i]
}
// merge with right child
mergeItem := n.items.removeAt(i)
mergeChild := n.children.removeAt(i + 1)
child.items = append(child.items, mergeItem)
child.items = append(child.items, mergeChild.items...)
child.children = append(child.children, mergeChild.children...)
n.t.freeNode(mergeChild)
}
return n.remove(item, minItems, typ)
}
// iterate provides a simple method for iterating over elements in the tree.
// It could probably use some work to be extra-efficient (it calls from() a
// little more than it should), but it works pretty well for now.
//
// It requires that 'from' and 'to' both return true for values we should hit
// with the iterator. It should also be the case that 'from' returns true for
// values less than or equal to values 'to' returns true for, and 'to'
// returns true for values greater than or equal to those that 'from'
// does.
func (n *node) iterate(from, to func(Item) bool, iter ItemIterator) bool {
for i, item := range n.items {
if !from(item) {
continue
}
if len(n.children) > 0 && !n.children[i].iterate(from, to, iter) {
return false
}
if !to(item) {
return false
}
if !iter(item) {
return false
}
}
if len(n.children) > 0 {
return n.children[len(n.children)-1].iterate(from, to, iter)
}
return true
}
// Used for testing/debugging purposes.
func (n *node) print(w io.Writer, level int) {
fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
for _, c := range n.children {
c.print(w, level+1)
}
}
// BTree is an implementation of a B-Tree.
//
// BTree stores Item instances in an ordered structure, allowing easy insertion,
// removal, and iteration.
//
// Write operations are not safe for concurrent mutation by multiple
// goroutines, but Read operations are.
type BTree struct {
degree int
length int
root *node
freelist *FreeList
}
// maxItems returns the max number of items to allow per node.
func (t *BTree) maxItems() int {
return t.degree*2 - 1
}
// minItems returns the min number of items to allow per node (ignored for the
// root node).
func (t *BTree) minItems() int {
return t.degree - 1
}
func (t *BTree) newNode() (n *node) {
n = t.freelist.newNode()
n.t = t
return
}
func (t *BTree) freeNode(n *node) {
for i := range n.items {
n.items[i] = nil // clear to allow GC
}
n.items = n.items[:0]
for i := range n.children {
n.children[i] = nil // clear to allow GC
}
n.children = n.children[:0]
n.t = nil // clear to allow GC
t.freelist.freeNode(n)
}
// ReplaceOrInsert adds the given item to the tree. If an item in the tree
// already equals the given one, it is removed from the tree and returned.
// Otherwise, nil is returned.
//
// nil cannot be added to the tree (will panic).
func (t *BTree) ReplaceOrInsert(item Item) Item {
if item == nil {
panic("nil item being added to BTree")
}
if t.root == nil {
t.root = t.newNode()
t.root.items = append(t.root.items, item)
t.length++
return nil
} else if len(t.root.items) >= t.maxItems() {
item2, second := t.root.split(t.maxItems() / 2)
oldroot := t.root
t.root = t.newNode()
t.root.items = append(t.root.items, item2)
t.root.children = append(t.root.children, oldroot, second)
}
out := t.root.insert(item, t.maxItems())
if out == nil {
t.length++
}
return out
}
// Delete removes an item equal to the passed in item from the tree, returning
// it. If no such item exists, returns nil.
func (t *BTree) Delete(item Item) Item {
return t.deleteItem(item, removeItem)
}
// DeleteMin removes the smallest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMin() Item {
return t.deleteItem(nil, removeMin)
}
// DeleteMax removes the largest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMax() Item {
return t.deleteItem(nil, removeMax)
}
func (t *BTree) deleteItem(item Item, typ toRemove) Item {
if t.root == nil || len(t.root.items) == 0 {
return nil
}
out := t.root.remove(item, t.minItems(), typ)
if len(t.root.items) == 0 && len(t.root.children) > 0 {
oldroot := t.root
t.root = t.root.children[0]
t.freeNode(oldroot)
}
if out != nil {
t.length--
}
return out
}
// AscendRange calls the iterator for every value in the tree within the range
// [greaterOrEqual, lessThan), until iterator returns false.
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(
func(a Item) bool { return !a.Less(greaterOrEqual) },
func(a Item) bool { return a.Less(lessThan) },
iterator)
}
// AscendLessThan calls the iterator for every value in the tree within the range
// [first, pivot), until iterator returns false.
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(
func(a Item) bool { return true },
func(a Item) bool { return a.Less(pivot) },
iterator)
}
// AscendGreaterOrEqual calls the iterator for every value in the tree within
// the range [pivot, last], until iterator returns false.
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(
func(a Item) bool { return !a.Less(pivot) },
func(a Item) bool { return true },
iterator)
}
// Ascend calls the iterator for every value in the tree within the range
// [first, last], until iterator returns false.
func (t *BTree) Ascend(iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(
func(a Item) bool { return true },
func(a Item) bool { return true },
iterator)
}
// Get looks for the key item in the tree, returning it. It returns nil if
// unable to find that item.
func (t *BTree) Get(key Item) Item {
if t.root == nil {
return nil
}
return t.root.get(key)
}
// Min returns the smallest item in the tree, or nil if the tree is empty.
func (t *BTree) Min() Item {
return min(t.root)
}
// Max returns the largest item in the tree, or nil if the tree is empty.
func (t *BTree) Max() Item {
return max(t.root)
}
// Has returns true if the given key is in the tree.
func (t *BTree) Has(key Item) bool {
return t.Get(key) != nil
}
// Len returns the number of items currently in the tree.
func (t *BTree) Len() int {
return t.length
}
// Int implements the Item interface for integers.
type Int int
// Less returns true if int(a) < int(b).
func (a Int) Less(b Item) bool {
return a < b.(Int)
}
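
A short sketch of a user-defined Item against the API above; the user type here is hypothetical. Less must give a strict weak ordering, and two items neither of which is Less than the other are treated as equal, so ReplaceOrInsert keeps only one of them:
```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

// user is a hypothetical Item ordered by ID only; Name is payload and does
// not affect ordering.
type user struct {
	ID   int
	Name string
}

func (u user) Less(than btree.Item) bool { return u.ID < than.(user).ID }

func main() {
	tr := btree.New(32)
	tr.ReplaceOrInsert(user{ID: 1, Name: "ada"})
	// Same ID compares equal under Less, so this replaces the previous
	// item and the old one is returned.
	old := tr.ReplaceOrInsert(user{ID: 1, Name: "grace"})
	fmt.Println(old.(user).Name)                 // ada
	fmt.Println(tr.Get(user{ID: 1}).(user).Name) // grace
}
```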

vendor/github.com/gregjones/httpcache/LICENSE.txt generated vendored Normal file (7 additions)

@@ -0,0 +1,7 @@
Copyright © 2012 Greg Jones (greg.jones@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/gregjones/httpcache/README.md generated vendored Normal file (24 additions)

@@ -0,0 +1,24 @@
httpcache
=========
[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache)
Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses.
It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
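A minimal usage sketch (not from the upstream README), assuming only the constructors defined in httpcache.go below; example.com stands in for any cacheable endpoint:
```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/gregjones/httpcache"
)

func main() {
	client := httpcache.NewMemoryCacheTransport().Client()
	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/")
		if err != nil {
			panic(err)
		}
		// GET responses are cached lazily: the body must be read to EOF
		// before the caching body wrapper stores the response.
		ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		// Empty on a miss; "1" when served from cache, since
		// MarkCachedResponses sets the X-From-Cache header on hits.
		fmt.Println(resp.Header.Get(httpcache.XFromCache))
	}
}
```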
Cache Backends
--------------
- The built-in 'memory' cache stores responses in an in-memory map.
- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.
License
-------
- [MIT License](LICENSE.txt)

vendor/github.com/gregjones/httpcache/diskcache/diskcache.go generated vendored Normal file (61 additions)

@@ -0,0 +1,61 @@
// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
// to supplement an in-memory map with persistent storage
//
package diskcache
import (
"bytes"
"crypto/md5"
"encoding/hex"
"github.com/peterbourgon/diskv"
"io"
)
// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
type Cache struct {
d *diskv.Diskv
}
// Get returns the response corresponding to key if present
func (c *Cache) Get(key string) (resp []byte, ok bool) {
key = keyToFilename(key)
resp, err := c.d.Read(key)
if err != nil {
return []byte{}, false
}
return resp, true
}
// Set saves a response to the cache as key
func (c *Cache) Set(key string, resp []byte) {
key = keyToFilename(key)
c.d.WriteStream(key, bytes.NewReader(resp), true)
}
// Delete removes the response with key from the cache
func (c *Cache) Delete(key string) {
key = keyToFilename(key)
c.d.Erase(key)
}
func keyToFilename(key string) string {
h := md5.New()
io.WriteString(h, key)
return hex.EncodeToString(h.Sum(nil))
}
// New returns a new Cache that will store files in basePath
func New(basePath string) *Cache {
return &Cache{
d: diskv.New(diskv.Options{
BasePath: basePath,
CacheSizeMax: 100 * 1024 * 1024, // 100MB
}),
}
}
// NewWithDiskv returns a new Cache using the provided Diskv as underlying
// storage.
func NewWithDiskv(d *diskv.Diskv) *Cache {
return &Cache{d}
}
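
A sketch of wiring the disk cache into an httpcache Transport; the cache directory and URL are hypothetical:
```go
package main

import (
	"fmt"

	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
)

func main() {
	// Responses are persisted under the directory, one file per
	// md5-hashed cache key, capped at 100MB by diskcache.New.
	cache := diskcache.New("/var/cache/httpcache")
	client := httpcache.NewTransport(cache).Client()
	resp, err := client.Get("https://example.com/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```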

vendor/github.com/gregjones/httpcache/httpcache.go generated vendored Normal file (553 additions)

@@ -0,0 +1,553 @@
// Package httpcache provides a http.RoundTripper implementation that works as a
// mostly RFC-compliant cache for http responses.
//
// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
// and not for a shared proxy).
//
package httpcache
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httputil"
"strings"
"sync"
"time"
)
const (
stale = iota
fresh
transparent
// XFromCache is the header added to responses that are returned from the cache
XFromCache = "X-From-Cache"
)
// A Cache interface is used by the Transport to store and retrieve responses.
type Cache interface {
// Get returns the []byte representation of a cached response and a bool
// set to true if the value isn't empty
Get(key string) (responseBytes []byte, ok bool)
// Set stores the []byte representation of a response against a key
Set(key string, responseBytes []byte)
// Delete removes the value associated with the key
Delete(key string)
}
// cacheKey returns the cache key for req.
func cacheKey(req *http.Request) string {
return req.URL.String()
}
// CachedResponse returns the cached http.Response for req if present, and nil
// otherwise.
func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
cachedVal, ok := c.Get(cacheKey(req))
if !ok {
return
}
b := bytes.NewBuffer(cachedVal)
return http.ReadResponse(bufio.NewReader(b), req)
}
// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
type MemoryCache struct {
mu sync.RWMutex
items map[string][]byte
}
// Get returns the []byte representation of the response and true if present, false if not
func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
c.mu.RLock()
resp, ok = c.items[key]
c.mu.RUnlock()
return resp, ok
}
// Set saves response resp to the cache with key
func (c *MemoryCache) Set(key string, resp []byte) {
c.mu.Lock()
c.items[key] = resp
c.mu.Unlock()
}
// Delete removes key from the cache
func (c *MemoryCache) Delete(key string) {
c.mu.Lock()
delete(c.items, key)
c.mu.Unlock()
}
// NewMemoryCache returns a new Cache that will store items in an in-memory map
func NewMemoryCache() *MemoryCache {
c := &MemoryCache{items: map[string][]byte{}}
return c
}
// Transport is an implementation of http.RoundTripper that will return values from a cache
// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
// to repeated requests allowing servers to return 304 / Not Modified
type Transport struct {
// The RoundTripper interface actually used to make requests
// If nil, http.DefaultTransport is used
Transport http.RoundTripper
Cache Cache
// If true, responses returned from the cache will be given an extra header, X-From-Cache
MarkCachedResponses bool
}
// NewTransport returns a new Transport with the
// provided Cache implementation and MarkCachedResponses set to true
func NewTransport(c Cache) *Transport {
return &Transport{Cache: c, MarkCachedResponses: true}
}
// Client returns an *http.Client that caches responses.
func (t *Transport) Client() *http.Client {
return &http.Client{Transport: t}
}
// varyMatches will return false unless all of the cached values for the headers listed in Vary
// match the new request
func varyMatches(cachedResp *http.Response, req *http.Request) bool {
for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
header = http.CanonicalHeaderKey(header)
if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
return false
}
}
return true
}
// RoundTrip takes a Request and returns a Response
//
// If there is a fresh Response already in cache, then it will be returned without connecting to
// the server.
//
// If there is a stale Response, then any validators it contains will be set on the new request
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
// will be returned.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
cacheKey := cacheKey(req)
cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
var cachedResp *http.Response
if cacheable {
cachedResp, err = CachedResponse(t.Cache, req)
} else {
// Need to invalidate an existing value
t.Cache.Delete(cacheKey)
}
transport := t.Transport
if transport == nil {
transport = http.DefaultTransport
}
if cacheable && cachedResp != nil && err == nil {
if t.MarkCachedResponses {
cachedResp.Header.Set(XFromCache, "1")
}
if varyMatches(cachedResp, req) {
// Can only use cached value if the new request doesn't Vary significantly
freshness := getFreshness(cachedResp.Header, req.Header)
if freshness == fresh {
return cachedResp, nil
}
if freshness == stale {
var req2 *http.Request
// Add validators if caller hasn't already done so
etag := cachedResp.Header.Get("etag")
if etag != "" && req.Header.Get("etag") == "" {
req2 = cloneRequest(req)
req2.Header.Set("if-none-match", etag)
}
lastModified := cachedResp.Header.Get("last-modified")
if lastModified != "" && req.Header.Get("last-modified") == "" {
if req2 == nil {
req2 = cloneRequest(req)
}
req2.Header.Set("if-modified-since", lastModified)
}
if req2 != nil {
req = req2
}
}
}
resp, err = transport.RoundTrip(req)
if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
// Replace the 304 response with the one from cache, but update with some new headers
endToEndHeaders := getEndToEndHeaders(resp.Header)
for _, header := range endToEndHeaders {
cachedResp.Header[header] = resp.Header[header]
}
cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
cachedResp.StatusCode = http.StatusOK
resp = cachedResp
} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
// In case of transport failure and stale-if-error activated, returns cached content
// when available
cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
cachedResp.StatusCode = http.StatusOK
return cachedResp, nil
} else {
if err != nil || resp.StatusCode != http.StatusOK {
t.Cache.Delete(cacheKey)
}
if err != nil {
return nil, err
}
}
} else {
reqCacheControl := parseCacheControl(req.Header)
if _, ok := reqCacheControl["only-if-cached"]; ok {
resp = newGatewayTimeoutResponse(req)
} else {
resp, err = transport.RoundTrip(req)
if err != nil {
return nil, err
}
}
}
if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
varyKey = http.CanonicalHeaderKey(varyKey)
fakeHeader := "X-Varied-" + varyKey
reqValue := req.Header.Get(varyKey)
if reqValue != "" {
resp.Header.Set(fakeHeader, reqValue)
}
}
switch req.Method {
case "GET":
// Delay caching until EOF is reached.
resp.Body = &cachingReadCloser{
R: resp.Body,
OnEOF: func(r io.Reader) {
resp := *resp
resp.Body = ioutil.NopCloser(r)
respBytes, err := httputil.DumpResponse(&resp, true)
if err == nil {
t.Cache.Set(cacheKey, respBytes)
}
},
}
default:
respBytes, err := httputil.DumpResponse(resp, true)
if err == nil {
t.Cache.Set(cacheKey, respBytes)
}
}
} else {
t.Cache.Delete(cacheKey)
}
return resp, nil
}
// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
var ErrNoDateHeader = errors.New("no Date header")
// Date parses and returns the value of the Date header.
func Date(respHeaders http.Header) (date time.Time, err error) {
dateHeader := respHeaders.Get("date")
if dateHeader == "" {
err = ErrNoDateHeader
return
}
return time.Parse(time.RFC1123, dateHeader)
}
type realClock struct{}
func (c *realClock) since(d time.Time) time.Duration {
return time.Since(d)
}
type timer interface {
since(d time.Time) time.Duration
}
var clock timer = &realClock{}
// getFreshness will return one of fresh/stale/transparent based on the cache-control
// values of the request and the response
//
// fresh indicates the response can be returned
// stale indicates that the response needs validating before it is returned
// transparent indicates the response should not be used to fulfil the request
//
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
// significant. Similarly, s-maxage isn't used.
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
respCacheControl := parseCacheControl(respHeaders)
reqCacheControl := parseCacheControl(reqHeaders)
if _, ok := reqCacheControl["no-cache"]; ok {
return transparent
}
if _, ok := respCacheControl["no-cache"]; ok {
return stale
}
if _, ok := reqCacheControl["only-if-cached"]; ok {
return fresh
}
date, err := Date(respHeaders)
if err != nil {
return stale
}
currentAge := clock.since(date)
var lifetime time.Duration
var zeroDuration time.Duration
// If a response includes both an Expires header and a max-age directive,
// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
if maxAge, ok := respCacheControl["max-age"]; ok {
lifetime, err = time.ParseDuration(maxAge + "s")
if err != nil {
lifetime = zeroDuration
}
} else {
expiresHeader := respHeaders.Get("Expires")
if expiresHeader != "" {
expires, err := time.Parse(time.RFC1123, expiresHeader)
if err != nil {
lifetime = zeroDuration
} else {
lifetime = expires.Sub(date)
}
}
}
if maxAge, ok := reqCacheControl["max-age"]; ok {
// the client is willing to accept a response whose age is no greater than the specified time in seconds
lifetime, err = time.ParseDuration(maxAge + "s")
if err != nil {
lifetime = zeroDuration
}
}
if minfresh, ok := reqCacheControl["min-fresh"]; ok {
// the client wants a response that will still be fresh for at least the specified number of seconds.
minfreshDuration, err := time.ParseDuration(minfresh + "s")
if err == nil {
currentAge = time.Duration(currentAge + minfreshDuration)
}
}
if maxstale, ok := reqCacheControl["max-stale"]; ok {
// Indicates that the client is willing to accept a response that has exceeded its expiration time.
// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
// its expiration time by no more than the specified number of seconds.
// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
//
// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
// return-value available here.
if maxstale == "" {
return fresh
}
maxstaleDuration, err := time.ParseDuration(maxstale + "s")
if err == nil {
currentAge = time.Duration(currentAge - maxstaleDuration)
}
}
if lifetime > currentAge {
return fresh
}
return stale
}
// Returns true if either the request or the response includes the stale-if-error
// cache control extension: https://tools.ietf.org/html/rfc5861
func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
respCacheControl := parseCacheControl(respHeaders)
reqCacheControl := parseCacheControl(reqHeaders)
var err error
lifetime := time.Duration(-1)
if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
if staleMaxAge != "" {
lifetime, err = time.ParseDuration(staleMaxAge + "s")
if err != nil {
return false
}
} else {
return true
}
}
if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
if staleMaxAge != "" {
lifetime, err = time.ParseDuration(staleMaxAge + "s")
if err != nil {
return false
}
} else {
return true
}
}
if lifetime >= 0 {
date, err := Date(respHeaders)
if err != nil {
return false
}
currentAge := clock.since(date)
if lifetime > currentAge {
return true
}
}
return false
}
func getEndToEndHeaders(respHeaders http.Header) []string {
// These headers are always hop-by-hop
hopByHopHeaders := map[string]struct{}{
"Connection": struct{}{},
"Keep-Alive": struct{}{},
"Proxy-Authenticate": struct{}{},
"Proxy-Authorization": struct{}{},
"Te": struct{}{},
"Trailers": struct{}{},
"Transfer-Encoding": struct{}{},
"Upgrade": struct{}{},
}
for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
// any header listed in connection, if present, is also considered hop-by-hop
if strings.Trim(extra, " ") != "" {
hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
}
}
endToEndHeaders := []string{}
for respHeader := range respHeaders {
if _, ok := hopByHopHeaders[respHeader]; !ok {
endToEndHeaders = append(endToEndHeaders, respHeader)
}
}
return endToEndHeaders
}
func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
if _, ok := respCacheControl["no-store"]; ok {
return false
}
if _, ok := reqCacheControl["no-store"]; ok {
return false
}
return true
}
func newGatewayTimeoutResponse(req *http.Request) *http.Response {
var braw bytes.Buffer
braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
if err != nil {
panic(err)
}
return resp
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return r2
}
type cacheControl map[string]string
func parseCacheControl(headers http.Header) cacheControl {
cc := cacheControl{}
ccHeader := headers.Get("Cache-Control")
for _, part := range strings.Split(ccHeader, ",") {
part = strings.Trim(part, " ")
if part == "" {
continue
}
if strings.ContainsRune(part, '=') {
keyval := strings.Split(part, "=")
cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
} else {
cc[part] = ""
}
}
return cc
}
// headerAllCommaSepValues returns all comma-separated values (each
// with whitespace trimmed) for header name in headers. According to
// Section 4.2 of the HTTP/1.1 spec
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
// values from multiple occurrences of a header should be concatenated, if
// the header's value is a comma-separated list.
func headerAllCommaSepValues(headers http.Header, name string) []string {
var vals []string
for _, val := range headers[http.CanonicalHeaderKey(name)] {
fields := strings.Split(val, ",")
for i, f := range fields {
fields[i] = strings.TrimSpace(f)
}
vals = append(vals, fields...)
}
return vals
}
// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
// handler with a full copy of the content read from R when EOF is
// reached.
type cachingReadCloser struct {
// Underlying ReadCloser.
R io.ReadCloser
// OnEOF is called with a copy of the content of R when EOF is reached.
OnEOF func(io.Reader)
buf bytes.Buffer // buf stores a copy of the content of R.
}
// Read reads the next len(p) bytes from R or until R is drained. The
// return value n is the number of bytes read. If R has no data to
// return, err is io.EOF and OnEOF is called with a full copy of what
// has been read so far.
func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
n, err = r.R.Read(p)
r.buf.Write(p[:n])
if err == io.EOF {
r.OnEOF(bytes.NewReader(r.buf.Bytes()))
}
return n, err
}
func (r *cachingReadCloser) Close() error {
return r.R.Close()
}
// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
func NewMemoryCacheTransport() *Transport {
c := NewMemoryCache()
t := NewTransport(c)
return t
}
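
Since Cache is a three-method interface, custom backends are straightforward; a hypothetical counting wrapper, assuming only the API above:
```go
package main

import (
	"fmt"

	"github.com/gregjones/httpcache"
)

// countingCache is a hypothetical Cache that tallies hits and misses while
// delegating storage to any other Cache. (Not safe for concurrent use as-is.)
type countingCache struct {
	inner        httpcache.Cache
	hits, misses int
}

func (c *countingCache) Get(key string) ([]byte, bool) {
	b, ok := c.inner.Get(key)
	if ok {
		c.hits++
	} else {
		c.misses++
	}
	return b, ok
}

func (c *countingCache) Set(key string, b []byte) { c.inner.Set(key, b) }
func (c *countingCache) Delete(key string)        { c.inner.Delete(key) }

func main() {
	cc := &countingCache{inner: httpcache.NewMemoryCache()}
	client := httpcache.NewTransport(cc).Client()
	_ = client // use like any *http.Client
	fmt.Println(cc.hits, cc.misses)
}
```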

vendor/github.com/ugorji/go/LICENSE → vendor/github.com/json-iterator/go/LICENSE generated vendored

@@ -1,7 +1,6 @@
-The MIT License (MIT)
+MIT License
 
-Copyright (c) 2012-2015 Ugorji Nwoke.
-All rights reserved.
+Copyright (c) 2016 json-iterator
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

vendor/github.com/json-iterator/go/README.md generated vendored Normal file (80 additions)

@@ -0,0 +1,80 @@
[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go)
[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
A high-performance, 100% compatible drop-in replacement for "encoding/json"
```
Go developers, please join us: DiDi Chuxing Platform Technology Department, taowen@didichuxing.com
```
# Benchmark
![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
Raw Result (easyjson requires static code generation)
| | ns/op | allocation bytes | allocation times |
| --- | --- | --- | --- |
| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
# Usage
100% compatibility with standard lib
Replace
```go
import "encoding/json"
json.Marshal(&data)
```
with
```go
import "github.com/json-iterator/go"
jsoniter.Marshal(&data)
```
Replace
```go
import "encoding/json"
json.Unmarshal(input, &data)
```
with
```go
import "github.com/json-iterator/go"
jsoniter.Unmarshal(input, &data)
```
[More documentation](http://jsoniter.com/migrate-from-go-std.html)
# How to get
```
go get github.com/json-iterator/go
```
# Contributions Welcome!
Contributors
* [thockin](https://github.com/thockin)
* [mattn](https://github.com/mattn)
* [cch123](https://github.com/cch123)
Report an issue or open a pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)

127
vendor/github.com/json-iterator/go/feature_adapter.go generated vendored Normal file
View File

@@ -0,0 +1,127 @@
package jsoniter
import (
"bytes"
"io"
)
// RawMessage is defined so that encoding/json can be replaced with jsoniter without touching call sites
type RawMessage []byte
// Unmarshal adapts to the encoding/json Unmarshal API
//
// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
// Refer to https://godoc.org/encoding/json#Unmarshal for more information
func Unmarshal(data []byte, v interface{}) error {
return ConfigDefault.Unmarshal(data, v)
}
func lastNotSpacePos(data []byte) int {
for i := len(data) - 1; i >= 0; i-- {
if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' {
return i + 1
}
}
return 0
}
// UnmarshalFromString is a convenient method that reads from a string instead of []byte
func UnmarshalFromString(str string, v interface{}) error {
return ConfigDefault.UnmarshalFromString(str, v)
}
// Get is a quick method to fetch a value from a deeply nested JSON structure
func Get(data []byte, path ...interface{}) Any {
return ConfigDefault.Get(data, path...)
}
// Marshal adapts to the encoding/json Marshal API
//
// Marshal returns the JSON encoding of v.
// Refer to https://godoc.org/encoding/json#Marshal for more information
func Marshal(v interface{}) ([]byte, error) {
return ConfigDefault.Marshal(v)
}
// MarshalIndent is the same as json.MarshalIndent; a non-empty prefix is not supported.
func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
return ConfigDefault.MarshalIndent(v, prefix, indent)
}
// MarshalToString is a convenient method that writes to a string instead of []byte
func MarshalToString(v interface{}) (string, error) {
return ConfigDefault.MarshalToString(v)
}
// NewDecoder adapts to the encoding/json NewDecoder API.
//
// NewDecoder returns a new decoder that reads from r.
//
// Instead of an encoding/json Decoder, a jsoniter Decoder is returned
// Refer to https://godoc.org/encoding/json#NewDecoder for more information
func NewDecoder(reader io.Reader) *Decoder {
return ConfigDefault.NewDecoder(reader)
}
// Decoder reads and decodes JSON values from an input stream.
// Decoder provides the same APIs as the encoding/json Decoder (Token() and UseNumber() are in progress)
type Decoder struct {
iter *Iterator
}
// Decode decodes the next JSON value from the input into obj
func (adapter *Decoder) Decode(obj interface{}) error {
adapter.iter.ReadVal(obj)
err := adapter.iter.Error
if err == io.EOF {
return nil
}
return err
}
// More reports whether the decoder's buffer still holds unread data
func (adapter *Decoder) More() bool {
return adapter.iter.head != adapter.iter.tail
}
// Buffered returns a reader over the data remaining in the decoder's buffer
func (adapter *Decoder) Buffered() io.Reader {
remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
return bytes.NewReader(remaining)
}
// UseNumber causes number JSON elements to be unmarshaled into json.Number (an alias of string) instead of float64
func (adapter *Decoder) UseNumber() {
origCfg := adapter.iter.cfg.configBeforeFrozen
origCfg.UseNumber = true
adapter.iter.cfg = origCfg.Froze().(*frozenConfig)
}
// NewEncoder is the same as json.NewEncoder
func NewEncoder(writer io.Writer) *Encoder {
return ConfigDefault.NewEncoder(writer)
}
// Encoder is the same as json.Encoder
type Encoder struct {
stream *Stream
}
// Encode encodes val as JSON and writes it to the underlying io.Writer
func (adapter *Encoder) Encode(val interface{}) error {
adapter.stream.WriteVal(val)
adapter.stream.Flush()
return adapter.stream.Error
}
// SetIndent sets the indentation step; a non-empty prefix is not supported
func (adapter *Encoder) SetIndent(prefix, indent string) {
adapter.stream.cfg.indentionStep = len(indent)
}
// SetEscapeHTML controls HTML escaping; it is on by default, pass false to disable
func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
config := adapter.stream.cfg.configBeforeFrozen
config.EscapeHTML = escapeHTML
adapter.stream.cfg = config.Froze().(*frozenConfig)
}
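
A hedged usage sketch for the adapter above. Note that More, as implemented here, only reports data already buffered, so it is checked after the first Decode rather than before:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Decode mirrors encoding/json's Decoder API.
	dec := jsoniter.NewDecoder(strings.NewReader(`{"a":1} {"a":2}`))
	for {
		var v map[string]int
		if err := dec.Decode(&v); err != nil {
			break
		}
		fmt.Println(v["a"])
		if !dec.More() { // only reflects buffered input
			break
		}
	}

	// Encoder mirrors encoding/json's Encoder API.
	enc := jsoniter.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ") // prefix must stay empty
	_ = enc.Encode(map[string]int{"b": 3})
}
```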

242
vendor/github.com/json-iterator/go/feature_any.go generated vendored Normal file
View File

@@ -0,0 +1,242 @@
package jsoniter
import (
"fmt"
"io"
"reflect"
)
// Any is a generic JSON value representation.
// The lazy implementation holds raw []byte and parses it on demand.
type Any interface {
LastError() error
ValueType() ValueType
MustBeValid() Any
ToBool() bool
ToInt() int
ToInt32() int32
ToInt64() int64
ToUint() uint
ToUint32() uint32
ToUint64() uint64
ToFloat32() float32
ToFloat64() float64
ToString() string
ToVal(val interface{})
Get(path ...interface{}) Any
// TODO: add Set
Size() int
Keys() []string
GetInterface() interface{}
WriteTo(stream *Stream)
}
type baseAny struct{}
func (any *baseAny) Get(path ...interface{}) Any {
return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)}
}
func (any *baseAny) Size() int {
return 0
}
func (any *baseAny) Keys() []string {
return []string{}
}
func (any *baseAny) ToVal(obj interface{}) {
panic("not implemented")
}
// WrapInt32 turns an int32 into the Any interface
func WrapInt32(val int32) Any {
return &int32Any{baseAny{}, val}
}
// WrapInt64 turns an int64 into the Any interface
func WrapInt64(val int64) Any {
return &int64Any{baseAny{}, val}
}
// WrapUint32 turns a uint32 into the Any interface
func WrapUint32(val uint32) Any {
return &uint32Any{baseAny{}, val}
}
// WrapUint64 turns a uint64 into the Any interface
func WrapUint64(val uint64) Any {
return &uint64Any{baseAny{}, val}
}
// WrapFloat64 turns a float64 into the Any interface
func WrapFloat64(val float64) Any {
return &floatAny{baseAny{}, val}
}
// WrapString turns a string into the Any interface
func WrapString(val string) Any {
return &stringAny{baseAny{}, val}
}
// Wrap turns a Go object into the Any interface
func Wrap(val interface{}) Any {
if val == nil {
return &nilAny{}
}
asAny, isAny := val.(Any)
if isAny {
return asAny
}
typ := reflect.TypeOf(val)
switch typ.Kind() {
case reflect.Slice:
return wrapArray(val)
case reflect.Struct:
return wrapStruct(val)
case reflect.Map:
return wrapMap(val)
case reflect.String:
return WrapString(val.(string))
case reflect.Int:
return WrapInt64(int64(val.(int)))
case reflect.Int8:
return WrapInt32(int32(val.(int8)))
case reflect.Int16:
return WrapInt32(int32(val.(int16)))
case reflect.Int32:
return WrapInt32(val.(int32))
case reflect.Int64:
return WrapInt64(val.(int64))
case reflect.Uint:
return WrapUint64(uint64(val.(uint)))
case reflect.Uint8:
return WrapUint32(uint32(val.(uint8)))
case reflect.Uint16:
return WrapUint32(uint32(val.(uint16)))
case reflect.Uint32:
return WrapUint32(uint32(val.(uint32)))
case reflect.Uint64:
return WrapUint64(val.(uint64))
case reflect.Float32:
return WrapFloat64(float64(val.(float32)))
case reflect.Float64:
return WrapFloat64(val.(float64))
case reflect.Bool:
if val.(bool) {
return &trueAny{}
}
return &falseAny{}
}
return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
}
// ReadAny reads the next JSON element as an Any object. It is a better json.RawMessage.
func (iter *Iterator) ReadAny() Any {
return iter.readAny()
}
func (iter *Iterator) readAny() Any {
c := iter.nextToken()
switch c {
case '"':
iter.unreadByte()
return &stringAny{baseAny{}, iter.ReadString()}
case 'n':
iter.skipThreeBytes('u', 'l', 'l') // null
return &nilAny{}
case 't':
iter.skipThreeBytes('r', 'u', 'e') // true
return &trueAny{}
case 'f':
iter.skipFourBytes('a', 'l', 's', 'e') // false
return &falseAny{}
case '{':
return iter.readObjectAny()
case '[':
return iter.readArrayAny()
case '-':
return iter.readNumberAny(false)
default:
return iter.readNumberAny(true)
}
}
func (iter *Iterator) readNumberAny(positive bool) Any {
iter.startCapture(iter.head - 1)
iter.skipNumber()
lazyBuf := iter.stopCapture()
return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
}
func (iter *Iterator) readObjectAny() Any {
iter.startCapture(iter.head - 1)
iter.skipObject()
lazyBuf := iter.stopCapture()
return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
}
func (iter *Iterator) readArrayAny() Any {
iter.startCapture(iter.head - 1)
iter.skipArray()
lazyBuf := iter.stopCapture()
return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
}
func locateObjectField(iter *Iterator, target string) []byte {
var found []byte
iter.ReadObjectCB(func(iter *Iterator, field string) bool {
if field == target {
found = iter.SkipAndReturnBytes()
return false
}
iter.Skip()
return true
})
return found
}
func locateArrayElement(iter *Iterator, target int) []byte {
var found []byte
n := 0
iter.ReadArrayCB(func(iter *Iterator) bool {
if n == target {
found = iter.SkipAndReturnBytes()
return false
}
iter.Skip()
n++
return true
})
return found
}
func locatePath(iter *Iterator, path []interface{}) Any {
for i, pathKeyObj := range path {
switch pathKey := pathKeyObj.(type) {
case string:
valueBytes := locateObjectField(iter, pathKey)
if valueBytes == nil {
return newInvalidAny(path[i:])
}
iter.ResetBytes(valueBytes)
case int:
valueBytes := locateArrayElement(iter, pathKey)
if valueBytes == nil {
return newInvalidAny(path[i:])
}
iter.ResetBytes(valueBytes)
case int32:
if pathKey == '*' {
return iter.readAny().Get(path[i:]...)
}
return newInvalidAny(path[i:])
default:
return newInvalidAny(path[i:])
}
}
if iter.Error != nil && iter.Error != io.EOF {
return &invalidAny{baseAny{}, iter.Error}
}
return iter.readAny()
}
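
Grounded in locatePath above: path elements are strings for object fields, ints for array indexes, and the rune '*' (an int32) as a wildcard that fans out and keeps only valid matches. A small sketch, assuming `import jsoniter "github.com/json-iterator/go"`:

```go
input := []byte(`{"users":[{"name":"ada"},{"name":"bob"}]}`)

// string -> object field, int -> array index
first := jsoniter.Get(input, "users", 0, "name").ToString() // "ada"

// '*' collects the field across every element, dropping invalid matches
all := jsoniter.Get(input, "users", '*', "name").ToString() // ["ada","bob"]
```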

278
vendor/github.com/json-iterator/go/feature_any_array.go generated vendored Normal file
View File

@@ -0,0 +1,278 @@
package jsoniter
import (
"reflect"
"unsafe"
)
type arrayLazyAny struct {
baseAny
cfg *frozenConfig
buf []byte
err error
}
func (any *arrayLazyAny) ValueType() ValueType {
return ArrayValue
}
func (any *arrayLazyAny) MustBeValid() Any {
return any
}
func (any *arrayLazyAny) LastError() error {
return any.err
}
func (any *arrayLazyAny) ToBool() bool {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
return iter.ReadArray()
}
func (any *arrayLazyAny) ToInt() int {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToInt32() int32 {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToInt64() int64 {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToUint() uint {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToUint32() uint32 {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToUint64() uint64 {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToFloat32() float32 {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToFloat64() float64 {
if any.ToBool() {
return 1
}
return 0
}
func (any *arrayLazyAny) ToString() string {
return *(*string)(unsafe.Pointer(&any.buf))
}
func (any *arrayLazyAny) ToVal(val interface{}) {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
iter.ReadVal(val)
}
func (any *arrayLazyAny) Get(path ...interface{}) Any {
if len(path) == 0 {
return any
}
switch firstPath := path[0].(type) {
case int:
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
valueBytes := locateArrayElement(iter, firstPath)
if valueBytes == nil {
return newInvalidAny(path)
}
iter.ResetBytes(valueBytes)
return locatePath(iter, path[1:])
case int32:
if firstPath == '*' {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
arr := make([]Any, 0)
iter.ReadArrayCB(func(iter *Iterator) bool {
found := iter.readAny().Get(path[1:]...)
if found.ValueType() != InvalidValue {
arr = append(arr, found)
}
return true
})
return wrapArray(arr)
}
return newInvalidAny(path)
default:
return newInvalidAny(path)
}
}
func (any *arrayLazyAny) Size() int {
size := 0
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
iter.ReadArrayCB(func(iter *Iterator) bool {
size++
iter.Skip()
return true
})
return size
}
func (any *arrayLazyAny) WriteTo(stream *Stream) {
stream.Write(any.buf)
}
func (any *arrayLazyAny) GetInterface() interface{} {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
return iter.Read()
}
type arrayAny struct {
baseAny
val reflect.Value
}
func wrapArray(val interface{}) *arrayAny {
return &arrayAny{baseAny{}, reflect.ValueOf(val)}
}
func (any *arrayAny) ValueType() ValueType {
return ArrayValue
}
func (any *arrayAny) MustBeValid() Any {
return any
}
func (any *arrayAny) LastError() error {
return nil
}
func (any *arrayAny) ToBool() bool {
return any.val.Len() != 0
}
func (any *arrayAny) ToInt() int {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToInt32() int32 {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToInt64() int64 {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToUint() uint {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToUint32() uint32 {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToUint64() uint64 {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToFloat32() float32 {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToFloat64() float64 {
if any.val.Len() == 0 {
return 0
}
return 1
}
func (any *arrayAny) ToString() string {
str, _ := MarshalToString(any.val.Interface())
return str
}
func (any *arrayAny) Get(path ...interface{}) Any {
if len(path) == 0 {
return any
}
switch firstPath := path[0].(type) {
case int:
if firstPath < 0 || firstPath >= any.val.Len() {
return newInvalidAny(path)
}
return Wrap(any.val.Index(firstPath).Interface())
case int32:
if firstPath == '*' {
mappedAll := make([]Any, 0)
for i := 0; i < any.val.Len(); i++ {
mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...)
if mapped.ValueType() != InvalidValue {
mappedAll = append(mappedAll, mapped)
}
}
return wrapArray(mappedAll)
}
return newInvalidAny(path)
default:
return newInvalidAny(path)
}
}
func (any *arrayAny) Size() int {
return any.val.Len()
}
func (any *arrayAny) WriteTo(stream *Stream) {
stream.WriteVal(any.val)
}
func (any *arrayAny) GetInterface() interface{} {
return any.val.Interface()
}

137
vendor/github.com/json-iterator/go/feature_any_bool.go generated vendored Normal file
View File

@@ -0,0 +1,137 @@
package jsoniter
type trueAny struct {
baseAny
}
func (any *trueAny) LastError() error {
return nil
}
func (any *trueAny) ToBool() bool {
return true
}
func (any *trueAny) ToInt() int {
return 1
}
func (any *trueAny) ToInt32() int32 {
return 1
}
func (any *trueAny) ToInt64() int64 {
return 1
}
func (any *trueAny) ToUint() uint {
return 1
}
func (any *trueAny) ToUint32() uint32 {
return 1
}
func (any *trueAny) ToUint64() uint64 {
return 1
}
func (any *trueAny) ToFloat32() float32 {
return 1
}
func (any *trueAny) ToFloat64() float64 {
return 1
}
func (any *trueAny) ToString() string {
return "true"
}
func (any *trueAny) WriteTo(stream *Stream) {
stream.WriteTrue()
}
func (any *trueAny) Parse() *Iterator {
return nil
}
func (any *trueAny) GetInterface() interface{} {
return true
}
func (any *trueAny) ValueType() ValueType {
return BoolValue
}
func (any *trueAny) MustBeValid() Any {
return any
}
type falseAny struct {
baseAny
}
func (any *falseAny) LastError() error {
return nil
}
func (any *falseAny) ToBool() bool {
return false
}
func (any *falseAny) ToInt() int {
return 0
}
func (any *falseAny) ToInt32() int32 {
return 0
}
func (any *falseAny) ToInt64() int64 {
return 0
}
func (any *falseAny) ToUint() uint {
return 0
}
func (any *falseAny) ToUint32() uint32 {
return 0
}
func (any *falseAny) ToUint64() uint64 {
return 0
}
func (any *falseAny) ToFloat32() float32 {
return 0
}
func (any *falseAny) ToFloat64() float64 {
return 0
}
func (any *falseAny) ToString() string {
return "false"
}
func (any *falseAny) WriteTo(stream *Stream) {
stream.WriteFalse()
}
func (any *falseAny) Parse() *Iterator {
return nil
}
func (any *falseAny) GetInterface() interface{} {
return false
}
func (any *falseAny) ValueType() ValueType {
return BoolValue
}
func (any *falseAny) MustBeValid() Any {
return any
}

View File

@@ -0,0 +1,83 @@
package jsoniter
import (
"strconv"
)
type floatAny struct {
baseAny
val float64
}
func (any *floatAny) Parse() *Iterator {
return nil
}
func (any *floatAny) ValueType() ValueType {
return NumberValue
}
func (any *floatAny) MustBeValid() Any {
return any
}
func (any *floatAny) LastError() error {
return nil
}
func (any *floatAny) ToBool() bool {
return any.ToFloat64() != 0
}
func (any *floatAny) ToInt() int {
return int(any.val)
}
func (any *floatAny) ToInt32() int32 {
return int32(any.val)
}
func (any *floatAny) ToInt64() int64 {
return int64(any.val)
}
func (any *floatAny) ToUint() uint {
if any.val > 0 {
return uint(any.val)
}
return 0
}
func (any *floatAny) ToUint32() uint32 {
if any.val > 0 {
return uint32(any.val)
}
return 0
}
func (any *floatAny) ToUint64() uint64 {
if any.val > 0 {
return uint64(any.val)
}
return 0
}
func (any *floatAny) ToFloat32() float32 {
return float32(any.val)
}
func (any *floatAny) ToFloat64() float64 {
return any.val
}
func (any *floatAny) ToString() string {
return strconv.FormatFloat(any.val, 'E', -1, 64)
}
func (any *floatAny) WriteTo(stream *Stream) {
stream.WriteFloat64(any.val)
}
func (any *floatAny) GetInterface() interface{} {
return any.val
}

View File

@@ -0,0 +1,74 @@
package jsoniter
import (
"strconv"
)
type int32Any struct {
baseAny
val int32
}
func (any *int32Any) LastError() error {
return nil
}
func (any *int32Any) ValueType() ValueType {
return NumberValue
}
func (any *int32Any) MustBeValid() Any {
return any
}
func (any *int32Any) ToBool() bool {
return any.val != 0
}
func (any *int32Any) ToInt() int {
return int(any.val)
}
func (any *int32Any) ToInt32() int32 {
return any.val
}
func (any *int32Any) ToInt64() int64 {
return int64(any.val)
}
func (any *int32Any) ToUint() uint {
return uint(any.val)
}
func (any *int32Any) ToUint32() uint32 {
return uint32(any.val)
}
func (any *int32Any) ToUint64() uint64 {
return uint64(any.val)
}
func (any *int32Any) ToFloat32() float32 {
return float32(any.val)
}
func (any *int32Any) ToFloat64() float64 {
return float64(any.val)
}
func (any *int32Any) ToString() string {
return strconv.FormatInt(int64(any.val), 10)
}
func (any *int32Any) WriteTo(stream *Stream) {
stream.WriteInt32(any.val)
}
func (any *int32Any) Parse() *Iterator {
return nil
}
func (any *int32Any) GetInterface() interface{} {
return any.val
}

View File

@@ -0,0 +1,74 @@
package jsoniter
import (
"strconv"
)
type int64Any struct {
baseAny
val int64
}
func (any *int64Any) LastError() error {
return nil
}
func (any *int64Any) ValueType() ValueType {
return NumberValue
}
func (any *int64Any) MustBeValid() Any {
return any
}
func (any *int64Any) ToBool() bool {
return any.val != 0
}
func (any *int64Any) ToInt() int {
return int(any.val)
}
func (any *int64Any) ToInt32() int32 {
return int32(any.val)
}
func (any *int64Any) ToInt64() int64 {
return any.val
}
func (any *int64Any) ToUint() uint {
return uint(any.val)
}
func (any *int64Any) ToUint32() uint32 {
return uint32(any.val)
}
func (any *int64Any) ToUint64() uint64 {
return uint64(any.val)
}
func (any *int64Any) ToFloat32() float32 {
return float32(any.val)
}
func (any *int64Any) ToFloat64() float64 {
return float64(any.val)
}
func (any *int64Any) ToString() string {
return strconv.FormatInt(any.val, 10)
}
func (any *int64Any) WriteTo(stream *Stream) {
stream.WriteInt64(any.val)
}
func (any *int64Any) Parse() *Iterator {
return nil
}
func (any *int64Any) GetInterface() interface{} {
return any.val
}

View File

@@ -0,0 +1,82 @@
package jsoniter
import "fmt"
type invalidAny struct {
baseAny
err error
}
func newInvalidAny(path []interface{}) *invalidAny {
return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)}
}
func (any *invalidAny) LastError() error {
return any.err
}
func (any *invalidAny) ValueType() ValueType {
return InvalidValue
}
func (any *invalidAny) MustBeValid() Any {
panic(any.err)
}
func (any *invalidAny) ToBool() bool {
return false
}
func (any *invalidAny) ToInt() int {
return 0
}
func (any *invalidAny) ToInt32() int32 {
return 0
}
func (any *invalidAny) ToInt64() int64 {
return 0
}
func (any *invalidAny) ToUint() uint {
return 0
}
func (any *invalidAny) ToUint32() uint32 {
return 0
}
func (any *invalidAny) ToUint64() uint64 {
return 0
}
func (any *invalidAny) ToFloat32() float32 {
return 0
}
func (any *invalidAny) ToFloat64() float64 {
return 0
}
func (any *invalidAny) ToString() string {
return ""
}
func (any *invalidAny) WriteTo(stream *Stream) {
}
func (any *invalidAny) Get(path ...interface{}) Any {
if any.err == nil {
return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)}
}
return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)}
}
func (any *invalidAny) Parse() *Iterator {
return nil
}
func (any *invalidAny) GetInterface() interface{} {
return nil
}

69
vendor/github.com/json-iterator/go/feature_any_nil.go generated vendored Normal file
View File

@@ -0,0 +1,69 @@
package jsoniter
type nilAny struct {
baseAny
}
func (any *nilAny) LastError() error {
return nil
}
func (any *nilAny) ValueType() ValueType {
return NilValue
}
func (any *nilAny) MustBeValid() Any {
return any
}
func (any *nilAny) ToBool() bool {
return false
}
func (any *nilAny) ToInt() int {
return 0
}
func (any *nilAny) ToInt32() int32 {
return 0
}
func (any *nilAny) ToInt64() int64 {
return 0
}
func (any *nilAny) ToUint() uint {
return 0
}
func (any *nilAny) ToUint32() uint32 {
return 0
}
func (any *nilAny) ToUint64() uint64 {
return 0
}
func (any *nilAny) ToFloat32() float32 {
return 0
}
func (any *nilAny) ToFloat64() float64 {
return 0
}
func (any *nilAny) ToString() string {
return ""
}
func (any *nilAny) WriteTo(stream *Stream) {
stream.WriteNil()
}
func (any *nilAny) Parse() *Iterator {
return nil
}
func (any *nilAny) GetInterface() interface{} {
return nil
}

View File

@@ -0,0 +1,104 @@
package jsoniter
import "unsafe"
type numberLazyAny struct {
baseAny
cfg *frozenConfig
buf []byte
err error
}
func (any *numberLazyAny) ValueType() ValueType {
return NumberValue
}
func (any *numberLazyAny) MustBeValid() Any {
return any
}
func (any *numberLazyAny) LastError() error {
return any.err
}
func (any *numberLazyAny) ToBool() bool {
return any.ToFloat64() != 0
}
func (any *numberLazyAny) ToInt() int {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadInt()
any.err = iter.Error
return val
}
func (any *numberLazyAny) ToInt32() int32 {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadInt32()
any.err = iter.Error
return val
}
func (any *numberLazyAny) ToInt64() int64 {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadInt64()
any.err = iter.Error
return val
}
func (any *numberLazyAny) ToUint() uint {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadUint()
any.err = iter.Error
return val
}
func (any *numberLazyAny) ToUint32() uint32 {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadUint32()
any.err = iter.Error
return val
}
func (any *numberLazyAny) ToUint64() uint64 {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadUint64()
any.err = iter.Error
return val
}
func (any *numberLazyAny) ToFloat32() float32 {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadFloat32()
any.err = iter.Error
return val
}
func (any *numberLazyAny) ToFloat64() float64 {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
val := iter.ReadFloat64()
any.err = iter.Error
return val
}
func (any *numberLazyAny) ToString() string {
return *(*string)(unsafe.Pointer(&any.buf))
}
func (any *numberLazyAny) WriteTo(stream *Stream) {
stream.Write(any.buf)
}
func (any *numberLazyAny) GetInterface() interface{} {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
return iter.Read()
}

View File

@@ -0,0 +1,374 @@
package jsoniter
import (
"reflect"
"unsafe"
)
type objectLazyAny struct {
baseAny
cfg *frozenConfig
buf []byte
err error
}
func (any *objectLazyAny) ValueType() ValueType {
return ObjectValue
}
func (any *objectLazyAny) MustBeValid() Any {
return any
}
func (any *objectLazyAny) LastError() error {
return any.err
}
func (any *objectLazyAny) ToBool() bool {
return true
}
func (any *objectLazyAny) ToInt() int {
return 0
}
func (any *objectLazyAny) ToInt32() int32 {
return 0
}
func (any *objectLazyAny) ToInt64() int64 {
return 0
}
func (any *objectLazyAny) ToUint() uint {
return 0
}
func (any *objectLazyAny) ToUint32() uint32 {
return 0
}
func (any *objectLazyAny) ToUint64() uint64 {
return 0
}
func (any *objectLazyAny) ToFloat32() float32 {
return 0
}
func (any *objectLazyAny) ToFloat64() float64 {
return 0
}
func (any *objectLazyAny) ToString() string {
return *(*string)(unsafe.Pointer(&any.buf))
}
func (any *objectLazyAny) ToVal(obj interface{}) {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
iter.ReadVal(obj)
}
func (any *objectLazyAny) Get(path ...interface{}) Any {
if len(path) == 0 {
return any
}
switch firstPath := path[0].(type) {
case string:
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
valueBytes := locateObjectField(iter, firstPath)
if valueBytes == nil {
return newInvalidAny(path)
}
iter.ResetBytes(valueBytes)
return locatePath(iter, path[1:])
case int32:
if firstPath == '*' {
mappedAll := map[string]Any{}
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
iter.ReadMapCB(func(iter *Iterator, field string) bool {
mapped := locatePath(iter, path[1:])
if mapped.ValueType() != InvalidValue {
mappedAll[field] = mapped
}
return true
})
return wrapMap(mappedAll)
}
return newInvalidAny(path)
default:
return newInvalidAny(path)
}
}
func (any *objectLazyAny) Keys() []string {
keys := []string{}
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
iter.ReadMapCB(func(iter *Iterator, field string) bool {
iter.Skip()
keys = append(keys, field)
return true
})
return keys
}
func (any *objectLazyAny) Size() int {
size := 0
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
iter.ReadObjectCB(func(iter *Iterator, field string) bool {
iter.Skip()
size++
return true
})
return size
}
func (any *objectLazyAny) WriteTo(stream *Stream) {
stream.Write(any.buf)
}
func (any *objectLazyAny) GetInterface() interface{} {
iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter)
return iter.Read()
}
type objectAny struct {
baseAny
err error
val reflect.Value
}
func wrapStruct(val interface{}) *objectAny {
return &objectAny{baseAny{}, nil, reflect.ValueOf(val)}
}
func (any *objectAny) ValueType() ValueType {
return ObjectValue
}
func (any *objectAny) MustBeValid() Any {
return any
}
func (any *objectAny) Parse() *Iterator {
return nil
}
func (any *objectAny) LastError() error {
return any.err
}
func (any *objectAny) ToBool() bool {
return any.val.NumField() != 0
}
func (any *objectAny) ToInt() int {
return 0
}
func (any *objectAny) ToInt32() int32 {
return 0
}
func (any *objectAny) ToInt64() int64 {
return 0
}
func (any *objectAny) ToUint() uint {
return 0
}
func (any *objectAny) ToUint32() uint32 {
return 0
}
func (any *objectAny) ToUint64() uint64 {
return 0
}
func (any *objectAny) ToFloat32() float32 {
return 0
}
func (any *objectAny) ToFloat64() float64 {
return 0
}
func (any *objectAny) ToString() string {
str, err := MarshalToString(any.val.Interface())
any.err = err
return str
}
func (any *objectAny) Get(path ...interface{}) Any {
if len(path) == 0 {
return any
}
switch firstPath := path[0].(type) {
case string:
field := any.val.FieldByName(firstPath)
if !field.IsValid() {
return newInvalidAny(path)
}
return Wrap(field.Interface())
case int32:
if firstPath == '*' {
mappedAll := map[string]Any{}
for i := 0; i < any.val.NumField(); i++ {
field := any.val.Field(i)
if field.CanInterface() {
mapped := Wrap(field.Interface()).Get(path[1:]...)
if mapped.ValueType() != InvalidValue {
mappedAll[any.val.Type().Field(i).Name] = mapped
}
}
}
return wrapMap(mappedAll)
}
return newInvalidAny(path)
default:
return newInvalidAny(path)
}
}
func (any *objectAny) Keys() []string {
keys := make([]string, 0, any.val.NumField())
for i := 0; i < any.val.NumField(); i++ {
keys = append(keys, any.val.Type().Field(i).Name)
}
return keys
}
func (any *objectAny) Size() int {
return any.val.NumField()
}
func (any *objectAny) WriteTo(stream *Stream) {
stream.WriteVal(any.val)
}
func (any *objectAny) GetInterface() interface{} {
return any.val.Interface()
}
type mapAny struct {
baseAny
err error
val reflect.Value
}
func wrapMap(val interface{}) *mapAny {
return &mapAny{baseAny{}, nil, reflect.ValueOf(val)}
}
func (any *mapAny) ValueType() ValueType {
return ObjectValue
}
func (any *mapAny) MustBeValid() Any {
return any
}
func (any *mapAny) Parse() *Iterator {
return nil
}
func (any *mapAny) LastError() error {
return any.err
}
func (any *mapAny) ToBool() bool {
return true
}
func (any *mapAny) ToInt() int {
return 0
}
func (any *mapAny) ToInt32() int32 {
return 0
}
func (any *mapAny) ToInt64() int64 {
return 0
}
func (any *mapAny) ToUint() uint {
return 0
}
func (any *mapAny) ToUint32() uint32 {
return 0
}
func (any *mapAny) ToUint64() uint64 {
return 0
}
func (any *mapAny) ToFloat32() float32 {
return 0
}
func (any *mapAny) ToFloat64() float64 {
return 0
}
func (any *mapAny) ToString() string {
str, err := MarshalToString(any.val.Interface())
any.err = err
return str
}
func (any *mapAny) Get(path ...interface{}) Any {
if len(path) == 0 {
return any
}
switch firstPath := path[0].(type) {
case int32:
if firstPath == '*' {
mappedAll := map[string]Any{}
for _, key := range any.val.MapKeys() {
keyAsStr := key.String()
element := Wrap(any.val.MapIndex(key).Interface())
mapped := element.Get(path[1:]...)
if mapped.ValueType() != InvalidValue {
mappedAll[keyAsStr] = mapped
}
}
return wrapMap(mappedAll)
}
return newInvalidAny(path)
default:
value := any.val.MapIndex(reflect.ValueOf(firstPath))
if !value.IsValid() {
return newInvalidAny(path)
}
return Wrap(value.Interface())
}
}
func (any *mapAny) Keys() []string {
keys := make([]string, 0, any.val.Len())
for _, key := range any.val.MapKeys() {
keys = append(keys, key.String())
}
return keys
}
func (any *mapAny) Size() int {
return any.val.Len()
}
func (any *mapAny) WriteTo(stream *Stream) {
stream.WriteVal(any.val)
}
func (any *mapAny) GetInterface() interface{} {
return any.val.Interface()
}

View File

@@ -0,0 +1,166 @@
package jsoniter
import (
"fmt"
"strconv"
)
type stringAny struct {
baseAny
val string
}
func (any *stringAny) Get(path ...interface{}) Any {
if len(path) == 0 {
return any
}
return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)}
}
func (any *stringAny) Parse() *Iterator {
return nil
}
func (any *stringAny) ValueType() ValueType {
return StringValue
}
func (any *stringAny) MustBeValid() Any {
return any
}
func (any *stringAny) LastError() error {
return nil
}
func (any *stringAny) ToBool() bool {
str := any.ToString()
if str == "0" {
return false
}
for _, c := range str {
switch c {
case ' ', '\n', '\r', '\t':
default:
return true
}
}
return false
}
func (any *stringAny) ToInt() int {
return int(any.ToInt64())
}
func (any *stringAny) ToInt32() int32 {
return int32(any.ToInt64())
}
func (any *stringAny) ToInt64() int64 {
if any.val == "" {
return 0
}
flag := 1
startPos := 0
if any.val[0] == '+' || any.val[0] == '-' {
startPos = 1
}
if any.val[0] == '-' {
flag = -1
}
// start endPos at startPos so a bare sign like "-" yields 0 instead of slicing out of range
endPos := startPos
for i := startPos; i < len(any.val); i++ {
if any.val[i] >= '0' && any.val[i] <= '9' {
endPos = i + 1
} else {
break
}
}
parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
return int64(flag) * parsed
}
func (any *stringAny) ToUint() uint {
return uint(any.ToUint64())
}
func (any *stringAny) ToUint32() uint32 {
return uint32(any.ToUint64())
}
func (any *stringAny) ToUint64() uint64 {
if any.val == "" {
return 0
}
startPos := 0
if any.val[0] == '-' {
return 0
}
if any.val[0] == '+' {
startPos = 1
}
// start endPos at startPos so a bare "+" yields 0 instead of slicing out of range
endPos := startPos
for i := startPos; i < len(any.val); i++ {
if any.val[i] >= '0' && any.val[i] <= '9' {
endPos = i + 1
} else {
break
}
}
parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
return parsed
}
func (any *stringAny) ToFloat32() float32 {
return float32(any.ToFloat64())
}
func (any *stringAny) ToFloat64() float64 {
if len(any.val) == 0 {
return 0
}
// first char invalid
if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
return 0
}
// extract valid num expression from string
// eg 123true => 123, -12.12xxa => -12.12
endPos := 1
for i := 1; i < len(any.val); i++ {
if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
endPos = i + 1
continue
}
// end position is the first char which is not digit
if any.val[i] >= '0' && any.val[i] <= '9' {
endPos = i + 1
} else {
endPos = i
break
}
}
parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
return parsed
}
func (any *stringAny) ToString() string {
return any.val
}
func (any *stringAny) WriteTo(stream *Stream) {
stream.WriteString(any.val)
}
func (any *stringAny) GetInterface() interface{} {
return any.val
}
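
The converters above intentionally salvage the longest leading numeric run instead of failing outright (the `-12.12xxa => -12.12` comment in ToFloat64). A sketch using the exported WrapString from feature_any.go, assuming the jsoniter and fmt imports:

```go
fmt.Println(jsoniter.WrapString("-12.12xxa").ToFloat64()) // -12.12: stops at the first invalid char
fmt.Println(jsoniter.WrapString("123true").ToInt64())     // 123
fmt.Println(jsoniter.WrapString("0").ToBool())            // false: only "0" and blank strings are false
```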

View File

@@ -0,0 +1,74 @@
package jsoniter
import (
"strconv"
)
type uint32Any struct {
baseAny
val uint32
}
func (any *uint32Any) LastError() error {
return nil
}
func (any *uint32Any) ValueType() ValueType {
return NumberValue
}
func (any *uint32Any) MustBeValid() Any {
return any
}
func (any *uint32Any) ToBool() bool {
return any.val != 0
}
func (any *uint32Any) ToInt() int {
return int(any.val)
}
func (any *uint32Any) ToInt32() int32 {
return int32(any.val)
}
func (any *uint32Any) ToInt64() int64 {
return int64(any.val)
}
func (any *uint32Any) ToUint() uint {
return uint(any.val)
}
func (any *uint32Any) ToUint32() uint32 {
return any.val
}
func (any *uint32Any) ToUint64() uint64 {
return uint64(any.val)
}
func (any *uint32Any) ToFloat32() float32 {
return float32(any.val)
}
func (any *uint32Any) ToFloat64() float64 {
return float64(any.val)
}
func (any *uint32Any) ToString() string {
return strconv.FormatInt(int64(any.val), 10)
}
func (any *uint32Any) WriteTo(stream *Stream) {
stream.WriteUint32(any.val)
}
func (any *uint32Any) Parse() *Iterator {
return nil
}
func (any *uint32Any) GetInterface() interface{} {
return any.val
}

View File

@@ -0,0 +1,74 @@
package jsoniter
import (
"strconv"
)
type uint64Any struct {
baseAny
val uint64
}
func (any *uint64Any) LastError() error {
return nil
}
func (any *uint64Any) ValueType() ValueType {
return NumberValue
}
func (any *uint64Any) MustBeValid() Any {
return any
}
func (any *uint64Any) ToBool() bool {
return any.val != 0
}
func (any *uint64Any) ToInt() int {
return int(any.val)
}
func (any *uint64Any) ToInt32() int32 {
return int32(any.val)
}
func (any *uint64Any) ToInt64() int64 {
return int64(any.val)
}
func (any *uint64Any) ToUint() uint {
return uint(any.val)
}
func (any *uint64Any) ToUint32() uint32 {
return uint32(any.val)
}
func (any *uint64Any) ToUint64() uint64 {
return any.val
}
func (any *uint64Any) ToFloat32() float32 {
return float32(any.val)
}
func (any *uint64Any) ToFloat64() float64 {
return float64(any.val)
}
func (any *uint64Any) ToString() string {
return strconv.FormatUint(any.val, 10)
}
func (any *uint64Any) WriteTo(stream *Stream) {
stream.WriteUint64(any.val)
}
func (any *uint64Any) Parse() *Iterator {
return nil
}
func (any *uint64Any) GetInterface() interface{} {
return any.val
}

312
vendor/github.com/json-iterator/go/feature_config.go generated vendored Normal file
View File

@@ -0,0 +1,312 @@
package jsoniter
import (
"encoding/json"
"errors"
"io"
"reflect"
"sync/atomic"
"unsafe"
)
// Config customizes how the API behaves.
// An API instance is created from a Config by calling Froze.
type Config struct {
IndentionStep int
MarshalFloatWith6Digits bool
EscapeHTML bool
SortMapKeys bool
UseNumber bool
TagKey string
}
type frozenConfig struct {
configBeforeFrozen Config
sortMapKeys bool
indentionStep int
decoderCache unsafe.Pointer
encoderCache unsafe.Pointer
extensions []Extension
streamPool chan *Stream
iteratorPool chan *Iterator
}
// API is the public interface of this package.
// It is primarily Marshal and Unmarshal.
type API interface {
IteratorPool
StreamPool
MarshalToString(v interface{}) (string, error)
Marshal(v interface{}) ([]byte, error)
MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
UnmarshalFromString(str string, v interface{}) error
Unmarshal(data []byte, v interface{}) error
Get(data []byte, path ...interface{}) Any
NewEncoder(writer io.Writer) *Encoder
NewDecoder(reader io.Reader) *Decoder
}
// ConfigDefault is the default API
var ConfigDefault = Config{
EscapeHTML: true,
}.Froze()
// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
var ConfigCompatibleWithStandardLibrary = Config{
EscapeHTML: true,
SortMapKeys: true,
}.Froze()
// ConfigFastest marshals floats with only 6 digits of precision
var ConfigFastest = Config{
EscapeHTML: false,
MarshalFloatWith6Digits: true,
}.Froze()
// Froze forges an immutable API instance from the Config
func (cfg Config) Froze() API {
// TODO: cache frozen config
frozenConfig := &frozenConfig{
sortMapKeys: cfg.SortMapKeys,
indentionStep: cfg.IndentionStep,
streamPool: make(chan *Stream, 16),
iteratorPool: make(chan *Iterator, 16),
}
// the caches are read back as map[reflect.Type] below, so key them by reflect.Type
atomic.StorePointer(&frozenConfig.decoderCache, unsafe.Pointer(&map[reflect.Type]ValDecoder{}))
atomic.StorePointer(&frozenConfig.encoderCache, unsafe.Pointer(&map[reflect.Type]ValEncoder{}))
if cfg.MarshalFloatWith6Digits {
frozenConfig.marshalFloatWith6Digits()
}
if cfg.EscapeHTML {
frozenConfig.escapeHTML()
}
if cfg.UseNumber {
frozenConfig.useNumber()
}
frozenConfig.configBeforeFrozen = cfg
return frozenConfig
}
func (cfg *frozenConfig) useNumber() {
cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
if iter.WhatIsNext() == NumberValue {
*((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
} else {
*((*interface{})(ptr)) = iter.Read()
}
}})
}
func (cfg *frozenConfig) getTagKey() string {
tagKey := cfg.configBeforeFrozen.TagKey
if tagKey == "" {
return "json"
}
return tagKey
}
func (cfg *frozenConfig) registerExtension(extension Extension) {
cfg.extensions = append(cfg.extensions, extension)
}
type lossyFloat32Encoder struct {
}
func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteFloat32Lossy(*((*float32)(ptr)))
}
func (encoder *lossyFloat32Encoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
return *((*float32)(ptr)) == 0
}
type lossyFloat64Encoder struct {
}
func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteFloat64Lossy(*((*float64)(ptr)))
}
func (encoder *lossyFloat64Encoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
return *((*float64)(ptr)) == 0
}
// marshalFloatWith6Digits keeps only 10**(-6) precision
// for float variables, trading accuracy for performance.
func (cfg *frozenConfig) marshalFloatWith6Digits() {
// for better performance
cfg.addEncoderToCache(reflect.TypeOf((*float32)(nil)).Elem(), &lossyFloat32Encoder{})
cfg.addEncoderToCache(reflect.TypeOf((*float64)(nil)).Elem(), &lossyFloat64Encoder{})
}
type htmlEscapedStringEncoder struct {
}
func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
str := *((*string)(ptr))
stream.WriteStringWithHTMLEscaped(str)
}
func (encoder *htmlEscapedStringEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return *((*string)(ptr)) == ""
}
func (cfg *frozenConfig) escapeHTML() {
cfg.addEncoderToCache(reflect.TypeOf((*string)(nil)).Elem(), &htmlEscapedStringEncoder{})
}
func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) {
done := false
for !done {
ptr := atomic.LoadPointer(&cfg.decoderCache)
cache := *(*map[reflect.Type]ValDecoder)(ptr)
copied := map[reflect.Type]ValDecoder{}
for k, v := range cache {
copied[k] = v
}
copied[cacheKey] = decoder
done = atomic.CompareAndSwapPointer(&cfg.decoderCache, ptr, unsafe.Pointer(&copied))
}
}
func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) {
done := false
for !done {
ptr := atomic.LoadPointer(&cfg.encoderCache)
cache := *(*map[reflect.Type]ValEncoder)(ptr)
copied := map[reflect.Type]ValEncoder{}
for k, v := range cache {
copied[k] = v
}
copied[cacheKey] = encoder
done = atomic.CompareAndSwapPointer(&cfg.encoderCache, ptr, unsafe.Pointer(&copied))
}
}
func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder {
ptr := atomic.LoadPointer(&cfg.decoderCache)
cache := *(*map[reflect.Type]ValDecoder)(ptr)
return cache[cacheKey]
}
func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder {
ptr := atomic.LoadPointer(&cfg.encoderCache)
cache := *(*map[reflect.Type]ValEncoder)(ptr)
return cache[cacheKey]
}
func (cfg *frozenConfig) cleanDecoders() {
typeDecoders = map[string]ValDecoder{}
fieldDecoders = map[string]ValDecoder{}
*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
}
func (cfg *frozenConfig) cleanEncoders() {
typeEncoders = map[string]ValEncoder{}
fieldEncoders = map[string]ValEncoder{}
*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
}
func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) {
stream := cfg.BorrowStream(nil)
defer cfg.ReturnStream(stream)
stream.WriteVal(v)
if stream.Error != nil {
return "", stream.Error
}
return string(stream.Buffer()), nil
}
func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) {
stream := cfg.BorrowStream(nil)
defer cfg.ReturnStream(stream)
stream.WriteVal(v)
if stream.Error != nil {
return nil, stream.Error
}
result := stream.Buffer()
copied := make([]byte, len(result))
copy(copied, result)
return copied, nil
}
func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
if prefix != "" {
panic("prefix is not supported")
}
for _, r := range indent {
if r != ' ' {
panic("indent can only be space")
}
}
newCfg := cfg.configBeforeFrozen
newCfg.IndentionStep = len(indent)
return newCfg.Froze().Marshal(v)
}
func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
data := []byte(str)
data = data[:lastNotSpacePos(data)]
iter := cfg.BorrowIterator(data)
defer cfg.ReturnIterator(iter)
iter.ReadVal(v)
if iter.head == iter.tail {
iter.loadMore()
}
if iter.Error == io.EOF {
return nil
}
if iter.Error == nil {
iter.ReportError("UnmarshalFromString", "there are bytes left after unmarshal")
}
return iter.Error
}
func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
iter := cfg.BorrowIterator(data)
defer cfg.ReturnIterator(iter)
return locatePath(iter, path)
}
func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
data = data[:lastNotSpacePos(data)]
iter := cfg.BorrowIterator(data)
defer cfg.ReturnIterator(iter)
typ := reflect.TypeOf(v)
if typ.Kind() != reflect.Ptr {
// return non-pointer error
return errors.New("the second param must be ptr type")
}
iter.ReadVal(v)
if iter.head == iter.tail {
iter.loadMore()
}
if iter.Error == io.EOF {
return nil
}
if iter.Error == nil {
iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
}
return iter.Error
}
func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
stream := NewStream(cfg, writer, 512)
return &Encoder{stream}
}
func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
iter := Parse(cfg, reader, 512)
return &Decoder{iter}
}
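
Froze turns a Config into an immutable API value that is meant to be built once and reused; the package-level Marshal/Unmarshal in feature_adapter.go simply delegate to ConfigDefault. A sketch:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Frozen once at startup; the resulting API is safe to share.
var json = jsoniter.Config{
	EscapeHTML:  true,
	SortMapKeys: true, // deterministic map key order, like encoding/json
}.Froze()

func main() {
	out, _ := json.MarshalToString(map[string]int{"b": 2, "a": 1})
	fmt.Println(out) // {"a":1,"b":2}
}
```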

307
vendor/github.com/json-iterator/go/feature_iter.go generated vendored Normal file
View File

@@ -0,0 +1,307 @@
package jsoniter
import (
"encoding/json"
"fmt"
"io"
)
// ValueType is the type of a JSON element
type ValueType int
const (
// InvalidValue invalid JSON element
InvalidValue ValueType = iota
// StringValue JSON element "string"
StringValue
// NumberValue JSON element 100 or 0.10
NumberValue
// NilValue JSON element null
NilValue
// BoolValue JSON element true or false
BoolValue
// ArrayValue JSON element []
ArrayValue
// ObjectValue JSON element {}
ObjectValue
)
var hexDigits []byte
var valueTypes []ValueType
func init() {
hexDigits = make([]byte, 256)
for i := 0; i < len(hexDigits); i++ {
hexDigits[i] = 255
}
for i := '0'; i <= '9'; i++ {
hexDigits[i] = byte(i - '0')
}
for i := 'a'; i <= 'f'; i++ {
hexDigits[i] = byte((i - 'a') + 10)
}
for i := 'A'; i <= 'F'; i++ {
hexDigits[i] = byte((i - 'A') + 10)
}
valueTypes = make([]ValueType, 256)
for i := 0; i < len(valueTypes); i++ {
valueTypes[i] = InvalidValue
}
valueTypes['"'] = StringValue
valueTypes['-'] = NumberValue
valueTypes['0'] = NumberValue
valueTypes['1'] = NumberValue
valueTypes['2'] = NumberValue
valueTypes['3'] = NumberValue
valueTypes['4'] = NumberValue
valueTypes['5'] = NumberValue
valueTypes['6'] = NumberValue
valueTypes['7'] = NumberValue
valueTypes['8'] = NumberValue
valueTypes['9'] = NumberValue
valueTypes['t'] = BoolValue
valueTypes['f'] = BoolValue
valueTypes['n'] = NilValue
valueTypes['['] = ArrayValue
valueTypes['{'] = ObjectValue
}
// Iterator is an io.Reader-like object with JSON-specific read functions.
// Errors are not returned as return values; they are stored in the Error field of the iterator instance.
type Iterator struct {
cfg *frozenConfig
reader io.Reader
buf []byte
head int
tail int
captureStartedAt int
captured []byte
Error error
}
// NewIterator creates an empty Iterator instance
func NewIterator(cfg API) *Iterator {
return &Iterator{
cfg: cfg.(*frozenConfig),
reader: nil,
buf: nil,
head: 0,
tail: 0,
}
}
// Parse creates an Iterator instance from io.Reader
func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
return &Iterator{
cfg: cfg.(*frozenConfig),
reader: reader,
buf: make([]byte, bufSize),
head: 0,
tail: 0,
}
}
// ParseBytes creates an Iterator instance from byte array
func ParseBytes(cfg API, input []byte) *Iterator {
return &Iterator{
cfg: cfg.(*frozenConfig),
reader: nil,
buf: input,
head: 0,
tail: len(input),
}
}
// ParseString creates an Iterator instance from string
func ParseString(cfg API, input string) *Iterator {
return ParseBytes(cfg, []byte(input))
}
// Pool returns a pool that can provide more iterators with the same configuration
func (iter *Iterator) Pool() IteratorPool {
return iter.cfg
}
// Reset reuses the iterator instance by specifying another reader
func (iter *Iterator) Reset(reader io.Reader) *Iterator {
iter.reader = reader
iter.head = 0
iter.tail = 0
return iter
}
// ResetBytes reuses the iterator instance by specifying another byte array as input
func (iter *Iterator) ResetBytes(input []byte) *Iterator {
iter.reader = nil
iter.buf = input
iter.head = 0
iter.tail = len(input)
return iter
}
// WhatIsNext reports the ValueType of the next JSON element without consuming it
func (iter *Iterator) WhatIsNext() ValueType {
valueType := valueTypes[iter.nextToken()]
iter.unreadByte()
return valueType
}
func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool {
for i := iter.head; i < iter.tail; i++ {
c := iter.buf[i]
switch c {
case ' ', '\n', '\t', '\r':
continue
}
iter.head = i
return false
}
return true
}
func (iter *Iterator) isObjectEnd() bool {
c := iter.nextToken()
if c == ',' {
return false
}
if c == '}' {
return true
}
iter.ReportError("isObjectEnd", "object ended prematurely")
return true
}
func (iter *Iterator) nextToken() byte {
// a variation of skip whitespaces, returning the next non-whitespace token
for {
for i := iter.head; i < iter.tail; i++ {
c := iter.buf[i]
switch c {
case ' ', '\n', '\t', '\r':
continue
}
iter.head = i + 1
return c
}
if !iter.loadMore() {
return 0
}
}
}
// ReportError records an error on the iterator instance, together with the current position.
func (iter *Iterator) ReportError(operation string, msg string) {
if iter.Error != nil {
if iter.Error != io.EOF {
return
}
}
peekStart := iter.head - 10
if peekStart < 0 {
peekStart = 0
}
iter.Error = fmt.Errorf("%s: %s, parsing %v ...%s... at %s", operation, msg, iter.head,
string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
}
// CurrentBuffer gets the current buffer as a string, for debugging purposes
func (iter *Iterator) CurrentBuffer() string {
peekStart := iter.head - 10
if peekStart < 0 {
peekStart = 0
}
return fmt.Sprintf("parsing %v ...|%s|... at %s", iter.head,
string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
}
func (iter *Iterator) readByte() (ret byte) {
if iter.head == iter.tail {
if iter.loadMore() {
ret = iter.buf[iter.head]
iter.head++
return ret
}
return 0
}
ret = iter.buf[iter.head]
iter.head++
return ret
}
func (iter *Iterator) loadMore() bool {
if iter.reader == nil {
if iter.Error == nil {
iter.head = iter.tail
iter.Error = io.EOF
}
return false
}
if iter.captured != nil {
iter.captured = append(iter.captured,
iter.buf[iter.captureStartedAt:iter.tail]...)
iter.captureStartedAt = 0
}
for {
n, err := iter.reader.Read(iter.buf)
if n == 0 {
if err != nil {
if iter.Error == nil {
iter.Error = err
}
return false
}
} else {
iter.head = 0
iter.tail = n
return true
}
}
}
func (iter *Iterator) unreadByte() {
if iter.Error != nil {
return
}
iter.head--
return
}
// Read reads the next JSON element as a generic interface{}.
func (iter *Iterator) Read() interface{} {
valueType := iter.WhatIsNext()
switch valueType {
case StringValue:
return iter.ReadString()
case NumberValue:
if iter.cfg.configBeforeFrozen.UseNumber {
return json.Number(iter.readNumberAsString())
}
return iter.ReadFloat64()
case NilValue:
iter.skipFourBytes('n', 'u', 'l', 'l')
return nil
case BoolValue:
return iter.ReadBool()
case ArrayValue:
arr := []interface{}{}
iter.ReadArrayCB(func(iter *Iterator) bool {
var elem interface{}
iter.ReadVal(&elem)
arr = append(arr, elem)
return true
})
return arr
case ObjectValue:
obj := map[string]interface{}{}
iter.ReadMapCB(func(iter *Iterator, field string) bool {
var elem interface{}
iter.ReadVal(&elem)
obj[field] = elem
return true
})
return obj
default:
iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType))
return nil
}
}
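
A sketch of driving the Iterator directly; per the type's doc comment, errors accumulate on iter.Error instead of being returned:

```go
package main

import (
	"fmt"
	"io"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1, "two", null]`)
	for iter.ReadArray() {
		switch iter.WhatIsNext() {
		case jsoniter.NumberValue:
			fmt.Println(iter.ReadFloat64())
		case jsoniter.StringValue:
			fmt.Println(iter.ReadString())
		default:
			fmt.Println(iter.Read()) // generic fallback, here for null
		}
	}
	if iter.Error != nil && iter.Error != io.EOF {
		fmt.Println("parse error:", iter.Error)
	}
}
```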

View File

@@ -0,0 +1,58 @@
package jsoniter
// ReadArray reads an array delimiter and reports whether the array has more elements to read.
func (iter *Iterator) ReadArray() (ret bool) {
c := iter.nextToken()
switch c {
case 'n':
iter.skipThreeBytes('u', 'l', 'l')
return false // null
case '[':
c = iter.nextToken()
if c != ']' {
iter.unreadByte()
return true
}
return false
case ']':
return false
case ',':
return true
default:
iter.ReportError("ReadArray", "expect [ or , or ] or n, but found: "+string([]byte{c}))
return
}
}
// ReadArrayCB reads an array, invoking the callback once per element
func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
c := iter.nextToken()
if c == '[' {
c = iter.nextToken()
if c != ']' {
iter.unreadByte()
if !callback(iter) {
return false
}
c = iter.nextToken()
for c == ',' {
if !callback(iter) {
return false
}
c = iter.nextToken()
}
if c != ']' {
iter.ReportError("ReadArrayCB", "expect ] in the end")
return false
}
return true
}
return true
}
if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l')
return true // null
}
iter.ReportError("ReadArrayCB", "expect [ or n, but found: "+string([]byte{c}))
return false
}
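
ReadArrayCB is the streaming counterpart to ReadArray: the callback runs once per element, and returning false stops the iteration early. A sketch, assuming the jsoniter and fmt imports:

```go
iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[10, 20, 30]`)
sum := 0
iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool {
	sum += iter.ReadInt()
	return true // return false to stop early
})
fmt.Println(sum) // 60
```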

View File

@@ -0,0 +1,341 @@
package jsoniter
import (
"io"
"math/big"
"strconv"
"strings"
"unsafe"
)
var floatDigits []int8
const invalidCharForNumber = int8(-1)
const endOfNumber = int8(-2)
const dotInNumber = int8(-3)
func init() {
floatDigits = make([]int8, 256)
for i := 0; i < len(floatDigits); i++ {
floatDigits[i] = invalidCharForNumber
}
for i := int8('0'); i <= int8('9'); i++ {
floatDigits[i] = i - int8('0')
}
floatDigits[','] = endOfNumber
floatDigits[']'] = endOfNumber
floatDigits['}'] = endOfNumber
floatDigits[' '] = endOfNumber
floatDigits['\t'] = endOfNumber
floatDigits['\n'] = endOfNumber
floatDigits['.'] = dotInNumber
}
// ReadBigFloat reads a big.Float
func (iter *Iterator) ReadBigFloat() (ret *big.Float) {
str := iter.readNumberAsString()
if iter.Error != nil && iter.Error != io.EOF {
return nil
}
prec := 64
if len(str) > prec {
prec = len(str)
}
val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero)
if err != nil {
iter.Error = err
return nil
}
return val
}
// ReadBigInt reads a big.Int
func (iter *Iterator) ReadBigInt() (ret *big.Int) {
str := iter.readNumberAsString()
if iter.Error != nil && iter.Error != io.EOF {
return nil
}
ret = big.NewInt(0)
var success bool
ret, success = ret.SetString(str, 10)
if !success {
iter.ReportError("ReadBigInt", "invalid big int")
return nil
}
return ret
}
// ReadFloat32 reads a float32
func (iter *Iterator) ReadFloat32() (ret float32) {
c := iter.nextToken()
if c == '-' {
return -iter.readPositiveFloat32()
}
iter.unreadByte()
return iter.readPositiveFloat32()
}
func (iter *Iterator) readPositiveFloat32() (ret float32) {
value := uint64(0)
c := byte(' ')
i := iter.head
// first char
if i == iter.tail {
return iter.readFloat32SlowPath()
}
c = iter.buf[i]
i++
ind := floatDigits[c]
switch ind {
case invalidCharForNumber:
return iter.readFloat32SlowPath()
case endOfNumber:
iter.ReportError("readFloat32", "empty number")
return
case dotInNumber:
iter.ReportError("readFloat32", "leading dot is invalid")
return
case 0:
if i == iter.tail {
return iter.readFloat32SlowPath()
}
c = iter.buf[i]
switch c {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
iter.ReportError("readFloat32", "leading zero is invalid")
return
}
}
value = uint64(ind)
// chars before dot
non_decimal_loop:
for ; i < iter.tail; i++ {
c = iter.buf[i]
ind := floatDigits[c]
switch ind {
case invalidCharForNumber:
return iter.readFloat32SlowPath()
case endOfNumber:
iter.head = i
return float32(value)
case dotInNumber:
break non_decimal_loop
}
if value > uint64SafeToMultiple10 {
return iter.readFloat32SlowPath()
}
value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
}
// chars after dot
if c == '.' {
i++
decimalPlaces := 0
if i == iter.tail {
return iter.readFloat32SlowPath()
}
for ; i < iter.tail; i++ {
c = iter.buf[i]
ind := floatDigits[c]
switch ind {
case endOfNumber:
if decimalPlaces > 0 && decimalPlaces < len(pow10) {
iter.head = i
return float32(float64(value) / float64(pow10[decimalPlaces]))
}
// too many decimal places
return iter.readFloat32SlowPath()
case invalidCharForNumber:
fallthrough
case dotInNumber:
return iter.readFloat32SlowPath()
}
decimalPlaces++
if value > uint64SafeToMultiple10 {
return iter.readFloat32SlowPath()
}
value = (value << 3) + (value << 1) + uint64(ind)
}
}
return iter.readFloat32SlowPath()
}
func (iter *Iterator) readNumberAsString() (ret string) {
strBuf := [16]byte{}
str := strBuf[0:0]
load_loop:
for {
for i := iter.head; i < iter.tail; i++ {
c := iter.buf[i]
switch c {
case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
str = append(str, c)
continue
default:
iter.head = i
break load_loop
}
}
if !iter.loadMore() {
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
return
}
if len(str) == 0 {
iter.ReportError("readNumberAsString", "invalid number")
}
return *(*string)(unsafe.Pointer(&str))
}
func (iter *Iterator) readFloat32SlowPath() (ret float32) {
str := iter.readNumberAsString()
if iter.Error != nil && iter.Error != io.EOF {
return
}
errMsg := validateFloat(str)
if errMsg != "" {
iter.ReportError("readFloat32SlowPath", errMsg)
return
}
val, err := strconv.ParseFloat(str, 32)
if err != nil {
iter.Error = err
return
}
return float32(val)
}
// ReadFloat64 reads a float64
func (iter *Iterator) ReadFloat64() (ret float64) {
c := iter.nextToken()
if c == '-' {
return -iter.readPositiveFloat64()
}
iter.unreadByte()
return iter.readPositiveFloat64()
}
func (iter *Iterator) readPositiveFloat64() (ret float64) {
value := uint64(0)
c := byte(' ')
i := iter.head
// first char
if i == iter.tail {
return iter.readFloat64SlowPath()
}
c = iter.buf[i]
i++
ind := floatDigits[c]
switch ind {
case invalidCharForNumber:
return iter.readFloat64SlowPath()
case endOfNumber:
iter.ReportError("readFloat64", "empty number")
return
case dotInNumber:
iter.ReportError("readFloat64", "leading dot is invalid")
return
case 0:
if i == iter.tail {
return iter.readFloat64SlowPath()
}
c = iter.buf[i]
switch c {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
iter.ReportError("readFloat64", "leading zero is invalid")
return
}
}
value = uint64(ind)
// chars before dot
non_decimal_loop:
for ; i < iter.tail; i++ {
c = iter.buf[i]
ind := floatDigits[c]
switch ind {
case invalidCharForNumber:
return iter.readFloat64SlowPath()
case endOfNumber:
iter.head = i
return float64(value)
case dotInNumber:
break non_decimal_loop
}
if value > uint64SafeToMultiple10 {
return iter.readFloat64SlowPath()
}
value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
}
// chars after dot
if c == '.' {
i++
decimalPlaces := 0
if i == iter.tail {
return iter.readFloat64SlowPath()
}
for ; i < iter.tail; i++ {
c = iter.buf[i]
ind := floatDigits[c]
switch ind {
case endOfNumber:
if decimalPlaces > 0 && decimalPlaces < len(pow10) {
iter.head = i
return float64(value) / float64(pow10[decimalPlaces])
}
// too many decimal places
return iter.readFloat64SlowPath()
case invalidCharForNumber:
fallthrough
case dotInNumber:
return iter.readFloat64SlowPath()
}
decimalPlaces++
if value > uint64SafeToMultiple10 {
return iter.readFloat64SlowPath()
}
value = (value << 3) + (value << 1) + uint64(ind)
}
}
return iter.readFloat64SlowPath()
}
func (iter *Iterator) readFloat64SlowPath() (ret float64) {
str := iter.readNumberAsString()
if iter.Error != nil && iter.Error != io.EOF {
return
}
errMsg := validateFloat(str)
if errMsg != "" {
iter.ReportError("readFloat64SlowPath", errMsg)
return
}
val, err := strconv.ParseFloat(str, 64)
if err != nil {
iter.Error = err
return
}
return val
}
func validateFloat(str string) string {
// strconv.ParseFloat is not validating `1.` or `1.e1`
if len(str) == 0 {
return "empty number"
}
if str[0] == '-' {
return "-- is not valid"
}
dotPos := strings.IndexByte(str, '.')
if dotPos != -1 {
if dotPos == len(str)-1 {
return "dot can not be last character"
}
switch str[dotPos+1] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
return "missing digit after dot"
}
}
return ""
}
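Note: the fast path above parses a simple decimal without strconv at all: digits accumulate into a uint64 and the fraction is restored with a single division by a power of ten, falling back to the slow path on anything unusual. A minimal standalone sketch of the same strategy (parseSimpleFloat is a hypothetical name, not part of this package):

package main

import (
    "fmt"
    "strconv"
    "strings"
)

var pow10 = [...]uint64{1, 10, 100, 1000, 10000, 100000, 1000000}

// parseSimpleFloat mirrors the fast path: integer accumulation plus one
// division. Anything unusual (exponent, overflow, too many decimal
// places) falls back to strconv, just as readPositiveFloat64 falls back
// to its slow path.
func parseSimpleFloat(s string) (float64, error) {
    dot := strings.IndexByte(s, '.')
    if dot == -1 || strings.ContainsAny(s, "eE") {
        return strconv.ParseFloat(s, 64)
    }
    decimals := len(s) - dot - 1
    if decimals <= 0 || decimals >= len(pow10) {
        return strconv.ParseFloat(s, 64)
    }
    var value uint64
    for i := 0; i < len(s); i++ {
        if i == dot {
            continue
        }
        c := s[i]
        if c < '0' || c > '9' {
            return strconv.ParseFloat(s, 64) // sign, garbage, etc.
        }
        if value > (1<<64-1)/10-1 { // next *10 could overflow: give up
            return strconv.ParseFloat(s, 64)
        }
        value = value*10 + uint64(c-'0')
    }
    return float64(value) / float64(pow10[decimals]), nil
}

func main() {
    f, _ := parseSimpleFloat("3.25")
    fmt.Println(f) // 3.25
}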

vendor/github.com/json-iterator/go/feature_iter_int.go generated vendored Normal file

@ -0,0 +1,258 @@
package jsoniter
import (
"math"
"strconv"
)
var intDigits []int8
const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
func init() {
intDigits = make([]int8, 256)
for i := 0; i < len(intDigits); i++ {
intDigits[i] = invalidCharForNumber
}
for i := int8('0'); i <= int8('9'); i++ {
intDigits[i] = i - int8('0')
}
}
// ReadUint reads a uint
func (iter *Iterator) ReadUint() uint {
return uint(iter.ReadUint64())
}
// ReadInt reads an int
func (iter *Iterator) ReadInt() int {
return int(iter.ReadInt64())
}
// ReadInt8 reads an int8
func (iter *Iterator) ReadInt8() (ret int8) {
c := iter.nextToken()
if c == '-' {
val := iter.readUint32(iter.readByte())
if val > math.MaxInt8+1 {
iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
return
}
return -int8(val)
}
val := iter.readUint32(c)
if val > math.MaxInt8 {
iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
return
}
return int8(val)
}
// ReadUint8 reads a uint8
func (iter *Iterator) ReadUint8() (ret uint8) {
val := iter.readUint32(iter.nextToken())
if val > math.MaxUint8 {
iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10))
return
}
return uint8(val)
}
// ReadInt16 reads an int16
func (iter *Iterator) ReadInt16() (ret int16) {
c := iter.nextToken()
if c == '-' {
val := iter.readUint32(iter.readByte())
if val > math.MaxInt16+1 {
iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
return
}
return -int16(val)
}
val := iter.readUint32(c)
if val > math.MaxInt16 {
iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
return
}
return int16(val)
}
// ReadUint16 reads a uint16
func (iter *Iterator) ReadUint16() (ret uint16) {
val := iter.readUint32(iter.nextToken())
if val > math.MaxUint16 {
iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
return
}
return uint16(val)
}
// ReadInt32 reads an int32
func (iter *Iterator) ReadInt32() (ret int32) {
c := iter.nextToken()
if c == '-' {
val := iter.readUint32(iter.readByte())
if val > math.MaxInt32+1 {
iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
return
}
return -int32(val)
}
val := iter.readUint32(c)
if val > math.MaxInt32 {
iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
return
}
return int32(val)
}
// ReadUint32 reads a uint32
func (iter *Iterator) ReadUint32() (ret uint32) {
return iter.readUint32(iter.nextToken())
}
func (iter *Iterator) readUint32(c byte) (ret uint32) {
ind := intDigits[c]
if ind == 0 {
return 0 // single zero
}
if ind == invalidCharForNumber {
iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)}))
return
}
value := uint32(ind)
if iter.tail-iter.head > 10 {
i := iter.head
ind2 := intDigits[iter.buf[i]]
if ind2 == invalidCharForNumber {
iter.head = i
return value
}
i++
ind3 := intDigits[iter.buf[i]]
if ind3 == invalidCharForNumber {
iter.head = i
return value*10 + uint32(ind2)
}
//iter.head = i + 1
//value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
i++
ind4 := intDigits[iter.buf[i]]
if ind4 == invalidCharForNumber {
iter.head = i
return value*100 + uint32(ind2)*10 + uint32(ind3)
}
i++
ind5 := intDigits[iter.buf[i]]
if ind5 == invalidCharForNumber {
iter.head = i
return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
}
i++
ind6 := intDigits[iter.buf[i]]
if ind6 == invalidCharForNumber {
iter.head = i
return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
}
i++
ind7 := intDigits[iter.buf[i]]
if ind7 == invalidCharForNumber {
iter.head = i
return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
}
i++
ind8 := intDigits[iter.buf[i]]
if ind8 == invalidCharForNumber {
iter.head = i
return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
}
i++
ind9 := intDigits[iter.buf[i]]
value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
iter.head = i
if ind9 == invalidCharForNumber {
return value
}
}
for {
for i := iter.head; i < iter.tail; i++ {
ind = intDigits[iter.buf[i]]
if ind == invalidCharForNumber {
iter.head = i
return value
}
if value > uint32SafeToMultiply10 {
value2 := (value << 3) + (value << 1) + uint32(ind)
if value2 < value {
iter.ReportError("readUint32", "overflow")
return
}
value = value2
continue
}
value = (value << 3) + (value << 1) + uint32(ind)
}
if !iter.loadMore() {
return value
}
}
}
// ReadInt64 reads an int64
func (iter *Iterator) ReadInt64() (ret int64) {
c := iter.nextToken()
if c == '-' {
val := iter.readUint64(iter.readByte())
if val > math.MaxInt64+1 {
iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
return
}
return -int64(val)
}
val := iter.readUint64(c)
if val > math.MaxInt64 {
iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
return
}
return int64(val)
}
// ReadUint64 reads a uint64
func (iter *Iterator) ReadUint64() uint64 {
return iter.readUint64(iter.nextToken())
}
func (iter *Iterator) readUint64(c byte) (ret uint64) {
ind := intDigits[c]
if ind == 0 {
return 0 // single zero
}
if ind == invalidCharForNumber {
iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)}))
return
}
value := uint64(ind)
for {
for i := iter.head; i < iter.tail; i++ {
ind = intDigits[iter.buf[i]]
if ind == invalidCharForNumber {
iter.head = i
return value
}
if value > uint64SafeToMultiple10 {
value2 := (value << 3) + (value << 1) + uint64(ind)
if value2 < value {
iter.ReportError("readUint64", "overflow")
return
}
value = value2
continue
}
value = (value << 3) + (value << 1) + uint64(ind)
}
if !iter.loadMore() {
return value
}
}
}
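Note: both integer readers rely on the same guard: keep multiplying by ten only while a further step is provably safe (value <= max/10 - 1), and past that point detect overflow by checking that the new value did not wrap below the old one. The idiom as a standalone sketch (accumulate is a hypothetical helper):

package main

import (
    "errors"
    "fmt"
)

const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1

// accumulate folds one decimal digit into value, reporting overflow.
// (value<<3)+(value<<1) is value*10 spelled with shifts, as above.
func accumulate(value uint64, digit byte) (uint64, error) {
    next := (value << 3) + (value << 1) + uint64(digit-'0')
    if value > uint64SafeToMultiple10 && next < value {
        return 0, errors.New("overflow")
    }
    return next, nil
}

func main() {
    var v uint64
    for _, c := range []byte("18446744073709551615") { // max uint64
        v, _ = accumulate(v, c)
    }
    fmt.Println(v) // 18446744073709551615
}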


@ -0,0 +1,212 @@
package jsoniter
import (
"fmt"
"unicode"
"unsafe"
)
// ReadObject reads one field from the object.
// If object ended, returns empty string.
// Otherwise, returns the field name.
func (iter *Iterator) ReadObject() (ret string) {
c := iter.nextToken()
switch c {
case 'n':
iter.skipThreeBytes('u', 'l', 'l')
return "" // null
case '{':
c = iter.nextToken()
if c == '"' {
iter.unreadByte()
return string(iter.readObjectFieldAsBytes())
}
if c == '}' {
return "" // end of object
}
iter.ReportError("ReadObject", `expect " after {`)
return
case ',':
return string(iter.readObjectFieldAsBytes())
case '}':
return "" // end of object
default:
iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
return
}
}
func (iter *Iterator) readFieldHash() int32 {
hash := int64(0x811c9dc5)
c := iter.nextToken()
if c == '"' {
for {
for i := iter.head; i < iter.tail; i++ {
// require ascii string and no escape
b := iter.buf[i]
if 'A' <= b && b <= 'Z' {
b += 'a' - 'A'
}
if b == '"' {
iter.head = i + 1
c = iter.nextToken()
if c != ':' {
iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
}
return int32(hash)
}
hash ^= int64(b)
hash *= 0x1000193
}
if !iter.loadMore() {
iter.ReportError("readFieldHash", `incomplete field name`)
return 0
}
}
}
iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
return 0
}
func calcHash(str string) int32 {
hash := int64(0x811c9dc5)
for _, b := range str {
hash ^= int64(unicode.ToLower(b))
hash *= 0x1000193
}
return int32(hash)
}
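Note: readFieldHash and calcHash compute the same case-folded FNV-1a hash (offset basis 0x811c9dc5, prime 0x1000193), so a field name read from the wire can be matched against a struct field without allocating a string. A compact restatement of the byte-wise variant used in readFieldHash (ASCII folding only):

// fnv1aLower hashes str with FNV-1a after ASCII lower-casing; for
// ASCII input it agrees with calcHash above.
func fnv1aLower(str string) int32 {
    hash := int64(0x811c9dc5)
    for i := 0; i < len(str); i++ {
        b := str[i]
        if 'A' <= b && b <= 'Z' {
            b += 'a' - 'A'
        }
        hash ^= int64(b)
        hash *= 0x1000193
    }
    return int32(hash)
}

// fnv1aLower("Name") == fnv1aLower("name") == fnv1aLower("NAME")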
// ReadObjectCB reads an object with a callback; the key must be ASCII-only and the field name is not copied
func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
c := iter.nextToken()
if c == '{' {
c = iter.nextToken()
if c == '"' {
iter.unreadByte()
field := iter.readObjectFieldAsBytes()
if !callback(iter, *(*string)(unsafe.Pointer(&field))) {
return false
}
c = iter.nextToken()
for c == ',' {
field = iter.readObjectFieldAsBytes()
if !callback(iter, *(*string)(unsafe.Pointer(&field))) {
return false
}
c = iter.nextToken()
}
if c != '}' {
iter.ReportError("ReadObjectCB", `object not ended with }`)
return false
}
return true
}
if c == '}' {
return true
}
iter.ReportError("ReadObjectCB", `expect " after }`)
return false
}
if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l')
return true // null
}
iter.ReportError("ReadObjectCB", `expect { or n`)
return false
}
// ReadMapCB reads a map with a callback; the key can be any string
func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
c := iter.nextToken()
if c == '{' {
c = iter.nextToken()
if c == '"' {
iter.unreadByte()
field := iter.ReadString()
if iter.nextToken() != ':' {
iter.ReportError("ReadMapCB", "expect : after object field")
return false
}
if !callback(iter, field) {
return false
}
c = iter.nextToken()
for c == ',' {
field = iter.ReadString()
if iter.nextToken() != ':' {
iter.ReportError("ReadMapCB", "expect : after object field")
return false
}
if !callback(iter, field) {
return false
}
c = iter.nextToken()
}
if c != '}' {
iter.ReportError("ReadMapCB", `object not ended with }`)
return false
}
return true
}
if c == '}' {
return true
}
iter.ReportError("ReadMapCB", `expect " after }`)
return false
}
if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l')
return true // null
}
iter.ReportError("ReadMapCB", `expect { or n`)
return false
}
func (iter *Iterator) readObjectStart() bool {
c := iter.nextToken()
if c == '{' {
c = iter.nextToken()
if c == '}' {
return false
}
iter.unreadByte()
return true
} else if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l')
return false
}
iter.ReportError("readObjectStart", "expect { or n")
return false
}
func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
str := iter.ReadStringAsSlice()
if iter.skipWhitespacesWithoutLoadMore() {
if ret == nil {
ret = make([]byte, len(str))
copy(ret, str)
}
if !iter.loadMore() {
return
}
}
if iter.buf[iter.head] != ':' {
iter.ReportError("readObjectFieldAsBytes", "expect : after object field")
return
}
iter.head++
if iter.skipWhitespacesWithoutLoadMore() {
if ret == nil {
ret = make([]byte, len(str))
copy(ret, str)
}
if !iter.loadMore() {
return
}
}
if ret == nil {
return str
}
return ret
}
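Note: the copying dance in readObjectFieldAsBytes exists because ReadStringAsSlice returns a slice aliased into the iterator's reusable buffer: if loadMore refills that buffer, the field bytes are silently overwritten. A short demonstration of the aliasing hazard:

package main

import "fmt"

func main() {
    buf := []byte(`"name":1`)
    field := buf[1:5]             // aliases buf, like ReadStringAsSlice
    copy(buf, []byte(`"oops":2`)) // a buffer refill reuses the same array
    fmt.Println(string(field))    // prints "oops", not "name"
}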

vendor/github.com/json-iterator/go/feature_iter_skip.go generated vendored Normal file

@ -0,0 +1,127 @@
package jsoniter
import "fmt"
// ReadNil reads a JSON null if present and
// reports whether a null was read
func (iter *Iterator) ReadNil() (ret bool) {
c := iter.nextToken()
if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l') // null
return true
}
iter.unreadByte()
return false
}
// ReadBool reads a JSON true/false literal as a bool
func (iter *Iterator) ReadBool() (ret bool) {
c := iter.nextToken()
if c == 't' {
iter.skipThreeBytes('r', 'u', 'e')
return true
}
if c == 'f' {
iter.skipFourBytes('a', 'l', 's', 'e')
return false
}
iter.ReportError("ReadBool", "expect t or f")
return
}
// SkipAndReturnBytes skips the next JSON element and returns its content as []byte.
// The []byte can be kept; it is a copy of the data.
func (iter *Iterator) SkipAndReturnBytes() []byte {
iter.startCapture(iter.head)
iter.Skip()
return iter.stopCapture()
}
type captureBuffer struct {
startedAt int
captured []byte
}
func (iter *Iterator) startCapture(captureStartedAt int) {
if iter.captured != nil {
panic("already in capture mode")
}
iter.captureStartedAt = captureStartedAt
iter.captured = make([]byte, 0, 32)
}
func (iter *Iterator) stopCapture() []byte {
if iter.captured == nil {
panic("not in capture mode")
}
captured := iter.captured
remaining := iter.buf[iter.captureStartedAt:iter.head]
iter.captureStartedAt = -1
iter.captured = nil
if len(captured) == 0 {
return remaining
}
captured = append(captured, remaining...)
return captured
}
// Skip skips a JSON value and positions the iterator at the next value
func (iter *Iterator) Skip() {
c := iter.nextToken()
switch c {
case '"':
iter.skipString()
case 'n':
iter.skipThreeBytes('u', 'l', 'l') // null
case 't':
iter.skipThreeBytes('r', 'u', 'e') // true
case 'f':
iter.skipFourBytes('a', 'l', 's', 'e') // false
case '0':
iter.unreadByte()
iter.ReadFloat32()
case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
iter.skipNumber()
case '[':
iter.skipArray()
case '{':
iter.skipObject()
default:
iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c))
return
}
}
func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) {
if iter.readByte() != b1 {
iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
return
}
if iter.readByte() != b2 {
iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
return
}
if iter.readByte() != b3 {
iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
return
}
if iter.readByte() != b4 {
iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
return
}
}
func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) {
if iter.readByte() != b1 {
iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
return
}
if iter.readByte() != b2 {
iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
return
}
if iter.readByte() != b3 {
iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
return
}
}
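Note: skipThreeBytes and skipFourBytes match the fixed literals null, true and false one byte at a time, failing fast on the first mismatch. The same idea as a standalone helper over an io.ByteReader (expectLiteral is a hypothetical name):

import (
    "fmt"
    "io"
)

// expectLiteral consumes len(lit) bytes and fails on the first mismatch.
func expectLiteral(r io.ByteReader, lit string) error {
    for i := 0; i < len(lit); i++ {
        b, err := r.ReadByte()
        if err != nil {
            return err
        }
        if b != lit[i] {
            return fmt.Errorf("expect %s", lit)
        }
    }
    return nil
}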


@ -0,0 +1,144 @@
//+build jsoniter-sloppy
package jsoniter
// sloppy but faster implementation that does not validate the input JSON
func (iter *Iterator) skipNumber() {
for {
for i := iter.head; i < iter.tail; i++ {
c := iter.buf[i]
switch c {
case ' ', '\n', '\r', '\t', ',', '}', ']':
iter.head = i
return
}
}
if !iter.loadMore() {
return
}
}
}
func (iter *Iterator) skipArray() {
level := 1
for {
for i := iter.head; i < iter.tail; i++ {
switch iter.buf[i] {
case '"': // If inside string, skip it
iter.head = i + 1
iter.skipString()
i = iter.head - 1 // it will be i++ soon
case '[': // If open symbol, increase level
level++
case ']': // If close symbol, decrease level
level--
// If we have returned to the original level, we're done
if level == 0 {
iter.head = i + 1
return
}
}
}
if !iter.loadMore() {
iter.ReportError("skipObject", "incomplete array")
return
}
}
}
func (iter *Iterator) skipObject() {
level := 1
for {
for i := iter.head; i < iter.tail; i++ {
switch iter.buf[i] {
case '"': // If inside string, skip it
iter.head = i + 1
iter.skipString()
i = iter.head - 1 // it will be i++ soon
case '{': // If open symbol, increase level
level++
case '}': // If close symbol, decrease level
level--
// If we have returned to the original level, we're done
if level == 0 {
iter.head = i + 1
return
}
}
}
if !iter.loadMore() {
iter.ReportError("skipObject", "incomplete object")
return
}
}
}
func (iter *Iterator) skipString() {
for {
end, escaped := iter.findStringEnd()
if end == -1 {
if !iter.loadMore() {
iter.ReportError("skipString", "incomplete string")
return
}
if escaped {
iter.head = 1 // skip the first char as last char read is \
}
} else {
iter.head = end
return
}
}
}
// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go
// Tries to find the end of a string.
// Supports strings containing escaped quote symbols.
func (iter *Iterator) findStringEnd() (int, bool) {
escaped := false
for i := iter.head; i < iter.tail; i++ {
c := iter.buf[i]
if c == '"' {
if !escaped {
return i + 1, false
}
j := i - 1
for {
if j < iter.head || iter.buf[j] != '\\' {
// even number of backslashes
// either end of buffer, or " found
return i + 1, true
}
j--
if j < iter.head || iter.buf[j] != '\\' {
// odd number of backslashes
// it is \" or \\\"
break
}
j--
}
} else if c == '\\' {
escaped = true
}
}
j := iter.tail - 1
for {
if j < iter.head || iter.buf[j] != '\\' {
// even number of backslashes
// either end of buffer, or " found
return -1, false // do not end with \
}
j--
if j < iter.head || iter.buf[j] != '\\' {
// odd number of backslashes
// it is \" or \\\"
break
}
j--
}
return -1, true // end with \
}
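Note: findStringEnd decides whether a quote is real by walking backwards and counting consecutive backslashes; an even count means the quote closes the string. The parity rule in isolation (illustrative helper, not part of this package):

// quoteEscaped reports whether the '"' at s[i] is escaped:
// an odd number of immediately preceding backslashes escapes it.
func quoteEscaped(s []byte, i int) bool {
    n := 0
    for j := i - 1; j >= 0 && s[j] == '\\'; j-- {
        n++
    }
    return n%2 == 1
}

// quoteEscaped([]byte(`ab\"`), 3)  == true  // \"  -> escaped
// quoteEscaped([]byte(`ab\\"`), 4) == false // \\" -> real closing quote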


@ -0,0 +1,89 @@
//+build !jsoniter-sloppy
package jsoniter
import "fmt"
func (iter *Iterator) skipNumber() {
if !iter.trySkipNumber() {
iter.unreadByte()
iter.ReadFloat32()
}
}
func (iter *Iterator) trySkipNumber() bool {
dotFound := false
for i := iter.head; i < iter.tail; i++ {
c := iter.buf[i]
switch c {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
case '.':
if dotFound {
iter.ReportError("validateNumber", `more than one dot found in number`)
return true // already failed
}
if i+1 == iter.tail {
return false
}
c = iter.buf[i+1]
switch c {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
iter.ReportError("validateNumber", `missing digit after dot`)
return true // already failed
}
dotFound = true
default:
switch c {
case ',', ']', '}', ' ', '\t', '\n', '\r':
if iter.head == i {
return false // if - without following digits
}
iter.head = i
return true // must be valid
}
return false // may be invalid
}
}
return false
}
func (iter *Iterator) skipString() {
if !iter.trySkipString() {
iter.unreadByte()
iter.ReadString()
}
}
func (iter *Iterator) trySkipString() bool {
for i := iter.head; i < iter.tail; i++ {
c := iter.buf[i]
if c == '"' {
iter.head = i + 1
return true // valid
} else if c == '\\' {
return false
} else if c < ' ' {
iter.ReportError("ReadString",
fmt.Sprintf(`invalid control character found: %d`, c))
return true // already failed
}
}
return false
}
func (iter *Iterator) skipObject() {
iter.unreadByte()
iter.ReadObjectCB(func(iter *Iterator, field string) bool {
iter.Skip()
return true
})
}
func (iter *Iterator) skipArray() {
iter.unreadByte()
iter.ReadArrayCB(func(iter *Iterator) bool {
iter.Skip()
return true
})
}


@ -0,0 +1,215 @@
package jsoniter
import (
"fmt"
"unicode/utf16"
)
// ReadString reads a string from the iterator
func (iter *Iterator) ReadString() (ret string) {
c := iter.nextToken()
if c == '"' {
for i := iter.head; i < iter.tail; i++ {
c := iter.buf[i]
if c == '"' {
ret = string(iter.buf[iter.head:i])
iter.head = i + 1
return ret
} else if c == '\\' {
break
} else if c < ' ' {
iter.ReportError("ReadString",
fmt.Sprintf(`invalid control character found: %d`, c))
return
}
}
return iter.readStringSlowPath()
} else if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l')
return ""
}
iter.ReportError("ReadString", `expects " or n`)
return
}
func (iter *Iterator) readStringSlowPath() (ret string) {
var str []byte
var c byte
for iter.Error == nil {
c = iter.readByte()
if c == '"' {
return string(str)
}
if c == '\\' {
c = iter.readByte()
str = iter.readEscapedChar(c, str)
} else {
str = append(str, c)
}
}
iter.ReportError("ReadString", "unexpected end of input")
return
}
func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte {
switch c {
case 'u':
r := iter.readU4()
if utf16.IsSurrogate(r) {
c = iter.readByte()
if iter.Error != nil {
return nil
}
if c != '\\' {
iter.unreadByte()
str = appendRune(str, r)
return str
}
c = iter.readByte()
if iter.Error != nil {
return nil
}
if c != 'u' {
str = appendRune(str, r)
return iter.readEscapedChar(c, str)
}
r2 := iter.readU4()
if iter.Error != nil {
return nil
}
combined := utf16.DecodeRune(r, r2)
if combined == '\uFFFD' {
str = appendRune(str, r)
str = appendRune(str, r2)
} else {
str = appendRune(str, combined)
}
} else {
str = appendRune(str, r)
}
case '"':
str = append(str, '"')
case '\\':
str = append(str, '\\')
case '/':
str = append(str, '/')
case 'b':
str = append(str, '\b')
case 'f':
str = append(str, '\f')
case 'n':
str = append(str, '\n')
case 'r':
str = append(str, '\r')
case 't':
str = append(str, '\t')
default:
iter.ReportError("ReadString",
`invalid escape char after \`)
return nil
}
return str
}
// ReadStringAsSlice reads a string from the iterator without copying it into string form.
// The []byte cannot be kept, as it will change after the next iterator call.
func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
c := iter.nextToken()
if c == '"' {
for i := iter.head; i < iter.tail; i++ {
// require ascii string and no escape
// for: field name, base64, number
if iter.buf[i] == '"' {
// fast path: reuse the underlying buffer
ret = iter.buf[iter.head:i]
iter.head = i + 1
return ret
}
}
readLen := iter.tail - iter.head
copied := make([]byte, readLen, readLen*2)
copy(copied, iter.buf[iter.head:iter.tail])
iter.head = iter.tail
for iter.Error == nil {
c := iter.readByte()
if c == '"' {
return copied
}
copied = append(copied, c)
}
return copied
}
iter.ReportError("ReadString", `expects " or n`)
return
}
func (iter *Iterator) readU4() (ret rune) {
for i := 0; i < 4; i++ {
c := iter.readByte()
if iter.Error != nil {
return
}
if c >= '0' && c <= '9' {
ret = ret*16 + rune(c-'0')
} else if c >= 'a' && c <= 'f' {
ret = ret*16 + rune(c-'a'+10)
} else if c >= 'A' && c <= 'F' {
ret = ret*16 + rune(c-'A'+10)
} else {
iter.ReportError("readU4", "expects 0~9 or a~f")
return
}
}
return ret
}
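Note: readU4 accumulates exactly four hex digits; readEscapedChar then pairs two such values with utf16.DecodeRune when they form a surrogate pair. The same steps outside the iterator (hexRune is a hypothetical helper; error handling elided):

package main

import (
    "fmt"
    "unicode/utf16"
)

// hexRune parses a four-digit hex escape payload, as readU4 does.
func hexRune(s string) rune {
    var r rune
    for i := 0; i < 4; i++ {
        c := s[i]
        switch {
        case c >= '0' && c <= '9':
            r = r*16 + rune(c-'0')
        case c >= 'a' && c <= 'f':
            r = r*16 + rune(c-'a'+10)
        case c >= 'A' && c <= 'F':
            r = r*16 + rune(c-'A'+10)
        }
    }
    return r
}

func main() {
    // JSON escape "\uD83D\uDE00" is a surrogate pair for U+1F600.
    r := utf16.DecodeRune(hexRune("D83D"), hexRune("DE00"))
    fmt.Println(string(r)) // 😀
}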
const (
t1 = 0x00 // 0000 0000
tx = 0x80 // 1000 0000
t2 = 0xC0 // 1100 0000
t3 = 0xE0 // 1110 0000
t4 = 0xF0 // 1111 0000
t5 = 0xF8 // 1111 1000
maskx = 0x3F // 0011 1111
mask2 = 0x1F // 0001 1111
mask3 = 0x0F // 0000 1111
mask4 = 0x07 // 0000 0111
rune1Max = 1<<7 - 1
rune2Max = 1<<11 - 1
rune3Max = 1<<16 - 1
surrogateMin = 0xD800
surrogateMax = 0xDFFF
maxRune = '\U0010FFFF' // Maximum valid Unicode code point.
runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character"
)
func appendRune(p []byte, r rune) []byte {
// Negative values are erroneous. Making it unsigned addresses the problem.
switch i := uint32(r); {
case i <= rune1Max:
p = append(p, byte(r))
return p
case i <= rune2Max:
p = append(p, t2|byte(r>>6))
p = append(p, tx|byte(r)&maskx)
return p
case i > maxRune, surrogateMin <= i && i <= surrogateMax:
r = runeError
fallthrough
case i <= rune3Max:
p = append(p, t3|byte(r>>12))
p = append(p, tx|byte(r>>6)&maskx)
p = append(p, tx|byte(r)&maskx)
return p
default:
p = append(p, t4|byte(r>>18))
p = append(p, tx|byte(r>>12)&maskx)
p = append(p, tx|byte(r>>6)&maskx)
p = append(p, tx|byte(r)&maskx)
return p
}
}
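Note: appendRune is a hand-inlined copy of UTF-8 encoding, so the hot path avoids the utf8 package; its output should match the standard library byte for byte. A quick cross-check, assuming it is compiled into the same package as appendRune:

import (
    "bytes"
    "unicode/utf8"
)

// sameAsStdlib reports whether appendRune produces exactly the bytes
// utf8.EncodeRune would produce for r.
func sameAsStdlib(r rune) bool {
    var std [utf8.UTFMax]byte
    n := utf8.EncodeRune(std[:], r)
    return bytes.Equal(appendRune(nil, r), std[:n])
}

// sameAsStdlib('€') and sameAsStdlib('😀') both return true.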


@ -0,0 +1,15 @@
package jsoniter
import "encoding/json"
type Number string
func CastJsonNumber(val interface{}) (string, bool) {
switch typedVal := val.(type) {
case json.Number:
return string(typedVal), true
case Number:
return string(typedVal), true
}
return "", false
}
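Note: CastJsonNumber normalizes the two number-as-string types this package may hand back. A typical use, assuming the vendored import path shown in this commit:

package main

import (
    "encoding/json"
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    var v interface{} = json.Number("3.14")
    if s, ok := jsoniter.CastJsonNumber(v); ok {
        fmt.Println("number literal:", s) // number literal: 3.14
    }
}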

vendor/github.com/json-iterator/go/feature_pool.go generated vendored Normal file

@ -0,0 +1,57 @@
package jsoniter
import (
"io"
)
// IteratorPool is a thread-safe pool of iterators sharing one configuration
type IteratorPool interface {
BorrowIterator(data []byte) *Iterator
ReturnIterator(iter *Iterator)
}
// StreamPool is a thread-safe pool of streams sharing one configuration
type StreamPool interface {
BorrowStream(writer io.Writer) *Stream
ReturnStream(stream *Stream)
}
func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
select {
case stream := <-cfg.streamPool:
stream.Reset(writer)
return stream
default:
return NewStream(cfg, writer, 512)
}
}
func (cfg *frozenConfig) ReturnStream(stream *Stream) {
stream.Error = nil
select {
case cfg.streamPool <- stream:
return
default:
return
}
}
func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
select {
case iter := <-cfg.iteratorPool:
iter.ResetBytes(data)
return iter
default:
return ParseBytes(cfg, data)
}
}
func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
iter.Error = nil
select {
case cfg.iteratorPool <- iter:
return
default:
return
}
}
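Note: BorrowStream/BorrowIterator implement pooling with nothing but a buffered channel: a select with a default branch gives a non-blocking take (a miss allocates) and a non-blocking put (a full pool drops the object for the GC). The same pattern on a bytes.Buffer pool (illustrative sketch):

import "bytes"

type bufferPool chan *bytes.Buffer

func (p bufferPool) borrow() *bytes.Buffer {
    select {
    case b := <-p:
        b.Reset() // reuse, like stream.Reset(writer) above
        return b
    default:
        return new(bytes.Buffer) // pool empty: allocate
    }
}

func (p bufferPool) giveBack(b *bytes.Buffer) {
    select {
    case p <- b:
    default: // pool full: let the GC collect it
    }
}

// usage: pool := make(bufferPool, 4); b := pool.borrow(); defer pool.giveBack(b)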

vendor/github.com/json-iterator/go/feature_reflect.go generated vendored Normal file

@ -0,0 +1,691 @@
package jsoniter
import (
"encoding"
"encoding/json"
"fmt"
"reflect"
"time"
"unsafe"
)
// ValDecoder is an internal type registered to cache as needed.
// Don't confuse jsoniter.ValDecoder with json.Decoder.
// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
//
// Reflection on type is used to create decoders, which are then cached.
// Reflection on value is avoided where possible, since reflect.Value itself allocates, with the following exceptions:
// 1. creating an instance of a new value, for example *int needs an int to be allocated
// 2. appending to a slice, if the existing cap is not enough, allocation will be done using reflect.New
// 3. assignment to a map, where both key and value will be reflect.Value
// For a simple struct binding, decoding will be reflect.Value free and allocation free
type ValDecoder interface {
Decode(ptr unsafe.Pointer, iter *Iterator)
}
// ValEncoder is an internal type registered to cache as needed.
// Don't confuse jsoniter.ValEncoder with json.Encoder.
// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
type ValEncoder interface {
IsEmpty(ptr unsafe.Pointer) bool
Encode(ptr unsafe.Pointer, stream *Stream)
EncodeInterface(val interface{}, stream *Stream)
}
type checkIsEmpty interface {
IsEmpty(ptr unsafe.Pointer) bool
}
// WriteToStream is the default implementation of the ValEncoder method EncodeInterface
func WriteToStream(val interface{}, stream *Stream, encoder ValEncoder) {
e := (*emptyInterface)(unsafe.Pointer(&val))
if e.word == nil {
stream.WriteNil()
return
}
if reflect.TypeOf(val).Kind() == reflect.Ptr {
encoder.Encode(unsafe.Pointer(&e.word), stream)
} else {
encoder.Encode(e.word, stream)
}
}
var jsonNumberType reflect.Type
var jsoniterNumberType reflect.Type
var jsonRawMessageType reflect.Type
var jsoniterRawMessageType reflect.Type
var anyType reflect.Type
var marshalerType reflect.Type
var unmarshalerType reflect.Type
var textMarshalerType reflect.Type
var textUnmarshalerType reflect.Type
func init() {
jsonNumberType = reflect.TypeOf((*json.Number)(nil)).Elem()
jsoniterNumberType = reflect.TypeOf((*Number)(nil)).Elem()
jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem()
jsoniterRawMessageType = reflect.TypeOf((*RawMessage)(nil)).Elem()
anyType = reflect.TypeOf((*Any)(nil)).Elem()
marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
}
type optionalDecoder struct {
valueType reflect.Type
valueDecoder ValDecoder
}
func (decoder *optionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if iter.ReadNil() {
*((*unsafe.Pointer)(ptr)) = nil
} else {
if *((*unsafe.Pointer)(ptr)) == nil {
//pointer to null, we have to allocate memory to hold the value
value := reflect.New(decoder.valueType)
newPtr := extractInterface(value.Interface()).word
decoder.valueDecoder.Decode(newPtr, iter)
*((*uintptr)(ptr)) = uintptr(newPtr)
} else {
//reuse existing instance
decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
}
}
}
type deferenceDecoder struct {
// only to dereference a pointer
valueType reflect.Type
valueDecoder ValDecoder
}
func (decoder *deferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if *((*unsafe.Pointer)(ptr)) == nil {
//pointer to null, we have to allocate memory to hold the value
value := reflect.New(decoder.valueType)
newPtr := extractInterface(value.Interface()).word
decoder.valueDecoder.Decode(newPtr, iter)
*((*uintptr)(ptr)) = uintptr(newPtr)
} else {
//reuse existing instance
decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
}
}
type optionalEncoder struct {
valueEncoder ValEncoder
}
func (encoder *optionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
if *((*unsafe.Pointer)(ptr)) == nil {
stream.WriteNil()
} else {
encoder.valueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
}
}
func (encoder *optionalEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *optionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
if *((*unsafe.Pointer)(ptr)) == nil {
return true
}
return false
}
type placeholderEncoder struct {
cfg *frozenConfig
cacheKey reflect.Type
}
func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
encoder.getRealEncoder().Encode(ptr, stream)
}
func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.getRealEncoder().IsEmpty(ptr)
}
func (encoder *placeholderEncoder) getRealEncoder() ValEncoder {
for i := 0; i < 30; i++ {
realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey)
_, isPlaceholder := realDecoder.(*placeholderEncoder)
if isPlaceholder {
time.Sleep(time.Second)
} else {
return realDecoder
}
}
panic(fmt.Sprintf("real encoder not found for cache key: %v", encoder.cacheKey))
}
type placeholderDecoder struct {
cfg *frozenConfig
cacheKey reflect.Type
}
func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
for i := 0; i < 30; i++ {
realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey)
_, isPlaceholder := realDecoder.(*placeholderDecoder)
if isPlaceholder {
time.Sleep(time.Second)
} else {
realDecoder.Decode(ptr, iter)
return
}
}
panic(fmt.Sprintf("real decoder not found for cache key: %v", decoder.cacheKey))
}
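Note: the placeholder encoder/decoder pair exists to break cycles: building a decoder for a recursive type re-enters decoderOfType for the same reflect.Type, and without a provisional cache entry that recursion would never terminate. A minimal sketch of the pattern with a directly patched placeholder (this library instead polls the cache, as above; all names here are illustrative):

package main

import (
    "fmt"
    "reflect"
)

type node struct{ next *node }

type describer func(depth int) string

type placeholder struct{ real describer }

var cache = map[reflect.Type]*placeholder{}

// describerOf builds a recursive descriptor for t. The placeholder is
// published to the cache *before* recursing, so the self-reference in
// node.next resolves instead of looping forever.
func describerOf(t reflect.Type) describer {
    if ph, ok := cache[t]; ok {
        return func(d int) string { return ph.real(d) } // indirect via placeholder
    }
    ph := &placeholder{}
    cache[t] = ph
    switch t.Kind() {
    case reflect.Ptr:
        elem := describerOf(t.Elem())
        ph.real = func(d int) string { return "*" + elem(d) }
    case reflect.Struct:
        field := describerOf(t.Field(0).Type)
        ph.real = func(d int) string {
            if d == 0 {
                return t.Name() + "{...}"
            }
            return t.Name() + "{next " + field(d-1) + "}"
        }
    default:
        ph.real = func(int) string { return t.String() }
    }
    return ph.real
}

func main() {
    fmt.Println(describerOf(reflect.TypeOf(node{}))(2))
    // node{next *node{next *node{...}}}
}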
// emptyInterface is the header for an interface{} value.
type emptyInterface struct {
typ unsafe.Pointer
word unsafe.Pointer
}
// nonEmptyInterface is the header for an interface with methods (not interface{})
type nonEmptyInterface struct {
// see ../runtime/iface.go:/Itab
itab *struct {
ityp unsafe.Pointer // static interface type
typ unsafe.Pointer // dynamic concrete type
link unsafe.Pointer
bad int32
unused int32
fun [100000]unsafe.Pointer // method table
}
word unsafe.Pointer
}
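Note: emptyInterface mirrors the runtime's two-word interface{} header, which is what lets this package pull a raw data pointer out of an interface without reflection allocations. The trick in isolation (unsafe and unsupported; it assumes the interface layout of Go runtimes of this era):

import (
    "fmt"
    "unsafe"
)

type eface struct {
    typ  unsafe.Pointer
    word unsafe.Pointer
}

func demo() {
    x := 42
    var i interface{} = &x // pointer-shaped value: word holds the *int directly
    e := (*eface)(unsafe.Pointer(&i))
    fmt.Println(*(*int)(e.word)) // 42
}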
// ReadVal copies the underlying JSON into a Go value, same as json.Unmarshal
func (iter *Iterator) ReadVal(obj interface{}) {
typ := reflect.TypeOf(obj)
cacheKey := typ.Elem()
decoder, err := decoderOfType(iter.cfg, cacheKey)
if err != nil {
iter.Error = err
return
}
e := (*emptyInterface)(unsafe.Pointer(&obj))
decoder.Decode(e.word, iter)
}
// WriteVal copies the Go value into the underlying JSON, same as json.Marshal
func (stream *Stream) WriteVal(val interface{}) {
if nil == val {
stream.WriteNil()
return
}
typ := reflect.TypeOf(val)
cacheKey := typ
encoder, err := encoderOfType(stream.cfg, cacheKey)
if err != nil {
stream.Error = err
return
}
encoder.EncodeInterface(val, stream)
}
type prefix string
func (p prefix) addToDecoder(decoder ValDecoder, err error) (ValDecoder, error) {
if err != nil {
return nil, fmt.Errorf("%s: %s", p, err.Error())
}
return decoder, err
}
func (p prefix) addToEncoder(encoder ValEncoder, err error) (ValEncoder, error) {
if err != nil {
return nil, fmt.Errorf("%s: %s", p, err.Error())
}
return encoder, err
}
func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
cacheKey := typ
decoder := cfg.getDecoderFromCache(cacheKey)
if decoder != nil {
return decoder, nil
}
decoder = getTypeDecoderFromExtension(typ)
if decoder != nil {
cfg.addDecoderToCache(cacheKey, decoder)
return decoder, nil
}
decoder = &placeholderDecoder{cfg: cfg, cacheKey: cacheKey}
cfg.addDecoderToCache(cacheKey, decoder)
decoder, err := createDecoderOfType(cfg, typ)
for _, extension := range extensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
cfg.addDecoderToCache(cacheKey, decoder)
return decoder, err
}
func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
typeName := typ.String()
if typ == jsonRawMessageType {
return &jsonRawMessageCodec{}, nil
}
if typ == jsoniterRawMessageType {
return &jsoniterRawMessageCodec{}, nil
}
if typ.AssignableTo(jsonNumberType) {
return &jsonNumberCodec{}, nil
}
if typ.AssignableTo(jsoniterNumberType) {
return &jsoniterNumberCodec{}, nil
}
if typ.Implements(unmarshalerType) {
templateInterface := reflect.New(typ).Elem().Interface()
var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)}
if typ.Kind() == reflect.Ptr {
decoder = &optionalDecoder{typ.Elem(), decoder}
}
return decoder, nil
}
if reflect.PtrTo(typ).Implements(unmarshalerType) {
templateInterface := reflect.New(typ).Interface()
var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)}
return decoder, nil
}
if typ.Implements(textUnmarshalerType) {
templateInterface := reflect.New(typ).Elem().Interface()
var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)}
if typ.Kind() == reflect.Ptr {
decoder = &optionalDecoder{typ.Elem(), decoder}
}
return decoder, nil
}
if reflect.PtrTo(typ).Implements(textUnmarshalerType) {
templateInterface := reflect.New(typ).Interface()
var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)}
return decoder, nil
}
if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
sliceDecoder, err := prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ))
if err != nil {
return nil, err
}
return &base64Codec{sliceDecoder: sliceDecoder}, nil
}
if typ.Implements(anyType) {
return &anyCodec{}, nil
}
switch typ.Kind() {
case reflect.String:
if typeName != "string" {
return decoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem())
}
return &stringCodec{}, nil
case reflect.Int:
if typeName != "int" {
return decoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem())
}
return &intCodec{}, nil
case reflect.Int8:
if typeName != "int8" {
return decoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem())
}
return &int8Codec{}, nil
case reflect.Int16:
if typeName != "int16" {
return decoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem())
}
return &int16Codec{}, nil
case reflect.Int32:
if typeName != "int32" {
return decoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem())
}
return &int32Codec{}, nil
case reflect.Int64:
if typeName != "int64" {
return decoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem())
}
return &int64Codec{}, nil
case reflect.Uint:
if typeName != "uint" {
return decoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem())
}
return &uintCodec{}, nil
case reflect.Uint8:
if typeName != "uint8" {
return decoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem())
}
return &uint8Codec{}, nil
case reflect.Uint16:
if typeName != "uint16" {
return decoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem())
}
return &uint16Codec{}, nil
case reflect.Uint32:
if typeName != "uint32" {
return decoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem())
}
return &uint32Codec{}, nil
case reflect.Uintptr:
if typeName != "uintptr" {
return decoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem())
}
return &uintptrCodec{}, nil
case reflect.Uint64:
if typeName != "uint64" {
return decoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem())
}
return &uint64Codec{}, nil
case reflect.Float32:
if typeName != "float32" {
return decoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem())
}
return &float32Codec{}, nil
case reflect.Float64:
if typeName != "float64" {
return decoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem())
}
return &float64Codec{}, nil
case reflect.Bool:
if typeName != "bool" {
return decoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem())
}
return &boolCodec{}, nil
case reflect.Interface:
if typ.NumMethod() == 0 {
return &emptyInterfaceCodec{}, nil
}
return &nonEmptyInterfaceCodec{}, nil
case reflect.Struct:
return prefix(fmt.Sprintf("[%s]", typeName)).addToDecoder(decoderOfStruct(cfg, typ))
case reflect.Array:
return prefix("[array]").addToDecoder(decoderOfArray(cfg, typ))
case reflect.Slice:
return prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ))
case reflect.Map:
return prefix("[map]").addToDecoder(decoderOfMap(cfg, typ))
case reflect.Ptr:
return prefix("[optional]").addToDecoder(decoderOfOptional(cfg, typ))
default:
return nil, fmt.Errorf("unsupported type: %v", typ)
}
}
func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
cacheKey := typ
encoder := cfg.getEncoderFromCache(cacheKey)
if encoder != nil {
return encoder, nil
}
encoder = getTypeEncoderFromExtension(typ)
if encoder != nil {
cfg.addEncoderToCache(cacheKey, encoder)
return encoder, nil
}
encoder = &placeholderEncoder{cfg: cfg, cacheKey: cacheKey}
cfg.addEncoderToCache(cacheKey, encoder)
encoder, err := createEncoderOfType(cfg, typ)
for _, extension := range extensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
cfg.addEncoderToCache(cacheKey, encoder)
return encoder, err
}
func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
if typ == jsonRawMessageType {
return &jsonRawMessageCodec{}, nil
}
if typ == jsoniterRawMessageType {
return &jsoniterRawMessageCodec{}, nil
}
if typ.AssignableTo(jsonNumberType) {
return &jsonNumberCodec{}, nil
}
if typ.AssignableTo(jsoniterNumberType) {
return &jsoniterNumberCodec{}, nil
}
if typ.Implements(marshalerType) {
checkIsEmpty, err := createCheckIsEmpty(typ)
if err != nil {
return nil, err
}
templateInterface := reflect.New(typ).Elem().Interface()
var encoder ValEncoder = &marshalerEncoder{
templateInterface: extractInterface(templateInterface),
checkIsEmpty: checkIsEmpty,
}
if typ.Kind() == reflect.Ptr {
encoder = &optionalEncoder{encoder}
}
return encoder, nil
}
if typ.Implements(textMarshalerType) {
checkIsEmpty, err := createCheckIsEmpty(typ)
if err != nil {
return nil, err
}
templateInterface := reflect.New(typ).Elem().Interface()
var encoder ValEncoder = &textMarshalerEncoder{
templateInterface: extractInterface(templateInterface),
checkIsEmpty: checkIsEmpty,
}
if typ.Kind() == reflect.Ptr {
encoder = &optionalEncoder{encoder}
}
return encoder, nil
}
if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
return &base64Codec{}, nil
}
if typ.Implements(anyType) {
return &anyCodec{}, nil
}
return createEncoderOfSimpleType(cfg, typ)
}
func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) {
kind := typ.Kind()
switch kind {
case reflect.String:
return &stringCodec{}, nil
case reflect.Int:
return &intCodec{}, nil
case reflect.Int8:
return &int8Codec{}, nil
case reflect.Int16:
return &int16Codec{}, nil
case reflect.Int32:
return &int32Codec{}, nil
case reflect.Int64:
return &int64Codec{}, nil
case reflect.Uint:
return &uintCodec{}, nil
case reflect.Uint8:
return &uint8Codec{}, nil
case reflect.Uint16:
return &uint16Codec{}, nil
case reflect.Uint32:
return &uint32Codec{}, nil
case reflect.Uintptr:
return &uintptrCodec{}, nil
case reflect.Uint64:
return &uint64Codec{}, nil
case reflect.Float32:
return &float32Codec{}, nil
case reflect.Float64:
return &float64Codec{}, nil
case reflect.Bool:
return &boolCodec{}, nil
case reflect.Interface:
if typ.NumMethod() == 0 {
return &emptyInterfaceCodec{}, nil
}
return &nonEmptyInterfaceCodec{}, nil
case reflect.Struct:
return &structEncoder{}, nil
case reflect.Array:
return &arrayEncoder{}, nil
case reflect.Slice:
return &sliceEncoder{}, nil
case reflect.Map:
return &mapEncoder{}, nil
case reflect.Ptr:
return &optionalEncoder{}, nil
default:
return nil, fmt.Errorf("unsupported type: %v", typ)
}
}
func createEncoderOfSimpleType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
typeName := typ.String()
kind := typ.Kind()
switch kind {
case reflect.String:
if typeName != "string" {
return encoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem())
}
return &stringCodec{}, nil
case reflect.Int:
if typeName != "int" {
return encoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem())
}
return &intCodec{}, nil
case reflect.Int8:
if typeName != "int8" {
return encoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem())
}
return &int8Codec{}, nil
case reflect.Int16:
if typeName != "int16" {
return encoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem())
}
return &int16Codec{}, nil
case reflect.Int32:
if typeName != "int32" {
return encoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem())
}
return &int32Codec{}, nil
case reflect.Int64:
if typeName != "int64" {
return encoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem())
}
return &int64Codec{}, nil
case reflect.Uint:
if typeName != "uint" {
return encoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem())
}
return &uintCodec{}, nil
case reflect.Uint8:
if typeName != "uint8" {
return encoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem())
}
return &uint8Codec{}, nil
case reflect.Uint16:
if typeName != "uint16" {
return encoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem())
}
return &uint16Codec{}, nil
case reflect.Uint32:
if typeName != "uint32" {
return encoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem())
}
return &uint32Codec{}, nil
case reflect.Uintptr:
if typeName != "uintptr" {
return encoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem())
}
return &uintptrCodec{}, nil
case reflect.Uint64:
if typeName != "uint64" {
return encoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem())
}
return &uint64Codec{}, nil
case reflect.Float32:
if typeName != "float32" {
return encoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem())
}
return &float32Codec{}, nil
case reflect.Float64:
if typeName != "float64" {
return encoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem())
}
return &float64Codec{}, nil
case reflect.Bool:
if typeName != "bool" {
return encoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem())
}
return &boolCodec{}, nil
case reflect.Interface:
if typ.NumMethod() == 0 {
return &emptyInterfaceCodec{}, nil
}
return &nonEmptyInterfaceCodec{}, nil
case reflect.Struct:
return prefix(fmt.Sprintf("[%s]", typeName)).addToEncoder(encoderOfStruct(cfg, typ))
case reflect.Array:
return prefix("[array]").addToEncoder(encoderOfArray(cfg, typ))
case reflect.Slice:
return prefix("[slice]").addToEncoder(encoderOfSlice(cfg, typ))
case reflect.Map:
return prefix("[map]").addToEncoder(encoderOfMap(cfg, typ))
case reflect.Ptr:
return prefix("[optional]").addToEncoder(encoderOfOptional(cfg, typ))
default:
return nil, fmt.Errorf("unsupported type: %v", typ)
}
}
func decoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
elemType := typ.Elem()
decoder, err := decoderOfType(cfg, elemType)
if err != nil {
return nil, err
}
return &optionalDecoder{elemType, decoder}, nil
}
func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
elemType := typ.Elem()
elemEncoder, err := encoderOfType(cfg, elemType)
if err != nil {
return nil, err
}
encoder := &optionalEncoder{elemEncoder}
if elemType.Kind() == reflect.Map {
encoder = &optionalEncoder{encoder}
}
return encoder, nil
}
func decoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
decoder, err := decoderOfType(cfg, typ.Elem())
if err != nil {
return nil, err
}
mapInterface := reflect.New(typ).Interface()
return &mapDecoder{typ, typ.Key(), typ.Elem(), decoder, extractInterface(mapInterface)}, nil
}
func extractInterface(val interface{}) emptyInterface {
return *((*emptyInterface)(unsafe.Pointer(&val)))
}
func encoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
elemType := typ.Elem()
encoder, err := encoderOfType(cfg, elemType)
if err != nil {
return nil, err
}
mapInterface := reflect.New(typ).Elem().Interface()
if cfg.sortMapKeys {
return &sortKeysMapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil
}
return &mapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil
}


@ -0,0 +1,99 @@
package jsoniter
import (
"fmt"
"io"
"reflect"
"unsafe"
)
func decoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
decoder, err := decoderOfType(cfg, typ.Elem())
if err != nil {
return nil, err
}
return &arrayDecoder{typ, typ.Elem(), decoder}, nil
}
func encoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
encoder, err := encoderOfType(cfg, typ.Elem())
if err != nil {
return nil, err
}
if typ.Elem().Kind() == reflect.Map {
encoder = &optionalEncoder{encoder}
}
return &arrayEncoder{typ, typ.Elem(), encoder}, nil
}
type arrayEncoder struct {
arrayType reflect.Type
elemType reflect.Type
elemEncoder ValEncoder
}
func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteArrayStart()
elemPtr := unsafe.Pointer(ptr)
encoder.elemEncoder.Encode(elemPtr, stream)
for i := 1; i < encoder.arrayType.Len(); i++ {
stream.WriteMore()
elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size())
encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream)
}
stream.WriteArrayEnd()
if stream.Error != nil && stream.Error != io.EOF {
stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
}
}
func (encoder *arrayEncoder) EncodeInterface(val interface{}, stream *Stream) {
// special optimization for interface{}
e := (*emptyInterface)(unsafe.Pointer(&val))
if e.word == nil {
stream.WriteArrayStart()
stream.WriteNil()
stream.WriteArrayEnd()
return
}
elemType := encoder.arrayType.Elem()
if encoder.arrayType.Len() == 1 && (elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map) {
ptr := uintptr(e.word)
e.word = unsafe.Pointer(&ptr)
}
if reflect.TypeOf(val).Kind() == reflect.Ptr {
encoder.Encode(unsafe.Pointer(&e.word), stream)
} else {
encoder.Encode(e.word, stream)
}
}
func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return false
}
type arrayDecoder struct {
arrayType reflect.Type
elemType reflect.Type
elemDecoder ValDecoder
}
func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
decoder.doDecode(ptr, iter)
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
}
}
func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
offset := uintptr(0)
iter.ReadArrayCB(func(iter *Iterator) bool {
if offset < decoder.arrayType.Size() {
decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(ptr)+offset), iter)
offset += decoder.elemType.Size()
} else {
iter.Skip()
}
return true
})
}
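Note: arrayEncoder and arrayDecoder walk a Go array by raw offset: element i lives at base + i*elemSize, so no per-element reflect.Value is created. The same stride arithmetic standalone (unsafe, for illustration only):

package main

import (
    "fmt"
    "unsafe"
)

func main() {
    arr := [3]int32{10, 20, 30}
    base := unsafe.Pointer(&arr)
    size := unsafe.Sizeof(arr[0]) // 4 bytes per int32
    for i := uintptr(0); i < 3; i++ {
        elem := (*int32)(unsafe.Pointer(uintptr(base) + i*size))
        fmt.Println(*elem) // 10, 20, 30
    }
}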


@ -0,0 +1,413 @@
package jsoniter
import (
"fmt"
"reflect"
"sort"
"strings"
"unicode"
"unsafe"
)
var typeDecoders = map[string]ValDecoder{}
var fieldDecoders = map[string]ValDecoder{}
var typeEncoders = map[string]ValEncoder{}
var fieldEncoders = map[string]ValEncoder{}
var extensions = []Extension{}
// StructDescriptor describes how to encode/decode the struct
type StructDescriptor struct {
onePtrEmbedded bool
onePtrOptimization bool
Type reflect.Type
Fields []*Binding
}
// GetField gets one field from the descriptor by its name.
// A map is not used here, so that field order is preserved.
func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
for _, binding := range structDescriptor.Fields {
if binding.Field.Name == fieldName {
return binding
}
}
return nil
}
// Binding describes how to encode/decode a struct field
type Binding struct {
levels []int
Field *reflect.StructField
FromNames []string
ToNames []string
Encoder ValEncoder
Decoder ValDecoder
}
// Extension is the single SPI for all customization. Customize encoding/decoding by specifying alternate encoders/decoders.
// Fields can also be renamed via UpdateStructDescriptor.
type Extension interface {
UpdateStructDescriptor(structDescriptor *StructDescriptor)
CreateDecoder(typ reflect.Type) ValDecoder
CreateEncoder(typ reflect.Type) ValEncoder
DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder
DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder
}
// DummyExtension can be embedded to get no-op implementations of all Extension methods
type DummyExtension struct {
}
// UpdateStructDescriptor No-op
func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
}
// CreateDecoder No-op
func (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder {
return nil
}
// CreateEncoder No-op
func (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder {
return nil
}
// DecorateDecoder No-op
func (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder {
return decoder
}
// DecorateEncoder No-op
func (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder {
return encoder
}
type funcDecoder struct {
fun DecoderFunc
}
func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
decoder.fun(ptr, iter)
}
type funcEncoder struct {
fun EncoderFunc
isEmptyFunc func(ptr unsafe.Pointer) bool
}
func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
encoder.fun(ptr, stream)
}
func (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {
if encoder.isEmptyFunc == nil {
return false
}
return encoder.isEmptyFunc(ptr)
}
// DecoderFunc is the function form of a ValDecoder
type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator)
// EncoderFunc is the function form of a ValEncoder
type EncoderFunc func(ptr unsafe.Pointer, stream *Stream)
// RegisterTypeDecoderFunc registers a DecoderFunc for a type
func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {
typeDecoders[typ] = &funcDecoder{fun}
}
// RegisterTypeDecoder registers a ValDecoder for a type
func RegisterTypeDecoder(typ string, decoder ValDecoder) {
typeDecoders[typ] = decoder
}
// RegisterFieldDecoderFunc registers a DecoderFunc for a struct field
func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {
RegisterFieldDecoder(typ, field, &funcDecoder{fun})
}
// RegisterFieldDecoder registers a ValDecoder for a struct field
func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {
fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder
}
// RegisterTypeEncoderFunc registers encode/isEmpty functions for a type
func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}
}
// RegisterTypeEncoder registers a ValEncoder for a type
func RegisterTypeEncoder(typ string, encoder ValEncoder) {
typeEncoders[typ] = encoder
}
// RegisterFieldEncoderFunc registers encode/isEmpty functions for a struct field
func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})
}
// RegisterFieldEncoder registers a ValEncoder for a struct field
func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {
fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder
}
// RegisterExtension registers an extension
func RegisterExtension(extension Extension) {
extensions = append(extensions, extension)
}
func getTypeDecoderFromExtension(typ reflect.Type) ValDecoder {
decoder := _getTypeDecoderFromExtension(typ)
if decoder != nil {
for _, extension := range extensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
}
return decoder
}
func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder {
for _, extension := range extensions {
decoder := extension.CreateDecoder(typ)
if decoder != nil {
return decoder
}
}
typeName := typ.String()
decoder := typeDecoders[typeName]
if decoder != nil {
return decoder
}
if typ.Kind() == reflect.Ptr {
decoder := typeDecoders[typ.Elem().String()]
if decoder != nil {
return &optionalDecoder{typ.Elem(), decoder}
}
}
return nil
}
func getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {
encoder := _getTypeEncoderFromExtension(typ)
if encoder != nil {
for _, extension := range extensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
}
return encoder
}
func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {
for _, extension := range extensions {
encoder := extension.CreateEncoder(typ)
if encoder != nil {
return encoder
}
}
typeName := typ.String()
encoder := typeEncoders[typeName]
if encoder != nil {
return encoder
}
if typ.Kind() == reflect.Ptr {
encoder := typeEncoders[typ.Elem().String()]
if encoder != nil {
return &optionalEncoder{encoder}
}
}
return nil
}
func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, error) {
embeddedBindings := []*Binding{}
bindings := []*Binding{}
for i := 0; i < typ.NumField(); i++ {
field := typ.Field(i)
tag := field.Tag.Get(cfg.getTagKey())
tagParts := strings.Split(tag, ",")
if tag == "-" {
continue
}
if field.Anonymous && (tag == "" || tagParts[0] == "") {
if field.Type.Kind() == reflect.Struct {
structDescriptor, err := describeStruct(cfg, field.Type)
if err != nil {
return nil, err
}
for _, binding := range structDescriptor.Fields {
binding.levels = append([]int{i}, binding.levels...)
omitempty := binding.Encoder.(*structFieldEncoder).omitempty
binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty}
binding.Decoder = &structFieldDecoder{&field, binding.Decoder}
embeddedBindings = append(embeddedBindings, binding)
}
continue
} else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct {
structDescriptor, err := describeStruct(cfg, field.Type.Elem())
if err != nil {
return nil, err
}
for _, binding := range structDescriptor.Fields {
binding.levels = append([]int{i}, binding.levels...)
omitempty := binding.Encoder.(*structFieldEncoder).omitempty
binding.Encoder = &optionalEncoder{binding.Encoder}
binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty}
binding.Decoder = &deferenceDecoder{field.Type.Elem(), binding.Decoder}
binding.Decoder = &structFieldDecoder{&field, binding.Decoder}
embeddedBindings = append(embeddedBindings, binding)
}
continue
}
}
fieldNames := calcFieldNames(field.Name, tagParts[0], tag)
fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name)
decoder := fieldDecoders[fieldCacheKey]
if decoder == nil {
var err error
decoder, err = decoderOfType(cfg, field.Type)
if err != nil {
return nil, err
}
}
encoder := fieldEncoders[fieldCacheKey]
if encoder == nil {
var err error
encoder, err = encoderOfType(cfg, field.Type)
if err != nil {
return nil, err
}
// map is stored as pointer in the struct
if field.Type.Kind() == reflect.Map {
encoder = &optionalEncoder{encoder}
}
}
binding := &Binding{
Field: &field,
FromNames: fieldNames,
ToNames: fieldNames,
Decoder: decoder,
Encoder: encoder,
}
binding.levels = []int{i}
bindings = append(bindings, binding)
}
return createStructDescriptor(cfg, typ, bindings, embeddedBindings), nil
}
func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
onePtrEmbedded := false
onePtrOptimization := false
if typ.NumField() == 1 {
firstField := typ.Field(0)
switch firstField.Type.Kind() {
case reflect.Ptr:
if firstField.Anonymous && firstField.Type.Elem().Kind() == reflect.Struct {
onePtrEmbedded = true
}
fallthrough
case reflect.Map:
onePtrOptimization = true
case reflect.Struct:
onePtrOptimization = isStructOnePtr(firstField.Type)
}
}
structDescriptor := &StructDescriptor{
onePtrEmbedded: onePtrEmbedded,
onePtrOptimization: onePtrOptimization,
Type: typ,
Fields: bindings,
}
for _, extension := range extensions {
extension.UpdateStructDescriptor(structDescriptor)
}
processTags(structDescriptor, cfg)
// merge normal & embedded bindings & sort with original order
allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))
sort.Sort(allBindings)
structDescriptor.Fields = allBindings
return structDescriptor
}
func isStructOnePtr(typ reflect.Type) bool {
if typ.NumField() == 1 {
firstField := typ.Field(0)
switch firstField.Type.Kind() {
case reflect.Ptr:
return true
case reflect.Map:
return true
case reflect.Struct:
return isStructOnePtr(firstField.Type)
}
}
return false
}
type sortableBindings []*Binding
func (bindings sortableBindings) Len() int {
return len(bindings)
}
func (bindings sortableBindings) Less(i, j int) bool {
left := bindings[i].levels
right := bindings[j].levels
k := 0
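// Editorial note (not in the upstream source): two distinct bindings always
// diverge at some level index, so the loop below terminates without an
// explicit bounds check on k.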
for {
if left[k] < right[k] {
return true
} else if left[k] > right[k] {
return false
}
k++
}
}
func (bindings sortableBindings) Swap(i, j int) {
bindings[i], bindings[j] = bindings[j], bindings[i]
}
func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {
for _, binding := range structDescriptor.Fields {
shouldOmitEmpty := false
tagParts := strings.Split(binding.Field.Tag.Get(cfg.getTagKey()), ",")
for _, tagPart := range tagParts[1:] {
if tagPart == "omitempty" {
shouldOmitEmpty = true
} else if tagPart == "string" {
if binding.Field.Type.Kind() == reflect.String {
binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}
binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}
} else {
binding.Decoder = &stringModeNumberDecoder{binding.Decoder}
binding.Encoder = &stringModeNumberEncoder{binding.Encoder}
}
}
}
binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}
binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}
}
}
func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {
// ignore?
if wholeTag == "-" {
return []string{}
}
// rename?
var fieldNames []string
if tagProvidedFieldName == "" {
fieldNames = []string{originalFieldName}
} else {
fieldNames = []string{tagProvidedFieldName}
}
// private?
isNotExported := unicode.IsLower(rune(originalFieldName[0]))
if isNotExported {
fieldNames = []string{}
}
return fieldNames
}
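Editor's sketch (not part of the vendored file): how the naming rules above surface through the public jsoniter API. The struct and its values are illustrative; MarshalToString is the package-level convenience function.
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type user struct {
	Name   string `json:"nickname"`   // renamed by the tag
	Age    int    `json:",omitempty"` // keeps its Go name, dropped when zero
	Secret string `json:"-"`          // always ignored
}

func main() {
	out, _ := jsoniter.MarshalToString(user{Name: "gopher"})
	fmt.Println(out) // {"nickname":"gopher"}
}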

View File

@ -0,0 +1,244 @@
package jsoniter
import (
"encoding"
"encoding/json"
"reflect"
"sort"
"strconv"
"unsafe"
)
type mapDecoder struct {
mapType reflect.Type
keyType reflect.Type
elemType reflect.Type
elemDecoder ValDecoder
mapInterface emptyInterface
}
func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
// dark magic to cast unsafe.Pointer back to interface{} using reflect.Type
mapInterface := decoder.mapInterface
mapInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
realVal := reflect.ValueOf(*realInterface).Elem()
if iter.ReadNil() {
realVal.Set(reflect.Zero(decoder.mapType))
return
}
if realVal.IsNil() {
realVal.Set(reflect.MakeMap(realVal.Type()))
}
iter.ReadMapCB(func(iter *Iterator, keyStr string) bool {
elem := reflect.New(decoder.elemType)
decoder.elemDecoder.Decode(unsafe.Pointer(elem.Pointer()), iter)
// putting the value into the map requires reflection
keyType := decoder.keyType
// TODO: remove this from loop
switch {
case keyType.Kind() == reflect.String:
realVal.SetMapIndex(reflect.ValueOf(keyStr).Convert(keyType), elem.Elem())
return true
case keyType.Implements(textUnmarshalerType):
textUnmarshaler := reflect.New(keyType.Elem()).Interface().(encoding.TextUnmarshaler)
err := textUnmarshaler.UnmarshalText([]byte(keyStr))
if err != nil {
iter.ReportError("read map key as TextUnmarshaler", err.Error())
return false
}
realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler), elem.Elem())
return true
case reflect.PtrTo(keyType).Implements(textUnmarshalerType):
textUnmarshaler := reflect.New(keyType).Interface().(encoding.TextUnmarshaler)
err := textUnmarshaler.UnmarshalText([]byte(keyStr))
if err != nil {
iter.ReportError("read map key as TextUnmarshaler", err.Error())
return false
}
realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler).Elem(), elem.Elem())
return true
default:
switch keyType.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
n, err := strconv.ParseInt(keyStr, 10, 64)
if err != nil || reflect.Zero(keyType).OverflowInt(n) {
iter.ReportError("read map key as int64", "read int64 failed")
return false
}
realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem())
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
n, err := strconv.ParseUint(keyStr, 10, 64)
if err != nil || reflect.Zero(keyType).OverflowUint(n) {
iter.ReportError("read map key as uint64", "read uint64 failed")
return false
}
realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem())
return true
}
}
iter.ReportError("read map key", "unexpected map key type "+keyType.String())
return true
})
}
type mapEncoder struct {
mapType reflect.Type
elemType reflect.Type
elemEncoder ValEncoder
mapInterface emptyInterface
}
func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
mapInterface := encoder.mapInterface
mapInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
realVal := reflect.ValueOf(*realInterface)
stream.WriteObjectStart()
for i, key := range realVal.MapKeys() {
if i != 0 {
stream.WriteMore()
}
encodeMapKey(key, stream)
if stream.indention > 0 {
stream.writeTwoBytes(byte(':'), byte(' '))
} else {
stream.writeByte(':')
}
val := realVal.MapIndex(key).Interface()
encoder.elemEncoder.EncodeInterface(val, stream)
}
stream.WriteObjectEnd()
}
func encodeMapKey(key reflect.Value, stream *Stream) {
if key.Kind() == reflect.String {
stream.WriteString(key.String())
return
}
if tm, ok := key.Interface().(encoding.TextMarshaler); ok {
buf, err := tm.MarshalText()
if err != nil {
stream.Error = err
return
}
stream.writeByte('"')
stream.Write(buf)
stream.writeByte('"')
return
}
switch key.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
stream.writeByte('"')
stream.WriteInt64(key.Int())
stream.writeByte('"')
return
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
stream.writeByte('"')
stream.WriteUint64(key.Uint())
stream.writeByte('"')
return
}
stream.Error = &json.UnsupportedTypeError{Type: key.Type()}
}
func (encoder *mapEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
mapInterface := encoder.mapInterface
mapInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
realVal := reflect.ValueOf(*realInterface)
return realVal.Len() == 0
}
type sortKeysMapEncoder struct {
mapType reflect.Type
elemType reflect.Type
elemEncoder ValEncoder
mapInterface emptyInterface
}
func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
mapInterface := encoder.mapInterface
mapInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
realVal := reflect.ValueOf(*realInterface)
// Extract and sort the keys.
keys := realVal.MapKeys()
sv := stringValues(make([]reflectWithString, len(keys)))
for i, v := range keys {
sv[i].v = v
if err := sv[i].resolve(); err != nil {
stream.Error = err
return
}
}
sort.Sort(sv)
stream.WriteObjectStart()
for i, key := range sv {
if i != 0 {
stream.WriteMore()
}
stream.WriteVal(key.s) // may need HTML escaping, so we cannot call WriteString directly
if stream.indention > 0 {
stream.writeTwoBytes(byte(':'), byte(' '))
} else {
stream.writeByte(':')
}
val := realVal.MapIndex(key.v).Interface()
encoder.elemEncoder.EncodeInterface(val, stream)
}
stream.WriteObjectEnd()
}
// stringValues is a slice of reflectWithString holding map keys together with
// their resolved string forms. It implements sort.Interface to sort keys by string.
type stringValues []reflectWithString
type reflectWithString struct {
v reflect.Value
s string
}
func (w *reflectWithString) resolve() error {
if w.v.Kind() == reflect.String {
w.s = w.v.String()
return nil
}
if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok {
buf, err := tm.MarshalText()
w.s = string(buf)
return err
}
switch w.v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
w.s = strconv.FormatInt(w.v.Int(), 10)
return nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
w.s = strconv.FormatUint(w.v.Uint(), 10)
return nil
}
return &json.UnsupportedTypeError{Type: w.v.Type()}
}
func (sv stringValues) Len() int { return len(sv) }
func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
func (sv stringValues) Less(i, j int) bool { return sv[i].s < sv[j].s }
func (encoder *sortKeysMapEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
mapInterface := encoder.mapInterface
mapInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
realVal := reflect.ValueOf(*realInterface)
return realVal.Len() == 0
}
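Editor's sketch (not part of the vendored file): enabling SortMapKeys in the public Config routes map encoding through the sortKeysMapEncoder above, giving the same deterministic key order as encoding/json.
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	cfg := jsoniter.Config{SortMapKeys: true}.Froze()
	out, _ := cfg.MarshalToString(map[string]int{"b": 2, "a": 1, "c": 3})
	fmt.Println(out) // {"a":1,"b":2,"c":3}
}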

View File

@ -0,0 +1,672 @@
package jsoniter
import (
"encoding"
"encoding/base64"
"encoding/json"
"unsafe"
)
type stringCodec struct {
}
func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*string)(ptr)) = iter.ReadString()
}
func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
str := *((*string)(ptr))
stream.WriteString(str)
}
func (codec *stringCodec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*string)(ptr)) == ""
}
type intCodec struct {
}
func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*int)(ptr)) = iter.ReadInt()
}
func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt(*((*int)(ptr)))
}
func (codec *intCodec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *intCodec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int)(ptr)) == 0
}
type uintptrCodec struct {
}
func (codec *uintptrCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*uintptr)(ptr)) = uintptr(iter.ReadUint64())
}
func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint64(uint64(*((*uintptr)(ptr))))
}
func (codec *uintptrCodec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *uintptrCodec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uintptr)(ptr)) == 0
}
type int8Codec struct {
}
func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*int8)(ptr)) = iter.ReadInt8()
}
func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt8(*((*int8)(ptr)))
}
func (codec *int8Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int8)(ptr)) == 0
}
type int16Codec struct {
}
func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*int16)(ptr)) = iter.ReadInt16()
}
func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt16(*((*int16)(ptr)))
}
func (codec *int16Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int16)(ptr)) == 0
}
type int32Codec struct {
}
func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*int32)(ptr)) = iter.ReadInt32()
}
func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt32(*((*int32)(ptr)))
}
func (codec *int32Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int32)(ptr)) == 0
}
type int64Codec struct {
}
func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*int64)(ptr)) = iter.ReadInt64()
}
func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt64(*((*int64)(ptr)))
}
func (codec *int64Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int64)(ptr)) == 0
}
type uintCodec struct {
}
func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*uint)(ptr)) = iter.ReadUint()
}
func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint(*((*uint)(ptr)))
}
func (codec *uintCodec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *uintCodec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint)(ptr)) == 0
}
type uint8Codec struct {
}
func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*uint8)(ptr)) = iter.ReadUint8()
}
func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint8(*((*uint8)(ptr)))
}
func (codec *uint8Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint8)(ptr)) == 0
}
type uint16Codec struct {
}
func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*uint16)(ptr)) = iter.ReadUint16()
}
func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint16(*((*uint16)(ptr)))
}
func (codec *uint16Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint16)(ptr)) == 0
}
type uint32Codec struct {
}
func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*uint32)(ptr)) = iter.ReadUint32()
}
func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint32(*((*uint32)(ptr)))
}
func (codec *uint32Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint32)(ptr)) == 0
}
type uint64Codec struct {
}
func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*uint64)(ptr)) = iter.ReadUint64()
}
func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint64(*((*uint64)(ptr)))
}
func (codec *uint64Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint64)(ptr)) == 0
}
type float32Codec struct {
}
func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*float32)(ptr)) = iter.ReadFloat32()
}
func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteFloat32(*((*float32)(ptr)))
}
func (codec *float32Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*float32)(ptr)) == 0
}
type float64Codec struct {
}
func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*float64)(ptr)) = iter.ReadFloat64()
}
func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteFloat64(*((*float64)(ptr)))
}
func (codec *float64Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*float64)(ptr)) == 0
}
type boolCodec struct {
}
func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*bool)(ptr)) = iter.ReadBool()
}
func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteBool(*((*bool)(ptr)))
}
func (codec *boolCodec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
return !(*((*bool)(ptr)))
}
type emptyInterfaceCodec struct {
}
func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*interface{})(ptr)) = iter.Read()
}
func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteVal(*((*interface{})(ptr)))
}
func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) {
stream.WriteVal(val)
}
func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool {
return ptr == nil
}
type nonEmptyInterfaceCodec struct {
}
func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
nonEmptyInterface := (*nonEmptyInterface)(ptr)
if nonEmptyInterface.itab == nil {
iter.ReportError("read non-empty interface", "do not know which concrete type to decode to")
return
}
var i interface{}
e := (*emptyInterface)(unsafe.Pointer(&i))
e.typ = nonEmptyInterface.itab.typ
e.word = nonEmptyInterface.word
iter.ReadVal(&i)
nonEmptyInterface.word = e.word
}
func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
nonEmptyInterface := (*nonEmptyInterface)(ptr)
var i interface{}
e := (*emptyInterface)(unsafe.Pointer(&i))
e.typ = nonEmptyInterface.itab.typ
e.word = nonEmptyInterface.word
stream.WriteVal(i)
}
func (codec *nonEmptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) {
stream.WriteVal(val)
}
func (codec *nonEmptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool {
nonEmptyInterface := (*nonEmptyInterface)(ptr)
return nonEmptyInterface.word == nil
}
type anyCodec struct {
}
func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*Any)(ptr)) = iter.ReadAny()
}
func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
(*((*Any)(ptr))).WriteTo(stream)
}
func (codec *anyCodec) EncodeInterface(val interface{}, stream *Stream) {
(val.(Any)).WriteTo(stream)
}
func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
return (*((*Any)(ptr))).Size() == 0
}
type jsonNumberCodec struct {
}
func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
}
func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteRaw(string(*((*json.Number)(ptr))))
}
func (codec *jsonNumberCodec) EncodeInterface(val interface{}, stream *Stream) {
stream.WriteRaw(string(val.(json.Number)))
}
func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*json.Number)(ptr))) == 0
}
type jsoniterNumberCodec struct {
}
func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
}
func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteRaw(string(*((*Number)(ptr))))
}
func (codec *jsoniterNumberCodec) EncodeInterface(val interface{}, stream *Stream) {
stream.WriteRaw(string(val.(Number)))
}
func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*Number)(ptr))) == 0
}
type jsonRawMessageCodec struct {
}
func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes())
}
func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
}
func (codec *jsonRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) {
stream.WriteRaw(string(val.(json.RawMessage)))
}
func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*json.RawMessage)(ptr))) == 0
}
type jsoniterRawMessageCodec struct {
}
func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes())
}
func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteRaw(string(*((*RawMessage)(ptr))))
}
func (codec *jsoniterRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) {
stream.WriteRaw(string(val.(RawMessage)))
}
func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*RawMessage)(ptr))) == 0
}
type base64Codec struct {
sliceDecoder ValDecoder
}
func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if iter.ReadNil() {
ptrSlice := (*sliceHeader)(ptr)
ptrSlice.Len = 0
ptrSlice.Cap = 0
ptrSlice.Data = nil
return
}
switch iter.WhatIsNext() {
case StringValue:
encoding := base64.StdEncoding
src := iter.SkipAndReturnBytes()
src = src[1 : len(src)-1]
decodedLen := encoding.DecodedLen(len(src))
dst := make([]byte, decodedLen)
n, err := encoding.Decode(dst, src)
if err != nil {
iter.ReportError("decode base64", err.Error())
} else {
dst = dst[:n]
dstSlice := (*sliceHeader)(unsafe.Pointer(&dst))
ptrSlice := (*sliceHeader)(ptr)
ptrSlice.Data = dstSlice.Data
ptrSlice.Cap = dstSlice.Cap
ptrSlice.Len = dstSlice.Len
}
case ArrayValue:
codec.sliceDecoder.Decode(ptr, iter)
default:
iter.ReportError("base64Codec", "invalid input")
}
}
func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
src := *((*[]byte)(ptr))
if len(src) == 0 {
stream.WriteNil()
return
}
encoding := base64.StdEncoding
stream.writeByte('"')
toGrow := encoding.EncodedLen(len(src))
stream.ensure(toGrow)
encoding.Encode(stream.buf[stream.n:], src)
stream.n += toGrow
stream.writeByte('"')
}
func (codec *base64Codec) EncodeInterface(val interface{}, stream *Stream) {
ptr := extractInterface(val).word
src := *((*[]byte)(ptr))
if len(src) == 0 {
stream.WriteNil()
return
}
encoding := base64.StdEncoding
stream.writeByte('"')
toGrow := encoding.EncodedLen(len(src))
stream.ensure(toGrow)
encoding.Encode(stream.buf[stream.n:], src)
stream.n += toGrow
stream.writeByte('"')
}
func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*[]byte)(ptr))) == 0
}
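Editor's sketch (not part of the vendored file): []byte values round-trip through the base64Codec above, mirroring encoding/json's behavior of encoding byte slices as base64 strings.
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	out, _ := jsoniter.Marshal([]byte("hello"))
	fmt.Println(string(out)) // "aGVsbG8="

	var decoded []byte
	_ = jsoniter.Unmarshal(out, &decoded)
	fmt.Println(string(decoded)) // hello
}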
type stringModeNumberDecoder struct {
elemDecoder ValDecoder
}
func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
c := iter.nextToken()
if c != '"' {
iter.ReportError("stringModeNumberDecoder", `expect "`)
return
}
decoder.elemDecoder.Decode(ptr, iter)
if iter.Error != nil {
return
}
c = iter.readByte()
if c != '"' {
iter.ReportError("stringModeNumberDecoder", `expect "`)
return
}
}
type stringModeStringDecoder struct {
elemDecoder ValDecoder
cfg *frozenConfig
}
func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
decoder.elemDecoder.Decode(ptr, iter)
str := *((*string)(ptr))
tempIter := decoder.cfg.BorrowIterator([]byte(str))
defer decoder.cfg.ReturnIterator(tempIter)
*((*string)(ptr)) = tempIter.ReadString()
}
type stringModeNumberEncoder struct {
elemEncoder ValEncoder
}
func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.writeByte('"')
encoder.elemEncoder.Encode(ptr, stream)
stream.writeByte('"')
}
func (encoder *stringModeNumberEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.elemEncoder.IsEmpty(ptr)
}
type stringModeStringEncoder struct {
elemEncoder ValEncoder
cfg *frozenConfig
}
func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
tempStream := encoder.cfg.BorrowStream(nil)
defer encoder.cfg.ReturnStream(tempStream)
encoder.elemEncoder.Encode(ptr, tempStream)
stream.WriteString(string(tempStream.Buffer()))
}
func (encoder *stringModeStringEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.elemEncoder.IsEmpty(ptr)
}
type marshalerEncoder struct {
templateInterface emptyInterface
checkIsEmpty checkIsEmpty
}
func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
templateInterface := encoder.templateInterface
templateInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&templateInterface))
marshaler := (*realInterface).(json.Marshaler)
bytes, err := marshaler.MarshalJSON()
if err != nil {
stream.Error = err
} else {
stream.Write(bytes)
}
}
func (encoder *marshalerEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.checkIsEmpty.IsEmpty(ptr)
}
type textMarshalerEncoder struct {
templateInterface emptyInterface
checkIsEmpty checkIsEmpty
}
func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
templateInterface := encoder.templateInterface
templateInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&templateInterface))
marshaler := (*realInterface).(encoding.TextMarshaler)
bytes, err := marshaler.MarshalText()
if err != nil {
stream.Error = err
} else {
stream.WriteString(string(bytes))
}
}
func (encoder *textMarshalerEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.checkIsEmpty.IsEmpty(ptr)
}
type unmarshalerDecoder struct {
templateInterface emptyInterface
}
func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
templateInterface := decoder.templateInterface
templateInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&templateInterface))
unmarshaler := (*realInterface).(json.Unmarshaler)
iter.nextToken()
iter.unreadByte() // skip spaces
bytes := iter.SkipAndReturnBytes()
err := unmarshaler.UnmarshalJSON(bytes)
if err != nil {
iter.ReportError("unmarshalerDecoder", err.Error())
}
}
type textUnmarshalerDecoder struct {
templateInterface emptyInterface
}
func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
templateInterface := decoder.templateInterface
templateInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&templateInterface))
unmarshaler := (*realInterface).(encoding.TextUnmarshaler)
str := iter.ReadString()
err := unmarshaler.UnmarshalText([]byte(str))
if err != nil {
iter.ReportError("textUnmarshalerDecoder", err.Error())
}
}
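Editor's sketch (not part of the vendored file): the ",string" tag option is what wires a numeric field through the stringModeNumberEncoder and stringModeNumberDecoder above. The struct is illustrative.
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type price struct {
	Cents int `json:"cents,string"`
}

func main() {
	out, _ := jsoniter.MarshalToString(price{Cents: 199})
	fmt.Println(out) // {"cents":"199"}

	var p price
	_ = jsoniter.UnmarshalFromString(`{"cents":"42"}`, &p)
	fmt.Println(p.Cents) // 42
}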

View File

@ -0,0 +1,196 @@
package jsoniter
import (
"fmt"
"io"
"reflect"
"strings"
"unsafe"
)
func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
type bindingTo struct {
binding *Binding
toName string
ignored bool
}
orderedBindings := []*bindingTo{}
structDescriptor, err := describeStruct(cfg, typ)
if err != nil {
return nil, err
}
for _, binding := range structDescriptor.Fields {
for _, toName := range binding.ToNames {
newBinding := &bindingTo{
binding: binding,
toName: toName,
}
for _, old := range orderedBindings {
if old.toName != toName {
continue
}
old.ignored, newBinding.ignored = resolveConflictBinding(cfg, old.binding, newBinding.binding)
}
orderedBindings = append(orderedBindings, newBinding)
}
}
if len(orderedBindings) == 0 {
return &emptyStructEncoder{}, nil
}
finalOrderedFields := []structFieldTo{}
for _, bindingTo := range orderedBindings {
if !bindingTo.ignored {
finalOrderedFields = append(finalOrderedFields, structFieldTo{
encoder: bindingTo.binding.Encoder.(*structFieldEncoder),
toName: bindingTo.toName,
})
}
}
return &structEncoder{structDescriptor.onePtrEmbedded, structDescriptor.onePtrOptimization, finalOrderedFields}, nil
}
func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
newTagged := new.Field.Tag.Get(cfg.getTagKey()) != ""
oldTagged := old.Field.Tag.Get(cfg.getTagKey()) != ""
if newTagged {
if oldTagged {
if len(old.levels) > len(new.levels) {
return true, false
} else if len(new.levels) > len(old.levels) {
return false, true
} else {
return true, true
}
} else {
return true, false
}
} else {
if oldTagged {
return true, false
}
if len(old.levels) > len(new.levels) {
return true, false
} else if len(new.levels) > len(old.levels) {
return false, true
} else {
return true, true
}
}
}
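// Editorial note (not in the upstream source): the rules above mirror
// encoding/json's conflict resolution: a tagged field beats an untagged one,
// a shallower (less deeply embedded) field beats a deeper one, and two
// candidates at the same depth with the same tag status eliminate each other.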
func decoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
bindings := map[string]*Binding{}
structDescriptor, err := describeStruct(cfg, typ)
if err != nil {
return nil, err
}
for _, binding := range structDescriptor.Fields {
for _, fromName := range binding.FromNames {
old := bindings[fromName]
if old == nil {
bindings[fromName] = binding
continue
}
ignoreOld, ignoreNew := resolveConflictBinding(cfg, old, binding)
if ignoreOld {
delete(bindings, fromName)
}
if !ignoreNew {
bindings[fromName] = binding
}
}
}
fields := map[string]*structFieldDecoder{}
for k, binding := range bindings {
fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
}
return createStructDecoder(typ, fields)
}
type structFieldEncoder struct {
field *reflect.StructField
fieldEncoder ValEncoder
omitempty bool
}
func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset)
encoder.fieldEncoder.Encode(fieldPtr, stream)
if stream.Error != nil && stream.Error != io.EOF {
stream.Error = fmt.Errorf("%s: %s", encoder.field.Name, stream.Error.Error())
}
}
func (encoder *structFieldEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset)
return encoder.fieldEncoder.IsEmpty(fieldPtr)
}
type structEncoder struct {
onePtrEmbedded bool
onePtrOptimization bool
fields []structFieldTo
}
type structFieldTo struct {
encoder *structFieldEncoder
toName string
}
func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteObjectStart()
isNotFirst := false
for _, field := range encoder.fields {
if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
continue
}
if isNotFirst {
stream.WriteMore()
}
stream.WriteObjectField(field.toName)
field.encoder.Encode(ptr, stream)
isNotFirst = true
}
stream.WriteObjectEnd()
}
func (encoder *structEncoder) EncodeInterface(val interface{}, stream *Stream) {
e := (*emptyInterface)(unsafe.Pointer(&val))
if encoder.onePtrOptimization {
if e.word == nil && encoder.onePtrEmbedded {
stream.WriteObjectStart()
stream.WriteObjectEnd()
return
}
ptr := uintptr(e.word)
e.word = unsafe.Pointer(&ptr)
}
if reflect.TypeOf(val).Kind() == reflect.Ptr {
encoder.Encode(unsafe.Pointer(&e.word), stream)
} else {
encoder.Encode(e.word, stream)
}
}
func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return false
}
type emptyStructEncoder struct {
}
func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteEmptyObject()
}
func (encoder *emptyStructEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return false
}
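Editor's sketch (not part of the vendored file): the conflict resolution above in action, where a shallower tagged field shadows an embedded one carrying the same JSON name. The types are illustrative.
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type base struct {
	ID string `json:"id"`
}

type outer struct {
	base        // its "id" loses to the shallower field below
	ID   string `json:"id"`
}

func main() {
	out, _ := jsoniter.MarshalToString(outer{base: base{ID: "inner"}, ID: "outer"})
	fmt.Println(out) // {"id":"outer"}
}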

View File

@ -0,0 +1,149 @@
package jsoniter
import (
"fmt"
"io"
"reflect"
"unsafe"
)
func decoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
decoder, err := decoderOfType(cfg, typ.Elem())
if err != nil {
return nil, err
}
return &sliceDecoder{typ, typ.Elem(), decoder}, nil
}
func encoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
encoder, err := encoderOfType(cfg, typ.Elem())
if err != nil {
return nil, err
}
if typ.Elem().Kind() == reflect.Map {
encoder = &optionalEncoder{encoder}
}
return &sliceEncoder{typ, typ.Elem(), encoder}, nil
}
type sliceEncoder struct {
sliceType reflect.Type
elemType reflect.Type
elemEncoder ValEncoder
}
func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
slice := (*sliceHeader)(ptr)
if slice.Data == nil {
stream.WriteNil()
return
}
if slice.Len == 0 {
stream.WriteEmptyArray()
return
}
stream.WriteArrayStart()
elemPtr := unsafe.Pointer(slice.Data)
encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream)
for i := 1; i < slice.Len; i++ {
stream.WriteMore()
elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size())
encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream)
}
stream.WriteArrayEnd()
if stream.Error != nil && stream.Error != io.EOF {
stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
}
}
func (encoder *sliceEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
slice := (*sliceHeader)(ptr)
return slice.Len == 0
}
type sliceDecoder struct {
sliceType reflect.Type
elemType reflect.Type
elemDecoder ValDecoder
}
// sliceHeader is a safe version of SliceHeader used within this package.
type sliceHeader struct {
Data unsafe.Pointer
Len int
Cap int
}
func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
decoder.doDecode(ptr, iter)
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
}
}
func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
slice := (*sliceHeader)(ptr)
if iter.ReadNil() {
slice.Len = 0
slice.Cap = 0
slice.Data = nil
return
}
reuseSlice(slice, decoder.sliceType, 4)
slice.Len = 0
offset := uintptr(0)
iter.ReadArrayCB(func(iter *Iterator) bool {
growOne(slice, decoder.sliceType, decoder.elemType)
decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(slice.Data)+offset), iter)
offset += decoder.elemType.Size()
return true
})
}
// growOne grows the slice so that it can hold one more value, allocating more
// capacity if needed: capacity doubles while the length is under 1024 and
// grows by about 25% after that.
func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Type) {
newLen := slice.Len + 1
if newLen <= slice.Cap {
slice.Len = newLen
return
}
newCap := slice.Cap
if newCap == 0 {
newCap = 1
} else {
for newCap < newLen {
if slice.Len < 1024 {
newCap += newCap
} else {
newCap += newCap / 4
}
}
}
newVal := reflect.MakeSlice(sliceType, newLen, newCap)
dst := unsafe.Pointer(newVal.Pointer())
// copy old array into new array
originalBytesCount := uintptr(slice.Len) * elementType.Size()
srcPtr := (*[1 << 30]byte)(slice.Data)
dstPtr := (*[1 << 30]byte)(dst)
for i := uintptr(0); i < originalBytesCount; i++ {
dstPtr[i] = srcPtr[i]
}
slice.Data = dst
slice.Len = newLen
slice.Cap = newCap
}
func reuseSlice(slice *sliceHeader, sliceType reflect.Type, expectedCap int) {
if expectedCap <= slice.Cap {
return
}
newVal := reflect.MakeSlice(sliceType, 0, expectedCap)
dst := unsafe.Pointer(newVal.Pointer())
slice.Data = dst
slice.Cap = expectedCap
}
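Editor's sketch (not part of the vendored file): sliceEncoder distinguishes a nil backing array from an empty one, as its two early returns above show; this matches encoding/json.
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var nilSlice []int
	out, _ := jsoniter.MarshalToString(nilSlice)
	fmt.Println(out) // null

	out, _ = jsoniter.MarshalToString([]int{})
	fmt.Println(out) // []
}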

View File

@ -0,0 +1,916 @@
package jsoniter
import (
"fmt"
"io"
"reflect"
"strings"
"unsafe"
)
func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder) (ValDecoder, error) {
knownHash := map[int32]struct{}{
0: {},
}
switch len(fields) {
case 0:
return &skipObjectDecoder{typ}, nil
case 1:
for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName)
_, known := knownHash[fieldHash]
if known {
return &generalStructDecoder{typ, fields}, nil
}
knownHash[fieldHash] = struct{}{}
return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}, nil
}
case 2:
var fieldHash1 int32
var fieldHash2 int32
var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder
for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName)
_, known := knownHash[fieldHash]
if known {
return &generalStructDecoder{typ, fields}, nil
}
knownHash[fieldHash] = struct{}{}
if fieldHash1 == 0 {
fieldHash1 = fieldHash
fieldDecoder1 = fieldDecoder
} else {
fieldHash2 = fieldHash
fieldDecoder2 = fieldDecoder
}
}
return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}, nil
case 3:
var fieldName1 int32
var fieldName2 int32
var fieldName3 int32
var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder
for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName)
_, known := knownHash[fieldHash]
if known {
return &generalStructDecoder{typ, fields}, nil
}
knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 {
fieldName1 = fieldHash
fieldDecoder1 = fieldDecoder
} else if fieldName2 == 0 {
fieldName2 = fieldHash
fieldDecoder2 = fieldDecoder
} else {
fieldName3 = fieldHash
fieldDecoder3 = fieldDecoder
}
}
return &threeFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3}, nil
case 4:
var fieldName1 int32
var fieldName2 int32
var fieldName3 int32
var fieldName4 int32
var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder
var fieldDecoder4 *structFieldDecoder
for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName)
_, known := knownHash[fieldHash]
if known {
return &generalStructDecoder{typ, fields}, nil
}
knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 {
fieldName1 = fieldHash
fieldDecoder1 = fieldDecoder
} else if fieldName2 == 0 {
fieldName2 = fieldHash
fieldDecoder2 = fieldDecoder
} else if fieldName3 == 0 {
fieldName3 = fieldHash
fieldDecoder3 = fieldDecoder
} else {
fieldName4 = fieldHash
fieldDecoder4 = fieldDecoder
}
}
return &fourFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
fieldName4, fieldDecoder4}, nil
case 5:
var fieldName1 int32
var fieldName2 int32
var fieldName3 int32
var fieldName4 int32
var fieldName5 int32
var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder
var fieldDecoder4 *structFieldDecoder
var fieldDecoder5 *structFieldDecoder
for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName)
_, known := knownHash[fieldHash]
if known {
return &generalStructDecoder{typ, fields}, nil
}
knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 {
fieldName1 = fieldHash
fieldDecoder1 = fieldDecoder
} else if fieldName2 == 0 {
fieldName2 = fieldHash
fieldDecoder2 = fieldDecoder
} else if fieldName3 == 0 {
fieldName3 = fieldHash
fieldDecoder3 = fieldDecoder
} else if fieldName4 == 0 {
fieldName4 = fieldHash
fieldDecoder4 = fieldDecoder
} else {
fieldName5 = fieldHash
fieldDecoder5 = fieldDecoder
}
}
return &fiveFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5}, nil
case 6:
var fieldName1 int32
var fieldName2 int32
var fieldName3 int32
var fieldName4 int32
var fieldName5 int32
var fieldName6 int32
var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder
var fieldDecoder4 *structFieldDecoder
var fieldDecoder5 *structFieldDecoder
var fieldDecoder6 *structFieldDecoder
for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName)
_, known := knownHash[fieldHash]
if known {
return &generalStructDecoder{typ, fields}, nil
}
knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 {
fieldName1 = fieldHash
fieldDecoder1 = fieldDecoder
} else if fieldName2 == 0 {
fieldName2 = fieldHash
fieldDecoder2 = fieldDecoder
} else if fieldName3 == 0 {
fieldName3 = fieldHash
fieldDecoder3 = fieldDecoder
} else if fieldName4 == 0 {
fieldName4 = fieldHash
fieldDecoder4 = fieldDecoder
} else if fieldName5 == 0 {
fieldName5 = fieldHash
fieldDecoder5 = fieldDecoder
} else {
fieldName6 = fieldHash
fieldDecoder6 = fieldDecoder
}
}
return &sixFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6}, nil
case 7:
var fieldName1 int32
var fieldName2 int32
var fieldName3 int32
var fieldName4 int32
var fieldName5 int32
var fieldName6 int32
var fieldName7 int32
var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder
var fieldDecoder4 *structFieldDecoder
var fieldDecoder5 *structFieldDecoder
var fieldDecoder6 *structFieldDecoder
var fieldDecoder7 *structFieldDecoder
for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName)
_, known := knownHash[fieldHash]
if known {
return &generalStructDecoder{typ, fields}, nil
}
knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 {
fieldName1 = fieldHash
fieldDecoder1 = fieldDecoder
} else if fieldName2 == 0 {
fieldName2 = fieldHash
fieldDecoder2 = fieldDecoder
} else if fieldName3 == 0 {
fieldName3 = fieldHash
fieldDecoder3 = fieldDecoder
} else if fieldName4 == 0 {
fieldName4 = fieldHash
fieldDecoder4 = fieldDecoder
} else if fieldName5 == 0 {
fieldName5 = fieldHash
fieldDecoder5 = fieldDecoder
} else if fieldName6 == 0 {
fieldName6 = fieldHash
fieldDecoder6 = fieldDecoder
} else {
fieldName7 = fieldHash
fieldDecoder7 = fieldDecoder
}
}
return &sevenFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6,
fieldName7, fieldDecoder7}, nil
case 8:
var fieldName1 int32
var fieldName2 int32
var fieldName3 int32
var fieldName4 int32
var fieldName5 int32
var fieldName6 int32
var fieldName7 int32
var fieldName8 int32
var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder
var fieldDecoder4 *structFieldDecoder
var fieldDecoder5 *structFieldDecoder
var fieldDecoder6 *structFieldDecoder
var fieldDecoder7 *structFieldDecoder
var fieldDecoder8 *structFieldDecoder
for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName)
_, known := knownHash[fieldHash]
if known {
return &generalStructDecoder{typ, fields}, nil
}
knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 {
fieldName1 = fieldHash
fieldDecoder1 = fieldDecoder
} else if fieldName2 == 0 {
fieldName2 = fieldHash
fieldDecoder2 = fieldDecoder
} else if fieldName3 == 0 {
fieldName3 = fieldHash
fieldDecoder3 = fieldDecoder
} else if fieldName4 == 0 {
fieldName4 = fieldHash
fieldDecoder4 = fieldDecoder
} else if fieldName5 == 0 {
fieldName5 = fieldHash
fieldDecoder5 = fieldDecoder
} else if fieldName6 == 0 {
fieldName6 = fieldHash
fieldDecoder6 = fieldDecoder
} else if fieldName7 == 0 {
fieldName7 = fieldHash
fieldDecoder7 = fieldDecoder
} else {
fieldName8 = fieldHash
fieldDecoder8 = fieldDecoder
}
}
return &eightFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6,
fieldName7, fieldDecoder7, fieldName8, fieldDecoder8}, nil
case 9:
var fieldName1 int32
var fieldName2 int32
var fieldName3 int32
var fieldName4 int32
var fieldName5 int32
var fieldName6 int32
var fieldName7 int32
var fieldName8 int32
var fieldName9 int32
var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder
var fieldDecoder4 *structFieldDecoder
var fieldDecoder5 *structFieldDecoder
var fieldDecoder6 *structFieldDecoder
var fieldDecoder7 *structFieldDecoder
var fieldDecoder8 *structFieldDecoder
var fieldDecoder9 *structFieldDecoder
for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName)
_, known := knownHash[fieldHash]
if known {
return &generalStructDecoder{typ, fields}, nil
}
knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 {
fieldName1 = fieldHash
fieldDecoder1 = fieldDecoder
} else if fieldName2 == 0 {
fieldName2 = fieldHash
fieldDecoder2 = fieldDecoder
} else if fieldName3 == 0 {
fieldName3 = fieldHash
fieldDecoder3 = fieldDecoder
} else if fieldName4 == 0 {
fieldName4 = fieldHash
fieldDecoder4 = fieldDecoder
} else if fieldName5 == 0 {
fieldName5 = fieldHash
fieldDecoder5 = fieldDecoder
} else if fieldName6 == 0 {
fieldName6 = fieldHash
fieldDecoder6 = fieldDecoder
} else if fieldName7 == 0 {
fieldName7 = fieldHash
fieldDecoder7 = fieldDecoder
} else if fieldName8 == 0 {
fieldName8 = fieldHash
fieldDecoder8 = fieldDecoder
} else {
fieldName9 = fieldHash
fieldDecoder9 = fieldDecoder
}
}
return &nineFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6,
fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9}, nil
case 10:
var fieldName1 int32
var fieldName2 int32
var fieldName3 int32
var fieldName4 int32
var fieldName5 int32
var fieldName6 int32
var fieldName7 int32
var fieldName8 int32
var fieldName9 int32
var fieldName10 int32
var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder
var fieldDecoder4 *structFieldDecoder
var fieldDecoder5 *structFieldDecoder
var fieldDecoder6 *structFieldDecoder
var fieldDecoder7 *structFieldDecoder
var fieldDecoder8 *structFieldDecoder
var fieldDecoder9 *structFieldDecoder
var fieldDecoder10 *structFieldDecoder
for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName)
_, known := knownHash[fieldHash]
if known {
return &generalStructDecoder{typ, fields}, nil
}
knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 {
fieldName1 = fieldHash
fieldDecoder1 = fieldDecoder
} else if fieldName2 == 0 {
fieldName2 = fieldHash
fieldDecoder2 = fieldDecoder
} else if fieldName3 == 0 {
fieldName3 = fieldHash
fieldDecoder3 = fieldDecoder
} else if fieldName4 == 0 {
fieldName4 = fieldHash
fieldDecoder4 = fieldDecoder
} else if fieldName5 == 0 {
fieldName5 = fieldHash
fieldDecoder5 = fieldDecoder
} else if fieldName6 == 0 {
fieldName6 = fieldHash
fieldDecoder6 = fieldDecoder
} else if fieldName7 == 0 {
fieldName7 = fieldHash
fieldDecoder7 = fieldDecoder
} else if fieldName8 == 0 {
fieldName8 = fieldHash
fieldDecoder8 = fieldDecoder
} else if fieldName9 == 0 {
fieldName9 = fieldHash
fieldDecoder9 = fieldDecoder
} else {
fieldName10 = fieldHash
fieldDecoder10 = fieldDecoder
}
}
return &tenFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6,
fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9,
fieldName10, fieldDecoder10}, nil
}
return &generalStructDecoder{typ, fields}, nil
}
type generalStructDecoder struct {
typ reflect.Type
fields map[string]*structFieldDecoder
}
func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.readObjectStart() {
return
}
fieldBytes := iter.readObjectFieldAsBytes()
field := *(*string)(unsafe.Pointer(&fieldBytes))
fieldDecoder := decoder.fields[strings.ToLower(field)]
if fieldDecoder == nil {
iter.Skip()
} else {
fieldDecoder.Decode(ptr, iter)
}
for iter.nextToken() == ',' {
fieldBytes = iter.readObjectFieldAsBytes()
field = *(*string)(unsafe.Pointer(&fieldBytes))
fieldDecoder = decoder.fields[strings.ToLower(field)]
if fieldDecoder == nil {
iter.Skip()
} else {
fieldDecoder.Decode(ptr, iter)
}
}
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
}
}
type skipObjectDecoder struct {
typ reflect.Type
}
func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
valueType := iter.WhatIsNext()
if valueType != ObjectValue && valueType != NilValue {
iter.ReportError("skipObjectDecoder", "expect object or null")
return
}
iter.Skip()
}
type oneFieldStructDecoder struct {
typ reflect.Type
fieldHash int32
fieldDecoder *structFieldDecoder
}
func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.readObjectStart() {
return
}
for {
if iter.readFieldHash() == decoder.fieldHash {
decoder.fieldDecoder.Decode(ptr, iter)
} else {
iter.Skip()
}
if iter.isObjectEnd() {
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
}
}
type twoFieldsStructDecoder struct {
typ reflect.Type
fieldHash1 int32
fieldDecoder1 *structFieldDecoder
fieldHash2 int32
fieldDecoder2 *structFieldDecoder
}
func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.readObjectStart() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
decoder.fieldDecoder1.Decode(ptr, iter)
case decoder.fieldHash2:
decoder.fieldDecoder2.Decode(ptr, iter)
default:
iter.Skip()
}
if iter.isObjectEnd() {
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
}
}
type threeFieldsStructDecoder struct {
typ reflect.Type
fieldHash1 int32
fieldDecoder1 *structFieldDecoder
fieldHash2 int32
fieldDecoder2 *structFieldDecoder
fieldHash3 int32
fieldDecoder3 *structFieldDecoder
}
func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.readObjectStart() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
decoder.fieldDecoder1.Decode(ptr, iter)
case decoder.fieldHash2:
decoder.fieldDecoder2.Decode(ptr, iter)
case decoder.fieldHash3:
decoder.fieldDecoder3.Decode(ptr, iter)
default:
iter.Skip()
}
if iter.isObjectEnd() {
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
}
}
type fourFieldsStructDecoder struct {
typ reflect.Type
fieldHash1 int32
fieldDecoder1 *structFieldDecoder
fieldHash2 int32
fieldDecoder2 *structFieldDecoder
fieldHash3 int32
fieldDecoder3 *structFieldDecoder
fieldHash4 int32
fieldDecoder4 *structFieldDecoder
}
func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.readObjectStart() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
decoder.fieldDecoder1.Decode(ptr, iter)
case decoder.fieldHash2:
decoder.fieldDecoder2.Decode(ptr, iter)
case decoder.fieldHash3:
decoder.fieldDecoder3.Decode(ptr, iter)
case decoder.fieldHash4:
decoder.fieldDecoder4.Decode(ptr, iter)
default:
iter.Skip()
}
if iter.isObjectEnd() {
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
}
}
type fiveFieldsStructDecoder struct {
typ reflect.Type
fieldHash1 int32
fieldDecoder1 *structFieldDecoder
fieldHash2 int32
fieldDecoder2 *structFieldDecoder
fieldHash3 int32
fieldDecoder3 *structFieldDecoder
fieldHash4 int32
fieldDecoder4 *structFieldDecoder
fieldHash5 int32
fieldDecoder5 *structFieldDecoder
}
func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.readObjectStart() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
decoder.fieldDecoder1.Decode(ptr, iter)
case decoder.fieldHash2:
decoder.fieldDecoder2.Decode(ptr, iter)
case decoder.fieldHash3:
decoder.fieldDecoder3.Decode(ptr, iter)
case decoder.fieldHash4:
decoder.fieldDecoder4.Decode(ptr, iter)
case decoder.fieldHash5:
decoder.fieldDecoder5.Decode(ptr, iter)
default:
iter.Skip()
}
if iter.isObjectEnd() {
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
}
}
type sixFieldsStructDecoder struct {
typ reflect.Type
fieldHash1 int32
fieldDecoder1 *structFieldDecoder
fieldHash2 int32
fieldDecoder2 *structFieldDecoder
fieldHash3 int32
fieldDecoder3 *structFieldDecoder
fieldHash4 int32
fieldDecoder4 *structFieldDecoder
fieldHash5 int32
fieldDecoder5 *structFieldDecoder
fieldHash6 int32
fieldDecoder6 *structFieldDecoder
}
func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.readObjectStart() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
decoder.fieldDecoder1.Decode(ptr, iter)
case decoder.fieldHash2:
decoder.fieldDecoder2.Decode(ptr, iter)
case decoder.fieldHash3:
decoder.fieldDecoder3.Decode(ptr, iter)
case decoder.fieldHash4:
decoder.fieldDecoder4.Decode(ptr, iter)
case decoder.fieldHash5:
decoder.fieldDecoder5.Decode(ptr, iter)
case decoder.fieldHash6:
decoder.fieldDecoder6.Decode(ptr, iter)
default:
iter.Skip()
}
if iter.isObjectEnd() {
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
}
}
type sevenFieldsStructDecoder struct {
typ reflect.Type
fieldHash1 int32
fieldDecoder1 *structFieldDecoder
fieldHash2 int32
fieldDecoder2 *structFieldDecoder
fieldHash3 int32
fieldDecoder3 *structFieldDecoder
fieldHash4 int32
fieldDecoder4 *structFieldDecoder
fieldHash5 int32
fieldDecoder5 *structFieldDecoder
fieldHash6 int32
fieldDecoder6 *structFieldDecoder
fieldHash7 int32
fieldDecoder7 *structFieldDecoder
}
func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.readObjectStart() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
decoder.fieldDecoder1.Decode(ptr, iter)
case decoder.fieldHash2:
decoder.fieldDecoder2.Decode(ptr, iter)
case decoder.fieldHash3:
decoder.fieldDecoder3.Decode(ptr, iter)
case decoder.fieldHash4:
decoder.fieldDecoder4.Decode(ptr, iter)
case decoder.fieldHash5:
decoder.fieldDecoder5.Decode(ptr, iter)
case decoder.fieldHash6:
decoder.fieldDecoder6.Decode(ptr, iter)
case decoder.fieldHash7:
decoder.fieldDecoder7.Decode(ptr, iter)
default:
iter.Skip()
}
if iter.isObjectEnd() {
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
}
}
type eightFieldsStructDecoder struct {
typ reflect.Type
fieldHash1 int32
fieldDecoder1 *structFieldDecoder
fieldHash2 int32
fieldDecoder2 *structFieldDecoder
fieldHash3 int32
fieldDecoder3 *structFieldDecoder
fieldHash4 int32
fieldDecoder4 *structFieldDecoder
fieldHash5 int32
fieldDecoder5 *structFieldDecoder
fieldHash6 int32
fieldDecoder6 *structFieldDecoder
fieldHash7 int32
fieldDecoder7 *structFieldDecoder
fieldHash8 int32
fieldDecoder8 *structFieldDecoder
}
func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.readObjectStart() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
decoder.fieldDecoder1.Decode(ptr, iter)
case decoder.fieldHash2:
decoder.fieldDecoder2.Decode(ptr, iter)
case decoder.fieldHash3:
decoder.fieldDecoder3.Decode(ptr, iter)
case decoder.fieldHash4:
decoder.fieldDecoder4.Decode(ptr, iter)
case decoder.fieldHash5:
decoder.fieldDecoder5.Decode(ptr, iter)
case decoder.fieldHash6:
decoder.fieldDecoder6.Decode(ptr, iter)
case decoder.fieldHash7:
decoder.fieldDecoder7.Decode(ptr, iter)
case decoder.fieldHash8:
decoder.fieldDecoder8.Decode(ptr, iter)
default:
iter.Skip()
}
if iter.isObjectEnd() {
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
}
}
type nineFieldsStructDecoder struct {
typ reflect.Type
fieldHash1 int32
fieldDecoder1 *structFieldDecoder
fieldHash2 int32
fieldDecoder2 *structFieldDecoder
fieldHash3 int32
fieldDecoder3 *structFieldDecoder
fieldHash4 int32
fieldDecoder4 *structFieldDecoder
fieldHash5 int32
fieldDecoder5 *structFieldDecoder
fieldHash6 int32
fieldDecoder6 *structFieldDecoder
fieldHash7 int32
fieldDecoder7 *structFieldDecoder
fieldHash8 int32
fieldDecoder8 *structFieldDecoder
fieldHash9 int32
fieldDecoder9 *structFieldDecoder
}
func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.readObjectStart() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
decoder.fieldDecoder1.Decode(ptr, iter)
case decoder.fieldHash2:
decoder.fieldDecoder2.Decode(ptr, iter)
case decoder.fieldHash3:
decoder.fieldDecoder3.Decode(ptr, iter)
case decoder.fieldHash4:
decoder.fieldDecoder4.Decode(ptr, iter)
case decoder.fieldHash5:
decoder.fieldDecoder5.Decode(ptr, iter)
case decoder.fieldHash6:
decoder.fieldDecoder6.Decode(ptr, iter)
case decoder.fieldHash7:
decoder.fieldDecoder7.Decode(ptr, iter)
case decoder.fieldHash8:
decoder.fieldDecoder8.Decode(ptr, iter)
case decoder.fieldHash9:
decoder.fieldDecoder9.Decode(ptr, iter)
default:
iter.Skip()
}
if iter.isObjectEnd() {
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
}
}
type tenFieldsStructDecoder struct {
typ reflect.Type
fieldHash1 int32
fieldDecoder1 *structFieldDecoder
fieldHash2 int32
fieldDecoder2 *structFieldDecoder
fieldHash3 int32
fieldDecoder3 *structFieldDecoder
fieldHash4 int32
fieldDecoder4 *structFieldDecoder
fieldHash5 int32
fieldDecoder5 *structFieldDecoder
fieldHash6 int32
fieldDecoder6 *structFieldDecoder
fieldHash7 int32
fieldDecoder7 *structFieldDecoder
fieldHash8 int32
fieldDecoder8 *structFieldDecoder
fieldHash9 int32
fieldDecoder9 *structFieldDecoder
fieldHash10 int32
fieldDecoder10 *structFieldDecoder
}
func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.readObjectStart() {
return
}
for {
switch iter.readFieldHash() {
case decoder.fieldHash1:
decoder.fieldDecoder1.Decode(ptr, iter)
case decoder.fieldHash2:
decoder.fieldDecoder2.Decode(ptr, iter)
case decoder.fieldHash3:
decoder.fieldDecoder3.Decode(ptr, iter)
case decoder.fieldHash4:
decoder.fieldDecoder4.Decode(ptr, iter)
case decoder.fieldHash5:
decoder.fieldDecoder5.Decode(ptr, iter)
case decoder.fieldHash6:
decoder.fieldDecoder6.Decode(ptr, iter)
case decoder.fieldHash7:
decoder.fieldDecoder7.Decode(ptr, iter)
case decoder.fieldHash8:
decoder.fieldDecoder8.Decode(ptr, iter)
case decoder.fieldHash9:
decoder.fieldDecoder9.Decode(ptr, iter)
case decoder.fieldHash10:
decoder.fieldDecoder10.Decode(ptr, iter)
default:
iter.Skip()
}
if iter.isObjectEnd() {
break
}
}
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
}
}
type structFieldDecoder struct {
field *reflect.StructField
fieldDecoder ValDecoder
}
func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
fieldPtr := unsafe.Pointer(uintptr(ptr) + decoder.field.Offset)
decoder.fieldDecoder.Decode(fieldPtr, iter)
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%s: %s", decoder.field.Name, iter.Error.Error())
}
}

vendor/github.com/json-iterator/go/feature_stream.go generated vendored Normal file

@ -0,0 +1,305 @@
package jsoniter
import (
"io"
)
// Stream is an io.Writer-like object with JSON-specific write functions.
// Errors are not returned as return values, but stored in the Error field of this stream instance.
type Stream struct {
cfg *frozenConfig
out io.Writer
buf []byte
n int
Error error
indention int
}
// NewStream creates a new stream instance.
// cfg can be jsoniter.ConfigDefault.
// out can be nil to write only to the internal buffer.
// bufSize is the initial size of the internal buffer in bytes.
func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
return &Stream{
cfg: cfg.(*frozenConfig),
out: out,
buf: make([]byte, bufSize),
n: 0,
Error: nil,
indention: 0,
}
}
// Pool returns a pool that can provide more streams with the same configuration.
func (stream *Stream) Pool() StreamPool {
return stream.cfg
}
// Reset reuses this stream instance by assigning a new writer.
func (stream *Stream) Reset(out io.Writer) {
stream.out = out
stream.n = 0
}
// Available returns how many bytes are unused in the buffer.
func (stream *Stream) Available() int {
return len(stream.buf) - stream.n
}
// Buffered returns the number of bytes that have been written into the current buffer.
func (stream *Stream) Buffered() int {
return stream.n
}
// Buffer returns the written bytes; if the writer is nil, use this method to take the result.
func (stream *Stream) Buffer() []byte {
return stream.buf[:stream.n]
}
// Write writes the contents of p into the buffer.
// It returns the number of bytes written.
// If nn < len(p), it also returns an error explaining
// why the write is short.
func (stream *Stream) Write(p []byte) (nn int, err error) {
for len(p) > stream.Available() && stream.Error == nil {
if stream.out == nil {
stream.growAtLeast(len(p))
} else {
var n int
if stream.Buffered() == 0 {
// Large write, empty buffer.
// Write directly from p to avoid copy.
n, stream.Error = stream.out.Write(p)
} else {
n = copy(stream.buf[stream.n:], p)
stream.n += n
stream.Flush()
}
nn += n
p = p[n:]
}
}
if stream.Error != nil {
return nn, stream.Error
}
n := copy(stream.buf[stream.n:], p)
stream.n += n
nn += n
return nn, nil
}
// writeByte writes a single byte.
func (stream *Stream) writeByte(c byte) {
if stream.Error != nil {
return
}
if stream.Available() < 1 {
stream.growAtLeast(1)
}
stream.buf[stream.n] = c
stream.n++
}
func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
if stream.Error != nil {
return
}
if stream.Available() < 2 {
stream.growAtLeast(2)
}
stream.buf[stream.n] = c1
stream.buf[stream.n+1] = c2
stream.n += 2
}
func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
if stream.Error != nil {
return
}
if stream.Available() < 3 {
stream.growAtLeast(3)
}
stream.buf[stream.n] = c1
stream.buf[stream.n+1] = c2
stream.buf[stream.n+2] = c3
stream.n += 3
}
func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
if stream.Error != nil {
return
}
if stream.Available() < 4 {
stream.growAtLeast(4)
}
stream.buf[stream.n] = c1
stream.buf[stream.n+1] = c2
stream.buf[stream.n+2] = c3
stream.buf[stream.n+3] = c4
stream.n += 4
}
func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
if stream.Error != nil {
return
}
if stream.Available() < 5 {
stream.growAtLeast(5)
}
stream.buf[stream.n] = c1
stream.buf[stream.n+1] = c2
stream.buf[stream.n+2] = c3
stream.buf[stream.n+3] = c4
stream.buf[stream.n+4] = c5
stream.n += 5
}
// Flush writes any buffered data to the underlying io.Writer.
func (stream *Stream) Flush() error {
if stream.out == nil {
return nil
}
if stream.Error != nil {
return stream.Error
}
if stream.n == 0 {
return nil
}
n, err := stream.out.Write(stream.buf[0:stream.n])
if n < stream.n && err == nil {
err = io.ErrShortWrite
}
if err != nil {
if n > 0 && n < stream.n {
copy(stream.buf[0:stream.n-n], stream.buf[n:stream.n])
}
stream.n -= n
stream.Error = err
return err
}
stream.n = 0
return nil
}
func (stream *Stream) ensure(minimal int) {
available := stream.Available()
if available < minimal {
stream.growAtLeast(minimal)
}
}
func (stream *Stream) growAtLeast(minimal int) {
if stream.out != nil {
stream.Flush()
}
toGrow := len(stream.buf)
if toGrow < minimal {
toGrow = minimal
}
newBuf := make([]byte, len(stream.buf)+toGrow)
copy(newBuf, stream.Buffer())
stream.buf = newBuf
}
// WriteRaw writes the string out without quotes, just like raw []byte.
func (stream *Stream) WriteRaw(s string) {
stream.ensure(len(s))
if stream.Error != nil {
return
}
n := copy(stream.buf[stream.n:], s)
stream.n += n
}
// WriteNil writes null to the stream.
func (stream *Stream) WriteNil() {
stream.writeFourBytes('n', 'u', 'l', 'l')
}
// WriteTrue writes true to the stream.
func (stream *Stream) WriteTrue() {
stream.writeFourBytes('t', 'r', 'u', 'e')
}
// WriteFalse writes false to the stream.
func (stream *Stream) WriteFalse() {
stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
}
// WriteBool writes true or false to the stream.
func (stream *Stream) WriteBool(val bool) {
if val {
stream.WriteTrue()
} else {
stream.WriteFalse()
}
}
// WriteObjectStart writes { with possible indention.
func (stream *Stream) WriteObjectStart() {
stream.indention += stream.cfg.indentionStep
stream.writeByte('{')
stream.writeIndention(0)
}
// WriteObjectField writes "field": with possible indention.
func (stream *Stream) WriteObjectField(field string) {
stream.WriteString(field)
if stream.indention > 0 {
stream.writeTwoBytes(':', ' ')
} else {
stream.writeByte(':')
}
}
// WriteObjectEnd writes } with possible indention.
func (stream *Stream) WriteObjectEnd() {
stream.writeIndention(stream.cfg.indentionStep)
stream.indention -= stream.cfg.indentionStep
stream.writeByte('}')
}
// WriteEmptyObject writes {}.
func (stream *Stream) WriteEmptyObject() {
stream.writeByte('{')
stream.writeByte('}')
}
// WriteMore writes , with possible indention.
func (stream *Stream) WriteMore() {
stream.writeByte(',')
stream.writeIndention(0)
}
// WriteArrayStart writes [ with possible indention.
func (stream *Stream) WriteArrayStart() {
stream.indention += stream.cfg.indentionStep
stream.writeByte('[')
stream.writeIndention(0)
}
// WriteEmptyArray writes [].
func (stream *Stream) WriteEmptyArray() {
stream.writeByte('[')
stream.writeByte(']')
}
// WriteArrayEnd writes ] with possible indention.
func (stream *Stream) WriteArrayEnd() {
stream.writeIndention(stream.cfg.indentionStep)
stream.indention -= stream.cfg.indentionStep
stream.writeByte(']')
}
func (stream *Stream) writeIndention(delta int) {
if stream.indention == 0 {
return
}
stream.writeByte('\n')
toWrite := stream.indention - delta
stream.ensure(toWrite)
for i := 0; i < toWrite && stream.n < len(stream.buf); i++ {
stream.buf[stream.n] = ' '
stream.n++
}
}
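Chained by hand, the write helpers above compose whole JSON documents. A minimal sketch of driving this Stream API directly, assuming the vendored package is imported as jsoniter and the default (zero-indent) configuration:

```go
package main

import (
	"bytes"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var buf bytes.Buffer
	stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 64)
	stream.WriteObjectStart()
	stream.WriteObjectField("id")
	stream.WriteInt(42)
	stream.WriteMore()
	stream.WriteObjectField("ok")
	stream.WriteBool(true)
	stream.WriteObjectEnd()
	stream.Flush() // push buffered bytes to the io.Writer

	fmt.Println(buf.String()) // {"id":42,"ok":true}
}
```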


@ -0,0 +1,96 @@
package jsoniter
import (
"math"
"strconv"
)
var pow10 []uint64
func init() {
pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
}
// WriteFloat32 writes float32 to the stream.
func (stream *Stream) WriteFloat32(val float32) {
abs := math.Abs(float64(val))
fmt := byte('f')
// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
if abs != 0 {
if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
fmt = 'e'
}
}
stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 32))
}
// WriteFloat32Lossy writes float32 to the stream with ONLY 6 digits of precision, but is much faster.
func (stream *Stream) WriteFloat32Lossy(val float32) {
if val < 0 {
stream.writeByte('-')
val = -val
}
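// For larger magnitudes, fall back to WriteFloat32's exact formatting;
// the fixed-point fast path below only keeps 6 fractional digits.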
if val > 0x4ffffff {
stream.WriteFloat32(val)
return
}
precision := 6
exp := uint64(1000000) // 6
lval := uint64(float64(val)*float64(exp) + 0.5)
stream.WriteUint64(lval / exp)
fval := lval % exp
if fval == 0 {
return
}
stream.writeByte('.')
stream.ensure(10)
for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
stream.writeByte('0')
}
stream.WriteUint64(fval)
for stream.buf[stream.n-1] == '0' {
stream.n--
}
}
// WriteFloat64 writes float64 to the stream.
func (stream *Stream) WriteFloat64(val float64) {
abs := math.Abs(val)
fmt := byte('f')
// Note: unlike WriteFloat32, the cutoffs can be compared directly on the float64 value.
if abs != 0 {
if abs < 1e-6 || abs >= 1e21 {
fmt = 'e'
}
}
stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 64))
}
// WriteFloat64Lossy writes float64 to the stream with ONLY 6 digits of precision, but is much faster.
func (stream *Stream) WriteFloat64Lossy(val float64) {
if val < 0 {
stream.writeByte('-')
val = -val
}
if val > 0x4ffffff {
stream.WriteFloat64(val)
return
}
precision := 6
exp := uint64(1000000) // 6
lval := uint64(val*float64(exp) + 0.5)
stream.WriteUint64(lval / exp)
fval := lval % exp
if fval == 0 {
return
}
stream.writeByte('.')
stream.ensure(10)
for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
stream.writeByte('0')
}
stream.WriteUint64(fval)
for stream.buf[stream.n-1] == '0' {
stream.n--
}
}


@ -0,0 +1,320 @@
package jsoniter
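// digits caches, for every value in [0, 1000), its three ASCII digit
// characters packed into the low 24 bits of a uint32. The top byte records
// how many leading digits writeFirstBuf should skip when printing: 2 for
// values below 10, 1 for values below 100, 0 otherwise.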
var digits []uint32
func init() {
digits = make([]uint32, 1000)
for i := uint32(0); i < 1000; i++ {
digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
if i < 10 {
digits[i] += 2 << 24
} else if i < 100 {
digits[i] += 1 << 24
}
}
}
func writeFirstBuf(buf []byte, v uint32, n int) int {
start := v >> 24
if start == 0 {
buf[n] = byte(v >> 16)
n++
buf[n] = byte(v >> 8)
n++
} else if start == 1 {
buf[n] = byte(v >> 8)
n++
}
buf[n] = byte(v)
n++
return n
}
func writeBuf(buf []byte, v uint32, n int) {
buf[n] = byte(v >> 16)
buf[n+1] = byte(v >> 8)
buf[n+2] = byte(v)
}
// WriteUint8 writes uint8 to the stream.
func (stream *Stream) WriteUint8(val uint8) {
stream.ensure(3)
stream.n = writeFirstBuf(stream.buf, digits[val], stream.n)
}
// WriteInt8 writes int8 to the stream.
func (stream *Stream) WriteInt8(nval int8) {
stream.ensure(4)
n := stream.n
var val uint8
if nval < 0 {
val = uint8(-nval)
stream.buf[n] = '-'
n++
} else {
val = uint8(nval)
}
stream.n = writeFirstBuf(stream.buf, digits[val], n)
}
// WriteUint16 writes uint16 to the stream.
func (stream *Stream) WriteUint16(val uint16) {
stream.ensure(5)
q1 := val / 1000
if q1 == 0 {
stream.n = writeFirstBuf(stream.buf, digits[val], stream.n)
return
}
r1 := val - q1*1000
n := writeFirstBuf(stream.buf, digits[q1], stream.n)
writeBuf(stream.buf, digits[r1], n)
stream.n = n + 3
return
}
// WriteInt16 writes int16 to the stream.
func (stream *Stream) WriteInt16(nval int16) {
stream.ensure(6)
n := stream.n
var val uint16
if nval < 0 {
val = uint16(-nval)
stream.buf[n] = '-'
n++
} else {
val = uint16(nval)
}
q1 := val / 1000
if q1 == 0 {
stream.n = writeFirstBuf(stream.buf, digits[val], n)
return
}
r1 := val - q1*1000
n = writeFirstBuf(stream.buf, digits[q1], n)
writeBuf(stream.buf, digits[r1], n)
stream.n = n + 3
return
}
// WriteUint32 writes uint32 to the stream.
func (stream *Stream) WriteUint32(val uint32) {
stream.ensure(10)
n := stream.n
q1 := val / 1000
if q1 == 0 {
stream.n = writeFirstBuf(stream.buf, digits[val], n)
return
}
r1 := val - q1*1000
q2 := q1 / 1000
if q2 == 0 {
n := writeFirstBuf(stream.buf, digits[q1], n)
writeBuf(stream.buf, digits[r1], n)
stream.n = n + 3
return
}
r2 := q1 - q2*1000
q3 := q2 / 1000
if q3 == 0 {
n = writeFirstBuf(stream.buf, digits[q2], n)
} else {
r3 := q2 - q3*1000
stream.buf[n] = byte(q3 + '0')
n++
writeBuf(stream.buf, digits[r3], n)
n += 3
}
writeBuf(stream.buf, digits[r2], n)
writeBuf(stream.buf, digits[r1], n+3)
stream.n = n + 6
}
// WriteInt32 writes int32 to the stream.
func (stream *Stream) WriteInt32(nval int32) {
stream.ensure(11)
n := stream.n
var val uint32
if nval < 0 {
val = uint32(-nval)
stream.buf[n] = '-'
n++
} else {
val = uint32(nval)
}
q1 := val / 1000
if q1 == 0 {
stream.n = writeFirstBuf(stream.buf, digits[val], n)
return
}
r1 := val - q1*1000
q2 := q1 / 1000
if q2 == 0 {
n := writeFirstBuf(stream.buf, digits[q1], n)
writeBuf(stream.buf, digits[r1], n)
stream.n = n + 3
return
}
r2 := q1 - q2*1000
q3 := q2 / 1000
if q3 == 0 {
n = writeFirstBuf(stream.buf, digits[q2], n)
} else {
r3 := q2 - q3*1000
stream.buf[n] = byte(q3 + '0')
n++
writeBuf(stream.buf, digits[r3], n)
n += 3
}
writeBuf(stream.buf, digits[r2], n)
writeBuf(stream.buf, digits[r1], n+3)
stream.n = n + 6
}
// WriteUint64 writes uint64 to the stream.
func (stream *Stream) WriteUint64(val uint64) {
stream.ensure(20)
n := stream.n
q1 := val / 1000
if q1 == 0 {
stream.n = writeFirstBuf(stream.buf, digits[val], n)
return
}
r1 := val - q1*1000
q2 := q1 / 1000
if q2 == 0 {
n := writeFirstBuf(stream.buf, digits[q1], n)
writeBuf(stream.buf, digits[r1], n)
stream.n = n + 3
return
}
r2 := q1 - q2*1000
q3 := q2 / 1000
if q3 == 0 {
n = writeFirstBuf(stream.buf, digits[q2], n)
writeBuf(stream.buf, digits[r2], n)
writeBuf(stream.buf, digits[r1], n+3)
stream.n = n + 6
return
}
r3 := q2 - q3*1000
q4 := q3 / 1000
if q4 == 0 {
n = writeFirstBuf(stream.buf, digits[q3], n)
writeBuf(stream.buf, digits[r3], n)
writeBuf(stream.buf, digits[r2], n+3)
writeBuf(stream.buf, digits[r1], n+6)
stream.n = n + 9
return
}
r4 := q3 - q4*1000
q5 := q4 / 1000
if q5 == 0 {
n = writeFirstBuf(stream.buf, digits[q4], n)
writeBuf(stream.buf, digits[r4], n)
writeBuf(stream.buf, digits[r3], n+3)
writeBuf(stream.buf, digits[r2], n+6)
writeBuf(stream.buf, digits[r1], n+9)
stream.n = n + 12
return
}
r5 := q4 - q5*1000
q6 := q5 / 1000
if q6 == 0 {
n = writeFirstBuf(stream.buf, digits[q5], n)
} else {
n = writeFirstBuf(stream.buf, digits[q6], n)
r6 := q5 - q6*1000
writeBuf(stream.buf, digits[r6], n)
n += 3
}
writeBuf(stream.buf, digits[r5], n)
writeBuf(stream.buf, digits[r4], n+3)
writeBuf(stream.buf, digits[r3], n+6)
writeBuf(stream.buf, digits[r2], n+9)
writeBuf(stream.buf, digits[r1], n+12)
stream.n = n + 15
}
// WriteInt64 writes int64 to the stream.
func (stream *Stream) WriteInt64(nval int64) {
stream.ensure(20)
n := stream.n
var val uint64
if nval < 0 {
val = uint64(-nval)
stream.buf[n] = '-'
n++
} else {
val = uint64(nval)
}
q1 := val / 1000
if q1 == 0 {
stream.n = writeFirstBuf(stream.buf, digits[val], n)
return
}
r1 := val - q1*1000
q2 := q1 / 1000
if q2 == 0 {
n := writeFirstBuf(stream.buf, digits[q1], n)
writeBuf(stream.buf, digits[r1], n)
stream.n = n + 3
return
}
r2 := q1 - q2*1000
q3 := q2 / 1000
if q3 == 0 {
n = writeFirstBuf(stream.buf, digits[q2], n)
writeBuf(stream.buf, digits[r2], n)
writeBuf(stream.buf, digits[r1], n+3)
stream.n = n + 6
return
}
r3 := q2 - q3*1000
q4 := q3 / 1000
if q4 == 0 {
n = writeFirstBuf(stream.buf, digits[q3], n)
writeBuf(stream.buf, digits[r3], n)
writeBuf(stream.buf, digits[r2], n+3)
writeBuf(stream.buf, digits[r1], n+6)
stream.n = n + 9
return
}
r4 := q3 - q4*1000
q5 := q4 / 1000
if q5 == 0 {
n = writeFirstBuf(stream.buf, digits[q4], n)
writeBuf(stream.buf, digits[r4], n)
writeBuf(stream.buf, digits[r3], n+3)
writeBuf(stream.buf, digits[r2], n+6)
writeBuf(stream.buf, digits[r1], n+9)
stream.n = n + 12
return
}
r5 := q4 - q5*1000
q6 := q5 / 1000
if q6 == 0 {
n = writeFirstBuf(stream.buf, digits[q5], n)
} else {
stream.buf[n] = byte(q6 + '0')
n++
r6 := q5 - q6*1000
writeBuf(stream.buf, digits[r6], n)
n += 3
}
writeBuf(stream.buf, digits[r5], n)
writeBuf(stream.buf, digits[r4], n+3)
writeBuf(stream.buf, digits[r3], n+6)
writeBuf(stream.buf, digits[r2], n+9)
writeBuf(stream.buf, digits[r1], n+12)
stream.n = n + 15
}
// WriteInt writes int to the stream.
func (stream *Stream) WriteInt(val int) {
stream.WriteInt64(int64(val))
}
// WriteUint writes uint to the stream.
func (stream *Stream) WriteUint(val uint) {
stream.WriteUint64(uint64(val))
}


@ -0,0 +1,396 @@
package jsoniter
import (
"unicode/utf8"
)
// htmlSafeSet holds the value true if the ASCII character with the given
// array position can be safely represented inside a JSON string, embedded
// inside of HTML <script> tags, without any additional escaping.
//
// All values are true except for the ASCII control characters (0-31), the
// double quote ("), the backslash character ("\"), HTML opening and closing
// tags ("<" and ">"), and the ampersand ("&").
var htmlSafeSet = [utf8.RuneSelf]bool{
' ': true,
'!': true,
'"': false,
'#': true,
'$': true,
'%': true,
'&': false,
'\'': true,
'(': true,
')': true,
'*': true,
'+': true,
',': true,
'-': true,
'.': true,
'/': true,
'0': true,
'1': true,
'2': true,
'3': true,
'4': true,
'5': true,
'6': true,
'7': true,
'8': true,
'9': true,
':': true,
';': true,
'<': false,
'=': true,
'>': false,
'?': true,
'@': true,
'A': true,
'B': true,
'C': true,
'D': true,
'E': true,
'F': true,
'G': true,
'H': true,
'I': true,
'J': true,
'K': true,
'L': true,
'M': true,
'N': true,
'O': true,
'P': true,
'Q': true,
'R': true,
'S': true,
'T': true,
'U': true,
'V': true,
'W': true,
'X': true,
'Y': true,
'Z': true,
'[': true,
'\\': false,
']': true,
'^': true,
'_': true,
'`': true,
'a': true,
'b': true,
'c': true,
'd': true,
'e': true,
'f': true,
'g': true,
'h': true,
'i': true,
'j': true,
'k': true,
'l': true,
'm': true,
'n': true,
'o': true,
'p': true,
'q': true,
'r': true,
's': true,
't': true,
'u': true,
'v': true,
'w': true,
'x': true,
'y': true,
'z': true,
'{': true,
'|': true,
'}': true,
'~': true,
'\u007f': true,
}
// safeSet holds the value true if the ASCII character with the given array
// position can be represented inside a JSON string without any further
// escaping.
//
// All values are true except for the ASCII control characters (0-31), the
// double quote ("), and the backslash character ("\").
var safeSet = [utf8.RuneSelf]bool{
' ': true,
'!': true,
'"': false,
'#': true,
'$': true,
'%': true,
'&': true,
'\'': true,
'(': true,
')': true,
'*': true,
'+': true,
',': true,
'-': true,
'.': true,
'/': true,
'0': true,
'1': true,
'2': true,
'3': true,
'4': true,
'5': true,
'6': true,
'7': true,
'8': true,
'9': true,
':': true,
';': true,
'<': true,
'=': true,
'>': true,
'?': true,
'@': true,
'A': true,
'B': true,
'C': true,
'D': true,
'E': true,
'F': true,
'G': true,
'H': true,
'I': true,
'J': true,
'K': true,
'L': true,
'M': true,
'N': true,
'O': true,
'P': true,
'Q': true,
'R': true,
'S': true,
'T': true,
'U': true,
'V': true,
'W': true,
'X': true,
'Y': true,
'Z': true,
'[': true,
'\\': false,
']': true,
'^': true,
'_': true,
'`': true,
'a': true,
'b': true,
'c': true,
'd': true,
'e': true,
'f': true,
'g': true,
'h': true,
'i': true,
'j': true,
'k': true,
'l': true,
'm': true,
'n': true,
'o': true,
'p': true,
'q': true,
'r': true,
's': true,
't': true,
'u': true,
'v': true,
'w': true,
'x': true,
'y': true,
'z': true,
'{': true,
'|': true,
'}': true,
'~': true,
'\u007f': true,
}
var hex = "0123456789abcdef"
// WriteStringWithHTMLEscaped writes the string to the stream with HTML special characters escaped.
func (stream *Stream) WriteStringWithHTMLEscaped(s string) {
stream.ensure(32)
valLen := len(s)
toWriteLen := valLen
bufLengthMinusTwo := len(stream.buf) - 2 // make room for the quotes
if stream.n+toWriteLen > bufLengthMinusTwo {
toWriteLen = bufLengthMinusTwo - stream.n
}
n := stream.n
stream.buf[n] = '"'
n++
// write string, the fast path, without utf8 and escape support
i := 0
for ; i < toWriteLen; i++ {
c := s[i]
if c < utf8.RuneSelf && htmlSafeSet[c] {
stream.buf[n] = c
n++
} else {
break
}
}
if i == valLen {
stream.buf[n] = '"'
n++
stream.n = n
return
}
stream.n = n
writeStringSlowPathWithHTMLEscaped(stream, i, s, valLen)
}
func writeStringSlowPathWithHTMLEscaped(stream *Stream, i int, s string, valLen int) {
start := i
// for the remaining parts, we process them char by char
for i < valLen {
if b := s[i]; b < utf8.RuneSelf {
if htmlSafeSet[b] {
i++
continue
}
if start < i {
stream.WriteRaw(s[start:i])
}
switch b {
case '\\', '"':
stream.writeTwoBytes('\\', b)
case '\n':
stream.writeTwoBytes('\\', 'n')
case '\r':
stream.writeTwoBytes('\\', 'r')
case '\t':
stream.writeTwoBytes('\\', 't')
default:
// This encodes bytes < 0x20 except for \t, \n and \r.
// If escapeHTML is set, it also escapes <, >, and &
// because they can lead to security holes when
// user-controlled strings are rendered into JSON
// and served to some browsers.
stream.WriteRaw(`\u00`)
stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRuneInString(s[i:])
if c == utf8.RuneError && size == 1 {
if start < i {
stream.WriteRaw(s[start:i])
}
stream.WriteRaw(`\ufffd`)
i++
start = i
continue
}
// U+2028 is LINE SEPARATOR.
// U+2029 is PARAGRAPH SEPARATOR.
// They are both technically valid characters in JSON strings,
// but don't work in JSONP, which has to be evaluated as JavaScript,
// and can lead to security holes there. It is valid JSON to
// escape them, so we do so unconditionally.
// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
if c == '\u2028' || c == '\u2029' {
if start < i {
stream.WriteRaw(s[start:i])
}
stream.WriteRaw(`\u202`)
stream.writeByte(hex[c&0xF])
i += size
start = i
continue
}
i += size
}
if start < len(s) {
stream.WriteRaw(s[start:])
}
stream.writeByte('"')
}
// WriteString writes the string to the stream without HTML escaping.
func (stream *Stream) WriteString(s string) {
stream.ensure(32)
valLen := len(s)
toWriteLen := valLen
bufLengthMinusTwo := len(stream.buf) - 2 // make room for the quotes
if stream.n+toWriteLen > bufLengthMinusTwo {
toWriteLen = bufLengthMinusTwo - stream.n
}
n := stream.n
stream.buf[n] = '"'
n++
// write string, the fast path, without utf8 and escape support
i := 0
for ; i < toWriteLen; i++ {
c := s[i]
if c > 31 && c != '"' && c != '\\' {
stream.buf[n] = c
n++
} else {
break
}
}
if i == valLen {
stream.buf[n] = '"'
n++
stream.n = n
return
}
stream.n = n
writeStringSlowPath(stream, i, s, valLen)
}
func writeStringSlowPath(stream *Stream, i int, s string, valLen int) {
start := i
// for the remaining parts, we process them char by char
for i < valLen {
if b := s[i]; b < utf8.RuneSelf {
if safeSet[b] {
i++
continue
}
if start < i {
stream.WriteRaw(s[start:i])
}
switch b {
case '\\', '"':
stream.writeTwoBytes('\\', b)
case '\n':
stream.writeTwoBytes('\\', 'n')
case '\r':
stream.writeTwoBytes('\\', 'r')
case '\t':
stream.writeTwoBytes('\\', 't')
default:
// This encodes bytes < 0x20 except for \t, \n and \r.
stream.WriteRaw(`\u00`)
stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
}
i++
start = i
continue
}
i++
continue
}
if start < len(s) {
stream.WriteRaw(s[start:])
}
stream.writeByte('"')
}

vendor/github.com/json-iterator/go/jsoniter.go generated vendored Normal file

@ -0,0 +1,18 @@
// Package jsoniter implements encoding and decoding of JSON as defined in
// RFC 4627 and provides interfaces with syntax identical to the standard library's encoding/json.
// Converting from encoding/json to jsoniter requires no more than replacing the package
// with jsoniter and updating variable type declarations (if any).
// jsoniter's interfaces give 100% compatibility with code using the standard library.
//
// "JSON and Go"
// (https://golang.org/doc/articles/json_and_go.html)
// gives a description of how Marshal/Unmarshal operate
// between arbitrary or predefined json objects and bytes,
// and it applies to jsoniter.Marshal/Unmarshal as well.
//
// In addition, jsoniter.Iterator provides a different set of interfaces
// for iterating over given bytes/string/reader
// and yielding parsed elements one by one.
// This set of interfaces reads input as required and gives
// better performance.
package jsoniter
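A minimal sketch of the drop-in usage the package comment describes (the struct and values are illustrative):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type point struct{ X, Y int }

func main() {
	// Same call shapes as encoding/json's Marshal/Unmarshal.
	data, err := jsoniter.Marshal(point{X: 1, Y: 2})
	if err != nil {
		panic(err)
	}
	var p point
	if err := jsoniter.Unmarshal(data, &p); err != nil {
		panic(err)
	}
	fmt.Println(string(data), p) // {"X":1,"Y":2} {1 2}
}
```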

vendor/github.com/peterbourgon/diskv/LICENSE generated vendored Normal file

@ -0,0 +1,19 @@
Copyright (c) 2011-2012 Peter Bourgon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

vendor/github.com/peterbourgon/diskv/README.md generated vendored Normal file

@ -0,0 +1,141 @@
# What is diskv?
Diskv (disk-vee) is a simple, persistent key-value store written in the Go
language. It starts with an incredibly simple API for storing arbitrary data on
a filesystem by key, and builds several layers of performance-enhancing
abstraction on top. The end result is a conceptually simple, but highly
performant, disk-backed storage system.
[![Build Status][1]][2]
[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
[2]: https://drone.io/github.com/peterbourgon/diskv/latest
# Installing
Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
Then,
```bash
$ go get github.com/peterbourgon/diskv
```
[3]: http://golang.org
[4]: http://golang.org/doc/install/source
[5]: http://golang.org/doc/install
# Usage
```go
package main
import (
"fmt"
"github.com/peterbourgon/diskv"
)
func main() {
// Simplest transform function: put all the data files into the base dir.
flatTransform := func(s string) []string { return []string{} }
// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
d := diskv.New(diskv.Options{
BasePath: "my-data-dir",
Transform: flatTransform,
CacheSizeMax: 1024 * 1024,
})
// Write three bytes to the key "alpha".
key := "alpha"
d.Write(key, []byte{'1', '2', '3'})
// Read the value back out of the store.
value, _ := d.Read(key)
fmt.Printf("%v\n", value)
// Erase the key+value from the store (and the disk).
d.Erase(key)
}
```
More complex examples can be found in the "examples" subdirectory.
# Theory
## Basic idea
At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
The data is written to a single file on disk, with the same name as the key.
The key determines where that file will be stored, via a user-provided
`TransformFunc`, which takes a key and returns a slice (`[]string`)
corresponding to a path list where the key file will be stored. The simplest
TransformFunc,
```go
func SimpleTransform(key string) []string {
return []string{}
}
```
will place all keys in the same, base directory. The design is inspired by
[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
behavior is available in the content-addressable-storage example.
[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1
**Note** that your TransformFunc should ensure that one valid key doesn't
transform to a subset of another valid key. That is, it shouldn't be possible
to construct valid keys that resolve to directory names. As a concrete example,
if your TransformFunc splits on every 3 characters, then
```go
d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
d.Write("abc", val) // Error: attempted write to <base>/abc/abc, but it's a directory
```
This will be addressed in an upcoming version of diskv.
Probably the most important design principle behind diskv is that your data is
always flatly available on the disk. diskv will never do anything that would
prevent you from accessing, copying, backing up, or otherwise interacting with
your data via common UNIX commandline tools.
## Adding a cache
An in-memory caching layer is provided by combining the BasicStore
functionality with a simple map structure, and keeping it up-to-date as
appropriate. Since the map structure in Go is not threadsafe, it's combined
with a RWMutex to provide safe concurrent access.
## Adding order
diskv is a key-value store and therefore inherently unordered. An ordering
system can be injected into the store by passing something which satisfies the
diskv.Index interface. (A default implementation, using Google's
[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
user-provided Less function) index of the keys, which can be queried.
[7]: https://github.com/google/btree
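For instance, wiring in the provided BTreeIndex might look like this (a sketch based on the Index and LessFunction definitions in index.go):
```go
d := diskv.New(diskv.Options{
	BasePath:  "my-data-dir",
	Index:     &diskv.BTreeIndex{},
	IndexLess: func(a, b string) bool { return a < b },
})
d.Write("alpha", []byte("1"))
d.Write("beta", []byte("2"))

// Up to 10 keys that immediately follow "alpha", in key order.
keys := d.Index.Keys("alpha", 10) // ["beta"]
```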
## Adding compression
Something which implements the diskv.Compression interface may be passed
during store creation, so that all Writes and Reads are filtered through
a compression/decompression pipeline. Several default implementations,
using stdlib compression algorithms, are provided. Note that data is cached
compressed; the cost of decompression is borne with each Read.
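A store with transparent gzip compression can be configured like so (a sketch using the constructors from compression.go):
```go
d := diskv.New(diskv.Options{
	BasePath:     "my-data-dir",
	Compression:  diskv.NewGzipCompression(),
	CacheSizeMax: 1024 * 1024, // the cache holds the compressed bytes
})
d.Write("alpha", []byte("some compressible value")) // gzipped on disk
value, _ := d.Read("alpha")                         // decompressed on read
```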
## Streaming
diskv also now provides ReadStream and WriteStream methods, to allow very large
data to be handled efficiently.
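A sketch matching the WriteStream/ReadStream signatures in diskv.go ("huge-file.bin" is a stand-in for any large input; errors elided for brevity):
```go
d := diskv.New(diskv.Options{BasePath: "my-data-dir"})

// Stream a large value in without holding it all in memory.
f, _ := os.Open("huge-file.bin")
d.WriteStream("huge", f, false) // sync=false: rely on the OS to flush
f.Close()

// Stream it back out; direct=true bypasses (and evicts) any cached copy.
rc, _ := d.ReadStream("huge", true)
io.Copy(os.Stdout, rc)
rc.Close()
```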
# Future plans
* Needs plenty of robust testing: huge datasets, etc...
* More thorough benchmarking
* Your suggestions for use-cases I haven't thought of

vendor/github.com/peterbourgon/diskv/compression.go generated vendored Normal file

@ -0,0 +1,64 @@
package diskv
import (
"compress/flate"
"compress/gzip"
"compress/zlib"
"io"
)
// Compression is an interface that Diskv uses to implement compression of
// data. Writer takes a destination io.Writer and returns a WriteCloser that
// compresses all data written through it. Reader takes a source io.Reader and
// returns a ReadCloser that decompresses all data read through it. You may
// define these methods on your own type, or use one of the NewCompression
// helpers.
type Compression interface {
Writer(dst io.Writer) (io.WriteCloser, error)
Reader(src io.Reader) (io.ReadCloser, error)
}
// NewGzipCompression returns a Gzip-based Compression.
func NewGzipCompression() Compression {
return NewGzipCompressionLevel(flate.DefaultCompression)
}
// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
func NewGzipCompressionLevel(level int) Compression {
return &genericCompression{
wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
}
}
// NewZlibCompression returns a Zlib-based Compression.
func NewZlibCompression() Compression {
return NewZlibCompressionLevel(flate.DefaultCompression)
}
// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
func NewZlibCompressionLevel(level int) Compression {
return NewZlibCompressionLevelDict(level, nil)
}
// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
// level, based on the given dictionary.
func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
return &genericCompression{
func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
}
}
type genericCompression struct {
wf func(w io.Writer) (io.WriteCloser, error)
rf func(r io.Reader) (io.ReadCloser, error)
}
func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
return g.wf(dst)
}
func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
return g.rf(src)
}

vendor/github.com/peterbourgon/diskv/diskv.go generated vendored Normal file

@ -0,0 +1,624 @@
// Diskv (disk-vee) is a simple, persistent, key-value store.
// It stores all data flatly on the filesystem.
package diskv
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
)
const (
defaultBasePath = "diskv"
defaultFilePerm os.FileMode = 0666
defaultPathPerm os.FileMode = 0777
)
var (
defaultTransform = func(s string) []string { return []string{} }
errCanceled = errors.New("canceled")
errEmptyKey = errors.New("empty key")
errBadKey = errors.New("bad key")
errImportDirectory = errors.New("can't import a directory")
)
// TransformFunction transforms a key into a slice of strings, with each
// element in the slice representing a directory in the file path where the
// key's entry will eventually be stored.
//
// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
// the final location of the data file will be <basedir>/ab/cde/f/abcdef
type TransformFunction func(s string) []string
// Options define a set of properties that dictate Diskv behavior.
// All values are optional.
type Options struct {
BasePath string
Transform TransformFunction
CacheSizeMax uint64 // bytes
PathPerm os.FileMode
FilePerm os.FileMode
// If TempDir is set, it will enable filesystem atomic writes by
// writing temporary files to that location before being moved
// to BasePath.
// Note that TempDir MUST be on the same device/partition as
// BasePath.
TempDir string
Index Index
IndexLess LessFunction
Compression Compression
}
// Diskv implements the Diskv interface. You shouldn't construct Diskv
// structures directly; instead, use the New constructor.
type Diskv struct {
Options
mu sync.RWMutex
cache map[string][]byte
cacheSize uint64
}
// New returns an initialized Diskv structure, ready to use.
// If the path identified by baseDir already contains data,
// it will be accessible, but not yet cached.
func New(o Options) *Diskv {
if o.BasePath == "" {
o.BasePath = defaultBasePath
}
if o.Transform == nil {
o.Transform = defaultTransform
}
if o.PathPerm == 0 {
o.PathPerm = defaultPathPerm
}
if o.FilePerm == 0 {
o.FilePerm = defaultFilePerm
}
d := &Diskv{
Options: o,
cache: map[string][]byte{},
cacheSize: 0,
}
if d.Index != nil && d.IndexLess != nil {
d.Index.Initialize(d.IndexLess, d.Keys(nil))
}
return d
}
// Write synchronously writes the key-value pair to disk, making it immediately
// available for reads. Write relies on the filesystem to perform an eventual
// sync to physical media. If you need stronger guarantees, see WriteStream.
func (d *Diskv) Write(key string, val []byte) error {
return d.WriteStream(key, bytes.NewBuffer(val), false)
}
// WriteStream writes the data represented by the io.Reader to the disk, under
// the provided key. If sync is true, WriteStream performs an explicit sync on
// the file as soon as it's written.
//
// bytes.Buffer provides io.Reader semantics for basic data types.
func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
if len(key) <= 0 {
return errEmptyKey
}
d.mu.Lock()
defer d.mu.Unlock()
return d.writeStreamWithLock(key, r, sync)
}
// createKeyFileWithLock either creates the key file directly, or
// creates a temporary file in TempDir if it is set.
func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
if d.TempDir != "" {
if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
return nil, fmt.Errorf("temp mkdir: %s", err)
}
f, err := ioutil.TempFile(d.TempDir, "")
if err != nil {
return nil, fmt.Errorf("temp file: %s", err)
}
if err := f.Chmod(d.FilePerm); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return nil, fmt.Errorf("chmod: %s", err)
}
return f, nil
}
mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
if err != nil {
return nil, fmt.Errorf("open file: %s", err)
}
return f, nil
}
// writeStream does no input validation checking.
func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
if err := d.ensurePathWithLock(key); err != nil {
return fmt.Errorf("ensure path: %s", err)
}
f, err := d.createKeyFileWithLock(key)
if err != nil {
return fmt.Errorf("create key file: %s", err)
}
wc := io.WriteCloser(&nopWriteCloser{f})
if d.Compression != nil {
wc, err = d.Compression.Writer(f)
if err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("compression writer: %s", err)
}
}
if _, err := io.Copy(wc, r); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("i/o copy: %s", err)
}
if err := wc.Close(); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("compression close: %s", err)
}
if sync {
if err := f.Sync(); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("file sync: %s", err)
}
}
if err := f.Close(); err != nil {
return fmt.Errorf("file close: %s", err)
}
if f.Name() != d.completeFilename(key) {
if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("rename: %s", err)
}
}
if d.Index != nil {
d.Index.Insert(key)
}
d.bustCacheWithLock(key) // cache only on read
return nil
}
// Import imports the source file into diskv under the destination key. If the
// destination key already exists, it's overwritten. If move is true, the
// source file is removed after a successful import.
func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
if dstKey == "" {
return errEmptyKey
}
if fi, err := os.Stat(srcFilename); err != nil {
return err
} else if fi.IsDir() {
return errImportDirectory
}
d.mu.Lock()
defer d.mu.Unlock()
if err := d.ensurePathWithLock(dstKey); err != nil {
return fmt.Errorf("ensure path: %s", err)
}
if move {
if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
d.bustCacheWithLock(dstKey)
return nil
} else if err != syscall.EXDEV {
// Only a cross-device rename (EXDEV) falls through to the copy path below; any other error is returned.
return err
}
}
f, err := os.Open(srcFilename)
if err != nil {
return err
}
defer f.Close()
err = d.writeStreamWithLock(dstKey, f, false)
if err == nil && move {
err = os.Remove(srcFilename)
}
return err
}
// Read reads the key and returns the value.
// If the key is available in the cache, Read won't touch the disk.
// If the key is not in the cache, Read will have the side-effect of
// lazily caching the value.
func (d *Diskv) Read(key string) ([]byte, error) {
rc, err := d.ReadStream(key, false)
if err != nil {
return []byte{}, err
}
defer rc.Close()
return ioutil.ReadAll(rc)
}
// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
// If the value is cached from a previous read, and direct is false,
// ReadStream will use the cached value. Otherwise, it will return a handle to
// the file on disk, and cache the data on read.
//
// If direct is true, ReadStream will lazily delete any cached value for the
// key, and return a direct handle to the file on disk.
//
// If compression is enabled, ReadStream taps into the io.Reader stream prior
// to decompression, and caches the compressed data.
func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
d.mu.RLock()
defer d.mu.RUnlock()
if val, ok := d.cache[key]; ok {
if !direct {
buf := bytes.NewBuffer(val)
if d.Compression != nil {
return d.Compression.Reader(buf)
}
return ioutil.NopCloser(buf), nil
}
go func() {
d.mu.Lock()
defer d.mu.Unlock()
d.uncacheWithLock(key, uint64(len(val)))
}()
}
return d.readWithRLock(key)
}
// read ignores the cache, and returns an io.ReadCloser representing the
// decompressed data for the given key, streamed from the disk. Clients should
// acquire a read lock on the Diskv and check the cache themselves before
// calling read.
func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
filename := d.completeFilename(key)
fi, err := os.Stat(filename)
if err != nil {
return nil, err
}
if fi.IsDir() {
return nil, os.ErrNotExist
}
f, err := os.Open(filename)
if err != nil {
return nil, err
}
var r io.Reader
if d.CacheSizeMax > 0 {
r = newSiphon(f, d, key)
} else {
r = &closingReader{f}
}
var rc = io.ReadCloser(ioutil.NopCloser(r))
if d.Compression != nil {
rc, err = d.Compression.Reader(r)
if err != nil {
return nil, err
}
}
return rc, nil
}
// closingReader provides a Reader that automatically closes the
// embedded ReadCloser when it reaches EOF
type closingReader struct {
rc io.ReadCloser
}
func (cr closingReader) Read(p []byte) (int, error) {
n, err := cr.rc.Read(p)
if err == io.EOF {
if closeErr := cr.rc.Close(); closeErr != nil {
return n, closeErr // close must succeed for Read to succeed
}
}
return n, err
}
// siphon is like a TeeReader: it copies all data read through it to an
// internal buffer, and moves that buffer to the cache at EOF.
type siphon struct {
f *os.File
d *Diskv
key string
buf *bytes.Buffer
}
// newSiphon constructs a siphoning reader that represents the passed file.
// When a successful series of reads ends in an EOF, the siphon will write
// the buffered data to Diskv's cache under the given key.
func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
return &siphon{
f: f,
d: d,
key: key,
buf: &bytes.Buffer{},
}
}
// Read implements the io.Reader interface for siphon.
func (s *siphon) Read(p []byte) (int, error) {
n, err := s.f.Read(p)
if err == nil {
return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
}
if err == io.EOF {
s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
if closeErr := s.f.Close(); closeErr != nil {
return n, closeErr // close must succeed for Read to succeed
}
return n, err
}
return n, err
}
// Erase synchronously erases the given key from the disk and the cache.
func (d *Diskv) Erase(key string) error {
d.mu.Lock()
defer d.mu.Unlock()
d.bustCacheWithLock(key)
// erase from index
if d.Index != nil {
d.Index.Delete(key)
}
// erase from disk
filename := d.completeFilename(key)
if s, err := os.Stat(filename); err == nil {
if s.IsDir() {
return errBadKey
}
if err = os.Remove(filename); err != nil {
return err
}
} else {
// Return err as-is so caller can do os.IsNotExist(err).
return err
}
// clean up and return
d.pruneDirsWithLock(key)
return nil
}
// EraseAll will delete all of the data from the store, both in the cache and on
// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
// diskv-related data. Care should be taken to always specify a diskv base
// directory that is exclusively for diskv data.
func (d *Diskv) EraseAll() error {
d.mu.Lock()
defer d.mu.Unlock()
d.cache = make(map[string][]byte)
d.cacheSize = 0
if d.TempDir != "" {
os.RemoveAll(d.TempDir) // errors ignored
}
return os.RemoveAll(d.BasePath)
}
// Has returns true if the given key exists.
func (d *Diskv) Has(key string) bool {
d.mu.Lock()
defer d.mu.Unlock()
if _, ok := d.cache[key]; ok {
return true
}
filename := d.completeFilename(key)
s, err := os.Stat(filename)
if err != nil {
return false
}
if s.IsDir() {
return false
}
return true
}
// Keys returns a channel that will yield every key accessible by the store,
// in undefined order. If a cancel channel is provided, closing it will
// terminate and close the keys channel.
func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
return d.KeysPrefix("", cancel)
}
// KeysPrefix returns a channel that will yield every key accessible by the
// store with the given prefix, in undefined order. If a cancel channel is
// provided, closing it will terminate and close the keys channel. If the
// provided prefix is the empty string, all keys will be yielded.
func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
var prepath string
if prefix == "" {
prepath = d.BasePath
} else {
prepath = d.pathFor(prefix)
}
c := make(chan string)
go func() {
filepath.Walk(prepath, walker(c, prefix, cancel))
close(c)
}()
return c
}
// walker returns a function which satisfies the filepath.WalkFunc interface.
// It sends every non-directory file entry down the channel c.
func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
return nil // "pass"
}
select {
case c <- info.Name():
case <-cancel:
return errCanceled
}
return nil
}
}
// pathFor returns the absolute path for location on the filesystem where the
// data for the given key will be stored.
func (d *Diskv) pathFor(key string) string {
return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
}
// ensurePathWithLock is a helper function that generates all necessary
// directories on the filesystem for the given key.
func (d *Diskv) ensurePathWithLock(key string) error {
return os.MkdirAll(d.pathFor(key), d.PathPerm)
}
// completeFilename returns the absolute path to the file for the given key.
func (d *Diskv) completeFilename(key string) string {
return filepath.Join(d.pathFor(key), key)
}
// cacheWithLock attempts to cache the given key-value pair in the store's
// cache. It can fail if the value is larger than the cache's maximum size.
func (d *Diskv) cacheWithLock(key string, val []byte) error {
valueSize := uint64(len(val))
if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
return fmt.Errorf("%s; not caching", err)
}
// be very strict about memory guarantees
if (d.cacheSize + valueSize) > d.CacheSizeMax {
panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
}
d.cache[key] = val
d.cacheSize += valueSize
return nil
}
// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
d.mu.Lock()
defer d.mu.Unlock()
return d.cacheWithLock(key, val)
}
func (d *Diskv) bustCacheWithLock(key string) {
if val, ok := d.cache[key]; ok {
d.uncacheWithLock(key, uint64(len(val)))
}
}
func (d *Diskv) uncacheWithLock(key string, sz uint64) {
d.cacheSize -= sz
delete(d.cache, key)
}
// pruneDirsWithLock deletes empty directories in the path walk leading to the
// key k. Typically this function is called after an Erase is made.
func (d *Diskv) pruneDirsWithLock(key string) error {
pathlist := d.Transform(key)
for i := range pathlist {
dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))
// thanks to Steven Blenkinsop for this snippet
switch fi, err := os.Stat(dir); true {
case err != nil:
return err
case !fi.IsDir():
panic(fmt.Sprintf("corrupt dirstate at %s", dir))
}
nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
if err != nil {
return err
} else if len(nlinks) > 0 {
return nil // has subdirs -- do not prune
}
if err = os.Remove(dir); err != nil {
return err
}
}
return nil
}
// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
// until the cache has at least valueSize bytes available.
func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
if valueSize > d.CacheSizeMax {
return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
}
safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }
for key, val := range d.cache {
if safe() {
break
}
d.uncacheWithLock(key, uint64(len(val)))
}
if !safe() {
panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
}
return nil
}
// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
// satisfy the io.WriteCloser interface.
type nopWriteCloser struct {
io.Writer
}
func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
func (wc *nopWriteCloser) Close() error { return nil }

vendor/github.com/peterbourgon/diskv/index.go generated vendored Normal file

@ -0,0 +1,115 @@
package diskv
import (
"sync"
"github.com/google/btree"
)
// Index is a generic interface for things that can
// provide an ordered list of keys.
type Index interface {
Initialize(less LessFunction, keys <-chan string)
Insert(key string)
Delete(key string)
Keys(from string, n int) []string
}
// LessFunction is used to initialize an Index of keys in a specific order.
type LessFunction func(string, string) bool
// btreeString is a custom data type that satisfies the BTree Less interface,
// making the strings it wraps sortable by the BTree package.
type btreeString struct {
s string
l LessFunction
}
// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
func (s btreeString) Less(i btree.Item) bool {
return s.l(s.s, i.(btreeString).s)
}
// BTreeIndex is an implementation of the Index interface using google/btree.
type BTreeIndex struct {
sync.RWMutex
LessFunction
*btree.BTree
}
// Initialize populates the BTree tree with data from the keys channel,
// according to the passed less function. It's destructive to the BTreeIndex.
func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
i.Lock()
defer i.Unlock()
i.LessFunction = less
i.BTree = rebuild(less, keys)
}
// Insert inserts the given key (only) into the BTree tree.
func (i *BTreeIndex) Insert(key string) {
i.Lock()
defer i.Unlock()
if i.BTree == nil || i.LessFunction == nil {
panic("uninitialized index")
}
i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
}
// Delete removes the given key (only) from the BTree tree.
func (i *BTreeIndex) Delete(key string) {
i.Lock()
defer i.Unlock()
if i.BTree == nil || i.LessFunction == nil {
panic("uninitialized index")
}
i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
}
// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
// Keys will return the first n keys. If the passed 'from' key is non-empty, the
// first key in the returned slice will be the key that immediately follows the
// passed key, in key order.
func (i *BTreeIndex) Keys(from string, n int) []string {
i.RLock()
defer i.RUnlock()
if i.BTree == nil || i.LessFunction == nil {
panic("uninitialized index")
}
if i.BTree.Len() <= 0 {
return []string{}
}
btreeFrom := btreeString{s: from, l: i.LessFunction}
skipFirst := true
if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
// no such key, so fabricate an always-smallest item
btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
skipFirst = false
}
keys := []string{}
iterator := func(i btree.Item) bool {
keys = append(keys, i.(btreeString).s)
return len(keys) < n
}
i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)
if skipFirst && len(keys) > 0 {
keys = keys[1:]
}
return keys
}
// rebuild does the work of regenerating the index
// with the given keys.
func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
tree := btree.New(2)
for key := range keys {
tree.ReplaceOrInsert(btreeString{s: key, l: less})
}
return tree
}


@ -1,20 +0,0 @@
# go/codec
This repository contains the `go-codec` library,
a High Performance and Feature-Rich Idiomatic encode/decode and rpc library for
- msgpack: https://github.com/msgpack/msgpack
- binc: http://github.com/ugorji/binc
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
- json: http://json.org http://tools.ietf.org/html/rfc7159
For more information:
- [see the codec/Readme for quick usage information](https://github.com/ugorji/go/tree/master/codec#readme)
- [view the API on godoc](http://godoc.org/github.com/ugorji/go/codec)
- [read the detailed usage/how-to primer](http://ugorji.net/blog/go-codec-primer)
Install using:
go get github.com/ugorji/go/codec


@ -1,199 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
/*
High Performance, Feature-Rich Idiomatic Go codec/encoding library for
binc, msgpack, cbor, json.
Supported Serialization formats are:
- msgpack: https://github.com/msgpack/msgpack
- binc: http://github.com/ugorji/binc
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
- json: http://json.org http://tools.ietf.org/html/rfc7159
- simple:
To install:
go get github.com/ugorji/go/codec
This package understands the 'unsafe' tag, to allow using unsafe semantics:
- When decoding into a struct, you need to read the field name as a string
so you can find the struct field it is mapped to.
Using `unsafe` will bypass the allocation and copying overhead of []byte->string conversion.
To install using unsafe, pass the 'unsafe' tag:
go get -tags=unsafe github.com/ugorji/go/codec
For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .
The idiomatic Go support is as seen in other encoding packages in
the standard library (ie json, xml, gob, etc).
Rich Feature Set includes:
- Simple but extremely powerful and feature-rich API
- Very High Performance.
Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
- Multiple conversions:
Package coerces types where appropriate
e.g. decode an int in the stream into a float, etc.
- Corner Cases:
Overflows, nil maps/slices, nil values in streams are handled correctly
- Standard field renaming via tags
- Support for omitting empty fields during an encoding
- Encoding from any value and decoding into pointer to any value
(struct, slice, map, primitives, pointers, interface{}, etc)
- Extensions to support efficient encoding/decoding of any named types
- Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
- Decoding without a schema (into a interface{}).
Includes Options to configure what specific map or slice type to use
when decoding an encoded list or map into a nil interface{}
- Encode a struct as an array, and decode struct from an array in the data stream
- Comprehensive support for anonymous fields
- Fast (no-reflection) encoding/decoding of common maps and slices
- Code-generation for faster performance.
- Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
- Support indefinite-length formats to enable true streaming
(for formats which support it e.g. json, cbor)
- Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
This mostly applies to maps, where iteration order is non-deterministic.
- NIL in data stream decoded as zero value
- Never silently skip data when decoding.
User decides whether to return an error or silently skip data when keys or indexes
in the data stream do not map to fields in the struct.
- Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
- Encode/Decode from/to chan types (for iterative streaming support)
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
- Handle unique idiosyncrasies of codecs e.g.
- For messagepack, configure how ambiguities in handling raw bytes are resolved
- For messagepack, provide rpc server/client codec to support
msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
Extension Support
Users can register a function to handle the encoding or decoding of
their custom types.
There are no restrictions on what the custom type can be. Some examples:
type BisSet []int
type BitSet64 uint64
type UUID string
type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
type GifImage struct { ... }
As an illustration, MyStructWithUnexportedFields would normally be
encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can
encode any of these however you like.
RPC
RPC Client and Server Codecs are implemented, so the codecs can be used
with the standard net/rpc package.
Usage
The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
The Encoder and Decoder are NOT safe for concurrent use.
Consequently, the usage model is basically:
- Create and initialize the Handle before any use.
Once created, DO NOT modify it.
- Multiple Encoders or Decoders can now use the Handle concurrently.
They only read information off the Handle (never write).
- However, each Encoder or Decoder MUST not be used concurrently
- To re-use an Encoder/Decoder, call Reset(...) on it first.
This allows you use state maintained on the Encoder/Decoder.
Sample usage model:
// create and configure Handle
var (
bh codec.BincHandle
mh codec.MsgpackHandle
ch codec.CborHandle
)
mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
// configure extensions
// e.g. for msgpack, define functions and enable Time support for tag 1
// mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
// create and use decoder/encoder
var (
r io.Reader
w io.Writer
b []byte
h = &bh // or mh to use msgpack
)
dec = codec.NewDecoder(r, h)
dec = codec.NewDecoderBytes(b, h)
err = dec.Decode(&v)
enc = codec.NewEncoder(w, h)
enc = codec.NewEncoderBytes(&b, h)
err = enc.Encode(v)
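// to re-use a Decoder/Encoder, Reset it first (a sketch; Reset/ResetBytes
// take the new source or destination):
dec.Reset(r)
enc.Reset(w)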
//RPC Server
go func() {
for {
conn, err := listener.Accept()
rpcCodec := codec.GoRpc.ServerCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
rpc.ServeCodec(rpcCodec)
}
}()
//RPC Communication (client side)
conn, err = net.Dial("tcp", "localhost:5555")
rpcCodec := codec.GoRpc.ClientCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
client := rpc.NewClientWithCodec(rpcCodec)
*/
package codec
// Benefits of go-codec:
//
// - encoding/json always reads whole file into memory first.
// This makes it unsuitable for parsing very large files.
// - encoding/xml cannot parse into a map[string]interface{}
// I found this out on reading https://github.com/clbanning/mxj
// TODO:
//
// - optimization for codecgen:
// if len of entity is <= 3 words, then support a value receiver for encode.
// - (En|De)coder should store an error when it occurs.
// Until reset, subsequent calls return that stored error.
// This means that bare panics must go away.
// All errors must be raised through the errorf method.
// - Decoding using a chan is good, but incurs concurrency costs.
// This is because there's no fast way to use a channel without it
// having to switch goroutines constantly.
// Callback pattern is still the best. Maybe consider supporting something like:
// type X struct {
// Name string
// Ys []Y
// Ys chan <- Y
// Ys func(Y) -> call this function for each entry
// }
// - Consider adding a isZeroer interface { isZero() bool }
// It is used within isEmpty, for omitEmpty support.
// - Consider making Handle used AS-IS within the encoding/decoding session.
// This means that we don't cache Handle information within the (En|De)coder,
// except where we really need it at Reset(...)
// - Consider adding math/big support
// - Consider reducing the size of the generated functions:
// Maybe use one loop, and put the conditionals in the loop.
// for ... { if cLen > 0 { if j == cLen { break } } else if dd.CheckBreak() { break } }


@@ -1,148 +0,0 @@
# Codec
High Performance, Feature-Rich Idiomatic Go codec/encoding library for
binc, msgpack, cbor, json.
Supported Serialization formats are:
- msgpack: https://github.com/msgpack/msgpack
- binc: http://github.com/ugorji/binc
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
- json: http://json.org http://tools.ietf.org/html/rfc7159
- simple:
To install:
go get github.com/ugorji/go/codec
This package understands the `unsafe` tag, to allow using unsafe semantics:
- When decoding into a struct, you need to read the field name as a string
so you can find the struct field it is mapped to.
Using `unsafe` will bypass the allocation and copying overhead of `[]byte->string` conversion.
To use it, you must pass the `unsafe` tag during install:
```
go install -tags=unsafe github.com/ugorji/go/codec
```
Online documentation: http://godoc.org/github.com/ugorji/go/codec
Detailed Usage/How-to Primer: http://ugorji.net/blog/go-codec-primer
The idiomatic Go support is as seen in other encoding packages in
the standard library (i.e. json, xml, gob, etc).
Rich Feature Set includes:
- Simple but extremely powerful and feature-rich API
- Very High Performance.
Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X.
- Multiple conversions:
Package coerces types where appropriate
e.g. decode an int in the stream into a float, etc.
- Corner Cases:
Overflows, nil maps/slices, nil values in streams are handled correctly
- Standard field renaming via tags
- Support for omitting empty fields during an encoding
- Encoding from any value and decoding into pointer to any value
(struct, slice, map, primitives, pointers, interface{}, etc)
- Extensions to support efficient encoding/decoding of any named types
- Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
- Decoding without a schema (into an interface{}).
Includes Options to configure what specific map or slice type to use
when decoding an encoded list or map into a nil interface{}
- Encode a struct as an array, and decode struct from an array in the data stream
- Comprehensive support for anonymous fields
- Fast (no-reflection) encoding/decoding of common maps and slices
- Code-generation for faster performance.
- Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
- Support indefinite-length formats to enable true streaming
(for formats which support it e.g. json, cbor)
- Support canonical encoding, where a value is ALWAYS encoded as the same sequence of bytes.
This mostly applies to maps, where iteration order is non-deterministic.
- NIL in data stream decoded as zero value
- Never silently skip data when decoding.
User decides whether to return an error or silently skip data when keys or indexes
in the data stream do not map to fields in the struct.
- Encode/Decode from/to chan types (for iterative streaming support)
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
- Provides an RPC Server and Client Codec for the net/rpc communication protocol.
- Handle unique idiosyncrasies of codecs e.g.
- For messagepack, configure how ambiguities in handling raw bytes are resolved
- For messagepack, provide rpc server/client codec to support
msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
## Extension Support
Users can register a function to handle the encoding or decoding of
their custom types.
There are no restrictions on what the custom type can be. Some examples:
type BitSet []int
type BitSet64 uint64
type UUID string
type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
type GifImage struct { ... }
As an illustration, MyStructWithUnexportedFields would normally be
encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can
encode any of these however you like.
## RPC
RPC Client and Server Codecs are implemented, so the codecs can be used
with the standard net/rpc package.
## Usage
Typical usage model:
// create and configure Handle
var (
bh codec.BincHandle
mh codec.MsgpackHandle
ch codec.CborHandle
)
mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
// configure extensions
// e.g. for msgpack, define functions and enable Time support for tag 1
// mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
// create and use decoder/encoder
var (
r io.Reader
w io.Writer
b []byte
h = &bh // or mh to use msgpack
)
dec = codec.NewDecoder(r, h)
dec = codec.NewDecoderBytes(b, h)
err = dec.Decode(&v)
enc = codec.NewEncoder(w, h)
enc = codec.NewEncoderBytes(&b, h)
err = enc.Encode(v)
//RPC Server
go func() {
for {
conn, err := listener.Accept()
rpcCodec := codec.GoRpc.ServerCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
rpc.ServeCodec(rpcCodec)
}
}()
//RPC Communication (client side)
conn, err = net.Dial("tcp", "localhost:5555")
rpcCodec := codec.GoRpc.ClientCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
client := rpc.NewClientWithCodec(rpcCodec)


@@ -1,929 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"math"
"reflect"
"time"
)
const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning.
// vd occupies the high 4 bits of the descriptor byte; the constants below use the low 4 bits (16 slots) and are shifted left by 4 when written
const (
bincVdSpecial byte = iota
bincVdPosInt
bincVdNegInt
bincVdFloat
bincVdString
bincVdByteArray
bincVdArray
bincVdMap
bincVdTimestamp
bincVdSmallInt
bincVdUnicodeOther
bincVdSymbol
bincVdDecimal
_ // open slot
_ // open slot
bincVdCustomExt = 0x0f
)
const (
bincSpNil byte = iota
bincSpFalse
bincSpTrue
bincSpNan
bincSpPosInf
bincSpNegInf
bincSpZeroFloat
bincSpZero
bincSpNegOne
)
const (
bincFlBin16 byte = iota
bincFlBin32
_ // bincFlBin32e
bincFlBin64
_ // bincFlBin64e
// others not currently supported
)
type bincEncDriver struct {
e *Encoder
w encWriter
m map[string]uint16 // symbols
b [scratchByteArrayLen]byte
s uint16 // symbols sequencer
encNoSeparator
}
func (e *bincEncDriver) IsBuiltinType(rt uintptr) bool {
return rt == timeTypId
}
func (e *bincEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {
if rt == timeTypId {
var bs []byte
switch x := v.(type) {
case time.Time:
bs = encodeTime(x)
case *time.Time:
bs = encodeTime(*x)
default:
e.e.errorf("binc error encoding builtin: expect time.Time, received %T", v)
}
e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
e.w.writeb(bs)
}
}
func (e *bincEncDriver) EncodeNil() {
e.w.writen1(bincVdSpecial<<4 | bincSpNil)
}
func (e *bincEncDriver) EncodeBool(b bool) {
if b {
e.w.writen1(bincVdSpecial<<4 | bincSpTrue)
} else {
e.w.writen1(bincVdSpecial<<4 | bincSpFalse)
}
}
func (e *bincEncDriver) EncodeFloat32(f float32) {
if f == 0 {
e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
return
}
e.w.writen1(bincVdFloat<<4 | bincFlBin32)
bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
}
func (e *bincEncDriver) EncodeFloat64(f float64) {
if f == 0 {
e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
return
}
bigen.PutUint64(e.b[:8], math.Float64bits(f))
if bincDoPrune {
i := 7
for ; i >= 0 && (e.b[i] == 0); i-- {
}
i++
if i <= 6 {
e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64)
e.w.writen1(byte(i))
e.w.writeb(e.b[:i])
return
}
}
e.w.writen1(bincVdFloat<<4 | bincFlBin64)
e.w.writeb(e.b[:8])
}
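// encIntegerPrune writes v big-endian in lim (4 or 8) bytes, dropping leading
// sign-extension bytes when pruning is enabled; the descriptor's low bits
// record how many bytes follow (count - 1).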
func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) {
if lim == 4 {
bigen.PutUint32(e.b[:lim], uint32(v))
} else {
bigen.PutUint64(e.b[:lim], v)
}
if bincDoPrune {
i := pruneSignExt(e.b[:lim], pos)
e.w.writen1(bd | lim - 1 - byte(i))
e.w.writeb(e.b[i:lim])
} else {
e.w.writen1(bd | lim - 1)
e.w.writeb(e.b[:lim])
}
}
func (e *bincEncDriver) EncodeInt(v int64) {
const nbd byte = bincVdNegInt << 4
if v >= 0 {
e.encUint(bincVdPosInt<<4, true, uint64(v))
} else if v == -1 {
e.w.writen1(bincVdSpecial<<4 | bincSpNegOne)
} else {
e.encUint(bincVdNegInt<<4, false, uint64(-v))
}
}
func (e *bincEncDriver) EncodeUint(v uint64) {
e.encUint(bincVdPosInt<<4, true, v)
}
func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) {
if v == 0 {
e.w.writen1(bincVdSpecial<<4 | bincSpZero)
} else if pos && v >= 1 && v <= 16 {
e.w.writen1(bincVdSmallInt<<4 | byte(v-1))
} else if v <= math.MaxUint8 {
e.w.writen2(bd|0x0, byte(v))
} else if v <= math.MaxUint16 {
e.w.writen1(bd | 0x01)
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
} else if v <= math.MaxUint32 {
e.encIntegerPrune(bd, pos, v, 4)
} else {
e.encIntegerPrune(bd, pos, v, 8)
}
}
func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
bs := ext.WriteExt(rv)
if bs == nil {
e.EncodeNil()
return
}
e.encodeExtPreamble(uint8(xtag), len(bs))
e.w.writeb(bs)
}
func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
e.w.writeb(re.Data)
}
func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
e.encLen(bincVdCustomExt<<4, uint64(length))
e.w.writen1(xtag)
}
func (e *bincEncDriver) EncodeArrayStart(length int) {
e.encLen(bincVdArray<<4, uint64(length))
}
func (e *bincEncDriver) EncodeMapStart(length int) {
e.encLen(bincVdMap<<4, uint64(length))
}
func (e *bincEncDriver) EncodeString(c charEncoding, v string) {
l := uint64(len(v))
e.encBytesLen(c, l)
if l > 0 {
e.w.writestr(v)
}
}
func (e *bincEncDriver) EncodeSymbol(v string) {
// if WriteSymbolsNoRefs {
// e.encodeString(c_UTF8, v)
// return
// }
//symbols only offer benefit when string length > 1.
//This is because strings with length 1 take only 2 bytes to store
//(bd with embedded length, and single byte for string val).
l := len(v)
if l == 0 {
e.encBytesLen(c_UTF8, 0)
return
} else if l == 1 {
e.encBytesLen(c_UTF8, 1)
e.w.writen1(v[0])
return
}
if e.m == nil {
e.m = make(map[string]uint16, 16)
}
ui, ok := e.m[v]
if ok {
if ui <= math.MaxUint8 {
e.w.writen2(bincVdSymbol<<4, byte(ui))
} else {
e.w.writen1(bincVdSymbol<<4 | 0x8)
bigenHelper{e.b[:2], e.w}.writeUint16(ui)
}
} else {
e.s++
ui = e.s
//ui = uint16(atomic.AddUint32(&e.s, 1))
e.m[v] = ui
var lenprec uint8
if l <= math.MaxUint8 {
// lenprec = 0
} else if l <= math.MaxUint16 {
lenprec = 1
} else if int64(l) <= math.MaxUint32 {
lenprec = 2
} else {
lenprec = 3
}
if ui <= math.MaxUint8 {
e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui))
} else {
e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec)
bigenHelper{e.b[:2], e.w}.writeUint16(ui)
}
if lenprec == 0 {
e.w.writen1(byte(l))
} else if lenprec == 1 {
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l))
} else if lenprec == 2 {
bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l))
} else {
bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l))
}
e.w.writestr(v)
}
}
func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
l := uint64(len(v))
e.encBytesLen(c, l)
if l > 0 {
e.w.writeb(v)
}
}
func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
//TODO: support bincUnicodeOther (for now, just use string or bytearray)
if c == c_RAW {
e.encLen(bincVdByteArray<<4, length)
} else {
e.encLen(bincVdString<<4, length)
}
}
func (e *bincEncDriver) encLen(bd byte, l uint64) {
if l < 12 {
e.w.writen1(bd | uint8(l+4))
} else {
e.encLenNumber(bd, l)
}
}
func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
if v <= math.MaxUint8 {
e.w.writen2(bd, byte(v))
} else if v <= math.MaxUint16 {
e.w.writen1(bd | 0x01)
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
} else if v <= math.MaxUint32 {
e.w.writen1(bd | 0x02)
bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
} else {
e.w.writen1(bd | 0x03)
bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v))
}
}
//------------------------------------
type bincDecSymbol struct {
s string
b []byte
i uint16
}
type bincDecDriver struct {
d *Decoder
h *BincHandle
r decReader
br bool // bytes reader
bdRead bool
bd byte
vd byte
vs byte
noStreamingCodec
decNoSeparator
b [scratchByteArrayLen]byte
// linear searching on this slice is ok,
// because we typically expect < 32 symbols in each stream.
s []bincDecSymbol
}
func (d *bincDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.vd = d.bd >> 4
d.vs = d.bd & 0x0f
d.bdRead = true
}
func (d *bincDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}
func (d *bincDecDriver) ContainerType() (vt valueType) {
if d.vd == bincVdSpecial && d.vs == bincSpNil {
return valueTypeNil
} else if d.vd == bincVdByteArray {
return valueTypeBytes
} else if d.vd == bincVdString {
return valueTypeString
} else if d.vd == bincVdArray {
return valueTypeArray
} else if d.vd == bincVdMap {
return valueTypeMap
} else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
}
return valueTypeUnset
}
func (d *bincDecDriver) TryDecodeAsNil() bool {
if !d.bdRead {
d.readNextBd()
}
if d.bd == bincVdSpecial<<4|bincSpNil {
d.bdRead = false
return true
}
return false
}
func (d *bincDecDriver) IsBuiltinType(rt uintptr) bool {
return rt == timeTypId
}
func (d *bincDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {
if !d.bdRead {
d.readNextBd()
}
if rt == timeTypId {
if d.vd != bincVdTimestamp {
d.d.errorf("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
return
}
tt, err := decodeTime(d.r.readx(int(d.vs)))
if err != nil {
panic(err)
}
var vt *time.Time = v.(*time.Time)
*vt = tt
d.bdRead = false
}
}
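// decFloatPre reads the float's raw bytes into d.b: defaultLen bytes normally,
// or, for a pruned encoding (vs&0x8 set), a 1-byte count followed by that many
// leading bytes, with the pruned trailing bytes zero-filled.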
func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
if vs&0x8 == 0 {
d.r.readb(d.b[0:defaultLen])
} else {
l := d.r.readn1()
if l > 8 {
d.d.errorf("At most 8 bytes used to represent float. Received: %v bytes", l)
return
}
for i := l; i < 8; i++ {
d.b[i] = 0
}
d.r.readb(d.b[0:l])
}
}
func (d *bincDecDriver) decFloat() (f float64) {
//if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; }
if x := d.vs & 0x7; x == bincFlBin32 {
d.decFloatPre(d.vs, 4)
f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4])))
} else if x == bincFlBin64 {
d.decFloatPre(d.vs, 8)
f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
} else {
d.d.errorf("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs)
return
}
return
}
func (d *bincDecDriver) decUint() (v uint64) {
// need to inline the code (interface conversion and type assertion expensive)
switch d.vs {
case 0:
v = uint64(d.r.readn1())
case 1:
d.r.readb(d.b[6:8])
v = uint64(bigen.Uint16(d.b[6:8]))
case 2:
d.b[4] = 0
d.r.readb(d.b[5:8])
v = uint64(bigen.Uint32(d.b[4:8]))
case 3:
d.r.readb(d.b[4:8])
v = uint64(bigen.Uint32(d.b[4:8]))
case 4, 5, 6:
lim := int(7 - d.vs)
d.r.readb(d.b[lim:8])
for i := 0; i < lim; i++ {
d.b[i] = 0
}
v = uint64(bigen.Uint64(d.b[:8]))
case 7:
d.r.readb(d.b[:8])
v = uint64(bigen.Uint64(d.b[:8]))
default:
d.d.errorf("unsigned integers with greater than 64 bits of precision not supported")
return
}
return
}
func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) {
if !d.bdRead {
d.readNextBd()
}
vd, vs := d.vd, d.vs
if vd == bincVdPosInt {
ui = d.decUint()
} else if vd == bincVdNegInt {
ui = d.decUint()
neg = true
} else if vd == bincVdSmallInt {
ui = uint64(d.vs) + 1
} else if vd == bincVdSpecial {
if vs == bincSpZero {
//i = 0
} else if vs == bincSpNegOne {
neg = true
ui = 1
} else {
d.d.errorf("numeric decode fails for special value: d.vs: 0x%x", d.vs)
return
}
} else {
d.d.errorf("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
return
}
return
}
func (d *bincDecDriver) DecodeInt(bitsize uint8) (i int64) {
ui, neg := d.decCheckInteger()
i, overflow := chkOvf.SignedInt(ui)
if overflow {
d.d.errorf("simple: overflow converting %v to signed integer", ui)
return
}
if neg {
i = -i
}
if chkOvf.Int(i, bitsize) {
d.d.errorf("binc: overflow integer: %v", i)
return
}
d.bdRead = false
return
}
func (d *bincDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
ui, neg := d.decCheckInteger()
if neg {
d.d.errorf("Assigning negative signed value to unsigned type")
return
}
if chkOvf.Uint(ui, bitsize) {
d.d.errorf("binc: overflow integer: %v", ui)
return
}
d.bdRead = false
return
}
func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
if !d.bdRead {
d.readNextBd()
}
vd, vs := d.vd, d.vs
if vd == bincVdSpecial {
d.bdRead = false
if vs == bincSpNan {
return math.NaN()
} else if vs == bincSpPosInf {
return math.Inf(1)
} else if vs == bincSpZeroFloat || vs == bincSpZero {
return
} else if vs == bincSpNegInf {
return math.Inf(-1)
} else {
d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs)
return
}
} else if vd == bincVdFloat {
f = d.decFloat()
} else {
f = float64(d.DecodeInt(64))
}
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("binc: float32 overflow: %v", f)
return
}
d.bdRead = false
return
}
// bool can be decoded from bool only (single byte).
func (d *bincDecDriver) DecodeBool() (b bool) {
if !d.bdRead {
d.readNextBd()
}
if bd := d.bd; bd == (bincVdSpecial<<4 | bincSpFalse) {
// b = false
} else if bd == (bincVdSpecial<<4 | bincSpTrue) {
b = true
} else {
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
return
}
d.bdRead = false
return
}
func (d *bincDecDriver) ReadMapStart() (length int) {
if d.vd != bincVdMap {
d.d.errorf("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd)
return
}
length = d.decLen()
d.bdRead = false
return
}
func (d *bincDecDriver) ReadArrayStart() (length int) {
if d.vd != bincVdArray {
d.d.errorf("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd)
return
}
length = d.decLen()
d.bdRead = false
return
}
func (d *bincDecDriver) decLen() int {
if d.vs > 3 {
return int(d.vs - 4)
}
return int(d.decLenNumber())
}
func (d *bincDecDriver) decLenNumber() (v uint64) {
if x := d.vs; x == 0 {
v = uint64(d.r.readn1())
} else if x == 1 {
d.r.readb(d.b[6:8])
v = uint64(bigen.Uint16(d.b[6:8]))
} else if x == 2 {
d.r.readb(d.b[4:8])
v = uint64(bigen.Uint32(d.b[4:8]))
} else {
d.r.readb(d.b[:8])
v = bigen.Uint64(d.b[:8])
}
return
}
func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) (bs2 []byte, s string) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == bincVdSpecial<<4|bincSpNil {
d.bdRead = false
return
}
var slen int = -1
// var ok bool
switch d.vd {
case bincVdString, bincVdByteArray:
slen = d.decLen()
if zerocopy {
if d.br {
bs2 = d.r.readx(slen)
} else if len(bs) == 0 {
bs2 = decByteSlice(d.r, slen, d.b[:])
} else {
bs2 = decByteSlice(d.r, slen, bs)
}
} else {
bs2 = decByteSlice(d.r, slen, bs)
}
if withString {
s = string(bs2)
}
case bincVdSymbol:
// zerocopy doesn't apply for symbols,
// as the values must be stored in a table for later use.
//
//from vs: extract numSymbolBytes, containsStringVal, strLenPrecision,
//extract symbol
//if containsStringVal, read it and put in map
//else look in map for string value
var symbol uint16
vs := d.vs
if vs&0x8 == 0 {
symbol = uint16(d.r.readn1())
} else {
symbol = uint16(bigen.Uint16(d.r.readx(2)))
}
if d.s == nil {
d.s = make([]bincDecSymbol, 0, 16)
}
if vs&0x4 == 0 {
for i := range d.s {
j := &d.s[i]
if j.i == symbol {
bs2 = j.b
if withString {
if j.s == "" && bs2 != nil {
j.s = string(bs2)
}
s = j.s
}
break
}
}
} else {
switch vs & 0x3 {
case 0:
slen = int(d.r.readn1())
case 1:
slen = int(bigen.Uint16(d.r.readx(2)))
case 2:
slen = int(bigen.Uint32(d.r.readx(4)))
case 3:
slen = int(bigen.Uint64(d.r.readx(8)))
}
// since using symbols, do not store any part of
// the parameter bs in the map, as it might be a shared buffer.
// bs2 = decByteSlice(d.r, slen, bs)
bs2 = decByteSlice(d.r, slen, nil)
if withString {
s = string(bs2)
}
d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2})
}
default:
d.d.errorf("Invalid d.vd. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x",
bincVdString, bincVdByteArray, bincVdSymbol, d.vd)
return
}
d.bdRead = false
return
}
func (d *bincDecDriver) DecodeString() (s string) {
// DecodeBytes does not accommodate symbols, whose impl stores string version in map.
// Use decStringAndBytes directly.
// return string(d.DecodeBytes(d.b[:], true, true))
_, s = d.decStringAndBytes(d.b[:], true, true)
return
}
func (d *bincDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
if isstring {
bsOut, _ = d.decStringAndBytes(bs, false, zerocopy)
return
}
if !d.bdRead {
d.readNextBd()
}
if d.bd == bincVdSpecial<<4|bincSpNil {
d.bdRead = false
return nil
}
var clen int
if d.vd == bincVdString || d.vd == bincVdByteArray {
clen = d.decLen()
} else {
d.d.errorf("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x",
bincVdString, bincVdByteArray, d.vd)
return
}
d.bdRead = false
if zerocopy {
if d.br {
return d.r.readx(clen)
} else if len(bs) == 0 {
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
}
func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if xtag > 0xff {
d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
return
}
realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
realxtag = uint64(realxtag1)
if ext == nil {
re := rv.(*RawExt)
re.Tag = realxtag
re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
} else {
ext.ReadExt(rv, xbs)
}
return
}
func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
if !d.bdRead {
d.readNextBd()
}
if d.vd == bincVdCustomExt {
l := d.decLen()
xtag = d.r.readn1()
if verifyTag && xtag != tag {
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
return
}
xbs = d.r.readx(l)
} else if d.vd == bincVdByteArray {
xbs = d.DecodeBytes(nil, false, true)
} else {
d.d.errorf("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd)
return
}
d.bdRead = false
return
}
func (d *bincDecDriver) DecodeNaked() {
if !d.bdRead {
d.readNextBd()
}
n := &d.d.n
var decodeFurther bool
switch d.vd {
case bincVdSpecial:
switch d.vs {
case bincSpNil:
n.v = valueTypeNil
case bincSpFalse:
n.v = valueTypeBool
n.b = false
case bincSpTrue:
n.v = valueTypeBool
n.b = true
case bincSpNan:
n.v = valueTypeFloat
n.f = math.NaN()
case bincSpPosInf:
n.v = valueTypeFloat
n.f = math.Inf(1)
case bincSpNegInf:
n.v = valueTypeFloat
n.f = math.Inf(-1)
case bincSpZeroFloat:
n.v = valueTypeFloat
n.f = float64(0)
case bincSpZero:
n.v = valueTypeUint
n.u = uint64(0) // int8(0)
case bincSpNegOne:
n.v = valueTypeInt
n.i = int64(-1) // int8(-1)
default:
d.d.errorf("decodeNaked: Unrecognized special value 0x%x", d.vs)
}
case bincVdSmallInt:
n.v = valueTypeUint
n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
case bincVdPosInt:
n.v = valueTypeUint
n.u = d.decUint()
case bincVdNegInt:
n.v = valueTypeInt
n.i = -(int64(d.decUint()))
case bincVdFloat:
n.v = valueTypeFloat
n.f = d.decFloat()
case bincVdSymbol:
n.v = valueTypeSymbol
n.s = d.DecodeString()
case bincVdString:
n.v = valueTypeString
n.s = d.DecodeString()
case bincVdByteArray:
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false)
case bincVdTimestamp:
n.v = valueTypeTimestamp
tt, err := decodeTime(d.r.readx(int(d.vs)))
if err != nil {
panic(err)
}
n.t = tt
case bincVdCustomExt:
n.v = valueTypeExt
l := d.decLen()
n.u = uint64(d.r.readn1())
n.l = d.r.readx(l)
case bincVdArray:
n.v = valueTypeArray
decodeFurther = true
case bincVdMap:
n.v = valueTypeMap
decodeFurther = true
default:
d.d.errorf("decodeNaked: Unrecognized d.vd: 0x%x", d.vd)
}
if !decodeFurther {
d.bdRead = false
}
if n.v == valueTypeUint && d.h.SignedInteger {
n.v = valueTypeInt
n.i = int64(n.u)
}
return
}
//------------------------------------
//BincHandle is a Handle for the Binc Schema-Free Encoding Format
//defined at https://github.com/ugorji/binc .
//
//BincHandle currently supports all Binc features with the following EXCEPTIONS:
// - only integers up to 64 bits of precision are supported.
// big integers are unsupported.
// - Only IEEE 754 binary32 and binary64 floats are supported (i.e. Go float32 and float64 types).
// extended precision and decimal IEEE 754 floats are unsupported.
// - Only UTF-8 strings supported.
// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
//
//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
type BincHandle struct {
BasicHandle
binaryEncodingType
}
func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, &setExtWrapper{b: ext})
}
func (h *BincHandle) newEncDriver(e *Encoder) encDriver {
return &bincEncDriver{e: e, w: e.w}
}
func (h *BincHandle) newDecDriver(d *Decoder) decDriver {
return &bincDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}
func (e *bincEncDriver) reset() {
e.w = e.e.w
e.s = 0
e.m = nil
}
func (d *bincDecDriver) reset() {
d.r = d.d.r
d.s = nil
d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0
}
var _ decDriver = (*bincDecDriver)(nil)
var _ encDriver = (*bincEncDriver)(nil)


@@ -1,592 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"math"
"reflect"
)
const (
cborMajorUint byte = iota
cborMajorNegInt
cborMajorBytes
cborMajorText
cborMajorArray
cborMajorMap
cborMajorTag
cborMajorOther
)
const (
cborBdFalse byte = 0xf4 + iota
cborBdTrue
cborBdNil
cborBdUndefined
cborBdExt
cborBdFloat16
cborBdFloat32
cborBdFloat64
)
const (
cborBdIndefiniteBytes byte = 0x5f
cborBdIndefiniteString = 0x7f
cborBdIndefiniteArray = 0x9f
cborBdIndefiniteMap = 0xbf
cborBdBreak = 0xff
)
const (
CborStreamBytes byte = 0x5f
CborStreamString = 0x7f
CborStreamArray = 0x9f
CborStreamMap = 0xbf
CborStreamBreak = 0xff
)
const (
cborBaseUint byte = 0x00
cborBaseNegInt = 0x20
cborBaseBytes = 0x40
cborBaseString = 0x60
cborBaseArray = 0x80
cborBaseMap = 0xa0
cborBaseTag = 0xc0
cborBaseSimple = 0xe0
)
// -------------------
type cborEncDriver struct {
noBuiltInTypes
encNoSeparator
e *Encoder
w encWriter
h *CborHandle
x [8]byte
}
func (e *cborEncDriver) EncodeNil() {
e.w.writen1(cborBdNil)
}
func (e *cborEncDriver) EncodeBool(b bool) {
if b {
e.w.writen1(cborBdTrue)
} else {
e.w.writen1(cborBdFalse)
}
}
func (e *cborEncDriver) EncodeFloat32(f float32) {
e.w.writen1(cborBdFloat32)
bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
}
func (e *cborEncDriver) EncodeFloat64(f float64) {
e.w.writen1(cborBdFloat64)
bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
}
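// encUint writes a CBOR head for v: values <= 0x17 are embedded in the
// initial byte; larger values use additional-info 0x18..0x1b followed by
// 1, 2, 4 or 8 big-endian bytes.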
func (e *cborEncDriver) encUint(v uint64, bd byte) {
if v <= 0x17 {
e.w.writen1(byte(v) + bd)
} else if v <= math.MaxUint8 {
e.w.writen2(bd+0x18, uint8(v))
} else if v <= math.MaxUint16 {
e.w.writen1(bd + 0x19)
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v))
} else if v <= math.MaxUint32 {
e.w.writen1(bd + 0x1a)
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v))
} else { // if v <= math.MaxUint64 {
e.w.writen1(bd + 0x1b)
bigenHelper{e.x[:8], e.w}.writeUint64(v)
}
}
func (e *cborEncDriver) EncodeInt(v int64) {
if v < 0 {
e.encUint(uint64(-1-v), cborBaseNegInt)
} else {
e.encUint(uint64(v), cborBaseUint)
}
}
func (e *cborEncDriver) EncodeUint(v uint64) {
e.encUint(v, cborBaseUint)
}
func (e *cborEncDriver) encLen(bd byte, length int) {
e.encUint(uint64(length), bd)
}
func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) {
e.encUint(uint64(xtag), cborBaseTag)
if v := ext.ConvertExt(rv); v == nil {
e.EncodeNil()
} else {
en.encode(v)
}
}
func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) {
e.encUint(uint64(re.Tag), cborBaseTag)
if re.Data != nil {
en.encode(re.Data)
} else if re.Value == nil {
e.EncodeNil()
} else {
en.encode(re.Value)
}
}
func (e *cborEncDriver) EncodeArrayStart(length int) {
e.encLen(cborBaseArray, length)
}
func (e *cborEncDriver) EncodeMapStart(length int) {
e.encLen(cborBaseMap, length)
}
func (e *cborEncDriver) EncodeString(c charEncoding, v string) {
e.encLen(cborBaseString, len(v))
e.w.writestr(v)
}
func (e *cborEncDriver) EncodeSymbol(v string) {
e.EncodeString(c_UTF8, v)
}
func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
if c == c_RAW {
e.encLen(cborBaseBytes, len(v))
} else {
e.encLen(cborBaseString, len(v))
}
e.w.writeb(v)
}
// ----------------------
type cborDecDriver struct {
d *Decoder
h *CborHandle
r decReader
b [scratchByteArrayLen]byte
br bool // bytes reader
bdRead bool
bd byte
noBuiltInTypes
decNoSeparator
}
func (d *cborDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
}
func (d *cborDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}
func (d *cborDecDriver) ContainerType() (vt valueType) {
if d.bd == cborBdNil {
return valueTypeNil
} else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) {
return valueTypeBytes
} else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) {
return valueTypeString
} else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) {
return valueTypeArray
} else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) {
return valueTypeMap
} else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
}
return valueTypeUnset
}
func (d *cborDecDriver) TryDecodeAsNil() bool {
if !d.bdRead {
d.readNextBd()
}
// treat Nil and Undefined as nil values
if d.bd == cborBdNil || d.bd == cborBdUndefined {
d.bdRead = false
return true
}
return false
}
func (d *cborDecDriver) CheckBreak() bool {
if !d.bdRead {
d.readNextBd()
}
if d.bd == cborBdBreak {
d.bdRead = false
return true
}
return false
}
func (d *cborDecDriver) decUint() (ui uint64) {
v := d.bd & 0x1f
if v <= 0x17 {
ui = uint64(v)
} else {
if v == 0x18 {
ui = uint64(d.r.readn1())
} else if v == 0x19 {
ui = uint64(bigen.Uint16(d.r.readx(2)))
} else if v == 0x1a {
ui = uint64(bigen.Uint32(d.r.readx(4)))
} else if v == 0x1b {
ui = uint64(bigen.Uint64(d.r.readx(8)))
} else {
d.d.errorf("decUint: Invalid descriptor: %v", d.bd)
return
}
}
return
}
func (d *cborDecDriver) decCheckInteger() (neg bool) {
if !d.bdRead {
d.readNextBd()
}
major := d.bd >> 5
if major == cborMajorUint {
} else if major == cborMajorNegInt {
neg = true
} else {
d.d.errorf("invalid major: %v (bd: %v)", major, d.bd)
return
}
return
}
func (d *cborDecDriver) DecodeInt(bitsize uint8) (i int64) {
neg := d.decCheckInteger()
ui := d.decUint()
// check if this number can be converted to an int without overflow
var overflow bool
if neg {
if i, overflow = chkOvf.SignedInt(ui + 1); overflow {
d.d.errorf("cbor: overflow converting %v to signed integer", ui+1)
return
}
i = -i
} else {
if i, overflow = chkOvf.SignedInt(ui); overflow {
d.d.errorf("cbor: overflow converting %v to signed integer", ui)
return
}
}
if chkOvf.Int(i, bitsize) {
d.d.errorf("cbor: overflow integer: %v", i)
return
}
d.bdRead = false
return
}
func (d *cborDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
if d.decCheckInteger() {
d.d.errorf("Assigning negative signed value to unsigned type")
return
}
ui = d.decUint()
if chkOvf.Uint(ui, bitsize) {
d.d.errorf("cbor: overflow integer: %v", ui)
return
}
d.bdRead = false
return
}
func (d *cborDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
if !d.bdRead {
d.readNextBd()
}
if bd := d.bd; bd == cborBdFloat16 {
f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2)))))
} else if bd == cborBdFloat32 {
f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
} else if bd == cborBdFloat64 {
f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
} else if bd >= cborBaseUint && bd < cborBaseBytes {
f = float64(d.DecodeInt(64))
} else {
d.d.errorf("Float only valid from float16/32/64: Invalid descriptor: %v", bd)
return
}
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("cbor: float32 overflow: %v", f)
return
}
d.bdRead = false
return
}
// bool can be decoded from bool only (single byte).
func (d *cborDecDriver) DecodeBool() (b bool) {
if !d.bdRead {
d.readNextBd()
}
if bd := d.bd; bd == cborBdTrue {
b = true
} else if bd == cborBdFalse {
} else {
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
return
}
d.bdRead = false
return
}
func (d *cborDecDriver) ReadMapStart() (length int) {
d.bdRead = false
if d.bd == cborBdIndefiniteMap {
return -1
}
return d.decLen()
}
func (d *cborDecDriver) ReadArrayStart() (length int) {
d.bdRead = false
if d.bd == cborBdIndefiniteArray {
return -1
}
return d.decLen()
}
func (d *cborDecDriver) decLen() int {
return int(d.decUint())
}
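// decAppendIndefiniteBytes appends the definite-length chunks of an
// indefinite-length byte/text string to bs, until the break byte is read.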
func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte {
d.bdRead = false
for {
if d.CheckBreak() {
break
}
if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText {
d.d.errorf("cbor: expect bytes or string major type in indefinite string/bytes; got: %v, byte: %v", major, d.bd)
return nil
}
n := d.decLen()
oldLen := len(bs)
newLen := oldLen + n
if newLen > cap(bs) {
bs2 := make([]byte, newLen, 2*cap(bs)+n)
copy(bs2, bs)
bs = bs2
} else {
bs = bs[:newLen]
}
d.r.readb(bs[oldLen:newLen])
// bs = append(bs, d.r.readn()...)
d.bdRead = false
}
d.bdRead = false
return bs
}
func (d *cborDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == cborBdNil || d.bd == cborBdUndefined {
d.bdRead = false
return nil
}
if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
if bs == nil {
return d.decAppendIndefiniteBytes(nil)
}
return d.decAppendIndefiniteBytes(bs[:0])
}
clen := d.decLen()
d.bdRead = false
if zerocopy {
if d.br {
return d.r.readx(clen)
} else if len(bs) == 0 {
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
}
func (d *cborDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.b[:], true, true))
}
func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if !d.bdRead {
d.readNextBd()
}
u := d.decUint()
d.bdRead = false
realxtag = u
if ext == nil {
re := rv.(*RawExt)
re.Tag = realxtag
d.d.decode(&re.Value)
} else if xtag != realxtag {
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag)
return
} else {
var v interface{}
d.d.decode(&v)
ext.UpdateExt(rv, v)
}
d.bdRead = false
return
}
func (d *cborDecDriver) DecodeNaked() {
if !d.bdRead {
d.readNextBd()
}
n := &d.d.n
var decodeFurther bool
switch d.bd {
case cborBdNil:
n.v = valueTypeNil
case cborBdFalse:
n.v = valueTypeBool
n.b = false
case cborBdTrue:
n.v = valueTypeBool
n.b = true
case cborBdFloat16, cborBdFloat32:
n.v = valueTypeFloat
n.f = d.DecodeFloat(true)
case cborBdFloat64:
n.v = valueTypeFloat
n.f = d.DecodeFloat(false)
case cborBdIndefiniteBytes:
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false)
case cborBdIndefiniteString:
n.v = valueTypeString
n.s = d.DecodeString()
case cborBdIndefiniteArray:
n.v = valueTypeArray
decodeFurther = true
case cborBdIndefiniteMap:
n.v = valueTypeMap
decodeFurther = true
default:
switch {
case d.bd >= cborBaseUint && d.bd < cborBaseNegInt:
if d.h.SignedInteger {
n.v = valueTypeInt
n.i = d.DecodeInt(64)
} else {
n.v = valueTypeUint
n.u = d.DecodeUint(64)
}
case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes:
n.v = valueTypeInt
n.i = d.DecodeInt(64)
case d.bd >= cborBaseBytes && d.bd < cborBaseString:
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false)
case d.bd >= cborBaseString && d.bd < cborBaseArray:
n.v = valueTypeString
n.s = d.DecodeString()
case d.bd >= cborBaseArray && d.bd < cborBaseMap:
n.v = valueTypeArray
decodeFurther = true
case d.bd >= cborBaseMap && d.bd < cborBaseTag:
n.v = valueTypeMap
decodeFurther = true
case d.bd >= cborBaseTag && d.bd < cborBaseSimple:
n.v = valueTypeExt
n.u = d.decUint()
n.l = nil
// d.bdRead = false
// d.d.decode(&re.Value) // handled by decode itself.
// decodeFurther = true
default:
d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
return
}
}
if !decodeFurther {
d.bdRead = false
}
return
}
// -------------------------
// CborHandle is a Handle for the CBOR encoding format,
// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
//
// CBOR is comprehensively supported, including support for:
// - indefinite-length arrays/maps/bytes/strings
// - (extension) tags in range 0..0xffff (0 .. 65535)
// - half, single and double-precision floats
// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
// - nil, true, false, ...
// - arrays and maps, bytes and text strings
//
// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
// Users can implement them as needed (using SetExt), including spec-documented ones:
// - timestamp, BigNum, BigFloat, Decimals, Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
//
// To encode with indefinite lengths (streaming), users will use
// (Must)Encode methods of *Encoder, along with writing CborStreamXXX constants.
//
// For example, to encode "one-byte" as an indefinite length string:
// var buf bytes.Buffer
// e := NewEncoder(&buf, new(CborHandle))
// buf.WriteByte(CborStreamString)
// e.MustEncode("one-")
// e.MustEncode("byte")
// buf.WriteByte(CborStreamBreak)
// encodedBytes := buf.Bytes()
// var vv interface{}
// NewDecoderBytes(buf.Bytes(), new(CborHandle)).MustDecode(&vv)
// // Now, vv contains the same string "one-byte"
//
type CborHandle struct {
binaryEncodingType
BasicHandle
}
func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
return h.SetExt(rt, tag, &setExtWrapper{i: ext})
}
func (h *CborHandle) newEncDriver(e *Encoder) encDriver {
return &cborEncDriver{e: e, w: e.w, h: h}
}
func (h *CborHandle) newDecDriver(d *Decoder) decDriver {
return &cborDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}
func (e *cborEncDriver) reset() {
e.w = e.e.w
}
func (d *cborDecDriver) reset() {
d.r = d.d.r
d.bd, d.bdRead = 0, false
}
var _ decDriver = (*cborDecDriver)(nil)
var _ encDriver = (*cborEncDriver)(nil)

File diff suppressed because it is too large


@@ -1,16 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.5
package codec
import "reflect"
const reflectArrayOfSupported = true
func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) {
rvn2 = reflect.New(reflect.ArrayOf(rvn.Len(), intfTyp)).Elem()
reflect.Copy(rvn2, rvn)
return
}


@@ -1,14 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build !go1.5
package codec
import "reflect"
const reflectArrayOfSupported = false
func reflectArrayOf(rvn reflect.Value) (rvn2 reflect.Value) {
panic("reflect.ArrayOf unsupported")
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,34 +0,0 @@
// +build notfastpath
package codec
import "reflect"
const fastpathEnabled = false
// The generated fast-path code is very large, and adds a few seconds to the build time.
// This causes test execution, execution of small tools which use codec, etc
// to take a long time.
//
// To mitigate, we now support the notfastpath tag.
// This tag disables fastpath during build, allowing for faster build, test execution,
// short-program runs, etc.
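// For example, a typical invocation (a sketch; adjust the package path as needed):
//
//	go test -tags notfastpath github.com/ugorji/go/codec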
func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false }
func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
type fastpathT struct{}
type fastpathE struct {
rtid uintptr
rt reflect.Type
encfn func(*encFnInfo, reflect.Value)
decfn func(*decFnInfo, reflect.Value)
}
type fastpathA [0]fastpathE
func (x fastpathA) index(rtid uintptr) int { return -1 }
var fastpathAV fastpathA
var fastpathTV fastpathT


@@ -1,243 +0,0 @@
/* // +build ignore */
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
// ************************************************************
package codec
import (
"encoding"
"reflect"
)
// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continuously and without notice.
//
// To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat).
//
// When static codecs are created for types, they will use this value
// to perform encoding or decoding of primitives or known slice or map types.
// GenHelperEncoder is exported so that it can be used externally by codecgen.
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
return genHelperEncoder{e: e}, e.e
}
// GenHelperDecoder is exported so that it can be used externally by codecgen.
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
return genHelperDecoder{d: d}, d.d
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct {
e *Encoder
F fastpathT
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperDecoder struct {
d *Decoder
F fastpathT
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
return f.e.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinary() bool {
return f.e.be // f.e.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) {
// println(">>>>>>>>> EncFallback")
f.e.encodeI(iv, false, false)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
bs, fnerr := iv.MarshalText()
f.e.marshal(bs, fnerr, false, c_UTF8)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
bs, fnerr := iv.MarshalJSON()
f.e.marshal(bs, fnerr, true, c_UTF8)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
bs, fnerr := iv.MarshalBinary()
f.e.marshal(bs, fnerr, false, c_RAW)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncRaw(iv Raw) {
f.e.raw(iv)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
if _, ok := f.e.hh.(*BincHandle); ok {
return timeTypId
}
return 0
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) IsJSONHandle() bool {
return f.e.js
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) HasExtensions() bool {
return len(f.e.h.extHandle) != 0
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
rt := reflect.TypeOf(v)
if rt.Kind() == reflect.Ptr {
rt = rt.Elem()
}
rtid := reflect.ValueOf(rt).Pointer()
if xfFn := f.e.h.getExt(rtid); xfFn != nil {
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
return true
}
return false
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncSendContainerState(c containerState) {
if f.e.cr != nil {
f.e.cr.sendContainerState(c)
}
}
// ---------------- DECODER FOLLOWS -----------------
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
return f.d.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinary() bool {
return f.d.be // f.d.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() {
f.d.swallow()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchBuffer() []byte {
return f.d.b[:]
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
// println(">>>>>>>>> DecFallback")
f.d.decodeI(iv, chkPtr, false, false, false)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
return f.d.decSliceHelperStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
f.d.structFieldNotFound(index, name)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
f.d.arrayCannotExpand(sliceLen, streamLen)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true))
if fnerr != nil {
panic(fnerr)
}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
// bs := f.dd.DecodeBytes(f.d.b[:], true, true)
// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
if fnerr != nil {
panic(fnerr)
}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true))
if fnerr != nil {
panic(fnerr)
}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecRaw() []byte {
return f.d.raw()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
if _, ok := f.d.hh.(*BincHandle); ok {
return timeTypId
}
return 0
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) IsJSONHandle() bool {
return f.d.js
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) HasExtensions() bool {
return len(f.d.h.extHandle) != 0
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
rt := reflect.TypeOf(v).Elem()
rtid := reflect.ValueOf(rt).Pointer()
if xfFn := f.d.h.getExt(rtid); xfFn != nil {
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
return true
}
return false
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
return decInferLen(clen, maxlen, unit)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSendContainerState(c containerState) {
if f.d.cr != nil {
f.d.cr.sendContainerState(c)
}
}


@@ -1,175 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl
const genDecMapTmpl = `
{{var "v"}} := *{{ .Varname }}
{{var "l"}} := r.ReadMapStart()
{{var "bh"}} := z.DecBasicHandle()
if {{var "v"}} == nil {
{{var "rl"}}, _ := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }})
{{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}})
*{{ .Varname }} = {{var "v"}}
}
var {{var "mk"}} {{ .KTyp }}
var {{var "mv"}} {{ .Typ }}
var {{var "mg"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool
if {{var "bh"}}.MapValueReset {
{{if decElemKindPtr}}{{var "mg"}} = true
{{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true }
{{else if not decElemKindImmutable}}{{var "mg"}} = true
{{end}} }
if {{var "l"}} > 0 {
for {{var "j"}} := 0; {{var "j"}} < {{var "l"}}; {{var "j"}}++ {
z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
}{{ end }}{{if decElemKindPtr}}
{{var "ms"}} = true{{end}}
if {{var "mg"}} {
{{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
if {{var "mok"}} {
{{var "ms"}} = false
} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
} else if {{var "l"}} < 0 {
for {{var "j"}} := 0; !r.CheckBreak(); {{var "j"}}++ {
z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }})
{{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }}
{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} {
{{var "mk"}} = string({{var "bv"}})
}{{ end }}{{if decElemKindPtr}}
{{var "ms"}} = true {{ end }}
if {{var "mg"}} {
{{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}]
if {{var "mok"}} {
{{var "ms"}} = false
} {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}}
} {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}}
z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }})
{{ $x := printf "%vmv%v" .TempVar .Rand }}{{ decLineVar $x }}
if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil {
{{var "v"}}[{{var "mk"}}] = {{var "mv"}}
}
}
} // else len==0: TODO: Should we clear map entries?
z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }})
`
const genDecListTmpl = `
{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }}
{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}}
var {{var "c"}} bool {{/* // changed */}}
_ = {{var "c"}}{{end}}
if {{var "l"}} == 0 {
{{if isSlice }}if {{var "v"}} == nil {
{{var "v"}} = []{{ .Typ }}{}
{{var "c"}} = true
} else if len({{var "v"}}) != 0 {
{{var "v"}} = {{var "v"}}[:0]
{{var "c"}} = true
} {{end}} {{if isChan }}if {{var "v"}} == nil {
{{var "v"}} = make({{ .CTyp }}, 0)
{{var "c"}} = true
} {{end}}
} else if {{var "l"}} > 0 {
{{if isChan }}if {{var "v"}} == nil {
{{var "rl"}}, _ = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
{{var "v"}} = make({{ .CTyp }}, {{var "rl"}})
{{var "c"}} = true
}
for {{var "r"}} := 0; {{var "r"}} < {{var "l"}}; {{var "r"}}++ {
{{var "h"}}.ElemContainerState({{var "r"}})
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
{{var "v"}} <- {{var "t"}}
}
{{ else }} var {{var "rr"}}, {{var "rl"}} int {{/* // num2read, length of slice/array/chan */}}
var {{var "rt"}} bool {{/* truncated */}}
_, _ = {{var "rl"}}, {{var "rt"}}
{{var "rr"}} = {{var "l"}} // len({{var "v"}})
if {{var "l"}} > cap({{var "v"}}) {
{{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "l"}})
{{ else }}{{if not .Immutable }}
{{var "rg"}} := len({{var "v"}}) > 0
{{var "v2"}} := {{var "v"}} {{end}}
{{var "rl"}}, {{var "rt"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }})
if {{var "rt"}} {
if {{var "rl"}} <= cap({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "rl"}}]
} else {
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
}
} else {
{{var "v"}} = make([]{{ .Typ }}, {{var "rl"}})
}
{{var "c"}} = true
{{var "rr"}} = len({{var "v"}}) {{if not .Immutable }}
if {{var "rg"}} { copy({{var "v"}}, {{var "v2"}}) } {{end}} {{end}}{{/* end not Immutable, isArray */}}
} {{if isSlice }} else if {{var "l"}} != len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "l"}}]
{{var "c"}} = true
} {{end}} {{/* end isSlice:47 */}}
{{var "j"}} := 0
for ; {{var "j"}} < {{var "rr"}} ; {{var "j"}}++ {
{{var "h"}}.ElemContainerState({{var "j"}})
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
}
{{if isArray }}for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
{{var "h"}}.ElemContainerState({{var "j"}})
z.DecSwallow()
}
{{ else }}if {{var "rt"}} {
for ; {{var "j"}} < {{var "l"}} ; {{var "j"}}++ {
{{var "v"}} = append({{var "v"}}, {{ zero}})
{{var "h"}}.ElemContainerState({{var "j"}})
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
}
} {{end}} {{/* end isArray:56 */}}
{{end}} {{/* end isChan:16 */}}
} else { {{/* len < 0 */}}
{{var "j"}} := 0
for ; !r.CheckBreak(); {{var "j"}}++ {
{{if isChan }}
{{var "h"}}.ElemContainerState({{var "j"}})
var {{var "t"}} {{ .Typ }}
{{ $x := printf "%st%s" .TempVar .Rand }}{{ decLineVar $x }}
{{var "v"}} <- {{var "t"}}
{{ else }}
if {{var "j"}} >= len({{var "v"}}) {
{{if isArray }}z.DecArrayCannotExpand(len({{var "v"}}), {{var "j"}}+1)
{{ else }}{{var "v"}} = append({{var "v"}}, {{zero}})// var {{var "z"}} {{ .Typ }}
{{var "c"}} = true {{end}}
}
{{var "h"}}.ElemContainerState({{var "j"}})
if {{var "j"}} < len({{var "v"}}) {
{{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }}
} else {
z.DecSwallow()
}
{{end}}
}
{{if isSlice }}if {{var "j"}} < len({{var "v"}}) {
{{var "v"}} = {{var "v"}}[:{{var "j"}}]
{{var "c"}} = true
} else if {{var "j"}} == 0 && {{var "v"}} == nil {
{{var "v"}} = []{{ .Typ }}{}
{{var "c"}} = true
}{{end}}
}
{{var "h"}}.End()
{{if not isArray }}if {{var "c"}} {
*{{ .Varname }} = {{var "v"}}
}{{end}}
`

File diff suppressed because it is too large


@@ -1,12 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.5,!go1.6
package codec
import "os"
func init() {
genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"
}


@@ -1,12 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.6
package codec
import "os"
func init() {
genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0"
}


@ -1,10 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// +build go1.7
package codec
func init() {
genCheckVendor = true
}

File diff suppressed because it is too large


@ -1,242 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
// All non-std package dependencies live in this file,
// so porting to different environment is easy (just update functions).
import (
"errors"
"fmt"
"math"
"reflect"
)
func panicValToErr(panicVal interface{}, err *error) {
if panicVal == nil {
return
}
// case nil
switch xerr := panicVal.(type) {
case error:
*err = xerr
case string:
*err = errors.New(xerr)
default:
*err = fmt.Errorf("%v", panicVal)
}
return
}
func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
switch v.Kind() {
case reflect.Invalid:
return true
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
if deref {
if v.IsNil() {
return true
}
return hIsEmptyValue(v.Elem(), deref, checkStruct)
} else {
return v.IsNil()
}
case reflect.Struct:
if !checkStruct {
return false
}
// return true if all fields are empty. else return false.
// we cannot use equality check, because some fields may be maps/slices/etc
// and consequently the structs are not comparable.
// return v.Interface() == reflect.Zero(v.Type()).Interface()
for i, n := 0, v.NumField(); i < n; i++ {
if !hIsEmptyValue(v.Field(i), deref, checkStruct) {
return false
}
}
return true
}
return false
}
func isEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
return hIsEmptyValue(v, deref, checkStruct)
}
func pruneSignExt(v []byte, pos bool) (n int) {
if len(v) < 2 {
} else if pos && v[0] == 0 {
for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
}
} else if !pos && v[0] == 0xff {
for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
}
}
return
}
func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) {
if typ == nil {
return
}
rt := typ
// The type might be a pointer and we need to keep
// dereferencing to the base type until we find an implementation.
for {
if rt.Implements(iTyp) {
return true, indir
}
if p := rt; p.Kind() == reflect.Ptr {
indir++
if indir >= math.MaxInt8 { // insane number of indirections
return false, 0
}
rt = p.Elem()
continue
}
break
}
// No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
if typ.Kind() != reflect.Ptr {
// Not a pointer, but does the pointer work?
if reflect.PtrTo(typ).Implements(iTyp) {
return true, -1
}
}
return false, 0
}
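// A minimal sketch of implementsIntf's indirection result, written as a
// hypothetical in-package test file (the type and test names are made up):
// myT itself does not implement error, but *myT does, so the function
// reports (true, -1).
package codec
import (
"reflect"
"testing"
)
type myT struct{} // hypothetical type with a pointer-receiver method set
func (*myT) Error() string { return "boom" }
func TestImplementsIntfSketch(t *testing.T) {
errType := reflect.TypeOf((*error)(nil)).Elem()
ok, indir := implementsIntf(reflect.TypeOf(myT{}), errType)
if !ok || indir != -1 {
t.Fatalf("got (%v, %d), want (true, -1)", ok, indir)
}
}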
// validate that this function is correct ...
// culled from OGRE (Object-Oriented Graphics Rendering Engine)
// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html)
func halfFloatToFloatBits(yy uint16) (d uint32) {
y := uint32(yy)
s := (y >> 15) & 0x01
e := (y >> 10) & 0x1f
m := y & 0x03ff
if e == 0 {
if m == 0 { // plus or minus 0
return s << 31
} else { // Denormalized number -- renormalize it
for (m & 0x00000400) == 0 {
m <<= 1
e -= 1
}
e += 1
const zz uint32 = 0x0400
m &= ^zz
}
} else if e == 31 {
if m == 0 { // Inf
return (s << 31) | 0x7f800000
} else { // NaN
return (s << 31) | 0x7f800000 | (m << 13)
}
}
e = e + (127 - 15)
m = m << 13
return (s << 31) | (e << 23) | m
}
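// As a quick sanity check of the bit manipulation above, a minimal
// table-driven sketch (a hypothetical test file in package codec; not part
// of the original source) exercising the normal, zero, and infinity paths:
package codec
import (
"math"
"testing"
)
func TestHalfFloatSketch(t *testing.T) {
cases := []struct {
in   uint16  // half-precision bits
want float32 // expected single-precision value
}{
{0x3c00, 1.0},                  // normal number: e=15 -> 127, m=0
{0xc000, -2.0},                 // sign bit set
{0x0000, 0.0},                  // plus zero
{0x7c00, float32(math.Inf(1))}, // infinity (e == 31, m == 0)
}
for _, c := range cases {
got := math.Float32frombits(halfFloatToFloatBits(c.in))
if got != c.want {
t.Errorf("halfFloatToFloatBits(%#04x) = %v, want %v", c.in, got, c.want)
}
}
}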
// growCap returns a new capacity for a slice, given the following:
// - oldCap: current capacity
// - unit: in-memory size of an element
// - num: number of elements to add
func growCap(oldCap, unit, num int) (newCap int) {
// appendslice logic (if cap < 1024, *2, else *1.25):
// leads to many copy calls, especially when copying bytes.
// bytes.Buffer model (2*cap + n): much better for bytes.
// smarter way is to take the byte-size of the appended element(type) into account
// maintain 3 thresholds:
// t1: if cap <= t1, newcap = 2x
// t2: if cap <= t2, newcap = 1.75x
// t3: if cap <= t3, newcap = 1.5x
// else newcap = 1.25x
//
// t1, t2, t3 >= 1024 always.
// i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same)
//
// With this, appending for bytes increases by:
// 100% up to 4K
// 75% up to 8K
// 50% up to 16K
// 25% beyond that
// unit can be 0 e.g. for struct{}{}; handle that appropriately
var t1, t2, t3 int // thresholds
if unit <= 1 {
t1, t2, t3 = 4*1024, 8*1024, 16*1024
} else if unit < 16 {
t3 = 16 / unit * 1024
t1 = t3 * 1 / 4
t2 = t3 * 2 / 4
} else {
t1, t2, t3 = 1024, 1024, 1024
}
var x int // temporary variable
// x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively
if oldCap <= t1 { // [0,t1]
x = 8
} else if oldCap > t3 { // (t3,infinity]
x = 5
} else if oldCap <= t2 { // (t1,t2]
x = 7
} else { // (t2,t3]
x = 6
}
newCap = x * oldCap / 4
if num > 0 {
newCap += num
}
// ensure newCap is a multiple of 64 (if it is > 64) or 16.
if newCap > 64 {
if x = newCap % 64; x != 0 {
x = newCap / 64
newCap = 64 * (x + 1)
}
} else {
if x = newCap % 16; x != 0 {
x = newCap / 16
newCap = 16 * (x + 1)
}
}
return
}
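// To make the thresholds above concrete, a hypothetical in-package sketch
// for unit=1 (byte-sized elements): growth is 2x up to 4K, 1.75x up to 8K,
// 1.5x up to 16K, then 1.25x, rounded up to a multiple of 64 (or 16).
package codec
import "fmt"
func growCapDemo() {
for _, oldCap := range []int{1024, 4096, 8192, 16384, 32768} {
// e.g. growCap(1024, 1, 0) = 2048; growCap(8192, 1, 0) = 14336
fmt.Printf("growCap(%5d, 1, 0) = %d\n", oldCap, growCap(oldCap, 1, 0))
}
}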
func expandSliceValue(s reflect.Value, num int) reflect.Value {
if num <= 0 {
return s
}
l0 := s.Len()
l1 := l0 + num // new slice length
if l1 < l0 {
panic("ExpandSlice: slice overflow")
}
c0 := s.Cap()
if l1 <= c0 {
return s.Slice(0, l1)
}
st := s.Type()
c1 := growCap(c0, int(st.Elem().Size()), num)
s2 := reflect.MakeSlice(st, l1, c1)
// println("expandslicevalue: cap-old: ", c0, ", cap-new: ", c1, ", len-new: ", l1)
reflect.Copy(s2, s)
return s2
}


@ -1,20 +0,0 @@
// +build !unsafe
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
// stringView returns a view of the []byte as a string.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
func stringView(v []byte) string {
return string(v)
}
// bytesView returns a view of the string as a []byte.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
func bytesView(v string) []byte {
return []byte(v)
}


@ -1,49 +0,0 @@
// +build unsafe
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"unsafe"
)
// This file has unsafe variants of some helper methods.
type unsafeString struct {
Data uintptr
Len int
}
type unsafeSlice struct {
Data uintptr
Len int
Cap int
}
// stringView returns a view of the []byte as a string.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
func stringView(v []byte) string {
if len(v) == 0 {
return ""
}
bx := (*unsafeSlice)(unsafe.Pointer(&v))
sx := unsafeString{bx.Data, bx.Len}
return *(*string)(unsafe.Pointer(&sx))
}
// bytesView returns a view of the string as a []byte.
// In unsafe mode, it doesn't incur allocation and copying caused by conversion.
// In regular safe mode, it is an allocation and copy.
func bytesView(v string) []byte {
if len(v) == 0 {
return zeroByteSlice
}
sx := (*unsafeString)(unsafe.Pointer(&v))
bx := unsafeSlice{sx.Data, sx.Len, sx.Len}
return *(*[]byte)(unsafe.Pointer(&bx))
}
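// Both variants share one contract: in unsafe mode the returned view aliases
// the caller's memory, so it must be treated as read-only and must not
// outlive the buffer it came from. A hypothetical in-package sketch of the
// intended use, comparing a decoded field name without allocating:
package codec
// compareKeySketch is a made-up helper: stringView lets the comparison run
// without copying field to a heap string (in unsafe builds).
func compareKeySketch(field []byte, key string) bool {
return stringView(field) == key
}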

File diff suppressed because it is too large


@ -1,852 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
/*
MSGPACK
The msgpack-c implementation powers the C, C++, Python, Ruby, etc. libraries.
We need to maintain compatibility with it and how it encodes integer values
without caring about the type.
For compatibility with behaviour of msgpack-c reference implementation:
- Go intX (>= 0) and uintX
IS ENCODED AS
msgpack +ve fixnum, unsigned
- Go intX (<0)
IS ENCODED AS
msgpack -ve fixnum, signed
*/
package codec
import (
"fmt"
"io"
"math"
"net/rpc"
"reflect"
)
const (
mpPosFixNumMin byte = 0x00
mpPosFixNumMax = 0x7f
mpFixMapMin = 0x80
mpFixMapMax = 0x8f
mpFixArrayMin = 0x90
mpFixArrayMax = 0x9f
mpFixStrMin = 0xa0
mpFixStrMax = 0xbf
mpNil = 0xc0
_ = 0xc1
mpFalse = 0xc2
mpTrue = 0xc3
mpFloat = 0xca
mpDouble = 0xcb
mpUint8 = 0xcc
mpUint16 = 0xcd
mpUint32 = 0xce
mpUint64 = 0xcf
mpInt8 = 0xd0
mpInt16 = 0xd1
mpInt32 = 0xd2
mpInt64 = 0xd3
// extensions below
mpBin8 = 0xc4
mpBin16 = 0xc5
mpBin32 = 0xc6
mpExt8 = 0xc7
mpExt16 = 0xc8
mpExt32 = 0xc9
mpFixExt1 = 0xd4
mpFixExt2 = 0xd5
mpFixExt4 = 0xd6
mpFixExt8 = 0xd7
mpFixExt16 = 0xd8
mpStr8 = 0xd9 // new
mpStr16 = 0xda
mpStr32 = 0xdb
mpArray16 = 0xdc
mpArray32 = 0xdd
mpMap16 = 0xde
mpMap32 = 0xdf
mpNegFixNumMin = 0xe0
mpNegFixNumMax = 0xff
)
// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
// that the backend RPC service takes multiple arguments, which have been arranged
// in sequence in the slice.
//
// The Codec then passes it AS-IS to the rpc service (without wrapping it in an
// array of 1 element).
type MsgpackSpecRpcMultiArgs []interface{}
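// A hedged client-side sketch (a separate program; the endpoint and service
// name are made up) of sending multiple positional arguments through
// MsgpackSpecRpcMultiArgs:
package main
import (
"net"
"net/rpc"
"github.com/ugorji/go/codec"
)
func main() {
conn, err := net.Dial("tcp", "127.0.0.1:5555") // hypothetical endpoint
if err != nil {
panic(err)
}
var mh codec.MsgpackHandle
client := rpc.NewClientWithCodec(codec.MsgpackSpecRpc.ClientCodec(conn, &mh))
defer client.Close()
var sum int
// The two arguments go on the wire as a 2-element msgpack array,
// not wrapped in a 1-element array as a plain Go RPC body would be.
args := codec.MsgpackSpecRpcMultiArgs{3, 4}
if err := client.Call("Arith.Add", args, &sum); err != nil { // hypothetical service
panic(err)
}
}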
// A MsgpackContainer type specifies the different types of msgpackContainers.
type msgpackContainerType struct {
fixCutoff int
bFixMin, b8, b16, b32 byte
hasFixMin, has8, has8Always bool
}
var (
msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false}
msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true}
msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false}
msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false}
)
//---------------------------------------------
type msgpackEncDriver struct {
noBuiltInTypes
encNoSeparator
e *Encoder
w encWriter
h *MsgpackHandle
x [8]byte
}
func (e *msgpackEncDriver) EncodeNil() {
e.w.writen1(mpNil)
}
func (e *msgpackEncDriver) EncodeInt(i int64) {
if i >= 0 {
e.EncodeUint(uint64(i))
} else if i >= -32 {
e.w.writen1(byte(i))
} else if i >= math.MinInt8 {
e.w.writen2(mpInt8, byte(i))
} else if i >= math.MinInt16 {
e.w.writen1(mpInt16)
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
} else if i >= math.MinInt32 {
e.w.writen1(mpInt32)
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
} else {
e.w.writen1(mpInt64)
bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
}
}
func (e *msgpackEncDriver) EncodeUint(i uint64) {
if i <= math.MaxInt8 {
e.w.writen1(byte(i))
} else if i <= math.MaxUint8 {
e.w.writen2(mpUint8, byte(i))
} else if i <= math.MaxUint16 {
e.w.writen1(mpUint16)
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i))
} else if i <= math.MaxUint32 {
e.w.writen1(mpUint32)
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i))
} else {
e.w.writen1(mpUint64)
bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i))
}
}
func (e *msgpackEncDriver) EncodeBool(b bool) {
if b {
e.w.writen1(mpTrue)
} else {
e.w.writen1(mpFalse)
}
}
func (e *msgpackEncDriver) EncodeFloat32(f float32) {
e.w.writen1(mpFloat)
bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f))
}
func (e *msgpackEncDriver) EncodeFloat64(f float64) {
e.w.writen1(mpDouble)
bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f))
}
func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) {
bs := ext.WriteExt(v)
if bs == nil {
e.EncodeNil()
return
}
if e.h.WriteExt {
e.encodeExtPreamble(uint8(xtag), len(bs))
e.w.writeb(bs)
} else {
e.EncodeStringBytes(c_RAW, bs)
}
}
func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
e.w.writeb(re.Data)
}
func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) {
if l == 1 {
e.w.writen2(mpFixExt1, xtag)
} else if l == 2 {
e.w.writen2(mpFixExt2, xtag)
} else if l == 4 {
e.w.writen2(mpFixExt4, xtag)
} else if l == 8 {
e.w.writen2(mpFixExt8, xtag)
} else if l == 16 {
e.w.writen2(mpFixExt16, xtag)
} else if l < 256 {
e.w.writen2(mpExt8, byte(l))
e.w.writen1(xtag)
} else if l < 65536 {
e.w.writen1(mpExt16)
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
e.w.writen1(xtag)
} else {
e.w.writen1(mpExt32)
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
e.w.writen1(xtag)
}
}
func (e *msgpackEncDriver) EncodeArrayStart(length int) {
e.writeContainerLen(msgpackContainerList, length)
}
func (e *msgpackEncDriver) EncodeMapStart(length int) {
e.writeContainerLen(msgpackContainerMap, length)
}
func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) {
if c == c_RAW && e.h.WriteExt {
e.writeContainerLen(msgpackContainerBin, len(s))
} else {
e.writeContainerLen(msgpackContainerStr, len(s))
}
if len(s) > 0 {
e.w.writestr(s)
}
}
func (e *msgpackEncDriver) EncodeSymbol(v string) {
e.EncodeString(c_UTF8, v)
}
func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) {
if c == c_RAW && e.h.WriteExt {
e.writeContainerLen(msgpackContainerBin, len(bs))
} else {
e.writeContainerLen(msgpackContainerStr, len(bs))
}
if len(bs) > 0 {
e.w.writeb(bs)
}
}
func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) {
if ct.hasFixMin && l < ct.fixCutoff {
e.w.writen1(ct.bFixMin | byte(l))
} else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) {
e.w.writen2(ct.b8, uint8(l))
} else if l < 65536 {
e.w.writen1(ct.b16)
bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l))
} else {
e.w.writen1(ct.b32)
bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l))
}
}
//---------------------------------------------
type msgpackDecDriver struct {
d *Decoder
r decReader // *Decoder decReader decReaderT
h *MsgpackHandle
b [scratchByteArrayLen]byte
bd byte
bdRead bool
br bool // bytes reader
noBuiltInTypes
noStreamingCodec
decNoSeparator
}
// Note: This returns either a primitive (int, bool, etc) for non-containers,
// or a containerType, or a specific type denoting nil or extension.
// It is called when a nil interface{} is passed, leaving it up to the DecDriver
// to introspect the stream and decide how best to decode.
// It deciphers the value by looking at the stream first.
func (d *msgpackDecDriver) DecodeNaked() {
if !d.bdRead {
d.readNextBd()
}
bd := d.bd
n := &d.d.n
var decodeFurther bool
switch bd {
case mpNil:
n.v = valueTypeNil
d.bdRead = false
case mpFalse:
n.v = valueTypeBool
n.b = false
case mpTrue:
n.v = valueTypeBool
n.b = true
case mpFloat:
n.v = valueTypeFloat
n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
case mpDouble:
n.v = valueTypeFloat
n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
case mpUint8:
n.v = valueTypeUint
n.u = uint64(d.r.readn1())
case mpUint16:
n.v = valueTypeUint
n.u = uint64(bigen.Uint16(d.r.readx(2)))
case mpUint32:
n.v = valueTypeUint
n.u = uint64(bigen.Uint32(d.r.readx(4)))
case mpUint64:
n.v = valueTypeUint
n.u = uint64(bigen.Uint64(d.r.readx(8)))
case mpInt8:
n.v = valueTypeInt
n.i = int64(int8(d.r.readn1()))
case mpInt16:
n.v = valueTypeInt
n.i = int64(int16(bigen.Uint16(d.r.readx(2))))
case mpInt32:
n.v = valueTypeInt
n.i = int64(int32(bigen.Uint32(d.r.readx(4))))
case mpInt64:
n.v = valueTypeInt
n.i = int64(int64(bigen.Uint64(d.r.readx(8))))
default:
switch {
case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
// positive fixnum (always signed)
n.v = valueTypeInt
n.i = int64(int8(bd))
case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
// negative fixnum
n.v = valueTypeInt
n.i = int64(int8(bd))
case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
if d.h.RawToString {
n.v = valueTypeString
n.s = d.DecodeString()
} else {
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false)
}
case bd == mpBin8, bd == mpBin16, bd == mpBin32:
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false)
case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
n.v = valueTypeArray
decodeFurther = true
case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
n.v = valueTypeMap
decodeFurther = true
case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
n.v = valueTypeExt
clen := d.readExtLen()
n.u = uint64(d.r.readn1())
n.l = d.r.readx(clen)
default:
d.d.errorf("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
}
}
if !decodeFurther {
d.bdRead = false
}
if n.v == valueTypeUint && d.h.SignedInteger {
n.v = valueTypeInt
n.i = int64(n.u)
}
return
}
// int can be decoded from msgpack type: intXXX or uintXXX
func (d *msgpackDecDriver) DecodeInt(bitsize uint8) (i int64) {
if !d.bdRead {
d.readNextBd()
}
switch d.bd {
case mpUint8:
i = int64(uint64(d.r.readn1()))
case mpUint16:
i = int64(uint64(bigen.Uint16(d.r.readx(2))))
case mpUint32:
i = int64(uint64(bigen.Uint32(d.r.readx(4))))
case mpUint64:
i = int64(bigen.Uint64(d.r.readx(8)))
case mpInt8:
i = int64(int8(d.r.readn1()))
case mpInt16:
i = int64(int16(bigen.Uint16(d.r.readx(2))))
case mpInt32:
i = int64(int32(bigen.Uint32(d.r.readx(4))))
case mpInt64:
i = int64(bigen.Uint64(d.r.readx(8)))
default:
switch {
case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
i = int64(int8(d.bd))
case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
i = int64(int8(d.bd))
default:
d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
return
}
}
// check overflow (logic adapted from std pkg reflect/value.go OverflowInt())
if bitsize > 0 {
if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc {
d.d.errorf("Overflow int value: %v", i)
return
}
}
d.bdRead = false
return
}
// uint can be decoded from msgpack type: intXXX or uintXXX
func (d *msgpackDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
if !d.bdRead {
d.readNextBd()
}
switch d.bd {
case mpUint8:
ui = uint64(d.r.readn1())
case mpUint16:
ui = uint64(bigen.Uint16(d.r.readx(2)))
case mpUint32:
ui = uint64(bigen.Uint32(d.r.readx(4)))
case mpUint64:
ui = bigen.Uint64(d.r.readx(8))
case mpInt8:
if i := int64(int8(d.r.readn1())); i >= 0 {
ui = uint64(i)
} else {
d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
return
}
case mpInt16:
if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 {
ui = uint64(i)
} else {
d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
return
}
case mpInt32:
if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 {
ui = uint64(i)
} else {
d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
return
}
case mpInt64:
if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 {
ui = uint64(i)
} else {
d.d.errorf("Assigning negative signed value: %v, to unsigned type", i)
return
}
default:
switch {
case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
ui = uint64(d.bd)
case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
d.d.errorf("Assigning negative signed value: %v, to unsigned type", int(d.bd))
return
default:
d.d.errorf("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
return
}
}
// check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
if bitsize > 0 {
if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc {
d.d.errorf("Overflow uint value: %v", ui)
return
}
}
d.bdRead = false
return
}
// float can either be decoded from msgpack type: float, double or intX
func (d *msgpackDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == mpFloat {
f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
} else if d.bd == mpDouble {
f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
} else {
f = float64(d.DecodeInt(0))
}
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("msgpack: float32 overflow: %v", f)
return
}
d.bdRead = false
return
}
// bool can be decoded from bool, fixnum 0 or 1.
func (d *msgpackDecDriver) DecodeBool() (b bool) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == mpFalse || d.bd == 0 {
// b = false
} else if d.bd == mpTrue || d.bd == 1 {
b = true
} else {
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
return
}
d.bdRead = false
return
}
func (d *msgpackDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
if !d.bdRead {
d.readNextBd()
}
var clen int
// ignore isstring. Expect that the bytes may be found from msgpackContainerStr or msgpackContainerBin
if bd := d.bd; bd == mpBin8 || bd == mpBin16 || bd == mpBin32 {
clen = d.readContainerLen(msgpackContainerBin)
} else {
clen = d.readContainerLen(msgpackContainerStr)
}
// println("DecodeBytes: clen: ", clen)
d.bdRead = false
// bytes may be nil, so handle it. if nil, clen=-1.
if clen < 0 {
return nil
}
if zerocopy {
if d.br {
return d.r.readx(clen)
} else if len(bs) == 0 {
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
}
func (d *msgpackDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.b[:], true, true))
}
func (d *msgpackDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
}
func (d *msgpackDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}
func (d *msgpackDecDriver) ContainerType() (vt valueType) {
bd := d.bd
if bd == mpNil {
return valueTypeNil
} else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 ||
(!d.h.RawToString &&
(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) {
return valueTypeBytes
} else if d.h.RawToString &&
(bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) {
return valueTypeString
} else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) {
return valueTypeArray
} else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) {
return valueTypeMap
} else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
}
return valueTypeUnset
}
func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == mpNil {
d.bdRead = false
v = true
}
return
}
func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) {
bd := d.bd
if bd == mpNil {
clen = -1 // to represent nil
} else if bd == ct.b8 {
clen = int(d.r.readn1())
} else if bd == ct.b16 {
clen = int(bigen.Uint16(d.r.readx(2)))
} else if bd == ct.b32 {
clen = int(bigen.Uint32(d.r.readx(4)))
} else if (ct.bFixMin & bd) == ct.bFixMin {
clen = int(ct.bFixMin ^ bd)
} else {
d.d.errorf("readContainerLen: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd)
return
}
d.bdRead = false
return
}
func (d *msgpackDecDriver) ReadMapStart() int {
return d.readContainerLen(msgpackContainerMap)
}
func (d *msgpackDecDriver) ReadArrayStart() int {
return d.readContainerLen(msgpackContainerList)
}
func (d *msgpackDecDriver) readExtLen() (clen int) {
switch d.bd {
case mpNil:
clen = -1 // to represent nil
case mpFixExt1:
clen = 1
case mpFixExt2:
clen = 2
case mpFixExt4:
clen = 4
case mpFixExt8:
clen = 8
case mpFixExt16:
clen = 16
case mpExt8:
clen = int(d.r.readn1())
case mpExt16:
clen = int(bigen.Uint16(d.r.readx(2)))
case mpExt32:
clen = int(bigen.Uint32(d.r.readx(4)))
default:
d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd)
return
}
return
}
func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if xtag > 0xff {
d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
return
}
realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
realxtag = uint64(realxtag1)
if ext == nil {
re := rv.(*RawExt)
re.Tag = realxtag
re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
} else {
ext.ReadExt(rv, xbs)
}
return
}
func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
if !d.bdRead {
d.readNextBd()
}
xbd := d.bd
if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 {
xbs = d.DecodeBytes(nil, false, true)
} else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 ||
(xbd >= mpFixStrMin && xbd <= mpFixStrMax) {
xbs = d.DecodeBytes(nil, true, true)
} else {
clen := d.readExtLen()
xtag = d.r.readn1()
if verifyTag && xtag != tag {
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
return
}
xbs = d.r.readx(clen)
}
d.bdRead = false
return
}
//--------------------------------------------------
// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
type MsgpackHandle struct {
BasicHandle
// RawToString controls how raw bytes are decoded into a nil interface{}.
RawToString bool
// WriteExt flag supports encoding configured extensions with extension tags.
// It also controls whether other elements of the new spec are encoded (ie Str8).
//
// With WriteExt=false, configured extensions are serialized as raw bytes
// and Str8 is not encoded.
//
// A stream can still be decoded into a typed value, provided an appropriate value
// is provided, but the type cannot be inferred from the stream. If no appropriate
// type is provided (e.g. decoding into a nil interface{}), you get back
// a []byte or string based on the setting of RawToString.
WriteExt bool
binaryEncodingType
}
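// A hedged round-trip sketch using the public API (a standalone program;
// the values are illustrative):
package main
import (
"fmt"
"github.com/ugorji/go/codec"
)
func main() {
var mh codec.MsgpackHandle
mh.RawToString = true // raw bytes decode to string when the target is interface{}
mh.WriteExt = true    // emit new-spec elements (Str8, Bin*, Ext*)
var buf []byte
in := map[string]interface{}{"name": "cri", "replicas": uint64(3)}
if err := codec.NewEncoderBytes(&buf, &mh).Encode(in); err != nil {
panic(err)
}
var out map[string]interface{}
if err := codec.NewDecoderBytes(buf, &mh).Decode(&out); err != nil {
panic(err)
}
fmt.Println(out["name"], out["replicas"])
}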
func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, &setExtWrapper{b: ext})
}
func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver {
return &msgpackEncDriver{e: e, w: e.w, h: h}
}
func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver {
return &msgpackDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}
func (e *msgpackEncDriver) reset() {
e.w = e.e.w
}
func (d *msgpackDecDriver) reset() {
d.r = d.d.r
d.bd, d.bdRead = 0, false
}
//--------------------------------------------------
type msgpackSpecRpcCodec struct {
rpcCodec
}
// /////////////// Spec RPC Codec ///////////////////
func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
// WriteRequest can write to both a Go service, and other services that do
// not abide by the 1 argument rule of a Go service.
// We discriminate based on if the body is a MsgpackSpecRpcMultiArgs
var bodyArr []interface{}
if m, ok := body.(MsgpackSpecRpcMultiArgs); ok {
bodyArr = ([]interface{})(m)
} else {
bodyArr = []interface{}{body}
}
r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
return c.write(r2, nil, false, true)
}
func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
var moe interface{}
if r.Error != "" {
moe = r.Error
}
if moe != nil && body != nil {
body = nil
}
r2 := []interface{}{1, uint32(r.Seq), moe, body}
return c.write(r2, nil, false, true)
}
func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
return c.parseCustomHeader(1, &r.Seq, &r.Error)
}
func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
}
func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
if body == nil { // read and discard
return c.read(nil)
}
bodyArr := []interface{}{body}
return c.read(&bodyArr)
}
func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
if c.isClosed() {
return io.EOF
}
// We read the response header by hand
// so that the body can be decoded on its own from the stream at a later time.
const fia byte = 0x94 // four-item array descriptor value
// Not sure why the panic of EOF is swallowed above.
// if bs1 := c.dec.r.readn1(); bs1 != fia {
// err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1)
// return
// }
var b byte
b, err = c.br.ReadByte()
if err != nil {
return
}
if b != fia {
err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b)
return
}
if err = c.read(&b); err != nil {
return
}
if b != expectTypeByte {
err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b)
return
}
if err = c.read(msgid); err != nil {
return
}
if err = c.read(methodOrError); err != nil {
return
}
return
}
//--------------------------------------------------
// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol
// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
type msgpackSpecRpc struct{}
// MsgpackSpecRpc implements Rpc using the communication protocol defined in
// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
var MsgpackSpecRpc msgpackSpecRpc
func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
}
func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
}
var _ decDriver = (*msgpackDecDriver)(nil)
var _ encDriver = (*msgpackEncDriver)(nil)


@ -1,213 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"math/rand"
"time"
)
// NoopHandle returns a no-op handle. It basically does nothing.
// It is only useful for benchmarking, as it gives an idea of the
// overhead from the codec framework.
//
// LIBRARY USERS: *** DO NOT USE ***
func NoopHandle(slen int) *noopHandle {
h := noopHandle{}
h.rand = rand.New(rand.NewSource(time.Now().UnixNano()))
h.B = make([][]byte, slen)
h.S = make([]string, slen)
for i := 0; i < len(h.S); i++ {
b := make([]byte, i+1)
for j := 0; j < len(b); j++ {
b[j] = 'a' + byte(i)
}
h.B[i] = b
h.S[i] = string(b)
}
return &h
}
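// A hedged sketch of the intended internal use (a hypothetical benchmark
// file): encoding through the no-op handle measures the framework's overhead
// alone, since no real wire format is produced.
package codec
import "testing"
func BenchmarkNoopEncodeSketch(b *testing.B) {
h := NoopHandle(8)
v := map[string]int{"a": 1, "b": 2}
var buf []byte
for i := 0; i < b.N; i++ {
buf = buf[:0]
if err := NewEncoderBytes(&buf, h).Encode(v); err != nil {
b.Fatal(err)
}
}
}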
// noopHandle does nothing.
// It is used to simulate the overhead of the codec framework.
type noopHandle struct {
BasicHandle
binaryEncodingType
noopDrv // noopDrv is unexported here, so we can get a copy of it when needed.
}
type noopDrv struct {
d *Decoder
e *Encoder
i int
S []string
B [][]byte
mks []bool // stack. if map (true), else if array (false)
mk bool // top of stack. what container are we on? map or array?
ct valueType // last response for IsContainerType.
cb int // counter for ContainerType
rand *rand.Rand
}
func (h *noopDrv) r(v int) int { return h.rand.Intn(v) }
func (h *noopDrv) m(v int) int { h.i++; return h.i % v }
func (h *noopDrv) newEncDriver(e *Encoder) encDriver { h.e = e; return h }
func (h *noopDrv) newDecDriver(d *Decoder) decDriver { h.d = d; return h }
func (h *noopDrv) reset() {}
func (h *noopDrv) uncacheRead() {}
// --- encDriver
// stack functions (for map and array)
func (h *noopDrv) start(b bool) {
// println("start", len(h.mks)+1)
h.mks = append(h.mks, b)
h.mk = b
}
func (h *noopDrv) end() {
// println("end: ", len(h.mks)-1)
h.mks = h.mks[:len(h.mks)-1]
if len(h.mks) > 0 {
h.mk = h.mks[len(h.mks)-1]
} else {
h.mk = false
}
}
func (h *noopDrv) EncodeBuiltin(rt uintptr, v interface{}) {}
func (h *noopDrv) EncodeNil() {}
func (h *noopDrv) EncodeInt(i int64) {}
func (h *noopDrv) EncodeUint(i uint64) {}
func (h *noopDrv) EncodeBool(b bool) {}
func (h *noopDrv) EncodeFloat32(f float32) {}
func (h *noopDrv) EncodeFloat64(f float64) {}
func (h *noopDrv) EncodeRawExt(re *RawExt, e *Encoder) {}
func (h *noopDrv) EncodeArrayStart(length int) { h.start(true) }
func (h *noopDrv) EncodeMapStart(length int) { h.start(false) }
func (h *noopDrv) EncodeEnd() { h.end() }
func (h *noopDrv) EncodeString(c charEncoding, v string) {}
func (h *noopDrv) EncodeSymbol(v string) {}
func (h *noopDrv) EncodeStringBytes(c charEncoding, v []byte) {}
func (h *noopDrv) EncodeExt(rv interface{}, xtag uint64, ext Ext, e *Encoder) {}
// ---- decDriver
func (h *noopDrv) initReadNext() {}
func (h *noopDrv) CheckBreak() bool { return false }
func (h *noopDrv) IsBuiltinType(rt uintptr) bool { return false }
func (h *noopDrv) DecodeBuiltin(rt uintptr, v interface{}) {}
func (h *noopDrv) DecodeInt(bitsize uint8) (i int64) { return int64(h.m(15)) }
func (h *noopDrv) DecodeUint(bitsize uint8) (ui uint64) { return uint64(h.m(35)) }
func (h *noopDrv) DecodeFloat(chkOverflow32 bool) (f float64) { return float64(h.m(95)) }
func (h *noopDrv) DecodeBool() (b bool) { return h.m(2) == 0 }
func (h *noopDrv) DecodeString() (s string) { return h.S[h.m(8)] }
// func (h *noopDrv) DecodeStringAsBytes(bs []byte) []byte { return h.DecodeBytes(bs) }
func (h *noopDrv) DecodeBytes(bs []byte, isstring, zerocopy bool) []byte { return h.B[h.m(len(h.B))] }
func (h *noopDrv) ReadEnd() { h.end() }
// toggle map/slice
func (h *noopDrv) ReadMapStart() int { h.start(true); return h.m(10) }
func (h *noopDrv) ReadArrayStart() int { h.start(false); return h.m(10) }
func (h *noopDrv) ContainerType() (vt valueType) {
// return h.m(2) == 0
// handle kStruct, which will bomb if it calls this and doesn't get back a map or array.
// consequently, if the return value is not map or array, reset it to one of them based on h.m(7) % 2
// for kstruct: at least one out of every 2 times, return one of valueTypeMap or Array (else kstruct bombs)
// however, every 10th time it is called, we just return something else.
var vals = [...]valueType{valueTypeArray, valueTypeMap}
// ------------ TAKE ------------
// if h.cb%2 == 0 {
// if h.ct == valueTypeMap || h.ct == valueTypeArray {
// } else {
// h.ct = vals[h.m(2)]
// }
// } else if h.cb%5 == 0 {
// h.ct = valueType(h.m(8))
// } else {
// h.ct = vals[h.m(2)]
// }
// ------------ TAKE ------------
// if h.cb%16 == 0 {
// h.ct = valueType(h.cb % 8)
// } else {
// h.ct = vals[h.cb%2]
// }
h.ct = vals[h.cb%2]
h.cb++
return h.ct
// if h.ct == valueTypeNil || h.ct == valueTypeString || h.ct == valueTypeBytes {
// return h.ct
// }
// return valueTypeUnset
// TODO: may need to tweak this so it works.
// if h.ct == valueTypeMap && vt == valueTypeArray || h.ct == valueTypeArray && vt == valueTypeMap {
// h.cb = !h.cb
// h.ct = vt
// return h.cb
// }
// // go in a loop and check it.
// h.ct = vt
// h.cb = h.m(7) == 0
// return h.cb
}
func (h *noopDrv) TryDecodeAsNil() bool {
if h.mk {
return false
} else {
return h.m(8) == 0
}
}
func (h *noopDrv) DecodeExt(rv interface{}, xtag uint64, ext Ext) uint64 {
return 0
}
func (h *noopDrv) DecodeNaked() {
// use h.r (random) not h.m() because h.m() could cause the same value to be given.
var sk int
if h.mk {
// if mapkey, do not support values of nil OR bytes, array, map or rawext
sk = h.r(7) + 1
} else {
sk = h.r(12)
}
n := &h.d.n
switch sk {
case 0:
n.v = valueTypeNil
case 1:
n.v, n.b = valueTypeBool, false
case 2:
n.v, n.b = valueTypeBool, true
case 3:
n.v, n.i = valueTypeInt, h.DecodeInt(64)
case 4:
n.v, n.u = valueTypeUint, h.DecodeUint(64)
case 5:
n.v, n.f = valueTypeFloat, h.DecodeFloat(true)
case 6:
n.v, n.f = valueTypeFloat, h.DecodeFloat(false)
case 7:
n.v, n.s = valueTypeString, h.DecodeString()
case 8:
n.v, n.l = valueTypeBytes, h.B[h.m(len(h.B))]
case 9:
n.v = valueTypeArray
case 10:
n.v = valueTypeMap
default:
n.v = valueTypeExt
n.u = h.DecodeUint(64)
n.l = h.B[h.m(len(h.B))]
}
h.ct = n.v
return
}


@ -1,3 +0,0 @@
package codec
//go:generate bash prebuild.sh


@ -1,180 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"bufio"
"io"
"net/rpc"
"sync"
)
// rpcEncodeTerminator allows a handler to specify a []byte terminator to send after each Encode.
//
// Some codecs like json need to put a space after each encoded value, to serve as a
// delimiter for things like numbers (else json codec will continue reading till EOF).
type rpcEncodeTerminator interface {
rpcEncodeTerminate() []byte
}
// Rpc provides a rpc Server or Client Codec for rpc communication.
type Rpc interface {
ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
}
// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
// used by the rpc connection. It accommodates use-cases where the connection
// should be used by rpc and non-rpc functions, e.g. streaming a file after
// sending an rpc response.
type RpcCodecBuffered interface {
BufferedReader() *bufio.Reader
BufferedWriter() *bufio.Writer
}
// -------------------------------------
// rpcCodec defines the struct members and common methods.
type rpcCodec struct {
rwc io.ReadWriteCloser
dec *Decoder
enc *Encoder
bw *bufio.Writer
br *bufio.Reader
mu sync.Mutex
h Handle
cls bool
clsmu sync.RWMutex
}
func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
bw := bufio.NewWriter(conn)
br := bufio.NewReader(conn)
return rpcCodec{
rwc: conn,
bw: bw,
br: br,
enc: NewEncoder(bw, h),
dec: NewDecoder(br, h),
h: h,
}
}
func (c *rpcCodec) BufferedReader() *bufio.Reader {
return c.br
}
func (c *rpcCodec) BufferedWriter() *bufio.Writer {
return c.bw
}
func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) {
if c.isClosed() {
return io.EOF
}
if err = c.enc.Encode(obj1); err != nil {
return
}
t, tOk := c.h.(rpcEncodeTerminator)
if tOk {
c.bw.Write(t.rpcEncodeTerminate())
}
if writeObj2 {
if err = c.enc.Encode(obj2); err != nil {
return
}
if tOk {
c.bw.Write(t.rpcEncodeTerminate())
}
}
if doFlush {
return c.bw.Flush()
}
return
}
func (c *rpcCodec) read(obj interface{}) (err error) {
if c.isClosed() {
return io.EOF
}
//If nil is passed in, we should still attempt to read content to nowhere.
if obj == nil {
var obj2 interface{}
return c.dec.Decode(&obj2)
}
return c.dec.Decode(obj)
}
func (c *rpcCodec) isClosed() bool {
c.clsmu.RLock()
x := c.cls
c.clsmu.RUnlock()
return x
}
func (c *rpcCodec) Close() error {
if c.isClosed() {
return io.EOF
}
c.clsmu.Lock()
c.cls = true
c.clsmu.Unlock()
return c.rwc.Close()
}
func (c *rpcCodec) ReadResponseBody(body interface{}) error {
return c.read(body)
}
// -------------------------------------
type goRpcCodec struct {
rpcCodec
}
func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
// Must protect for concurrent access as per API
c.mu.Lock()
defer c.mu.Unlock()
return c.write(r, body, true, true)
}
func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
c.mu.Lock()
defer c.mu.Unlock()
return c.write(r, body, true, true)
}
func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
return c.read(r)
}
func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
return c.read(r)
}
func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
return c.read(body)
}
// -------------------------------------
// goRpc is the implementation of Rpc that uses the communication protocol
// as defined in net/rpc package.
type goRpc struct{}
// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
var GoRpc goRpc
func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
return &goRpcCodec{newRPCCodec(conn, h)}
}
func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
return &goRpcCodec{newRPCCodec(conn, h)}
}
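// A hedged end-to-end sketch wiring GoRpc into net/rpc (a standalone
// program; the service and payload types are made up):
package main
import (
"fmt"
"net"
"net/rpc"
"github.com/ugorji/go/codec"
)
type Args struct{ A, B int } // hypothetical request payload
type Srv struct{} // hypothetical service
func (Srv) Mul(args *Args, reply *int) error { *reply = args.A * args.B; return nil }
func main() {
rpc.Register(Srv{})
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
panic(err)
}
var mh codec.MsgpackHandle
go func() {
conn, _ := ln.Accept()
rpc.ServeCodec(codec.GoRpc.ServerCodec(conn, &mh))
}()
conn, err := net.Dial("tcp", ln.Addr().String())
if err != nil {
panic(err)
}
client := rpc.NewClientWithCodec(codec.GoRpc.ClientCodec(conn, &mh))
defer client.Close()
var product int
if err := client.Call("Srv.Mul", &Args{A: 6, B: 7}, &product); err != nil {
panic(err)
}
fmt.Println(product) // 42
}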
var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered


@ -1,526 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"math"
"reflect"
)
const (
_ uint8 = iota
simpleVdNil = 1
simpleVdFalse = 2
simpleVdTrue = 3
simpleVdFloat32 = 4
simpleVdFloat64 = 5
// each lasts for 4 (ie n, n+1, n+2, n+3)
simpleVdPosInt = 8
simpleVdNegInt = 12
// containers: each spans 8 values (ie n, n+1, ... n+7), though only n..n+4 are used
simpleVdString = 216
simpleVdByteArray = 224
simpleVdArray = 232
simpleVdMap = 240
simpleVdExt = 248
)
type simpleEncDriver struct {
noBuiltInTypes
encNoSeparator
e *Encoder
h *SimpleHandle
w encWriter
b [8]byte
}
func (e *simpleEncDriver) EncodeNil() {
e.w.writen1(simpleVdNil)
}
func (e *simpleEncDriver) EncodeBool(b bool) {
if b {
e.w.writen1(simpleVdTrue)
} else {
e.w.writen1(simpleVdFalse)
}
}
func (e *simpleEncDriver) EncodeFloat32(f float32) {
e.w.writen1(simpleVdFloat32)
bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f))
}
func (e *simpleEncDriver) EncodeFloat64(f float64) {
e.w.writen1(simpleVdFloat64)
bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f))
}
func (e *simpleEncDriver) EncodeInt(v int64) {
if v < 0 {
e.encUint(uint64(-v), simpleVdNegInt)
} else {
e.encUint(uint64(v), simpleVdPosInt)
}
}
func (e *simpleEncDriver) EncodeUint(v uint64) {
e.encUint(v, simpleVdPosInt)
}
func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
if v <= math.MaxUint8 {
e.w.writen2(bd, uint8(v))
} else if v <= math.MaxUint16 {
e.w.writen1(bd + 1)
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v))
} else if v <= math.MaxUint32 {
e.w.writen1(bd + 2)
bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v))
} else { // if v <= math.MaxUint64 {
e.w.writen1(bd + 3)
bigenHelper{e.b[:8], e.w}.writeUint64(v)
}
}
func (e *simpleEncDriver) encLen(bd byte, length int) {
if length == 0 {
e.w.writen1(bd)
} else if length <= math.MaxUint8 {
e.w.writen1(bd + 1)
e.w.writen1(uint8(length))
} else if length <= math.MaxUint16 {
e.w.writen1(bd + 2)
bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length))
} else if int64(length) <= math.MaxUint32 {
e.w.writen1(bd + 3)
bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length))
} else {
e.w.writen1(bd + 4)
bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length))
}
}
func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) {
bs := ext.WriteExt(rv)
if bs == nil {
e.EncodeNil()
return
}
e.encodeExtPreamble(uint8(xtag), len(bs))
e.w.writeb(bs)
}
func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) {
e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
e.w.writeb(re.Data)
}
func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
e.encLen(simpleVdExt, length)
e.w.writen1(xtag)
}
func (e *simpleEncDriver) EncodeArrayStart(length int) {
e.encLen(simpleVdArray, length)
}
func (e *simpleEncDriver) EncodeMapStart(length int) {
e.encLen(simpleVdMap, length)
}
func (e *simpleEncDriver) EncodeString(c charEncoding, v string) {
e.encLen(simpleVdString, len(v))
e.w.writestr(v)
}
func (e *simpleEncDriver) EncodeSymbol(v string) {
e.EncodeString(c_UTF8, v)
}
func (e *simpleEncDriver) EncodeStringBytes(c charEncoding, v []byte) {
e.encLen(simpleVdByteArray, len(v))
e.w.writeb(v)
}
//------------------------------------
type simpleDecDriver struct {
d *Decoder
h *SimpleHandle
r decReader
bdRead bool
bd byte
br bool // bytes reader
noBuiltInTypes
noStreamingCodec
decNoSeparator
b [scratchByteArrayLen]byte
}
func (d *simpleDecDriver) readNextBd() {
d.bd = d.r.readn1()
d.bdRead = true
}
func (d *simpleDecDriver) uncacheRead() {
if d.bdRead {
d.r.unreadn1()
d.bdRead = false
}
}
func (d *simpleDecDriver) ContainerType() (vt valueType) {
if d.bd == simpleVdNil {
return valueTypeNil
} else if d.bd == simpleVdByteArray || d.bd == simpleVdByteArray+1 ||
d.bd == simpleVdByteArray+2 || d.bd == simpleVdByteArray+3 || d.bd == simpleVdByteArray+4 {
return valueTypeBytes
} else if d.bd == simpleVdString || d.bd == simpleVdString+1 ||
d.bd == simpleVdString+2 || d.bd == simpleVdString+3 || d.bd == simpleVdString+4 {
return valueTypeString
} else if d.bd == simpleVdArray || d.bd == simpleVdArray+1 ||
d.bd == simpleVdArray+2 || d.bd == simpleVdArray+3 || d.bd == simpleVdArray+4 {
return valueTypeArray
} else if d.bd == simpleVdMap || d.bd == simpleVdMap+1 ||
d.bd == simpleVdMap+2 || d.bd == simpleVdMap+3 || d.bd == simpleVdMap+4 {
return valueTypeMap
} else {
// d.d.errorf("isContainerType: unsupported parameter: %v", vt)
}
return valueTypeUnset
}
func (d *simpleDecDriver) TryDecodeAsNil() bool {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdNil {
d.bdRead = false
return true
}
return false
}
func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) {
if !d.bdRead {
d.readNextBd()
}
switch d.bd {
case simpleVdPosInt:
ui = uint64(d.r.readn1())
case simpleVdPosInt + 1:
ui = uint64(bigen.Uint16(d.r.readx(2)))
case simpleVdPosInt + 2:
ui = uint64(bigen.Uint32(d.r.readx(4)))
case simpleVdPosInt + 3:
ui = uint64(bigen.Uint64(d.r.readx(8)))
case simpleVdNegInt:
ui = uint64(d.r.readn1())
neg = true
case simpleVdNegInt + 1:
ui = uint64(bigen.Uint16(d.r.readx(2)))
neg = true
case simpleVdNegInt + 2:
ui = uint64(bigen.Uint32(d.r.readx(4)))
neg = true
case simpleVdNegInt + 3:
ui = uint64(bigen.Uint64(d.r.readx(8)))
neg = true
default:
d.d.errorf("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
return
}
// don't do this check, because callers may only want the unsigned value.
// if ui > math.MaxInt64 {
// d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
// return
// }
return
}
func (d *simpleDecDriver) DecodeInt(bitsize uint8) (i int64) {
ui, neg := d.decCheckInteger()
i, overflow := chkOvf.SignedInt(ui)
if overflow {
d.d.errorf("simple: overflow converting %v to signed integer", ui)
return
}
if neg {
i = -i
}
if chkOvf.Int(i, bitsize) {
d.d.errorf("simple: overflow integer: %v", i)
return
}
d.bdRead = false
return
}
func (d *simpleDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
ui, neg := d.decCheckInteger()
if neg {
d.d.errorf("Assigning negative signed value to unsigned type")
return
}
if chkOvf.Uint(ui, bitsize) {
d.d.errorf("simple: overflow integer: %v", ui)
return
}
d.bdRead = false
return
}
func (d *simpleDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdFloat32 {
f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4))))
} else if d.bd == simpleVdFloat64 {
f = math.Float64frombits(bigen.Uint64(d.r.readx(8)))
} else {
if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
f = float64(d.DecodeInt(64))
} else {
d.d.errorf("Float only valid from float32/64: Invalid descriptor: %v", d.bd)
return
}
}
if chkOverflow32 && chkOvf.Float32(f) {
d.d.errorf("msgpack: float32 overflow: %v", f)
return
}
d.bdRead = false
return
}
// bool can be decoded from bool only (single byte).
func (d *simpleDecDriver) DecodeBool() (b bool) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdTrue {
b = true
} else if d.bd == simpleVdFalse {
} else {
d.d.errorf("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
return
}
d.bdRead = false
return
}
func (d *simpleDecDriver) ReadMapStart() (length int) {
d.bdRead = false
return d.decLen()
}
func (d *simpleDecDriver) ReadArrayStart() (length int) {
d.bdRead = false
return d.decLen()
}
func (d *simpleDecDriver) decLen() int {
switch d.bd % 8 {
case 0:
return 0
case 1:
return int(d.r.readn1())
case 2:
return int(bigen.Uint16(d.r.readx(2)))
case 3:
ui := uint64(bigen.Uint32(d.r.readx(4)))
if chkOvf.Uint(ui, intBitsize) {
d.d.errorf("simple: overflow integer: %v", ui)
return 0
}
return int(ui)
case 4:
ui := bigen.Uint64(d.r.readx(8))
if chkOvf.Uint(ui, intBitsize) {
d.d.errorf("simple: overflow integer: %v", ui)
return 0
}
return int(ui)
}
d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
return -1
}
func (d *simpleDecDriver) DecodeString() (s string) {
return string(d.DecodeBytes(d.b[:], true, true))
}
func (d *simpleDecDriver) DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) {
if !d.bdRead {
d.readNextBd()
}
if d.bd == simpleVdNil {
d.bdRead = false
return
}
clen := d.decLen()
d.bdRead = false
if zerocopy {
if d.br {
return d.r.readx(clen)
} else if len(bs) == 0 {
bs = d.b[:]
}
}
return decByteSlice(d.r, clen, bs)
}
func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) {
if xtag > 0xff {
d.d.errorf("decodeExt: tag must be <= 0xff; got: %v", xtag)
return
}
realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag))
realxtag = uint64(realxtag1)
if ext == nil {
re := rv.(*RawExt)
re.Tag = realxtag
re.Data = detachZeroCopyBytes(d.br, re.Data, xbs)
} else {
ext.ReadExt(rv, xbs)
}
return
}
func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
if !d.bdRead {
d.readNextBd()
}
switch d.bd {
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
l := d.decLen()
xtag = d.r.readn1()
if verifyTag && xtag != tag {
d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
return
}
xbs = d.r.readx(l)
case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
xbs = d.DecodeBytes(nil, false, true)
default:
d.d.errorf("Invalid d.bd for extensions (Expecting extensions or byte array). Got: 0x%x", d.bd)
return
}
d.bdRead = false
return
}
func (d *simpleDecDriver) DecodeNaked() {
if !d.bdRead {
d.readNextBd()
}
n := &d.d.n
var decodeFurther bool
switch d.bd {
case simpleVdNil:
n.v = valueTypeNil
case simpleVdFalse:
n.v = valueTypeBool
n.b = false
case simpleVdTrue:
n.v = valueTypeBool
n.b = true
case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
if d.h.SignedInteger {
n.v = valueTypeInt
n.i = d.DecodeInt(64)
} else {
n.v = valueTypeUint
n.u = d.DecodeUint(64)
}
case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
n.v = valueTypeInt
n.i = d.DecodeInt(64)
case simpleVdFloat32:
n.v = valueTypeFloat
n.f = d.DecodeFloat(true)
case simpleVdFloat64:
n.v = valueTypeFloat
n.f = d.DecodeFloat(false)
case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
n.v = valueTypeString
n.s = d.DecodeString()
case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
n.v = valueTypeBytes
n.l = d.DecodeBytes(nil, false, false)
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
n.v = valueTypeExt
l := d.decLen()
n.u = uint64(d.r.readn1())
n.l = d.r.readx(l)
case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
n.v = valueTypeArray
decodeFurther = true
case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
n.v = valueTypeMap
decodeFurther = true
default:
d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
}
if !decodeFurther {
d.bdRead = false
}
return
}
//------------------------------------
// SimpleHandle is a Handle for a very simple encoding format.
//
// simple is a simplistic codec similar to binc, but not as compact.
// - Encoding of a value is always preceded by the descriptor byte (bd)
// - True, false, nil are encoded fully in 1 byte (the descriptor)
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
// - Lengths of containers (strings, bytes, array, map, extensions)
// are encoded in 0, 1, 2, 4 or 8 bytes.
// Zero-length containers have no length encoded.
// For others, the number of length bytes is given by pow(2, (bd%8)-1)
// - maps are encoded as [bd] [length] [[key][value]]...
// - arrays are encoded as [bd] [length] [value]...
// - extensions are encoded as [bd] [length] [tag] [byte]...
// - strings/bytearrays are encoded as [bd] [length] [byte]...
//
// The full spec will be published soon.
type SimpleHandle struct {
BasicHandle
binaryEncodingType
}
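// A hedged round-trip sketch (a standalone program) showing the descriptor
// byte described above:
package main
import (
"fmt"
"github.com/ugorji/go/codec"
)
func main() {
var sh codec.SimpleHandle
var buf []byte
if err := codec.NewEncoderBytes(&buf, &sh).Encode([]int{1, 300, -70000}); err != nil {
panic(err)
}
// buf[0] is the array descriptor: simpleVdArray+1 (233), since the
// 3-element length fits in one extra byte.
fmt.Printf("% x\n", buf)
var out []int
if err := codec.NewDecoderBytes(buf, &sh).Decode(&out); err != nil {
panic(err)
}
fmt.Println(out)
}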
func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
return h.SetExt(rt, tag, &setExtWrapper{b: ext})
}
func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver {
return &simpleEncDriver{e: e, w: e.w, h: h}
}
func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver {
return &simpleDecDriver{d: d, r: d.r, h: h, br: d.bytes}
}
func (e *simpleEncDriver) reset() {
e.w = e.e.w
}
func (d *simpleDecDriver) reset() {
d.r = d.d.r
d.bd, d.bdRead = 0, false
}
var _ decDriver = (*simpleDecDriver)(nil)
var _ encDriver = (*simpleEncDriver)(nil)


@ -1,233 +0,0 @@
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"fmt"
"reflect"
"time"
)
var (
timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
timeExtEncFn = func(rv reflect.Value) (bs []byte, err error) {
defer panicToErr(&err)
bs = timeExt{}.WriteExt(rv.Interface())
return
}
timeExtDecFn = func(rv reflect.Value, bs []byte) (err error) {
defer panicToErr(&err)
timeExt{}.ReadExt(rv.Interface(), bs)
return
}
)
type timeExt struct{}
func (x timeExt) WriteExt(v interface{}) (bs []byte) {
switch v2 := v.(type) {
case time.Time:
bs = encodeTime(v2)
case *time.Time:
bs = encodeTime(*v2)
default:
panic(fmt.Errorf("unsupported format for time conversion: expecting time.Time; got %T", v2))
}
return
}
func (x timeExt) ReadExt(v interface{}, bs []byte) {
tt, err := decodeTime(bs)
if err != nil {
panic(err)
}
*(v.(*time.Time)) = tt
}
func (x timeExt) ConvertExt(v interface{}) interface{} {
return x.WriteExt(v)
}
func (x timeExt) UpdateExt(v interface{}, src interface{}) {
x.ReadExt(v, src.([]byte))
}
// encodeTime encodes a time.Time as a []byte, including
// information on the instant in time and UTC offset.
//
// Format Description
//
// A timestamp is composed of 3 components:
//
// - secs: signed integer representing seconds since unix epoch
// - nsecs: unsigned integer representing fractional seconds as a
// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
// - tz: signed integer representing timezone offset in minutes east of UTC,
// and a dst (daylight savings time) flag
//
// When encoding a timestamp, the first byte is the descriptor, which
// defines which components are encoded and how many bytes are used to
// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
// is not encoded in the byte array explicitly*.
//
// Descriptor 8 bits are of the form `A B C DDD EE`:
// A: Is secs component encoded? 1 = true
// B: Is nsecs component encoded? 1 = true
// C: Is tz component encoded? 1 = true
// DDD: Number of extra bytes for secs (range 0-7).
// If A = 1, secs encoded in DDD+1 bytes.
// If A = 0, secs is not encoded, and is assumed to be 0.
// If A = 1, then we need at least 1 byte to encode secs.
// DDD says the number of extra bytes beyond that 1.
// E.g. if DDD=0, then secs is represented in 1 byte.
// if DDD=2, then secs is represented in 3 bytes.
// EE: Number of extra bytes for nsecs (range 0-3).
// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
//
// Following the descriptor bytes, subsequent bytes are:
//
// secs component encoded in `DDD + 1` bytes (if A == 1)
// nsecs component encoded in `EE + 1` bytes (if B == 1)
// tz component encoded in 2 bytes (if C == 1)
//
// secs and nsecs components are integers encoded in a BigEndian
// 2-complement encoding format.
//
// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
// Least significant bit 0 are described below:
//
// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
// Bit 15 = have_dst: set to 1 if we set the dst flag.
// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
//
func encodeTime(t time.Time) []byte {
//t := rv.Interface().(time.Time)
tsecs, tnsecs := t.Unix(), t.Nanosecond()
var (
bd byte
btmp [8]byte
bs [16]byte
i int = 1
)
l := t.Location()
if l == time.UTC {
l = nil
}
if tsecs != 0 {
bd = bd | 0x80
bigen.PutUint64(btmp[:], uint64(tsecs))
f := pruneSignExt(btmp[:], tsecs >= 0)
bd = bd | (byte(7-f) << 2)
copy(bs[i:], btmp[f:])
i = i + (8 - f)
}
if tnsecs != 0 {
bd = bd | 0x40
bigen.PutUint32(btmp[:4], uint32(tnsecs))
f := pruneSignExt(btmp[:4], true)
bd = bd | byte(3-f)
copy(bs[i:], btmp[f:4])
i = i + (4 - f)
}
if l != nil {
bd = bd | 0x20
// Note that Go Libs do not give access to dst flag.
_, zoneOffset := t.Zone()
//zoneName, zoneOffset := t.Zone()
zoneOffset /= 60
z := uint16(zoneOffset)
bigen.PutUint16(btmp[:2], z)
// clear dst flags
bs[i] = btmp[0] & 0x3f
bs[i+1] = btmp[1]
i = i + 2
}
bs[0] = bd
return bs[0:i]
}
// decodeTime decodes a []byte into a time.Time.
func decodeTime(bs []byte) (tt time.Time, err error) {
bd := bs[0]
var (
tsec int64
tnsec uint32
tz uint16
i byte = 1
i2 byte
n byte
)
if bd&(1<<7) != 0 {
var btmp [8]byte
n = ((bd >> 2) & 0x7) + 1
i2 = i + n
copy(btmp[8-n:], bs[i:i2])
//if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
if bs[i]&(1<<7) != 0 {
copy(btmp[0:8-n], bsAll0xff)
//for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
}
i = i2
tsec = int64(bigen.Uint64(btmp[:]))
}
if bd&(1<<6) != 0 {
var btmp [4]byte
n = (bd & 0x3) + 1
i2 = i + n
copy(btmp[4-n:], bs[i:i2])
i = i2
tnsec = bigen.Uint32(btmp[:])
}
if bd&(1<<5) == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
return
}
// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as the zone name.
// However, we need a name here, so it can be shown when the time is printed.
// The zone name is of the form: UTC-08:00.
// Note that the Go standard library does not give access to the dst flag, so we ignore the dst bits.
i2 = i + 2
tz = bigen.Uint16(bs[i:i2])
i = i2
// sign extend sign bit into top 2 MSB (which were dst bits):
if tz&(1<<13) == 0 { // positive
tz = tz & 0x3fff //clear 2 MSBs: dst bits
} else { // negative
tz = tz | 0xc000 //set 2 MSBs: dst bits
//tzname[3] = '-' (TODO: verify. this works here)
}
tzint := int16(tz)
if tzint == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
} else {
// For Go Time, do not use a descriptive timezone.
// It's unnecessary, and makes it harder to do a reflect.DeepEqual.
// The Offset already tells what the offset should be, if not on UTC and unknown zone name.
// var zoneName = timeLocUTCName(tzint)
tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
}
return
}
func timeLocUTCName(tzint int16) string {
if tzint == 0 {
return "UTC"
}
var tzname = []byte("UTC+00:00")
//tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
//tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
var tzhr, tzmin int16
if tzint < 0 {
tzname[3] = '-' // (TODO: verify. this works here)
tzhr, tzmin = -tzint/60, (-tzint)%60
} else {
tzhr, tzmin = tzint/60, tzint%60
}
tzname[4] = timeDigits[tzhr/10]
tzname[5] = timeDigits[tzhr%10]
tzname[7] = timeDigits[tzmin/10]
tzname[8] = timeDigits[tzmin%10]
return string(tzname)
//return time.FixedZone(string(tzname), int(tzint)*60)
}

File diff suppressed because it is too large.

@@ -21,6 +21,7 @@ syntax = 'proto2';
package k8s.io.api.core.v1;
+import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto";
import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
@@ -2132,6 +2133,31 @@ message PersistentVolumeClaim {
  optional PersistentVolumeClaimStatus status = 3;
}
+// PersistentVolumeClaimCondition contains details about state of pvc
+message PersistentVolumeClaimCondition {
+  optional string type = 1;
+  optional string status = 2;
+  // Last time we probed the condition.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+  // Unique, this should be a short, machine understandable string that gives the reason
+  // for condition's last transition. If it reports "ResizeStarted" that means the underlying
+  // persistent volume is being resized.
+  // +optional
+  optional string reason = 5;
+  // Human-readable message indicating details about last transition.
+  // +optional
+  optional string message = 6;
+}
// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
message PersistentVolumeClaimList {
  // Standard list metadata.
@@ -2185,6 +2211,13 @@ message PersistentVolumeClaimStatus {
  // Represents the actual resources of the underlying volume.
  // +optional
  map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 3;
+  // Current Condition of persistent volume claim. If underlying persistent volume is being
+  // resized then the Condition will be set to 'ResizeStarted'.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated PersistentVolumeClaimCondition conditions = 4;
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
@@ -3629,7 +3662,7 @@ message SecurityContext {
  optional bool readOnlyRootFilesystem = 6;
  // AllowPrivilegeEscalation controls whether a process can gain more
-  // privileges than it's parent process. This bool directly controls if
+  // privileges than its parent process. This bool directly controls if
  // the no_new_privs flag will be set on the container process.
  // AllowPrivilegeEscalation is true always when the container is:
  // 1) run as Privileged
@@ -4070,6 +4103,14 @@ message VolumeMount {
  // Defaults to "" (volume's root).
  // +optional
  optional string subPath = 4;
+  // mountPropagation determines how mounts are propagated from the host
+  // to container and the other way around.
+  // When not set, MountPropagationHostToContainer is used.
+  // This field is alpha in 1.8 and can be reworked or removed in a future
+  // release.
+  // +optional
+  optional string mountPropagation = 5;
}
// Projection that may be projected along with other supported volume types

File diff suppressed because it is too large.

vendor/k8s.io/api/core/v1/types.go (generated, vendored)

@@ -630,6 +630,34 @@ type PersistentVolumeClaimSpec struct {
    StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
}
+// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
+type PersistentVolumeClaimConditionType string
+
+const (
+    // PersistentVolumeClaimResizing - a user-triggered resize of the pvc has been started
+    PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
+)
+
+// PersistentVolumeClaimCondition contains details about state of pvc
+type PersistentVolumeClaimCondition struct {
+    Type   PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
+    Status ConditionStatus                    `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+    // Last time we probed the condition.
+    // +optional
+    LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+    // Last time the condition transitioned from one status to another.
+    // +optional
+    LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+    // Unique, this should be a short, machine understandable string that gives the reason
+    // for condition's last transition. If it reports "ResizeStarted" that means the underlying
+    // persistent volume is being resized.
+    // +optional
+    Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+    // Human-readable message indicating details about last transition.
+    // +optional
+    Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
type PersistentVolumeClaimStatus struct {
    // Phase represents the current phase of PersistentVolumeClaim.
@@ -642,6 +670,12 @@ type PersistentVolumeClaimStatus struct {
    // Represents the actual resources of the underlying volume.
    // +optional
    Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
+    // Current Condition of persistent volume claim. If underlying persistent volume is being
+    // resized then the Condition will be set to 'ResizeStarted'.
+    // +optional
+    // +patchMergeKey=type
+    // +patchStrategy=merge
+    Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
}

type PersistentVolumeAccessMode string
@@ -739,7 +773,7 @@ type EmptyDirVolumeSource struct {
    // The default is nil which means that the limit is undefined.
    // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
    // +optional
-    SizeLimit resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"`
+    SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"`
}

// Represents a Glusterfs mount that lasts the lifetime of a pod.
@@ -909,8 +943,9 @@ type FlockerVolumeSource struct {
type StorageMedium string

const (
    StorageMediumDefault   StorageMedium = ""          // use whatever the default is for the node
    StorageMediumMemory    StorageMedium = "Memory"    // use memory (tmpfs)
+    StorageMediumHugepages StorageMedium = "HugePages" // use hugepages
)

// Protocol defines network protocols supported for things like container ports.
@@ -1547,8 +1582,34 @@ type VolumeMount struct {
    // Defaults to "" (volume's root).
    // +optional
    SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"`
+    // mountPropagation determines how mounts are propagated from the host
+    // to container and the other way around.
+    // When not set, MountPropagationHostToContainer is used.
+    // This field is alpha in 1.8 and can be reworked or removed in a future
+    // release.
+    // +optional
+    MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"`
}
+
+// MountPropagationMode describes mount propagation.
+type MountPropagationMode string
+
+const (
+    // MountPropagationHostToContainer means that the volume in a container will
+    // receive new mounts from the host or other containers, but filesystems
+    // mounted inside the container won't be propagated to the host or other
+    // containers.
+    // Note that this mode is recursively applied to all mounts in the volume
+    // ("rslave" in Linux terminology).
+    MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
+    // MountPropagationBidirectional means that the volume in a container will
+    // receive new mounts from the host or other containers, and its own mounts
+    // will be propagated from the container to the host or other containers.
+    // Note that this mode is recursively applied to all mounts in the volume
+    // ("rshared" in Linux terminology).
+    MountPropagationBidirectional MountPropagationMode = "Bidirectional"
+)
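
A short usage sketch for the new field. This is not part of the diff; it assumes only the v1 types added above and the standard fmt package:

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    func main() {
        // A nil MountPropagation falls back to the HostToContainer default.
        mode := v1.MountPropagationBidirectional
        vm := v1.VolumeMount{
            Name:             "host-root",
            MountPath:        "/host",
            MountPropagation: &mode,
        }
        fmt.Println(vm.Name, *vm.MountPropagation) // host-root Bidirectional
    }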

// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
    // Name of the environment variable. Must be a C_IDENTIFIER.
@@ -2425,6 +2486,7 @@ const (
    // Kubelet without going through the scheduler to start.
    // Enforced by Kubelet and the scheduler.
    // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
    // Evict any already-running pods that do not tolerate the taint.
    // Currently enforced by NodeController.
    TaintEffectNoExecute TaintEffect = "NoExecute"
@@ -2467,27 +2529,6 @@ const (
    TolerationOpEqual TolerationOperator = "Equal"
)

-const (
-    // This annotation key will be used to contain an array of v1 JSON encoded Containers
-    // for init containers. The annotation will be placed into the internal type and cleared.
-    // This key is only recognized by version >= 1.4.
-    PodInitContainersBetaAnnotationKey = "pod.beta.kubernetes.io/init-containers"
-    // This annotation key will be used to contain an array of v1 JSON encoded Containers
-    // for init containers. The annotation will be placed into the internal type and cleared.
-    // This key is recognized by version >= 1.3. For version 1.4 code, this key
-    // will have its value copied to the beta key.
-    PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers"
-    // This annotation key will be used to contain an array of v1 JSON encoded
-    // ContainerStatuses for init containers. The annotation will be placed into the internal
-    // type and cleared. This key is only recognized by version >= 1.4.
-    PodInitContainerStatusesBetaAnnotationKey = "pod.beta.kubernetes.io/init-container-statuses"
-    // This annotation key will be used to contain an array of v1 JSON encoded
-    // ContainerStatuses for init containers. The annotation will be placed into the internal
-    // type and cleared. This key is recognized by version >= 1.3. For version 1.4 code,
-    // this key will have its value copied to the beta key.
-    PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses"
-)

// PodSpec is a description of a pod.
type PodSpec struct {
    // List of volumes that can be mounted by containers belonging to the pod.
@@ -2951,6 +2992,8 @@ type ReplicationControllerCondition struct {
}

// +genclient
+// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale
+// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ReplicationController represents the configuration of a replication controller.
@@ -3003,6 +3046,8 @@ const (
    ServiceAffinityNone ServiceAffinity = "None"
)

+const DefaultClientIPServiceAffinitySeconds int32 = 10800
+
// SessionAffinityConfig represents the configurations of session affinity.
type SessionAffinityConfig struct {
    // clientIP contains the configurations of Client IP based session affinity.
@@ -3648,6 +3693,8 @@ const (
    NodeDiskPressure NodeConditionType = "DiskPressure"
    // NodeNetworkUnavailable means that network for the node is not correctly configured.
    NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
+    // NodeConfigOK indicates whether the kubelet is correctly configured
+    NodeConfigOK NodeConditionType = "ConfigOK"
)

// NodeCondition contains condition information for a node.
@@ -3716,6 +3763,8 @@ const (
    ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-"
    // Default namespace prefix.
    ResourceDefaultNamespacePrefix = "kubernetes.io/"
+    // Name prefix for huge page resources (alpha).
+    ResourceHugePagesPrefix = "hugepages-"
)

// ResourceList is a set of (resource name, quantity) pairs.
@@ -4743,7 +4792,7 @@ type SecurityContext struct {
    // +optional
    ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"`
    // AllowPrivilegeEscalation controls whether a process can gain more
-    // privileges than it's parent process. This bool directly controls if
+    // privileges than its parent process. This bool directly controls if
    // the no_new_privs flag will be set on the container process.
    // AllowPrivilegeEscalation is true always when the container is:
    // 1) run as Privileged


@@ -1122,6 +1122,18 @@ func (PersistentVolumeClaim) SwaggerDoc() map[string]string {
    return map_PersistentVolumeClaim
}

+var map_PersistentVolumeClaimCondition = map[string]string{
+    "":                   "PersistentVolumeClaimCondition contains details about state of pvc",
+    "lastProbeTime":      "Last time we probed the condition.",
+    "lastTransitionTime": "Last time the condition transitioned from one status to another.",
+    "reason":             "Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.",
+    "message":            "Human-readable message indicating details about last transition.",
+}
+
+func (PersistentVolumeClaimCondition) SwaggerDoc() map[string]string {
+    return map_PersistentVolumeClaimCondition
+}
+
var map_PersistentVolumeClaimList = map[string]string{
    "":         "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.",
    "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
@@ -1150,6 +1162,7 @@ var map_PersistentVolumeClaimStatus = map[string]string{
    "phase":       "Phase represents the current phase of PersistentVolumeClaim.",
    "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
    "capacity":    "Represents the actual resources of the underlying volume.",
+    "conditions":  "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.",
}

func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
@@ -1818,7 +1831,7 @@ var map_SecurityContext = map[string]string{
    "runAsUser":                "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
    "runAsNonRoot":             "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
    "readOnlyRootFilesystem":   "Whether this container has a read-only root filesystem. Default is false.",
-    "allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than it's parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN",
+    "allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN",
}

func (SecurityContext) SwaggerDoc() map[string]string {
@@ -2019,11 +2032,12 @@ func (Volume) SwaggerDoc() map[string]string {
}

var map_VolumeMount = map[string]string{
    "":                 "VolumeMount describes a mounting of a Volume within a container.",
    "name":             "This must match the Name of a Volume.",
    "readOnly":         "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.",
    "mountPath":        "Path within the container at which the volume should be mounted. Must not contain ':'.",
    "subPath":          "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).",
+    "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationHostToContainer is used. This field is alpha in 1.8 and can be reworked or removed in a future release.",
}

func (VolumeMount) SwaggerDoc() map[string]string {


@@ -21,6 +21,7 @@ limitations under the License.
package v1

import (
+    resource "k8s.io/apimachinery/pkg/api/resource"
    meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    conversion "k8s.io/apimachinery/pkg/conversion"
    runtime "k8s.io/apimachinery/pkg/runtime"
@@ -426,6 +427,10 @@ func RegisterDeepCopies(scheme *runtime.Scheme) error {
        in.(*PersistentVolumeClaim).DeepCopyInto(out.(*PersistentVolumeClaim))
        return nil
    }, InType: reflect.TypeOf(&PersistentVolumeClaim{})},
+    conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
+        in.(*PersistentVolumeClaimCondition).DeepCopyInto(out.(*PersistentVolumeClaimCondition))
+        return nil
+    }, InType: reflect.TypeOf(&PersistentVolumeClaimCondition{})},
    conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
        in.(*PersistentVolumeClaimList).DeepCopyInto(out.(*PersistentVolumeClaimList))
        return nil
@@ -1408,7 +1413,9 @@ func (in *Container) DeepCopyInto(out *Container) {
    if in.VolumeMounts != nil {
        in, out := &in.VolumeMounts, &out.VolumeMounts
        *out = make([]VolumeMount, len(*in))
-        copy(*out, *in)
+        for i := range *in {
+            (*in)[i].DeepCopyInto(&(*out)[i])
+        }
    }
    if in.LivenessProbe != nil {
        in, out := &in.LivenessProbe, &out.LivenessProbe
@@ -1787,7 +1794,15 @@ func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource {

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) {
    *out = *in
-    out.SizeLimit = in.SizeLimit.DeepCopy()
+    if in.SizeLimit != nil {
+        in, out := &in.SizeLimit, &out.SizeLimit
+        if *in == nil {
+            *out = nil
+        } else {
+            *out = new(resource.Quantity)
+            **out = (*in).DeepCopy()
+        }
+    }
    return
}
@@ -3488,6 +3503,24 @@ func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object {
    }
}

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) {
+    *out = *in
+    in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
+    in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+    return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition.
+func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition {
+    if in == nil {
+        return nil
+    }
+    out := new(PersistentVolumeClaimCondition)
+    in.DeepCopyInto(out)
+    return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) {
    *out = *in
@@ -3577,6 +3610,13 @@ func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimSt
            (*out)[key] = val.DeepCopy()
        }
    }
+    if in.Conditions != nil {
+        in, out := &in.Conditions, &out.Conditions
+        *out = make([]PersistentVolumeClaimCondition, len(*in))
+        for i := range *in {
+            (*in)[i].DeepCopyInto(&(*out)[i])
+        }
+    }
    return
}
@@ -5924,6 +5964,15 @@ func (in *Volume) DeepCopy() *Volume {

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeMount) DeepCopyInto(out *VolumeMount) {
    *out = *in
+    if in.MountPropagation != nil {
+        in, out := &in.MountPropagation, &out.MountPropagation
+        if *in == nil {
+            *out = nil
+        } else {
+            *out = new(MountPropagationMode)
+            **out = **in
+        }
+    }
    return
}
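
The per-element loop and the nil checks above exist because these structs now carry pointers; a shallow copy would alias them. A small sketch, not part of the diff, assuming the same k8s.io/api/core/v1 package (as v1) and fmt:

    mode := v1.MountPropagationHostToContainer
    orig := v1.VolumeMount{Name: "data", MountPath: "/data", MountPropagation: &mode}
    cp := orig.DeepCopy()
    *cp.MountPropagation = v1.MountPropagationBidirectional
    fmt.Println(*orig.MountPropagation) // still HostToContainer: the copy owns its own pointer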


@@ -28,6 +28,12 @@ import (
    "k8s.io/apimachinery/pkg/util/validation/field"
)

+const (
+    // StatusTooManyRequests means the server experienced too many requests within a
+    // given window and that the client must wait to perform the action again.
+    StatusTooManyRequests = 429
+)
+
// StatusError is an error intended for consumption by a REST API server; it can also be
// reconstructed by clients from a REST response. Public to allow easy type switches.
type StatusError struct {
@@ -174,6 +180,17 @@ func NewGone(message string) *StatusError {
    }}
}

+// NewResourceExpired creates an error that indicates that the requested resource content has expired from
+// the server (usually due to a resourceVersion that is too old).
+func NewResourceExpired(message string) *StatusError {
+    return &StatusError{metav1.Status{
+        Status:  metav1.StatusFailure,
+        Code:    http.StatusGone,
+        Reason:  metav1.StatusReasonExpired,
+        Message: message,
+    }}
+}
+
// NewInvalid returns an error indicating the item is invalid and cannot be processed.
func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorList) *StatusError {
    causes := make([]metav1.StatusCause, 0, len(errs))
@@ -298,6 +315,18 @@ func NewTimeoutError(message string, retryAfterSeconds int) *StatusError {
    }}
}

+// NewTooManyRequestsError returns an error indicating that the request was rejected because
+// the server has received too many requests. Client should wait and retry. But if the request
+// is perishable, then the client should not retry the request.
+func NewTooManyRequestsError(message string) *StatusError {
+    return &StatusError{metav1.Status{
+        Status:  metav1.StatusFailure,
+        Code:    StatusTooManyRequests,
+        Reason:  metav1.StatusReasonTooManyRequests,
+        Message: fmt.Sprintf("Too many requests: %s", message),
+    }}
+}
+
// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form.
func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError {
    reason := metav1.StatusReasonUnknown
@@ -394,12 +423,28 @@ func IsInvalid(err error) bool {
    return reasonForError(err) == metav1.StatusReasonInvalid
}

+// IsGone is true if the error indicates the requested resource is no longer available.
+func IsGone(err error) bool {
+    return reasonForError(err) == metav1.StatusReasonGone
+}
+
+// IsResourceExpired is true if the error indicates the resource has expired and the current action is
+// no longer possible.
+func IsResourceExpired(err error) bool {
+    return reasonForError(err) == metav1.StatusReasonExpired
+}
+
// IsMethodNotSupported determines if the err is an error which indicates the provided action could not
// be performed because it is not supported by the server.
func IsMethodNotSupported(err error) bool {
    return reasonForError(err) == metav1.StatusReasonMethodNotAllowed
}

+// IsServiceUnavailable is true if the error indicates the underlying service is no longer available.
+func IsServiceUnavailable(err error) bool {
+    return reasonForError(err) == metav1.StatusReasonServiceUnavailable
+}
+
// IsBadRequest determines if err is an error which indicates that the request is invalid.
func IsBadRequest(err error) bool {
    return reasonForError(err) == metav1.StatusReasonBadRequest
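
A hedged sketch of how a caller might use the new predicates when a chunked list fails; relist is a hypothetical helper, and errors is k8s.io/apimachinery/pkg/api/errors:

    if err := relist(); err != nil {
        if errors.IsResourceExpired(err) || errors.IsGone(err) {
            // The continue token (or resourceVersion) is too old:
            // drop the token and restart the list from the beginning.
        } else if errors.IsServiceUnavailable(err) {
            // Transient server condition: back off and retry later.
        }
    }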


@@ -35,6 +35,8 @@ func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out
    out.ResourceVersion = in.ResourceVersion
    out.TimeoutSeconds = in.TimeoutSeconds
    out.Watch = in.Watch
+    out.Limit = in.Limit
+    out.Continue = in.Continue
    return nil
}
@@ -49,6 +51,8 @@ func Convert_v1_ListOptions_To_internalversion_ListOptions(in *metav1.ListOption
    out.ResourceVersion = in.ResourceVersion
    out.TimeoutSeconds = in.TimeoutSeconds
    out.Watch = in.Watch
+    out.Limit = in.Limit
+    out.Continue = in.Continue
    return nil
}


@@ -25,8 +25,7 @@ import (

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

-// ListOptions is the query options to a standard REST list call, and has future support for
-// watch calls.
+// ListOptions is the query options to a standard REST list call.
type ListOptions struct {
    metav1.TypeMeta

@@ -48,6 +47,15 @@ type ListOptions struct {
    ResourceVersion string
    // Timeout for the list/watch call.
    TimeoutSeconds *int64
+    // Limit specifies the maximum number of results to return from the server. The server may
+    // not support this field on all resource types, but if it does and more results remain it
+    // will set the continue field on the returned list object.
+    Limit int64
+    // Continue is a token returned by the server that lets a client retrieve chunks of results
+    // from the server by specifying limit. The server may reject requests for continuation tokens
+    // it does not recognize and will return a 410 error if the token can no longer be used because
+    // it has expired.
+    Continue string
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
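
A self-contained sketch of consuming Limit/Continue in chunks. It is not part of the diff: listChunk, listResult, and Item are placeholders standing in for a real client call, its response type, and a resource type:

    package main

    import "fmt"

    type Item struct{ Name string }

    // listResult mirrors the relevant parts of a list response:
    // the items plus ListMeta.Continue.
    type listResult struct {
        Items    []Item
        Continue string
    }

    // listChunk stands in for one LIST request with ?limit=&continue=.
    func listChunk(limit int64, cont string) (listResult, error) {
        // ...perform the request; elided in this sketch...
        return listResult{}, nil
    }

    func main() {
        cont := ""
        for {
            res, err := listChunk(500, cont)
            if err != nil {
                // A 410 (Gone/Expired) here means the token expired;
                // the client must restart without a Continue token.
                panic(err)
            }
            for _, it := range res.Items {
                fmt.Println(it.Name)
            }
            if res.Continue == "" {
                break // the server has returned everything
            }
            cont = res.Continue
        }
    }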


@@ -439,6 +439,14 @@ func (m *APIResource) MarshalTo(dAtA []byte) (int, error) {
            i += copy(dAtA[i:], s)
        }
    }
+    dAtA[i] = 0x42
+    i++
+    i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+    i += copy(dAtA[i:], m.Group)
+    dAtA[i] = 0x4a
+    i++
+    i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+    i += copy(dAtA[i:], m.Version)
    return i, nil
}
@@ -1035,6 +1043,10 @@ func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) {
    i++
    i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion)))
    i += copy(dAtA[i:], m.ResourceVersion)
+    dAtA[i] = 0x1a
+    i++
+    i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue)))
+    i += copy(dAtA[i:], m.Continue)
    return i, nil
}
@@ -1086,6 +1098,13 @@ func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) {
        dAtA[i] = 0
    }
    i++
+    dAtA[i] = 0x38
+    i++
+    i = encodeVarintGenerated(dAtA, i, uint64(m.Limit))
+    dAtA[i] = 0x42
+    i++
+    i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue)))
+    i += copy(dAtA[i:], m.Continue)
    return i, nil
}
@@ -1711,6 +1730,10 @@ func (m *APIResource) Size() (n int) {
            n += 1 + l + sovGenerated(uint64(l))
        }
    }
+    l = len(m.Group)
+    n += 1 + l + sovGenerated(uint64(l))
+    l = len(m.Version)
+    n += 1 + l + sovGenerated(uint64(l))
    return n
}
@@ -1935,6 +1958,8 @@ func (m *ListMeta) Size() (n int) {
    n += 1 + l + sovGenerated(uint64(l))
    l = len(m.ResourceVersion)
    n += 1 + l + sovGenerated(uint64(l))
+    l = len(m.Continue)
+    n += 1 + l + sovGenerated(uint64(l))
    return n
}
@@ -1952,6 +1977,9 @@ func (m *ListOptions) Size() (n int) {
        n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
    }
    n += 2
+    n += 1 + sovGenerated(uint64(m.Limit))
+    l = len(m.Continue)
+    n += 1 + l + sovGenerated(uint64(l))
    return n
}
@@ -2209,6 +2237,8 @@ func (this *APIResource) String() string {
        `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`,
        `SingularName:` + fmt.Sprintf("%v", this.SingularName) + `,`,
        `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`,
+        `Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+        `Version:` + fmt.Sprintf("%v", this.Version) + `,`,
        `}`,
    }, "")
    return s
@@ -2352,6 +2382,7 @@ func (this *ListMeta) String() string {
    s := strings.Join([]string{`&ListMeta{`,
        `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`,
        `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
+        `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`,
        `}`,
    }, "")
    return s
@@ -2367,6 +2398,8 @@ func (this *ListOptions) String() string {
        `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`,
        `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
        `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`,
+        `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
+        `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`,
        `}`,
    }, "")
    return s
@@ -3024,6 +3057,64 @@ func (m *APIResource) Unmarshal(dAtA []byte) error {
            }
            m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex]))
            iNdEx = postIndex
+        case 8:
+            if wireType != 2 {
+                return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType)
+            }
+            var stringLen uint64
+            for shift := uint(0); ; shift += 7 {
+                if shift >= 64 {
+                    return ErrIntOverflowGenerated
+                }
+                if iNdEx >= l {
+                    return io.ErrUnexpectedEOF
+                }
+                b := dAtA[iNdEx]
+                iNdEx++
+                stringLen |= (uint64(b) & 0x7F) << shift
+                if b < 0x80 {
+                    break
+                }
+            }
+            intStringLen := int(stringLen)
+            if intStringLen < 0 {
+                return ErrInvalidLengthGenerated
+            }
+            postIndex := iNdEx + intStringLen
+            if postIndex > l {
+                return io.ErrUnexpectedEOF
+            }
+            m.Group = string(dAtA[iNdEx:postIndex])
+            iNdEx = postIndex
+        case 9:
+            if wireType != 2 {
+                return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+            }
+            var stringLen uint64
+            for shift := uint(0); ; shift += 7 {
+                if shift >= 64 {
+                    return ErrIntOverflowGenerated
+                }
+                if iNdEx >= l {
+                    return io.ErrUnexpectedEOF
+                }
+                b := dAtA[iNdEx]
+                iNdEx++
+                stringLen |= (uint64(b) & 0x7F) << shift
+                if b < 0x80 {
+                    break
+                }
+            }
+            intStringLen := int(stringLen)
+            if intStringLen < 0 {
+                return ErrInvalidLengthGenerated
+            }
+            postIndex := iNdEx + intStringLen
+            if postIndex > l {
+                return io.ErrUnexpectedEOF
+            }
+            m.Version = string(dAtA[iNdEx:postIndex])
+            iNdEx = postIndex
        default:
            iNdEx = preIndex
            skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -5108,6 +5199,35 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error {
            }
            m.ResourceVersion = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
+        case 3:
+            if wireType != 2 {
+                return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType)
+            }
+            var stringLen uint64
+            for shift := uint(0); ; shift += 7 {
+                if shift >= 64 {
+                    return ErrIntOverflowGenerated
+                }
+                if iNdEx >= l {
+                    return io.ErrUnexpectedEOF
+                }
+                b := dAtA[iNdEx]
+                iNdEx++
+                stringLen |= (uint64(b) & 0x7F) << shift
+                if b < 0x80 {
+                    break
+                }
+            }
+            intStringLen := int(stringLen)
+            if intStringLen < 0 {
+                return ErrInvalidLengthGenerated
+            }
+            postIndex := iNdEx + intStringLen
+            if postIndex > l {
+                return io.ErrUnexpectedEOF
+            }
+            m.Continue = string(dAtA[iNdEx:postIndex])
+            iNdEx = postIndex
        default:
            iNdEx = preIndex
            skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -5305,6 +5425,54 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error {
                }
            }
            m.IncludeUninitialized = bool(v != 0)
+        case 7:
+            if wireType != 0 {
+                return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+            }
+            m.Limit = 0
+            for shift := uint(0); ; shift += 7 {
+                if shift >= 64 {
+                    return ErrIntOverflowGenerated
+                }
+                if iNdEx >= l {
+                    return io.ErrUnexpectedEOF
+                }
+                b := dAtA[iNdEx]
+                iNdEx++
+                m.Limit |= (int64(b) & 0x7F) << shift
+                if b < 0x80 {
+                    break
+                }
+            }
+        case 8:
+            if wireType != 2 {
+                return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType)
+            }
+            var stringLen uint64
+            for shift := uint(0); ; shift += 7 {
+                if shift >= 64 {
+                    return ErrIntOverflowGenerated
+                }
+                if iNdEx >= l {
+                    return io.ErrUnexpectedEOF
+                }
+                b := dAtA[iNdEx]
+                iNdEx++
+                stringLen |= (uint64(b) & 0x7F) << shift
+                if b < 0x80 {
+                    break
+                }
+            }
+            intStringLen := int(stringLen)
+            if intStringLen < 0 {
+                return ErrInvalidLengthGenerated
+            }
+            postIndex := iNdEx + intStringLen
+            if postIndex > l {
+                return io.ErrUnexpectedEOF
+            }
+            m.Continue = string(dAtA[iNdEx:postIndex])
+            iNdEx = postIndex
        default:
            iNdEx = preIndex
            skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -7547,154 +7715,157 @@ func init() {
}

var fileDescriptorGenerated = []byte{
-    // 2375 bytes of a gzipped FileDescriptorProto
+    // 2428 bytes of a gzipped FileDescriptorProto
    // (gzipped descriptor bytes omitted: binary data, not human-readable)
0x38, 0x2d, 0x2b, 0x0a, 0xe7, 0x23, 0x91, 0xa8, 0x6a, 0xf2, 0x8a, 0x62, 0xf5, 0x03, 0x41, 0xc4, 0xe1, 0x6b, 0x70, 0xc7, 0x7f, 0x13, 0xca, 0x4e, 0xfc, 0xf4, 0x94, 0xcf, 0x57, 0x14, 0x44, 0x32,
0x01, 0xef, 0xa2, 0x15, 0xcf, 0x4d, 0x39, 0x53, 0x5c, 0x87, 0x45, 0xb1, 0x33, 0x5e, 0x9f, 0x87, 0x1f, 0x71, 0x52, 0x56, 0x28, 0x1f, 0xda, 0xc4, 0x69, 0x87, 0xca, 0x99, 0xa4, 0xf2, 0xcd, 0x38,
0xe3, 0x69, 0x5e, 0x0e, 0x52, 0x68, 0x38, 0xa8, 0x2f, 0xbe, 0x93, 0xe2, 0xe0, 0x11, 0xc9, 0x89, 0x13, 0x27, 0x65, 0x45, 0x9d, 0x7d, 0x24, 0xe2, 0x5a, 0x35, 0x6a, 0xe1, 0xd6, 0x7e, 0x4f, 0x10,
0xf3, 0x48, 0xe1, 0x73, 0xcf, 0x23, 0xef, 0x42, 0x69, 0xdf, 0xb6, 0xa8, 0x27, 0x0c, 0x8b, 0xb6, 0xb1, 0xcf, 0xbb, 0x68, 0x7f, 0x66, 0xa7, 0xdc, 0x9f, 0xeb, 0xb0, 0x20, 0x0e, 0xd2, 0xeb, 0xf3,
0xc0, 0x52, 0x23, 0x73, 0x54, 0x3e, 0x43, 0x87, 0x42, 0xbe, 0x88, 0x96, 0x6b, 0xba, 0x5e, 0x30, 0xa0, 0x9b, 0xcd, 0xc9, 0xbe, 0x0b, 0x0d, 0x07, 0xb5, 0x85, 0xf7, 0x12, 0x1c, 0x3c, 0x22, 0x39,
0x18, 0xe7, 0xe3, 0x68, 0xdd, 0x11, 0x44, 0x1c, 0xf0, 0xae, 0xaf, 0x8a, 0x6e, 0xf0, 0xab, 0xa7, 0xb1, 0x7d, 0xc9, 0x7f, 0xda, 0xf6, 0x45, 0xac, 0xda, 0xb1, 0xbb, 0x36, 0xaf, 0xcc, 0x49, 0x27,
0xf5, 0x99, 0x27, 0x4f, 0xeb, 0x33, 0x1f, 0x3c, 0x55, 0x9d, 0xe1, 0x53, 0x00, 0xb8, 0x7b, 0xf8, 0xc2, 0x55, 0xdf, 0x16, 0x44, 0xec, 0xf3, 0x12, 0x47, 0x5a, 0xb8, 0xf4, 0x48, 0xdf, 0x87, 0xe2,
0x53, 0x62, 0x05, 0x29, 0x77, 0xf9, 0x1d, 0x56, 0x74, 0x78, 0xf5, 0x74, 0x22, 0xef, 0x7b, 0xb9, 0x9e, 0x6d, 0x51, 0x4f, 0xac, 0x45, 0x5c, 0x4c, 0x2c, 0xd1, 0xb4, 0x87, 0x05, 0x3c, 0x58, 0x63,
0x91, 0x0e, 0x9f, 0xe0, 0xe1, 0x94, 0x24, 0x6a, 0x42, 0x29, 0xba, 0xd7, 0xaa, 0xee, 0xb5, 0xa2, 0xc0, 0x17, 0xae, 0xb8, 0xa6, 0xeb, 0xf9, 0xad, 0x79, 0x2e, 0x72, 0xe5, 0x8e, 0x20, 0x62, 0x9f,
0xd4, 0x4a, 0xd1, 0xe5, 0x17, 0xc7, 0x32, 0xa9, 0xfc, 0x9f, 0xbb, 0x34, 0xff, 0x0d, 0x98, 0xed, 0x77, 0x7d, 0x45, 0xdc, 0x47, 0xbf, 0x7c, 0x52, 0x9b, 0x79, 0xfc, 0xa4, 0x36, 0xf3, 0xe1, 0x13,
0xdb, 0x1d, 0xb9, 0x7f, 0x25, 0xe3, 0xeb, 0x61, 0xf9, 0xb9, 0xb7, 0xd7, 0x3a, 0x1f, 0xd4, 0x5f, 0x75, 0x37, 0xfd, 0x0b, 0x00, 0xee, 0x1e, 0xfc, 0x98, 0x58, 0x7e, 0xcc, 0x5f, 0xfe, 0x2a, 0x17,
0x99, 0xf4, 0x22, 0xc4, 0xcf, 0x7c, 0xc2, 0x1a, 0xf7, 0xf6, 0x5a, 0x58, 0x28, 0x5f, 0x94, 0x51, 0x3d, 0x86, 0x1a, 0x06, 0xc9, 0x17, 0x6c, 0x66, 0xa4, 0xc7, 0x88, 0xf1, 0x70, 0x42, 0x12, 0x35,
0x85, 0x29, 0x33, 0x6a, 0x0b, 0x40, 0xad, 0x5a, 0x68, 0xcf, 0x07, 0xd9, 0x14, 0xde, 0xf1, 0x6f, 0xa0, 0x18, 0xbe, 0xd4, 0x55, 0x7c, 0x2f, 0x2b, 0xb5, 0x62, 0xf8, 0x9c, 0xc7, 0x91, 0x4c, 0x22,
0x45, 0x1c, 0x9c, 0x90, 0x42, 0x0c, 0x56, 0x2c, 0x4a, 0xe4, 0x6f, 0xb1, 0xf5, 0x8c, 0x9b, 0x3d, 0x01, 0x67, 0x2f, 0x4d, 0x40, 0x03, 0xb2, 0x7d, 0xbb, 0x2d, 0x43, 0xa2, 0x68, 0x7c, 0x35, 0x28,
0xbf, 0x5a, 0x94, 0xf5, 0xea, 0xab, 0xd9, 0xea, 0x95, 0x50, 0x33, 0x5e, 0x56, 0x66, 0x56, 0x76, 0x80, 0xf7, 0x76, 0x9b, 0xe7, 0x83, 0xda, 0x6b, 0x93, 0x66, 0x5c, 0xfc, 0xac, 0x47, 0x58, 0xfd,
0x46, 0xc1, 0xf0, 0x38, 0x3e, 0xf2, 0x60, 0xa5, 0xa3, 0xee, 0x1c, 0xb1, 0xd1, 0xd2, 0xd4, 0x46, 0xde, 0x6e, 0x13, 0x0b, 0xe5, 0x8b, 0x82, 0x34, 0x3f, 0x65, 0x90, 0x6e, 0x02, 0xa8, 0x55, 0x0b,
0xaf, 0x08, 0x83, 0xad, 0x51, 0x20, 0x3c, 0x8e, 0x8d, 0x7e, 0x0c, 0x6b, 0x21, 0x71, 0xfc, 0xe2, 0x6d, 0x3f, 0x36, 0xc2, 0xa9, 0xc5, 0xad, 0x90, 0x83, 0x63, 0x52, 0x88, 0xc1, 0xb2, 0x45, 0x89,
0x57, 0x05, 0x19, 0xa9, 0x9a, 0xb8, 0x8a, 0xb6, 0x26, 0x4a, 0xe1, 0xcf, 0x40, 0x40, 0x1d, 0x28, 0xfc, 0x2d, 0x8e, 0x9e, 0x71, 0xb3, 0xeb, 0xbf, 0xdb, 0x4b, 0x9b, 0x5f, 0x4e, 0x57, 0x31, 0x85,
0x38, 0x41, 0x6f, 0x2f, 0xcb, 0x7a, 0xfc, 0x9d, 0x6c, 0xab, 0x88, 0xb3, 0xbf, 0x91, 0xec, 0xe9, 0x9a, 0xf1, 0xaa, 0x32, 0xb3, 0xbc, 0x3d, 0x0a, 0x86, 0xc7, 0xf1, 0x91, 0x07, 0xcb, 0x6d, 0xf5,
0xd1, 0xe5, 0x47, 0xb5, 0x73, 0x85, 0x8d, 0x1e, 0x43, 0xd9, 0x74, 0x5d, 0x8f, 0x9b, 0xc1, 0x55, 0xea, 0x89, 0x8c, 0x16, 0xa7, 0x36, 0x7a, 0x45, 0x18, 0x6c, 0x8e, 0x02, 0xe1, 0x71, 0x6c, 0xf4,
0x74, 0x41, 0x9a, 0xda, 0x9e, 0xda, 0xd4, 0x76, 0x8c, 0x31, 0x32, 0x43, 0x24, 0x38, 0x38, 0x69, 0x43, 0x58, 0x0d, 0x88, 0xe3, 0x4f, 0xcf, 0x0a, 0xc8, 0x9d, 0xaa, 0x8a, 0xc7, 0x70, 0x73, 0xa2,
0x0a, 0x3d, 0x82, 0x25, 0xef, 0x91, 0x4b, 0x28, 0x26, 0x47, 0x84, 0x12, 0xd7, 0x22, 0xac, 0x5a, 0x14, 0x7e, 0x01, 0x02, 0x6a, 0x43, 0xde, 0xf1, 0xbb, 0x8b, 0x92, 0xbc, 0x11, 0xbe, 0x95, 0x6e,
0x91, 0xd6, 0xbf, 0x91, 0xd1, 0x7a, 0x4a, 0x39, 0x4e, 0xe9, 0x34, 0x9d, 0xe1, 0x51, 0x2b, 0xa8, 0x15, 0x51, 0xf4, 0xd7, 0xe3, 0x5d, 0x45, 0xf8, 0xfc, 0x52, 0x0d, 0x85, 0xc2, 0x46, 0xa7, 0x50,
0x01, 0x70, 0x64, 0xbb, 0x6a, 0x12, 0xac, 0x2e, 0xc6, 0x0f, 0x35, 0x37, 0x23, 0x2a, 0x4e, 0x48, 0x32, 0x5d, 0xd7, 0xe3, 0xa6, 0xff, 0x18, 0x9e, 0x97, 0xa6, 0xb6, 0xa6, 0x36, 0xb5, 0x15, 0x61,
0xa0, 0x6f, 0x42, 0xd9, 0x72, 0xfa, 0x8c, 0x93, 0xe0, 0x45, 0x68, 0x49, 0x9e, 0xa0, 0x68, 0x7d, 0x8c, 0x74, 0x31, 0x31, 0x0e, 0x8e, 0x9b, 0x42, 0x8f, 0x60, 0xd1, 0x7b, 0xe4, 0x12, 0x8a, 0xc9,
0x3b, 0x31, 0x0b, 0x27, 0xe5, 0xd0, 0x31, 0x2c, 0xd8, 0x89, 0x91, 0xb3, 0xba, 0x2c, 0x73, 0x71, 0x21, 0xa1, 0xc4, 0xb5, 0x08, 0xab, 0x94, 0xa5, 0xf5, 0xaf, 0xa5, 0xb4, 0x9e, 0x50, 0x8e, 0x42,
0x6b, 0xea, 0x39, 0x93, 0x19, 0xcb, 0xa2, 0x12, 0x25, 0x29, 0x38, 0x85, 0xbc, 0xf6, 0x2d, 0x28, 0x3a, 0x49, 0x67, 0x78, 0xd4, 0x0a, 0xaa, 0x03, 0x1c, 0xda, 0xae, 0xea, 0x45, 0x2b, 0x0b, 0xd1,
0x7f, 0xce, 0x09, 0x48, 0x4c, 0x50, 0xa3, 0x5b, 0x37, 0xd5, 0x04, 0xf5, 0x97, 0x1c, 0x2c, 0xa6, 0xe8, 0xe9, 0x66, 0x48, 0xc5, 0x31, 0x09, 0xf4, 0x75, 0x28, 0x59, 0x4e, 0x9f, 0x71, 0xe2, 0xcf,
0x03, 0x1e, 0xdd, 0x34, 0xb4, 0x89, 0x2f, 0x7c, 0x61, 0x55, 0x9e, 0x9d, 0x58, 0x95, 0x55, 0xf1, 0xb8, 0x16, 0x65, 0x06, 0x85, 0xeb, 0xdb, 0x8e, 0x58, 0x38, 0x2e, 0x87, 0x8e, 0x60, 0xde, 0x8e,
0x9b, 0x7b, 0x91, 0xe2, 0xb7, 0x05, 0x60, 0xfa, 0x76, 0x58, 0xf7, 0x82, 0x3a, 0x1a, 0x55, 0xae, 0x35, 0xbd, 0x95, 0x25, 0x19, 0x8b, 0x9b, 0x53, 0x77, 0xba, 0xcc, 0x58, 0x12, 0x95, 0x28, 0x4e,
0xf8, 0x0d, 0x0b, 0x27, 0xa4, 0xe4, 0x1b, 0x9e, 0xe7, 0x72, 0xea, 0x39, 0x0e, 0xa1, 0xaa, 0xf3, 0xc1, 0x09, 0xe4, 0xd5, 0x6f, 0x40, 0xe9, 0x53, 0xf6, 0x60, 0xa2, 0x87, 0x1b, 0x3d, 0xba, 0xa9,
0x05, 0x6f, 0x78, 0x11, 0x15, 0x27, 0x24, 0xd0, 0x4d, 0x40, 0x87, 0x8e, 0x67, 0x9d, 0xc8, 0x10, 0x7a, 0xb8, 0xbf, 0x66, 0x60, 0x21, 0xb9, 0xe1, 0xe1, 0x5b, 0x47, 0x9b, 0x38, 0xb3, 0x0c, 0xaa,
0x84, 0xe7, 0x5c, 0x56, 0xc9, 0x62, 0xf0, 0x24, 0x64, 0x8c, 0x71, 0xf1, 0x05, 0x1a, 0xfa, 0x5d, 0x72, 0x76, 0x62, 0x55, 0x56, 0xc5, 0x6f, 0xf6, 0x65, 0x8a, 0xdf, 0x26, 0x80, 0xd9, 0xb3, 0x83,
0x48, 0x3f, 0xe2, 0xa0, 0x1b, 0x41, 0x00, 0xb4, 0xe8, 0x95, 0x65, 0xba, 0xc5, 0xeb, 0x57, 0xa1, 0xba, 0xe7, 0xd7, 0xd1, 0xb0, 0x72, 0x45, 0x53, 0x34, 0x1c, 0x93, 0x92, 0x53, 0x49, 0xcf, 0xe5,
0x84, 0x3d, 0x8f, 0xb7, 0x4d, 0x7e, 0xcc, 0x50, 0x1d, 0xf2, 0xbe, 0xf8, 0xa1, 0x5e, 0xe8, 0xe4, 0xd4, 0x73, 0x1c, 0x42, 0xd5, 0x65, 0xea, 0x4f, 0x25, 0x43, 0x2a, 0x8e, 0x49, 0xa0, 0x9b, 0x80,
0x23, 0xa9, 0xe4, 0xe0, 0x80, 0xae, 0xff, 0x5a, 0x83, 0x97, 0x27, 0x3e, 0x98, 0x89, 0x40, 0x5a, 0x0e, 0x1c, 0xcf, 0x3a, 0x96, 0x5b, 0x10, 0xe4, 0xb9, 0xac, 0x92, 0x05, 0x7f, 0x28, 0x65, 0x8c,
0xd1, 0x97, 0x72, 0x29, 0x0a, 0x64, 0x2c, 0x87, 0x13, 0x52, 0x62, 0x5a, 0x4a, 0xbd, 0xb2, 0x8d, 0x71, 0xf1, 0x05, 0x1a, 0xfa, 0x5d, 0x48, 0x8e, 0x91, 0xd0, 0x0d, 0x7f, 0x03, 0xb4, 0x70, 0xce,
0x4e, 0x4b, 0x29, 0x6b, 0x38, 0x2d, 0xab, 0xff, 0x33, 0x07, 0x85, 0xe0, 0x2e, 0xf4, 0x1f, 0x9e, 0x33, 0xdd, 0xe2, 0xf5, 0xab, 0x50, 0xc4, 0x9e, 0xc7, 0x5b, 0x26, 0x3f, 0x62, 0xa8, 0x06, 0xb9,
0x78, 0x5f, 0x83, 0x02, 0x93, 0x76, 0x94, 0x7b, 0x51, 0x91, 0x0c, 0xac, 0x63, 0xc5, 0x15, 0xb3, 0x9e, 0xf8, 0xa1, 0x66, 0x84, 0x72, 0xec, 0x2b, 0x39, 0xd8, 0xa7, 0xeb, 0xbf, 0xd2, 0xe0, 0xd5,
0x4b, 0x8f, 0x30, 0x66, 0x76, 0xc3, 0x9c, 0x8d, 0x66, 0x97, 0xfd, 0x80, 0x8c, 0x43, 0x3e, 0x7a, 0x89, 0x23, 0x3b, 0xb1, 0x91, 0x56, 0xf8, 0xa5, 0x5c, 0x0a, 0x37, 0x32, 0x92, 0xc3, 0x31, 0x29,
0x4b, 0x5c, 0xfd, 0x4c, 0x16, 0xcd, 0x6e, 0xb5, 0x10, 0x12, 0x4b, 0xea, 0xf9, 0xa0, 0xbe, 0xa0, 0xd1, 0x80, 0x25, 0xe6, 0x7c, 0xa3, 0x0d, 0x58, 0xc2, 0x1a, 0x4e, 0xca, 0xea, 0xff, 0xce, 0x40,
0xc0, 0xe5, 0x37, 0x56, 0xd2, 0xe8, 0x01, 0xcc, 0x77, 0x08, 0x37, 0x6d, 0x27, 0x18, 0xd9, 0x32, 0xde, 0x7f, 0x8d, 0xfd, 0x97, 0x7b, 0xee, 0x37, 0x20, 0xcf, 0xa4, 0x1d, 0xe5, 0x5e, 0x58, 0x24,
0x3f, 0x07, 0x06, 0x60, 0xad, 0x40, 0xd5, 0x28, 0x0b, 0x9f, 0xd4, 0x07, 0x0e, 0x01, 0xc5, 0x79, 0x7d, 0xeb, 0x58, 0x71, 0x45, 0xef, 0xd2, 0x25, 0x8c, 0x99, 0x9d, 0x20, 0x66, 0xc3, 0xde, 0x65,
0xb3, 0xbc, 0x4e, 0xf0, 0x96, 0x9d, 0x8f, 0xcf, 0xdb, 0x8e, 0xd7, 0x21, 0x58, 0x72, 0xf4, 0x27, 0xcf, 0x27, 0xe3, 0x80, 0x8f, 0xde, 0x11, 0x8f, 0x4f, 0x93, 0x85, 0xed, 0x60, 0x35, 0x80, 0xc4,
0x1a, 0x94, 0x03, 0xa4, 0x1d, 0xb3, 0xcf, 0x08, 0xda, 0x8c, 0x56, 0x11, 0x6c, 0x77, 0xd8, 0x8a, 0x92, 0x7a, 0x3e, 0xa8, 0xcd, 0x2b, 0x70, 0xf9, 0x8d, 0x95, 0x34, 0x7a, 0x00, 0x73, 0x6d, 0xc2,
0xe7, 0xde, 0x39, 0xf3, 0xc9, 0xf9, 0xa0, 0x5e, 0x92, 0x62, 0xe2, 0x23, 0x5a, 0x40, 0x22, 0x46, 0x4d, 0xdb, 0xf1, 0xbb, 0xc0, 0xd4, 0x03, 0x49, 0x1f, 0xac, 0xe9, 0xab, 0x1a, 0x25, 0xe1, 0x93,
0xb9, 0x4b, 0x62, 0xf4, 0x2a, 0xe4, 0xe5, 0x78, 0xac, 0x82, 0x19, 0xcd, 0x77, 0x72, 0x84, 0xc6, 0xfa, 0xc0, 0x01, 0xa0, 0xc8, 0x37, 0xcb, 0x6b, 0xfb, 0xd3, 0xf9, 0x5c, 0x94, 0x6f, 0xdb, 0x5e,
0x01, 0x4f, 0xff, 0x38, 0x07, 0x95, 0xd4, 0xe2, 0x32, 0x0c, 0x73, 0xd1, 0xfb, 0x44, 0x2e, 0xc3, 0x9b, 0x60, 0xc9, 0xd1, 0x1f, 0x6b, 0x50, 0xf2, 0x91, 0xb6, 0xcd, 0x3e, 0x23, 0x68, 0x23, 0x5c,
0x9b, 0xd7, 0xe4, 0xff, 0x40, 0xfc, 0x10, 0x0a, 0x96, 0x58, 0x5f, 0xf8, 0x2f, 0xa0, 0xcd, 0x69, 0x85, 0x7f, 0xdc, 0xc1, 0x55, 0x3c, 0xfb, 0xde, 0x59, 0x8f, 0x9c, 0x0f, 0x6a, 0x45, 0x29, 0x26,
0xb6, 0x42, 0x46, 0x26, 0xce, 0x24, 0xf9, 0xc9, 0xb0, 0x02, 0x44, 0xb7, 0x60, 0x85, 0x12, 0x4e, 0x3e, 0xc2, 0x05, 0xc4, 0xf6, 0x28, 0x73, 0xc9, 0x1e, 0xbd, 0x0e, 0x39, 0xd9, 0x71, 0xab, 0xcd,
0xcf, 0xb6, 0x8f, 0x38, 0xa1, 0xc9, 0x19, 0x3d, 0x1f, 0x8f, 0x3b, 0x78, 0x54, 0x00, 0x8f, 0xeb, 0x0c, 0xfb, 0x3b, 0xd9, 0x95, 0x63, 0x9f, 0xa7, 0x7f, 0x9c, 0x81, 0x72, 0x62, 0x71, 0x29, 0x9a,
0x84, 0x15, 0xb2, 0xf0, 0x02, 0x15, 0x52, 0x77, 0x60, 0xee, 0xbf, 0x38, 0x9a, 0xff, 0x08, 0x4a, 0xb9, 0x70, 0x42, 0x92, 0x49, 0x31, 0x75, 0x9b, 0xfc, 0x3f, 0x95, 0xef, 0x43, 0xde, 0x12, 0xeb,
0xf1, 0xf0, 0xf4, 0x05, 0x9b, 0xd4, 0x7f, 0x02, 0x45, 0x91, 0x8d, 0xe1, 0xd0, 0x7f, 0x49, 0x03, 0x0b, 0xfe, 0xa9, 0xb5, 0x31, 0xcd, 0x51, 0xc8, 0x9d, 0x89, 0x22, 0x49, 0x7e, 0x32, 0xac, 0x00,
0x4a, 0xb7, 0x86, 0x5c, 0x96, 0xd6, 0xa0, 0x6f, 0x41, 0xf0, 0x8f, 0x25, 0x51, 0x4d, 0x83, 0x6b, 0xd1, 0x2d, 0x58, 0xa6, 0x84, 0xd3, 0xb3, 0xad, 0x43, 0x4e, 0x68, 0xbc, 0xed, 0xcf, 0x45, 0xed,
0x72, 0xa2, 0x9a, 0x26, 0xef, 0xbc, 0x89, 0x77, 0xaa, 0x5f, 0x6a, 0x00, 0xf2, 0x8a, 0xb7, 0x7b, 0x0e, 0x1e, 0x15, 0xc0, 0xe3, 0x3a, 0x41, 0x85, 0xcc, 0xbf, 0x44, 0x85, 0xd4, 0x1d, 0x98, 0xfd,
0x4a, 0x5c, 0x2e, 0x1c, 0x13, 0x3b, 0x30, 0xea, 0x98, 0x3c, 0x46, 0x92, 0x83, 0xee, 0x41, 0xc1, 0x1f, 0xb6, 0xe6, 0x3f, 0x80, 0x62, 0xd4, 0x3c, 0x7d, 0xc6, 0x26, 0xf5, 0x1f, 0x41, 0x41, 0x44,
0x93, 0x43, 0x95, 0x7a, 0x38, 0x9a, 0xf2, 0x0e, 0x1e, 0x65, 0x5d, 0x30, 0x99, 0x61, 0x05, 0x66, 0x63, 0xd0, 0xf4, 0x5f, 0x72, 0x01, 0x25, 0xaf, 0x86, 0x4c, 0x9a, 0xab, 0x41, 0xdf, 0x04, 0xff,
0x6c, 0x3c, 0x7b, 0x5e, 0x9b, 0xf9, 0xf0, 0x79, 0x6d, 0xe6, 0xa3, 0xe7, 0xb5, 0x99, 0xf7, 0x87, 0x5f, 0x65, 0xa2, 0x9a, 0xfa, 0x0f, 0xf5, 0x58, 0x35, 0x8d, 0xbf, 0xba, 0x63, 0x93, 0xb2, 0x5f,
0x35, 0xed, 0xd9, 0xb0, 0xa6, 0x7d, 0x38, 0xac, 0x69, 0x1f, 0x0d, 0x6b, 0xda, 0xc7, 0xc3, 0x9a, 0x68, 0x00, 0xf2, 0xd5, 0xb8, 0x73, 0x42, 0x5c, 0x2e, 0x1c, 0x13, 0x27, 0x30, 0xea, 0x98, 0x4c,
0xf6, 0xe4, 0x93, 0xda, 0xcc, 0x83, 0xdc, 0xe9, 0xe6, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x23, 0xc9, 0x41, 0xf7, 0x20, 0xef, 0xc9, 0xa6, 0x4a, 0x8d, 0xae, 0xa6, 0x9c, 0x02, 0x84, 0x51,
0x97, 0x5e, 0x35, 0x79, 0x1f, 0x00, 0x00, 0xe7, 0x77, 0x66, 0x58, 0x81, 0x19, 0xeb, 0x4f, 0x9f, 0x57, 0x67, 0x9e, 0x3d, 0xaf, 0xce, 0x7c,
0xf4, 0xbc, 0x3a, 0xf3, 0xc1, 0xb0, 0xaa, 0x3d, 0x1d, 0x56, 0xb5, 0x67, 0xc3, 0xaa, 0xf6, 0xd1,
0xb0, 0xaa, 0x7d, 0x3c, 0xac, 0x6a, 0x8f, 0x3f, 0xa9, 0xce, 0x3c, 0xc8, 0x9c, 0x6c, 0xfc, 0x27,
0x00, 0x00, 0xff, 0xff, 0x66, 0xe7, 0x2a, 0x84, 0x4b, 0x20, 0x00, 0x00,
}


@@ -72,6 +72,14 @@ message APIResource {
// namespaced indicates if a resource is namespaced or not.
optional bool namespaced = 2;
// group is the preferred group of the resource. Empty implies the group of the containing resource list.
// For subresources, this may have a different value, for example: Scale".
optional string group = 8;
// version is the preferred version of the resource. Empty implies the version of the containing resource list
// For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)".
optional string version = 9;
// kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')
optional string kind = 3;
@@ -283,7 +291,7 @@ message LabelSelectorRequirement {
optional string key = 1;
// operator represents a key's relationship to a set of values.
// Valid operators ard In, NotIn, Exists and DoesNotExist.
// Valid operators are In, NotIn, Exists and DoesNotExist.
optional string operator = 2;
// values is an array of string values. If the operator is In or NotIn,
@@ -308,7 +316,7 @@ message List {
// ListMeta describes metadata that synthetic resources must have, including lists and
// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
message ListMeta {
// SelfLink is a URL representing this object.
// selfLink is a URL representing this object.
// Populated by the system.
// Read-only.
// +optional
@@ -322,6 +330,14 @@ message ListMeta {
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
// +optional
optional string resourceVersion = 2;
// continue may be set if the user set a limit on the number of items returned, and indicates that
// the server has more data available. The value is opaque and may be used to issue another request
// to the endpoint that served this list to retrieve the next set of available objects. Continuing a
// list may not be possible if the server configuration has changed or more than a few minutes have
// passed. The resourceVersion field returned when using this continue value will be identical to
// the value in the first response.
optional string continue = 3;
}
// ListOptions is the query options to a standard REST list call.
@@ -357,6 +373,34 @@ message ListOptions {
// Timeout for the list/watch call.
// +optional
optional int64 timeoutSeconds = 5;
// limit is a maximum number of responses to return for a list call. If more items exist, the
// server will set the `continue` field on the list metadata to a value that can be used with the
// same initial query to retrieve the next set of results. Setting a limit may return fewer than
// the requested amount of items (up to zero items) in the event all requested objects are
// filtered out and clients should only use the presence of the continue field to determine whether
// more results are available. Servers may choose not to support the limit argument and will return
// all of the available results. If limit is specified and the continue field is empty, clients may
// assume that no more results are available. This field is not supported if watch is true.
//
// The server guarantees that the objects returned when using continue will be identical to issuing
// a single list call without a limit - that is, no objects created, modified, or deleted after the
// first request is issued will be included in any subsequent continued requests. This is sometimes
// referred to as a consistent snapshot, and ensures that a client that is using limit to receive
// smaller chunks of a very large result can ensure they see all possible objects. If objects are
// updated during a chunked list the version of the object that was present at the time the first list
// result was calculated is returned.
optional int64 limit = 7;
// The continue option should be set when retrieving more results from the server. Since this value
// is server defined, clients may only use the continue value from a previous query result with
// identical query parameters (except for the value of continue) and the server may reject a continue
// value it does not recognize. If the specified continue value is no longer valid whether due to
// expiration (generally five to fifteen minutes) or a configuration change on the server the server
// will respond with a 410 ResourceExpired error indicating the client must restart their list without
// the continue field. This field is not supported when watch is true. Clients may start a watch from
// the last resourceVersion value returned by the server and not miss any modifications.
optional string continue = 8;
}
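The limit/continue contract above is what makes chunked LIST reads work end to end: a client keeps echoing the opaque token back until it comes back empty. A minimal paging sketch, assuming a client-go Clientset of this vintage and the core v1 Pod API (listAllPods and the chunk size are invented for illustration):

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listAllPods pages through pods 500 at a time using the limit/continue
// contract described above; it is illustrative, not part of this commit.
func listAllPods(client kubernetes.Interface, namespace string) ([]v1.Pod, error) {
	var pods []v1.Pod
	opts := metav1.ListOptions{Limit: 500}
	for {
		list, err := client.CoreV1().Pods(namespace).List(opts)
		if err != nil {
			return nil, err // a 410 ResourceExpired means the token lapsed; restart without continue
		}
		pods = append(pods, list.Items...)
		if list.Continue == "" {
			return pods, nil // empty continue: no more results
		}
		opts.Continue = list.Continue // opaque token; same query otherwise
	}
}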
// MicroTime is version of Time with microsecond level precision.


@@ -90,6 +90,8 @@ type ListInterface interface {
SetResourceVersion(version string)
GetSelfLink() string
SetSelfLink(selfLink string)
GetContinue() string
SetContinue(c string)
}
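Because the accessors are part of ListInterface, generic code can read the token off any list object without knowing its concrete type. A minimal sketch, assuming apimachinery's meta.ListAccessor helper (nextPageToken is an invented name):

package main

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
)

// nextPageToken reads the continue token from any object whose list
// metadata satisfies ListInterface; "" means the result set is complete.
func nextPageToken(list runtime.Object) (string, error) {
	m, err := meta.ListAccessor(list)
	if err != nil {
		return "", err
	}
	return m.GetContinue(), nil
}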
// Type exposes the type and APIVersion of versioned or internal API objects.
@@ -105,6 +107,8 @@ func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceV
func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink }
func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink }
func (meta *ListMeta) GetContinue() string { return meta.Continue }
func (meta *ListMeta) SetContinue(c string) { meta.Continue = c }
func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj }


@@ -58,7 +58,7 @@ type TypeMeta struct {
// ListMeta describes metadata that synthetic resources must have, including lists and
// various status objects. A resource may have only one of {ObjectMeta, ListMeta}.
type ListMeta struct {
// SelfLink is a URL representing this object.
// selfLink is a URL representing this object.
// Populated by the system.
// Read-only.
// +optional
@@ -72,6 +72,14 @@ type ListMeta struct {
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
// +optional
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"`
// continue may be set if the user set a limit on the number of items returned, and indicates that
// the server has more data available. The value is opaque and may be used to issue another request
// to the endpoint that served this list to retrieve the next set of available objects. Continuing a
// list may not be possible if the server configuration has changed or more than a few minutes have
// passed. The resourceVersion field returned when using this continue value will be identical to
// the value in the first response.
Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"`
}
// These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here
@@ -335,6 +343,33 @@ type ListOptions struct {
// Timeout for the list/watch call.
// +optional
TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"`
// limit is a maximum number of responses to return for a list call. If more items exist, the
// server will set the `continue` field on the list metadata to a value that can be used with the
// same initial query to retrieve the next set of results. Setting a limit may return fewer than
// the requested amount of items (up to zero items) in the event all requested objects are
// filtered out and clients should only use the presence of the continue field to determine whether
// more results are available. Servers may choose not to support the limit argument and will return
// all of the available results. If limit is specified and the continue field is empty, clients may
// assume that no more results are available. This field is not supported if watch is true.
//
// The server guarantees that the objects returned when using continue will be identical to issuing
// a single list call without a limit - that is, no objects created, modified, or deleted after the
// first request is issued will be included in any subsequent continued requests. This is sometimes
// referred to as a consistent snapshot, and ensures that a client that is using limit to receive
// smaller chunks of a very large result can ensure they see all possible objects. If objects are
// updated during a chunked list the version of the object that was present at the time the first list
// result was calculated is returned.
Limit int64 `json:"limit,omitempty" protobuf:"varint,7,opt,name=limit"`
// The continue option should be set when retrieving more results from the server. Since this value
// is server defined, clients may only use the continue value from a previous query result with
// identical query parameters (except for the value of continue) and the server may reject a continue
// value it does not recognize. If the specified continue value is no longer valid whether due to
// expiration (generally five to fifteen minutes) or a configuration change on the server the server
// will respond with a 410 ResourceExpired error indicating the client must restart their list without
// the continue field. This field is not supported when watch is true. Clients may start a watch from
// the last resourceVersion value returned by the server and not miss any modifications.
Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -778,6 +813,12 @@ type APIResource struct {
SingularName string `json:"singularName" protobuf:"bytes,6,opt,name=singularName"`
// namespaced indicates if a resource is namespaced or not.
Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"`
// group is the preferred group of the resource. Empty implies the group of the containing resource list.
// For subresources, this may have a different value, for example: Scale".
Group string `json:"group,omitempty" protobuf:"bytes,8,opt,name=group"`
// version is the preferred version of the resource. Empty implies the version of the containing resource list
// For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)".
Version string `json:"version,omitempty" protobuf:"bytes,9,opt,name=version"`
// kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')
Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"`
// verbs is a list of supported kube verbs (this includes get, list, watch, create,
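Group and Version above are empty unless the resource differs from its containing APIResourceList, which mainly happens for subresources such as Scale. A hedged sketch of the fallback a discovery client might apply (effectiveGV is invented, not from this commit):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// effectiveGV resolves a resource's group/version, falling back to the
// GroupVersion of the APIResourceList it was discovered in.
func effectiveGV(listGV schema.GroupVersion, r metav1.APIResource) schema.GroupVersion {
	if r.Group != "" {
		listGV.Group = r.Group
	}
	if r.Version != "" {
		listGV.Version = r.Version
	}
	return listGV
}

func main() {
	appsV1beta1 := schema.GroupVersion{Group: "apps", Version: "v1beta1"}
	scale := metav1.APIResource{Name: "deployments/scale", Group: "autoscaling", Version: "v1"}
	fmt.Println(effectiveGV(appsV1beta1, scale)) // autoscaling/v1
}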
@@ -869,7 +910,7 @@ type LabelSelectorRequirement struct {
// +patchStrategy=merge
Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
// operator represents a key's relationship to a set of values.
// Valid operators ard In, NotIn, Exists and DoesNotExist.
// Valid operators are In, NotIn, Exists and DoesNotExist.
Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"`
// values is an array of string values. If the operator is In or NotIn,
// the values array must be non-empty. If the operator is Exists or DoesNotExist,


@@ -53,6 +53,8 @@ var map_APIResource = map[string]string{
"name": "name is the plural name of the resource.",
"singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.",
"namespaced": "namespaced indicates if a resource is namespaced or not.",
"group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".",
"version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".",
"kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')",
"verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)",
"shortNames": "shortNames is a list of suggested short names of the resource.", "shortNames": "shortNames is a list of suggested short names of the resource.",
@@ -157,7 +159,7 @@ func (LabelSelector) SwaggerDoc() map[string]string {
var map_LabelSelectorRequirement = map[string]string{
"": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
"key": "key is the label key that the selector applies to.",
"operator": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist.",
"operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.",
"values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
}
@@ -177,8 +179,9 @@ func (List) SwaggerDoc() map[string]string {
var map_ListMeta = map[string]string{
"": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
"selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.",
"selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.",
"resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency",
"continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response.",
}
func (ListMeta) SwaggerDoc() map[string]string {
@@ -193,6 +196,8 @@ var map_ListOptions = map[string]string{
"watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
"resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
"timeoutSeconds": "Timeout for the list/watch call.",
"limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
"continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
}
func (ListOptions) SwaggerDoc() map[string]string {


@@ -468,6 +468,14 @@ func (u *Unstructured) SetSelfLink(selfLink string) {
u.setNestedField(selfLink, "metadata", "selfLink")
}
func (u *Unstructured) GetContinue() string {
return getNestedString(u.Object, "metadata", "continue")
}
func (u *Unstructured) SetContinue(c string) {
u.setNestedField(c, "metadata", "continue")
}
func (u *Unstructured) GetCreationTimestamp() metav1.Time {
var timestamp metav1.Time
timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp"))
@@ -652,6 +660,14 @@ func (u *UnstructuredList) SetSelfLink(selfLink string) {
u.setNestedField(selfLink, "metadata", "selfLink")
}
func (u *UnstructuredList) GetContinue() string {
return getNestedString(u.Object, "metadata", "continue")
}
func (u *UnstructuredList) SetContinue(c string) {
u.setNestedField(c, "metadata", "continue")
}
func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) {
u.SetAPIVersion(gvk.GroupVersion().String())
u.SetKind(gvk.Kind)
@@ -702,9 +718,12 @@ func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error {
for _, i := range t.Items {
items = append(items, i.Object)
}
t.Object["items"] = items
defer func() { delete(t.Object, "items") }()
return json.NewEncoder(w).Encode(t.Object)
listObj := make(map[string]interface{}, len(t.Object)+1)
for k, v := range t.Object { // Make a shallow copy
listObj[k] = v
}
listObj["items"] = items
return json.NewEncoder(w).Encode(listObj)
case *runtime.Unknown:
// TODO: Unstructured needs to deal with ContentType.
_, err := w.Write(t.Raw)
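The Encode change just above replaces mutate-then-defer-delete on t.Object with a shallow copy, so concurrent readers of the map can never observe a transient "items" key. The same copy-before-mutate pattern in isolation, as a runnable sketch with invented names:

package main

import (
	"encoding/json"
	"os"
)

func main() {
	obj := map[string]interface{}{"apiVersion": "v1", "kind": "List"}
	items := []interface{}{"a", "b"}

	// Shallow-copy the map before adding "items": the original map is
	// never mutated, so other goroutines holding obj stay unaffected.
	out := make(map[string]interface{}, len(obj)+1)
	for k, v := range obj {
		out[k] = v
	}
	out["items"] = items

	json.NewEncoder(os.Stdout).Encode(out)
}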


@@ -426,17 +426,29 @@ var (
falseBytes = []byte("false")
)
func toUnstructured(sv, dv reflect.Value) error {
st, dt := sv.Type(), dv.Type()
func getMarshaler(v reflect.Value) (encodingjson.Marshaler, bool) {
// Check value receivers if v is not a pointer and pointer receivers if v is a pointer
if v.Type().Implements(marshalerType) {
return v.Interface().(encodingjson.Marshaler), true
}
// Check pointer receivers if v is not a pointer
if v.Kind() != reflect.Ptr && v.CanAddr() {
v = v.Addr()
if v.Type().Implements(marshalerType) {
return v.Interface().(encodingjson.Marshaler), true
}
}
return nil, false
}
func toUnstructured(sv, dv reflect.Value) error {
// Check if the object has a custom JSON marshaller/unmarshaller.
if st.Implements(marshalerType) {
if marshaler, ok := getMarshaler(sv); ok {
if sv.Kind() == reflect.Ptr && sv.IsNil() {
// We're done - we don't need to store anything.
return nil
}
marshaler := sv.Interface().(encodingjson.Marshaler)
data, err := marshaler.MarshalJSON()
if err != nil {
return err
@@ -496,6 +508,7 @@ func toUnstructured(sv, dv reflect.Value) error {
return nil
}
st, dt := sv.Type(), dv.Type()
switch st.Kind() {
case reflect.String:
if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
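getMarshaler is needed because a MarshalJSON defined on a pointer receiver lives only in the method set of *T, so the old st.Implements(marshalerType) check missed addressable non-pointer values. A self-contained sketch of the same receiver probing (the stamp type is invented):

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

type stamp struct{ s string }

// MarshalJSON has a pointer receiver, so only *stamp implements json.Marshaler.
func (t *stamp) MarshalJSON() ([]byte, error) { return []byte(`"` + t.s + `"`), nil }

var marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem()

func getMarshaler(v reflect.Value) (json.Marshaler, bool) {
	if v.Type().Implements(marshalerType) {
		return v.Interface().(json.Marshaler), true
	}
	// Fall back to the pointer receiver if the value is addressable.
	if v.Kind() != reflect.Ptr && v.CanAddr() {
		v = v.Addr()
		if v.Type().Implements(marshalerType) {
			return v.Interface().(json.Marshaler), true
		}
	}
	return nil, false
}

func main() {
	s := struct{ T stamp }{T: stamp{s: "hi"}}
	field := reflect.ValueOf(&s).Elem().Field(0) // addressable stamp value
	if _, ok := getMarshaler(field); ok {
		fmt.Println("found MarshalJSON via the pointer receiver")
	}
}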


@@ -19,9 +19,11 @@ package json
import (
"encoding/json"
"io"
"strconv"
"unsafe"
"github.com/ghodss/yaml" "github.com/ghodss/yaml"
"github.com/ugorji/go/codec" jsoniter "github.com/json-iterator/go"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
@@ -66,6 +68,36 @@ type Serializer struct {
var _ runtime.Serializer = &Serializer{}
var _ recognizer.RecognizingDecoder = &Serializer{}
func init() {
// Force jsoniter to decode number to interface{} via ints, if possible.
decodeNumberAsInt64IfPossible := func(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
switch iter.WhatIsNext() {
case jsoniter.NumberValue:
var number json.Number
iter.ReadVal(&number)
u64, err := strconv.ParseUint(string(number), 10, 64)
if err == nil {
*(*interface{})(ptr) = u64
return
}
i64, err := strconv.ParseInt(string(number), 10, 64)
if err == nil {
*(*interface{})(ptr) = i64
return
}
f64, err := strconv.ParseFloat(string(number), 64)
if err == nil {
*(*interface{})(ptr) = f64
return
}
// Not much we can do here.
default:
*(*interface{})(ptr) = iter.Read()
}
}
jsoniter.RegisterTypeDecoderFunc("interface {}", decodeNumberAsInt64IfPossible)
}
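The init hook above forces jsoniter to decode untyped JSON numbers through a uint64, then int64, then float64 cascade rather than defaulting everything to float64. The cascade can be reproduced with encoding/json's UseNumber mode; a minimal standalone sketch:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strconv"
)

// parseNumber mirrors the uint64 -> int64 -> float64 cascade above.
func parseNumber(n json.Number) interface{} {
	if u, err := strconv.ParseUint(n.String(), 10, 64); err == nil {
		return u
	}
	if i, err := strconv.ParseInt(n.String(), 10, 64); err == nil {
		return i
	}
	if f, err := strconv.ParseFloat(n.String(), 64); err == nil {
		return f
	}
	return n // not much we can do here
}

func main() {
	dec := json.NewDecoder(bytes.NewReader([]byte(`[18446744073709551615, -3, 1.5]`)))
	dec.UseNumber() // keep numbers as json.Number instead of float64
	var vals []interface{}
	if err := dec.Decode(&vals); err != nil {
		panic(err)
	}
	for _, v := range vals {
		p := parseNumber(v.(json.Number))
		fmt.Printf("%T %v\n", p, p) // uint64, int64, float64 in turn
	}
}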
// Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then
// load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, the raw data will be
// extracted and no decoding will be performed. If into is not registered with the typer, then the object will be straight decoded using
@@ -121,7 +153,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
types, _, err := s.typer.ObjectKinds(into)
switch {
case runtime.IsNotRegisteredError(err):
if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(into); err != nil {
if err := jsoniter.ConfigFastest.Unmarshal(data, into); err != nil {
return nil, actual, err
}
return into, actual, nil
@@ -155,7 +187,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
return nil, actual, err
}
if err := codec.NewDecoderBytes(data, new(codec.JsonHandle)).Decode(obj); err != nil {
if err := jsoniter.ConfigFastest.Unmarshal(data, obj); err != nil {
return nil, actual, err
}
return obj, actual, nil
@@ -164,7 +196,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
// Encode serializes the provided object to the given writer.
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
if s.yaml {
json, err := json.Marshal(obj)
json, err := jsoniter.ConfigFastest.Marshal(obj)
if err != nil {
return err
}
@@ -177,7 +209,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
}
if s.pretty {
data, err := json.MarshalIndent(obj, "", "  ")
data, err := jsoniter.ConfigFastest.MarshalIndent(obj, "", "  ")
if err != nil {
return err
}


@@ -22,6 +22,8 @@ import (
"net"
"regexp"
"strings"
"k8s.io/apimachinery/pkg/util/validation/field"
)
const qnameCharFmt string = "[A-Za-z0-9]"
@@ -67,6 +69,21 @@ func IsQualifiedName(value string) []string {
return errs
}
// IsFullyQualifiedName checks if the name is fully qualified.
func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList {
var allErrors field.ErrorList
if len(name) == 0 {
return append(allErrors, field.Required(fldPath, ""))
}
if errs := IsDNS1123Subdomain(name); len(errs) > 0 {
return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ",")))
}
if len(strings.Split(name, ".")) < 3 {
return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least three segments separated by dots"))
}
return allErrors
}
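IsFullyQualifiedName layers three checks: the name must be non-empty, a valid DNS-1123 subdomain, and at least three dot-separated segments. A small usage sketch against the package as vendored here:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	path := field.NewPath("spec", "group")
	// "example.com" has only two segments, so this returns an error.
	for _, err := range validation.IsFullyQualifiedName(path, "example.com") {
		fmt.Println(err)
	}
	// "metrics.example.com" passes: three segments, valid subdomain.
	fmt.Println(len(validation.IsFullyQualifiedName(path, "metrics.example.com"))) // 0
}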
const labelValueFmt string = "(" + qualifiedNameFmt + ")?"
const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
const LabelValueMaxLength int = 63
@@ -188,6 +205,14 @@ func IsValidPortNum(port int) []string {
return []string{InclusiveRangeError(1, 65535)}
}
// IsInRange tests that the argument is in an inclusive range.
func IsInRange(value int, min int, max int) []string {
if value >= min && value <= max {
return nil
}
return []string{InclusiveRangeError(min, max)}
}
// Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1
// TODO: once we have a type for UID/GID we should make these that type.
const (

Some files were not shown because too many files have changed in this diff