Merge pull request #2430 from Random-Liu/update-cri-to-v1.11.0

Update cri to v1.11.0.
Phil Estes 2018-06-28 16:25:26 -04:00 committed by GitHub
commit a044b047e8
267 changed files with 23066 additions and 15230 deletions


@ -42,8 +42,9 @@ github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
gotest.tools v2.1.0 gotest.tools v2.1.0
github.com/google/go-cmp v0.1.0 github.com/google/go-cmp v0.1.0
github.com/containerd/cri 8bcb9a95394e8d7845da1d6a994d3ac2a86d22f0 # cri dependencies
github.com/containerd/go-cni f2d7272f12d045b16ed924f50e91f9f9cecc55a7 github.com/containerd/cri v1.11.0
github.com/containerd/go-cni 5882530828ecf62032409b298a3e8b19e08b6534
github.com/blang/semver v3.1.0 github.com/blang/semver v3.1.0
github.com/containernetworking/cni v0.6.0 github.com/containernetworking/cni v0.6.0
github.com/containernetworking/plugins v0.7.0 github.com/containernetworking/plugins v0.7.0
@ -57,22 +58,26 @@ github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
github.com/json-iterator/go 1.0.4 github.com/json-iterator/go f2b4162afba35581b6d4a50d3b8f34e33c144682
github.com/opencontainers/runtime-tools 6073aff4ac61897f75895123f7e24135204a404d github.com/modern-go/reflect2 05fbef0ca5da472bbf96c9322b84a53edc03c9fd
github.com/modern-go/concurrent 1.0.3
github.com/opencontainers/runtime-tools v0.6.0
github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206 github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
github.com/spf13/pflag v1.0.0
github.com/tchap/go-patricia 5ad6cdb7538b0097d5598c7e57f0a24072adf7dc github.com/tchap/go-patricia 5ad6cdb7538b0097d5598c7e57f0a24072adf7dc
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874
golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067 golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631 golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77 gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77
k8s.io/api 7e796de92438aede7cb5d6bcf6c10f4fa65db560 k8s.io/api 9e5ffd1f1320950b238cfce291b926411f0af722
k8s.io/apimachinery fcb9a12f7875d01f8390b28faedc37dcf2e713b9 k8s.io/apimachinery ed135c5b96450fd24e5e981c708114fbbd950697
k8s.io/apiserver 4a8377c547bbff4576a35b5b5bf4026d9b5aa763 k8s.io/apiserver a90e3a95c2e91b944bfca8225c4e0d12e42a9eb5
k8s.io/client-go b9a0cf870f239c4a4ecfd3feb075a50e7cbe1473 k8s.io/client-go 03bfb9bdcfe5482795b999f39ca3ed9ad42ce5bb
k8s.io/kubernetes v1.10.0 k8s.io/kubernetes v1.11.0
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e k8s.io/utils 733eca437aa39379e4bcc25e726439dfca40fcff
# zfs dependencies # zfs dependencies
github.com/containerd/zfs 9a0b8b8b5982014b729cd34eb7cd7a11062aa6ec github.com/containerd/zfs 9a0b8b8b5982014b729cd34eb7cd7a11062aa6ec


@ -96,6 +96,10 @@ type PluginConfig struct {
SystemdCgroup bool `toml:"systemd_cgroup" json:"systemdCgroup"` SystemdCgroup bool `toml:"systemd_cgroup" json:"systemdCgroup"`
// EnableTLSStreaming indicates to enable the TLS streaming support. // EnableTLSStreaming indicates to enable the TLS streaming support.
EnableTLSStreaming bool `toml:"enable_tls_streaming" json:"enableTLSStreaming"` EnableTLSStreaming bool `toml:"enable_tls_streaming" json:"enableTLSStreaming"`
// MaxContainerLogLineSize is the maximum log line size in bytes for a container.
// Log line longer than the limit will be split into multiple lines. Non-positive
// value means no limit.
MaxContainerLogLineSize int `toml:"max_container_log_line_size" json:"maxContainerLogSize"`
} }
// Config contains all configurations for cri server. // Config contains all configurations for cri server.
@ -129,13 +133,14 @@ func DefaultConfig() PluginConfig {
Root: "", Root: "",
}, },
}, },
StreamServerAddress: "", StreamServerAddress: "",
StreamServerPort: "10010", StreamServerPort: "10010",
EnableSelinux: false, EnableSelinux: false,
EnableTLSStreaming: false, EnableTLSStreaming: false,
SandboxImage: "k8s.gcr.io/pause:3.1", SandboxImage: "k8s.gcr.io/pause:3.1",
StatsCollectPeriod: 10, StatsCollectPeriod: 10,
SystemdCgroup: false, SystemdCgroup: false,
MaxContainerLogLineSize: 16 * 1024,
Registry: Registry{ Registry: Registry{
Mirrors: map[string]Mirror{ Mirrors: map[string]Mirror{
"docker.io": { "docker.io": {


@ -53,7 +53,6 @@ func WithNewSnapshot(id string, i containerd.Image) containerd.NewContainerOpts
// WithVolumes copies ownership of volume in rootfs to its corresponding host path. // WithVolumes copies ownership of volume in rootfs to its corresponding host path.
// It doesn't update runtime spec. // It doesn't update runtime spec.
// The passed in map is a host path to container path map for all volumes. // The passed in map is a host path to container path map for all volumes.
// TODO(random-liu): Figure out whether we need to copy volume content.
func WithVolumes(volumeMounts map[string]string) containerd.NewContainerOpts { func WithVolumes(volumeMounts map[string]string) containerd.NewContainerOpts {
return func(ctx context.Context, client *containerd.Client, c *containers.Container) (err error) { return func(ctx context.Context, client *containerd.Client, c *containers.Container) (err error) {
if c.Snapshotter == "" { if c.Snapshotter == "" {
@ -108,14 +107,6 @@ func WithVolumes(volumeMounts map[string]string) containerd.NewContainerOpts {
// copyExistingContents copies from the source to the destination and // copyExistingContents copies from the source to the destination and
// ensures the ownership is appropriately set. // ensures the ownership is appropriately set.
func copyExistingContents(source, destination string) error { func copyExistingContents(source, destination string) error {
srcList, err := ioutil.ReadDir(source)
if err != nil {
return err
}
if len(srcList) == 0 {
// Skip copying if source directory is empty.
return nil
}
dstList, err := ioutil.ReadDir(destination) dstList, err := ioutil.ReadDir(destination)
if err != nil { if err != nil {
return err return err


@ -16,7 +16,10 @@ limitations under the License.
package ioutil package ioutil
import "io" import (
"io"
"sync"
)
// writeCloseInformer wraps passed in write closer with a close channel. // writeCloseInformer wraps passed in write closer with a close channel.
// Caller could wait on the close channel for the write closer to be // Caller could wait on the close channel for the write closer to be
@ -66,3 +69,34 @@ func (n *nopWriteCloser) Write(p []byte) (int, error) {
func (n *nopWriteCloser) Close() error { func (n *nopWriteCloser) Close() error {
return nil return nil
} }
// serialWriteCloser wraps a write closer and makes sure all writes
// are done in serial.
// Parallel write won't intersect with each other. Use case:
// 1) Pipe: Write content longer than PIPE_BUF.
// See http://man7.org/linux/man-pages/man7/pipe.7.html
// 2) <3.14 Linux Kernel: write is not atomic
// See http://man7.org/linux/man-pages/man2/write.2.html
type serialWriteCloser struct {
mu sync.Mutex
wc io.WriteCloser
}
// NewSerialWriteCloser creates a SerialWriteCloser from a write closer.
func NewSerialWriteCloser(wc io.WriteCloser) io.WriteCloser {
return &serialWriteCloser{wc: wc}
}
// Write writes a group of byte arrays in order atomically.
func (s *serialWriteCloser) Write(data []byte) (int, error) {
s.mu.Lock()
defer s.mu.Unlock()
return s.wc.Write(data)
}
// Close closes the write closer.
func (s *serialWriteCloser) Close() error {
s.mu.Lock()
defer s.mu.Unlock()
return s.wc.Close()
}
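The serial write closer above becomes the single sink that both the stdout and stderr CRI loggers write through (see the createContainerLoggers change later in this commit). A minimal usage sketch, assuming the package's import path github.com/containerd/cri/pkg/ioutil as used elsewhere in the CRI plugin; the file path is made up for illustration:

```go
package main

import (
	"fmt"
	"os"
	"sync"

	cioutil "github.com/containerd/cri/pkg/ioutil"
)

func main() {
	// Hypothetical log file, just for the sketch.
	f, err := os.OpenFile("/tmp/demo.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640)
	if err != nil {
		panic(err)
	}
	// Serialize writes so concurrent writers never interleave mid-entry.
	w := cioutil.NewSerialWriteCloser(f)
	defer w.Close()

	var wg sync.WaitGroup
	for _, line := range []string{"stdout entry\n", "stderr entry\n"} {
		wg.Add(1)
		go func(l string) {
			defer wg.Done()
			if _, err := w.Write([]byte(l)); err != nil {
				fmt.Fprintln(os.Stderr, "write failed:", err)
			}
		}(line)
	}
	wg.Wait()
}
```

Closing the wrapper closes the underlying file, matching the Close implementation above.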


@ -372,6 +372,11 @@ func (c *criService) generateContainerSpec(id string, sandboxID string, sandboxP
securityContext.GetCapabilities()) securityContext.GetCapabilities())
} }
} }
// Clear all ambient capabilities. The implication of non-root + caps
// is not clearly defined in Kubernetes.
// See https://github.com/kubernetes/kubernetes/issues/56374
// Keep docker's behavior for now.
g.Config.Process.Capabilities.Ambient = []string{}
g.SetProcessSelinuxLabel(processLabel) g.SetProcessSelinuxLabel(processLabel)
g.SetLinuxMountLabel(mountLabel) g.SetLinuxMountLabel(mountLabel)
@ -402,7 +407,7 @@ func (c *criService) generateContainerSpec(id string, sandboxID string, sandboxP
g.AddAnnotation(annotations.ContainerType, annotations.ContainerTypeContainer) g.AddAnnotation(annotations.ContainerType, annotations.ContainerTypeContainer)
g.AddAnnotation(annotations.SandboxID, sandboxID) g.AddAnnotation(annotations.SandboxID, sandboxID)
return g.Spec(), nil return g.Config, nil
} }
// generateVolumeMounts sets up image volumes for container. Rely on the removal of container // generateVolumeMounts sets up image volumes for container. Rely on the removal of container
@ -528,7 +533,7 @@ func clearReadOnly(m *runtimespec.Mount) {
// addDevices set device mapping without privilege. // addDevices set device mapping without privilege.
func (c *criService) addOCIDevices(g *generate.Generator, devs []*runtime.Device) error { func (c *criService) addOCIDevices(g *generate.Generator, devs []*runtime.Device) error {
spec := g.Spec() spec := g.Config
for _, device := range devs { for _, device := range devs {
path, err := c.os.ResolveSymbolicLink(device.HostPath) path, err := c.os.ResolveSymbolicLink(device.HostPath)
if err != nil { if err != nil {
@ -560,7 +565,7 @@ func (c *criService) addOCIDevices(g *generate.Generator, devs []*runtime.Device
// addDevices set device mapping with privilege. // addDevices set device mapping with privilege.
func setOCIDevicesPrivileged(g *generate.Generator) error { func setOCIDevicesPrivileged(g *generate.Generator) error {
spec := g.Spec() spec := g.Config
hostDevices, err := devices.HostDevices() hostDevices, err := devices.HostDevices()
if err != nil { if err != nil {
return err return err
@ -592,7 +597,12 @@ func setOCIDevicesPrivileged(g *generate.Generator) error {
// addOCIBindMounts adds bind mounts. // addOCIBindMounts adds bind mounts.
func (c *criService) addOCIBindMounts(g *generate.Generator, mounts []*runtime.Mount, mountLabel string) error { func (c *criService) addOCIBindMounts(g *generate.Generator, mounts []*runtime.Mount, mountLabel string) error {
// Mount cgroup into the container as readonly, which inherits docker's behavior. // Mount cgroup into the container as readonly, which inherits docker's behavior.
g.AddCgroupsMount("ro") // nolint: errcheck g.AddMount(runtimespec.Mount{
Source: "cgroup",
Destination: "/sys/fs/cgroup",
Type: "cgroup",
Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"},
})
for _, mount := range mounts { for _, mount := range mounts {
dst := mount.GetContainerPath() dst := mount.GetContainerPath()
src := mount.GetHostPath() src := mount.GetHostPath()
@ -630,8 +640,8 @@ func (c *criService) addOCIBindMounts(g *generate.Generator, mounts []*runtime.M
return err return err
} }
options = append(options, "rslave") options = append(options, "rslave")
if g.Spec().Linux.RootfsPropagation != "rshared" && if g.Config.Linux.RootfsPropagation != "rshared" &&
g.Spec().Linux.RootfsPropagation != "rslave" { g.Config.Linux.RootfsPropagation != "rslave" {
g.SetLinuxRootPropagation("rslave") // nolint: errcheck g.SetLinuxRootPropagation("rslave") // nolint: errcheck
} }
default: default:
@ -652,14 +662,19 @@ func (c *criService) addOCIBindMounts(g *generate.Generator, mounts []*runtime.M
return errors.Wrapf(err, "relabel %q with %q failed", src, mountLabel) return errors.Wrapf(err, "relabel %q with %q failed", src, mountLabel)
} }
} }
g.AddBindMount(src, dst, options) g.AddMount(runtimespec.Mount{
Source: src,
Destination: dst,
Type: "bind",
Options: options,
})
} }
return nil return nil
} }
func setOCIBindMountsPrivileged(g *generate.Generator) { func setOCIBindMountsPrivileged(g *generate.Generator) {
spec := g.Spec() spec := g.Config
// clear readonly for /sys and cgroup // clear readonly for /sys and cgroup
for i, m := range spec.Mounts { for i, m := range spec.Mounts {
if spec.Mounts[i].Destination == "/sys" { if spec.Mounts[i].Destination == "/sys" {
@ -699,6 +714,40 @@ func getOCICapabilitiesList() []string {
return caps return caps
} }
// Adds capabilities to all sets relevant to root (bounding, permitted, effective, inheritable)
func addProcessRootCapability(g *generate.Generator, c string) error {
if err := g.AddProcessCapabilityBounding(c); err != nil {
return err
}
if err := g.AddProcessCapabilityPermitted(c); err != nil {
return err
}
if err := g.AddProcessCapabilityEffective(c); err != nil {
return err
}
if err := g.AddProcessCapabilityInheritable(c); err != nil {
return err
}
return nil
}
// Drops capabilities to all sets relevant to root (bounding, permitted, effective, inheritable)
func dropProcessRootCapability(g *generate.Generator, c string) error {
if err := g.DropProcessCapabilityBounding(c); err != nil {
return err
}
if err := g.DropProcessCapabilityPermitted(c); err != nil {
return err
}
if err := g.DropProcessCapabilityEffective(c); err != nil {
return err
}
if err := g.DropProcessCapabilityInheritable(c); err != nil {
return err
}
return nil
}
// setOCICapabilities adds/drops process capabilities. // setOCICapabilities adds/drops process capabilities.
func setOCICapabilities(g *generate.Generator, capabilities *runtime.Capability) error { func setOCICapabilities(g *generate.Generator, capabilities *runtime.Capability) error {
if capabilities == nil { if capabilities == nil {
@ -711,14 +760,14 @@ func setOCICapabilities(g *generate.Generator, capabilities *runtime.Capability)
// will be all capabilities without `CAP_CHOWN`. // will be all capabilities without `CAP_CHOWN`.
if util.InStringSlice(capabilities.GetAddCapabilities(), "ALL") { if util.InStringSlice(capabilities.GetAddCapabilities(), "ALL") {
for _, c := range getOCICapabilitiesList() { for _, c := range getOCICapabilitiesList() {
if err := g.AddProcessCapability(c); err != nil { if err := addProcessRootCapability(g, c); err != nil {
return err return err
} }
} }
} }
if util.InStringSlice(capabilities.GetDropCapabilities(), "ALL") { if util.InStringSlice(capabilities.GetDropCapabilities(), "ALL") {
for _, c := range getOCICapabilitiesList() { for _, c := range getOCICapabilitiesList() {
if err := g.DropProcessCapability(c); err != nil { if err := dropProcessRootCapability(g, c); err != nil {
return err return err
} }
} }
@ -729,7 +778,7 @@ func setOCICapabilities(g *generate.Generator, capabilities *runtime.Capability)
continue continue
} }
// Capabilities in CRI doesn't have `CAP_` prefix, so add it. // Capabilities in CRI doesn't have `CAP_` prefix, so add it.
if err := g.AddProcessCapability("CAP_" + strings.ToUpper(c)); err != nil { if err := addProcessRootCapability(g, "CAP_"+strings.ToUpper(c)); err != nil {
return err return err
} }
} }
@ -738,7 +787,7 @@ func setOCICapabilities(g *generate.Generator, capabilities *runtime.Capability)
if strings.ToUpper(c) == "ALL" { if strings.ToUpper(c) == "ALL" {
continue continue
} }
if err := g.DropProcessCapability("CAP_" + strings.ToUpper(c)); err != nil { if err := dropProcessRootCapability(g, "CAP_"+strings.ToUpper(c)); err != nil {
return err return err
} }
} }
@ -772,6 +821,10 @@ func defaultRuntimeSpec(id string) (*runtimespec.Spec, error) {
if mount.Destination == "/run" { if mount.Destination == "/run" {
continue continue
} }
// CRI plugin handles `/dev/shm` itself.
if mount.Destination == "/dev/shm" {
continue
}
mounts = append(mounts, mount) mounts = append(mounts, mount)
} }
spec.Mounts = mounts spec.Mounts = mounts
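setOCICapabilities above maps CRI capability names, which carry no CAP_ prefix, onto the OCI spelling before adding or dropping them from all four root-relevant sets. A standalone sketch of just that normalization step (the helper name here is made up for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// toOCICapability converts a CRI capability name such as "net_admin" or
// "SYS_TIME" to the OCI form with the CAP_ prefix, mirroring the
// normalization done in setOCICapabilities.
func toOCICapability(c string) string {
	return "CAP_" + strings.ToUpper(c)
}

func main() {
	for _, c := range []string{"net_admin", "sys_time", "CHOWN"} {
		fmt.Println(toOCICapability(c)) // CAP_NET_ADMIN, CAP_SYS_TIME, CAP_CHOWN
	}
}
```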


@ -102,7 +102,7 @@ func (c *criService) execInContainer(ctx context.Context, id string, opts execOp
if opts.tty { if opts.tty {
g := newSpecGenerator(spec) g := newSpecGenerator(spec)
g.AddProcessEnv("TERM", "xterm") g.AddProcessEnv("TERM", "xterm")
spec = g.Spec() spec = g.Config
} }
pspec := spec.Process pspec := spec.Process
pspec.Args = opts.cmd pspec.Args = opts.cmd


@ -36,7 +36,7 @@ func (c *criService) ReopenContainerLog(ctx context.Context, r *runtime.ReopenCo
} }
// Create new container logger and replace the existing ones. // Create new container logger and replace the existing ones.
stdoutWC, stderrWC, err := createContainerLoggers(container.LogPath, container.Config.GetTty()) stdoutWC, stderrWC, err := c.createContainerLoggers(container.LogPath, container.Config.GetTty())
if err != nil { if err != nil {
return nil, err return nil, err
} }


@ -18,6 +18,7 @@ package server
import ( import (
"io" "io"
"os"
"time" "time"
"github.com/containerd/containerd" "github.com/containerd/containerd"
@ -29,6 +30,7 @@ import (
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
ctrdutil "github.com/containerd/cri/pkg/containerd/util" ctrdutil "github.com/containerd/cri/pkg/containerd/util"
cioutil "github.com/containerd/cri/pkg/ioutil"
cio "github.com/containerd/cri/pkg/server/io" cio "github.com/containerd/cri/pkg/server/io"
containerstore "github.com/containerd/cri/pkg/store/container" containerstore "github.com/containerd/cri/pkg/store/container"
sandboxstore "github.com/containerd/cri/pkg/store/sandbox" sandboxstore "github.com/containerd/cri/pkg/store/sandbox"
@ -97,20 +99,10 @@ func (c *criService) startContainer(ctx context.Context,
} }
ioCreation := func(id string) (_ containerdio.IO, err error) { ioCreation := func(id string) (_ containerdio.IO, err error) {
stdoutWC, stderrWC, err := createContainerLoggers(meta.LogPath, config.GetTty()) stdoutWC, stderrWC, err := c.createContainerLoggers(meta.LogPath, config.GetTty())
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to create container loggers") return nil, errors.Wrap(err, "failed to create container loggers")
} }
defer func() {
if err != nil {
if stdoutWC != nil {
stdoutWC.Close()
}
if stderrWC != nil {
stderrWC.Close()
}
}
}()
cntr.IO.AddOutput("log", stdoutWC, stderrWC) cntr.IO.AddOutput("log", stdoutWC, stderrWC)
cntr.IO.Pipe() cntr.IO.Pipe()
return cntr.IO, nil return cntr.IO, nil
@ -142,24 +134,36 @@ func (c *criService) startContainer(ctx context.Context,
return nil return nil
} }
// Create container loggers and return write closer for stdout and stderr. // createContainerLoggers creates container loggers and return write closer for stdout and stderr.
func createContainerLoggers(logPath string, tty bool) (stdout io.WriteCloser, stderr io.WriteCloser, err error) { func (c *criService) createContainerLoggers(logPath string, tty bool) (stdout io.WriteCloser, stderr io.WriteCloser, err error) {
if logPath != "" { if logPath != "" {
// Only generate container log when log path is specified. // Only generate container log when log path is specified.
if stdout, err = cio.NewCRILogger(logPath, cio.Stdout); err != nil { f, err := os.OpenFile(logPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640)
return nil, nil, errors.Wrap(err, "failed to start container stdout logger") if err != nil {
return nil, nil, errors.Wrap(err, "failed to create and open log file")
} }
defer func() { defer func() {
if err != nil { if err != nil {
stdout.Close() f.Close()
} }
}() }()
var stdoutCh, stderrCh <-chan struct{}
wc := cioutil.NewSerialWriteCloser(f)
stdout, stdoutCh = cio.NewCRILogger(logPath, wc, cio.Stdout, c.config.MaxContainerLogLineSize)
// Only redirect stderr when there is no tty. // Only redirect stderr when there is no tty.
if !tty { if !tty {
if stderr, err = cio.NewCRILogger(logPath, cio.Stderr); err != nil { stderr, stderrCh = cio.NewCRILogger(logPath, wc, cio.Stderr, c.config.MaxContainerLogLineSize)
return nil, nil, errors.Wrap(err, "failed to start container stderr logger")
}
} }
go func() {
if stdoutCh != nil {
<-stdoutCh
}
if stderrCh != nil {
<-stderrCh
}
logrus.Debugf("Finish redirecting log file %q, closing it", logPath)
f.Close()
}()
} else { } else {
stdout = cio.NewDiscardLogger() stdout = cio.NewDiscardLogger()
stderr = cio.NewDiscardLogger() stderr = cio.NewDiscardLogger()


@ -157,5 +157,5 @@ func updateOCILinuxResource(spec *runtimespec.Spec, new *runtime.LinuxContainerR
g.SetLinuxResourcesCPUMems(new.GetCpusetMems()) g.SetLinuxResourcesCPUMems(new.GetCpusetMems())
} }
return g.Spec(), nil return g.Config, nil
} }


@ -186,6 +186,7 @@ func (em *eventMonitor) handleEvent(any interface{}) error {
// TODO(random-liu): [P2] Handle containerd-shim exit. // TODO(random-liu): [P2] Handle containerd-shim exit.
case *eventtypes.TaskExit: case *eventtypes.TaskExit:
e := any.(*eventtypes.TaskExit) e := any.(*eventtypes.TaskExit)
logrus.Infof("TaskExit event %+v", e)
cntr, err := em.containerStore.Get(e.ContainerID) cntr, err := em.containerStore.Get(e.ContainerID)
if err == nil { if err == nil {
if err := handleContainerExit(ctx, e, cntr); err != nil { if err := handleContainerExit(ctx, e, cntr); err != nil {


@ -21,10 +21,8 @@ import (
"bytes" "bytes"
"io" "io"
"io/ioutil" "io/ioutil"
"os"
"time" "time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
@ -38,11 +36,8 @@ const (
eol = '\n' eol = '\n'
// timestampFormat is the timestamp format used in CRI logging format. // timestampFormat is the timestamp format used in CRI logging format.
timestampFormat = time.RFC3339Nano timestampFormat = time.RFC3339Nano
// pipeBufSize is the system PIPE_BUF size, on linux it is 4096 bytes. // defaultBufSize is the default size of the read buffer in bytes.
// POSIX.1 says that write less than PIPE_BUF is atmoic. defaultBufSize = 4096
pipeBufSize = 4096
// bufSize is the size of the read buffer.
bufSize = pipeBufSize - len(timestampFormat) - len(Stdout) - len(runtime.LogTagPartial) - 3 /*3 delimiter*/ - 1 /*eol*/
) )
// NewDiscardLogger creates logger which discards all the input. // NewDiscardLogger creates logger which discards all the input.
@ -51,46 +46,91 @@ func NewDiscardLogger() io.WriteCloser {
} }
// NewCRILogger returns a write closer which redirect container log into // NewCRILogger returns a write closer which redirect container log into
// log file, and decorate the log line into CRI defined format. // log file, and decorate the log line into CRI defined format. It also
func NewCRILogger(path string, stream StreamType) (io.WriteCloser, error) { // returns a channel which indicates whether the logger is stopped.
logrus.Debugf("Start writing log file %q", path) // maxLen is the max length limit of a line. A line longer than the
// limit will be cut into multiple lines.
func NewCRILogger(path string, w io.Writer, stream StreamType, maxLen int) (io.WriteCloser, <-chan struct{}) {
logrus.Debugf("Start writing stream %q to log file %q", stream, path)
prc, pwc := io.Pipe() prc, pwc := io.Pipe()
f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0640) stop := make(chan struct{})
if err != nil { go func() {
return nil, errors.Wrap(err, "failed to open log file") redirectLogs(path, prc, w, stream, maxLen)
} close(stop)
go redirectLogs(path, prc, f, stream) }()
return pwc, nil return pwc, stop
} }
func redirectLogs(path string, rc io.ReadCloser, wc io.WriteCloser, stream StreamType) { func redirectLogs(path string, rc io.ReadCloser, w io.Writer, s StreamType, maxLen int) {
defer rc.Close() defer rc.Close()
defer wc.Close() var (
streamBytes := []byte(stream) stream = []byte(s)
delimiterBytes := []byte{delimiter} delimiter = []byte{delimiter}
partialBytes := []byte(runtime.LogTagPartial) partial = []byte(runtime.LogTagPartial)
fullBytes := []byte(runtime.LogTagFull) full = []byte(runtime.LogTagFull)
r := bufio.NewReaderSize(rc, bufSize) buf [][]byte
for { length int
lineBytes, isPrefix, err := r.ReadLine() bufSize = defaultBufSize
if err == io.EOF { )
logrus.Debugf("Finish redirecting log file %q", path) // Make sure bufSize <= maxLen
return if maxLen > 0 && maxLen < bufSize {
} bufSize = maxLen
if err != nil {
logrus.WithError(err).Errorf("An error occurred when redirecting log file %q", path)
return
}
tagBytes := fullBytes
if isPrefix {
tagBytes = partialBytes
}
timestampBytes := time.Now().AppendFormat(nil, time.RFC3339Nano)
data := bytes.Join([][]byte{timestampBytes, streamBytes, tagBytes, lineBytes}, delimiterBytes)
data = append(data, eol)
if _, err := wc.Write(data); err != nil {
logrus.WithError(err).Errorf("Fail to write %q log to log file %q", stream, path)
}
// Continue on write error to drain the input.
} }
r := bufio.NewReaderSize(rc, bufSize)
writeLine := func(tag, line []byte) {
timestamp := time.Now().AppendFormat(nil, timestampFormat)
data := bytes.Join([][]byte{timestamp, stream, tag, line}, delimiter)
data = append(data, eol)
if _, err := w.Write(data); err != nil {
logrus.WithError(err).Errorf("Fail to write %q log to log file %q", s, path)
// Continue on write error to drain the container output.
}
}
for {
var stop bool
newLine, isPrefix, err := r.ReadLine()
if err != nil {
if err == io.EOF {
logrus.Debugf("Getting EOF from stream %q while redirecting to log file %q", s, path)
} else {
logrus.WithError(err).Errorf("An error occurred when redirecting stream %q to log file %q", s, path)
}
if length == 0 {
// No content left to write, break.
break
}
// Stop after writing the content left in buffer.
stop = true
} else {
// Buffer returned by ReadLine will change after
// next read, copy it.
l := make([]byte, len(newLine))
copy(l, newLine)
buf = append(buf, l)
length += len(l)
}
if maxLen > 0 && length > maxLen {
exceedLen := length - maxLen
last := buf[len(buf)-1]
if exceedLen > len(last) {
// exceedLen must <= len(last), or else the buffer
// should have be written in the previous iteration.
panic("exceed length should <= last buffer size")
}
buf[len(buf)-1] = last[:len(last)-exceedLen]
writeLine(partial, bytes.Join(buf, nil))
buf = [][]byte{last[len(last)-exceedLen:]}
length = exceedLen
}
if isPrefix {
continue
}
writeLine(full, bytes.Join(buf, nil))
buf = nil
length = 0
if stop {
break
}
}
logrus.Debugf("Finish redirecting stream %q to log file %q", s, path)
} }
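The rewritten redirectLogs buffers container output and, when max_container_log_line_size is positive, cuts any logical line that exceeds it into several CRI entries, tagging all but the last piece as partial. The sketch below reproduces only the formatting and splitting behaviour in isolation; the entry layout (timestamp, stream, tag, content) and the "P"/"F" tags follow the CRI logging format, everything else is simplified:

```go
package main

import (
	"bytes"
	"fmt"
	"time"
)

// criEntry renders one CRI-format log entry:
//   <RFC3339Nano timestamp> <stream> <P|F> <content>\n
// "P" marks a partial line that was cut at maxLen, "F" the final piece.
func criEntry(stream, tag string, content []byte) string {
	return fmt.Sprintf("%s %s %s %s\n",
		time.Now().Format(time.RFC3339Nano), stream, tag, content)
}

// splitLine cuts one logical line into entries whose content is at most
// maxLen bytes, mirroring the splitting behaviour described above.
// A non-positive maxLen means no limit.
func splitLine(stream string, line []byte, maxLen int) []string {
	var out []string
	for maxLen > 0 && len(line) > maxLen {
		out = append(out, criEntry(stream, "P", line[:maxLen]))
		line = line[maxLen:]
	}
	return append(out, criEntry(stream, "F", line))
}

func main() {
	long := bytes.Repeat([]byte("x"), 10)
	for _, e := range splitLine("stdout", long, 4) {
		fmt.Print(e) // two "P" entries of 4 bytes, then one "F" entry of 2
	}
}
```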


@ -78,9 +78,7 @@ func (c *criService) recover(ctx context.Context) error {
return errors.Wrap(err, "failed to list containers") return errors.Wrap(err, "failed to list containers")
} }
for _, container := range containers { for _, container := range containers {
containerDir := c.getContainerRootDir(container.ID()) cntr, err := c.loadContainer(ctx, container)
volatileContainerDir := c.getVolatileContainerRootDir(container.ID())
cntr, err := loadContainer(ctx, container, containerDir, volatileContainerDir)
if err != nil { if err != nil {
logrus.WithError(err).Errorf("Failed to load container %q", container.ID()) logrus.WithError(err).Errorf("Failed to load container %q", container.ID())
continue continue
@ -149,8 +147,10 @@ func (c *criService) recover(ctx context.Context) error {
} }
// loadContainer loads container from containerd and status checkpoint. // loadContainer loads container from containerd and status checkpoint.
func loadContainer(ctx context.Context, cntr containerd.Container, containerDir, volatileContainerDir string) (containerstore.Container, error) { func (c *criService) loadContainer(ctx context.Context, cntr containerd.Container) (containerstore.Container, error) {
id := cntr.ID() id := cntr.ID()
containerDir := c.getContainerRootDir(id)
volatileContainerDir := c.getVolatileContainerRootDir(id)
var container containerstore.Container var container containerstore.Container
// Load container metadata. // Load container metadata.
exts, err := cntr.Extensions(ctx) exts, err := cntr.Extensions(ctx)
@ -176,11 +176,21 @@ func loadContainer(ctx context.Context, cntr containerd.Container, containerDir,
// Load up-to-date status from containerd. // Load up-to-date status from containerd.
var containerIO *cio.ContainerIO var containerIO *cio.ContainerIO
t, err := cntr.Task(ctx, func(fifos *containerdio.FIFOSet) (containerdio.IO, error) { t, err := cntr.Task(ctx, func(fifos *containerdio.FIFOSet) (_ containerdio.IO, err error) {
stdoutWC, stderrWC, err := createContainerLoggers(meta.LogPath, meta.Config.GetTty()) stdoutWC, stderrWC, err := c.createContainerLoggers(meta.LogPath, meta.Config.GetTty())
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer func() {
if err != nil {
if stdoutWC != nil {
stdoutWC.Close()
}
if stderrWC != nil {
stderrWC.Close()
}
}
}()
containerIO, err = cio.NewContainerIO(id, containerIO, err = cio.NewContainerIO(id,
cio.WithFIFOs(fifos), cio.WithFIFOs(fifos),
) )


@ -344,9 +344,9 @@ func (c *criService) generateSandboxContainerSpec(id string, config *runtime.Pod
g.SetProcessCwd(imageConfig.WorkingDir) g.SetProcessCwd(imageConfig.WorkingDir)
} }
if len(imageConfig.Entrypoint) == 0 { if len(imageConfig.Entrypoint) == 0 && len(imageConfig.Cmd) == 0 {
// Pause image must have entrypoint. // Pause image must have entrypoint or cmd.
return nil, errors.Errorf("invalid empty entrypoint in image config %+v", imageConfig) return nil, errors.Errorf("invalid empty entrypoint and cmd in image config %+v", imageConfig)
} }
// Set process commands. // Set process commands.
g.SetProcessArgs(append(imageConfig.Entrypoint, imageConfig.Cmd...)) g.SetProcessArgs(append(imageConfig.Entrypoint, imageConfig.Cmd...))
@ -388,6 +388,19 @@ func (c *criService) generateSandboxContainerSpec(id string, config *runtime.Pod
g.RemoveLinuxNamespace(string(runtimespec.IPCNamespace)) // nolint: errcheck g.RemoveLinuxNamespace(string(runtimespec.IPCNamespace)) // nolint: errcheck
} }
// It's fine to generate the spec before the sandbox /dev/shm
// is actually created.
sandboxDevShm := c.getSandboxDevShm(id)
if nsOptions.GetIpc() == runtime.NamespaceMode_NODE {
sandboxDevShm = devShm
}
g.AddMount(runtimespec.Mount{
Source: sandboxDevShm,
Destination: devShm,
Type: "bind",
Options: []string{"rbind", "ro"},
})
selinuxOpt := securityContext.GetSelinuxOptions() selinuxOpt := securityContext.GetSelinuxOptions()
processLabel, mountLabel, err := initSelinuxOpts(selinuxOpt) processLabel, mountLabel, err := initSelinuxOpts(selinuxOpt)
if err != nil { if err != nil {
@ -415,7 +428,7 @@ func (c *criService) generateSandboxContainerSpec(id string, config *runtime.Pod
g.AddAnnotation(annotations.ContainerType, annotations.ContainerTypeSandbox) g.AddAnnotation(annotations.ContainerType, annotations.ContainerTypeSandbox)
g.AddAnnotation(annotations.SandboxID, id) g.AddAnnotation(annotations.SandboxID, id)
return g.Spec(), nil return g.Config, nil
} }
// setupSandboxFiles sets up necessary sandbox files including /dev/shm, /etc/hosts // setupSandboxFiles sets up necessary sandbox files including /dev/shm, /etc/hosts
@ -524,7 +537,7 @@ func (c *criService) setupPod(id string, path string, config *runtime.PodSandbox
} }
// Check if the default interface has IP config // Check if the default interface has IP config
if configs, ok := result.Interfaces[defaultIfName]; ok && len(configs.IPConfigs) > 0 { if configs, ok := result.Interfaces[defaultIfName]; ok && len(configs.IPConfigs) > 0 {
return configs.IPConfigs[0].IP.String(), nil return selectPodIP(configs.IPConfigs), nil
} }
// If it comes here then the result was invalid so destroy the pod network and return error // If it comes here then the result was invalid so destroy the pod network and return error
if err := c.teardownPod(id, path, config); err != nil { if err := c.teardownPod(id, path, config); err != nil {
@ -550,6 +563,16 @@ func toCNIPortMappings(criPortMappings []*runtime.PortMapping) []cni.PortMapping
return portMappings return portMappings
} }
// selectPodIP select an ip from the ip list. It prefers ipv4 more than ipv6.
func selectPodIP(ipConfigs []*cni.IPConfig) string {
for _, c := range ipConfigs {
if c.IP.To4() != nil {
return c.IP.String()
}
}
return ipConfigs[0].IP.String()
}
// untrustedWorkload returns true if the sandbox contains untrusted workload. // untrustedWorkload returns true if the sandbox contains untrusted workload.
func untrustedWorkload(config *runtime.PodSandboxConfig) bool { func untrustedWorkload(config *runtime.PodSandboxConfig) bool {
return config.GetAnnotations()[annotations.UntrustedWorkload] == "true" return config.GetAnnotations()[annotations.UntrustedWorkload] == "true"
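selectPodIP above makes the reported pod IP deterministic when the CNI result carries both address families, preferring IPv4. A small standalone illustration with made-up addresses; the IPConfig field usage matches the go-cni README example shown later in this commit:

```go
package main

import (
	"fmt"
	"net"

	cni "github.com/containerd/go-cni"
)

// selectPodIP prefers an IPv4 address and falls back to the first entry,
// the same logic as the helper added in this commit.
func selectPodIP(ipConfigs []*cni.IPConfig) string {
	for _, c := range ipConfigs {
		if c.IP.To4() != nil {
			return c.IP.String()
		}
	}
	return ipConfigs[0].IP.String()
}

func main() {
	ips := []*cni.IPConfig{
		{IP: net.ParseIP("fd00::5")},   // IPv6 listed first
		{IP: net.ParseIP("10.88.0.5")}, // IPv4 still wins
	}
	fmt.Println(selectPodIP(ips)) // 10.88.0.5
}
```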


@ -128,6 +128,10 @@ func NewCRIService(config criconfig.Config, client *containerd.Client) (CRIServi
selinux.SetDisabled() selinux.SetDisabled()
} }
if client.SnapshotService(c.config.ContainerdConfig.Snapshotter) == nil {
return nil, errors.Errorf("failed to find snapshotter %q", c.config.ContainerdConfig.Snapshotter)
}
c.imageFSPath = imageFSPath(config.ContainerdRootDir, config.ContainerdConfig.Snapshotter) c.imageFSPath = imageFSPath(config.ContainerdRootDir, config.ContainerdConfig.Snapshotter)
logrus.Infof("Get image filesystem path %q", c.imageFSPath) logrus.Infof("Get image filesystem path %q", c.imageFSPath)
@ -144,7 +148,7 @@ func NewCRIService(config criconfig.Config, client *containerd.Client) (CRIServi
// Try to load the config if it exists. Just log the error if load fails // Try to load the config if it exists. Just log the error if load fails
// This is not disruptive for containerd to panic // This is not disruptive for containerd to panic
if err := c.netPlugin.Load(cni.WithLoNetwork(), cni.WithDefaultConf()); err != nil { if err := c.netPlugin.Load(cni.WithLoNetwork, cni.WithDefaultConf); err != nil {
logrus.WithError(err).Error("Failed to load cni during init, please check CRI plugin status before setting up network for pods") logrus.WithError(err).Error("Failed to load cni during init, please check CRI plugin status before setting up network for pods")
} }
// prepare streaming server // prepare streaming server


@ -22,6 +22,7 @@ import (
goruntime "runtime" goruntime "runtime"
cni "github.com/containerd/go-cni" cni "github.com/containerd/go-cni"
"github.com/sirupsen/logrus"
"golang.org/x/net/context" "golang.org/x/net/context"
runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2" runtime "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
) )
@ -41,14 +42,16 @@ func (c *criService) Status(ctx context.Context, r *runtime.StatusRequest) (*run
Type: runtime.NetworkReady, Type: runtime.NetworkReady,
Status: true, Status: true,
} }
// Load the latest cni configuration to be in sync with the latest network configuration
if err := c.netPlugin.Load(cni.WithLoNetwork, cni.WithDefaultConf); err != nil {
logrus.WithError(err).Errorf("Failed to load cni configuration")
}
// Check the status of the cni initialization // Check the status of the cni initialization
if err := c.netPlugin.Status(); err != nil { if err := c.netPlugin.Status(); err != nil {
// If it is not initialized, then load the config and retry networkCondition.Status = false
if err = c.netPlugin.Load(cni.WithLoNetwork(), cni.WithDefaultConf()); err != nil { networkCondition.Reason = networkNotReadyReason
networkCondition.Status = false networkCondition.Message = fmt.Sprintf("Network plugin returns error: %v", err)
networkCondition.Reason = networkNotReadyReason
networkCondition.Message = fmt.Sprintf("Network plugin returns error: %v", err)
}
} }
resp := &runtime.StatusResponse{ resp := &runtime.StatusResponse{


@ -52,7 +52,7 @@ func (c *criService) UpdateRuntimeConfig(ctx context.Context, r *runtime.UpdateR
if err := c.netPlugin.Status(); err == nil { if err := c.netPlugin.Status(); err == nil {
logrus.Infof("Network plugin is ready, skip generating cni config from template %q", confTemplate) logrus.Infof("Network plugin is ready, skip generating cni config from template %q", confTemplate)
return &runtime.UpdateRuntimeConfigResponse{}, nil return &runtime.UpdateRuntimeConfigResponse{}, nil
} else if err := c.netPlugin.Load(cni.WithLoNetwork(), cni.WithDefaultConf()); err == nil { } else if err := c.netPlugin.Load(cni.WithLoNetwork, cni.WithDefaultConf); err == nil {
logrus.Infof("CNI config is successfully loaded, skip generating cni config from template %q", confTemplate) logrus.Infof("CNI config is successfully loaded, skip generating cni config from template %q", confTemplate)
return &runtime.UpdateRuntimeConfigResponse{}, nil return &runtime.UpdateRuntimeConfigResponse{}, nil
} }


@ -3,11 +3,11 @@ github.com/blang/semver v3.1.0
github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895 github.com/BurntSushi/toml a368813c5e648fee92e5f6c30e3944ff9d5e8895
github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130 github.com/containerd/cgroups fe281dd265766145e943a034aa41086474ea6130
github.com/containerd/console cb7008ab3d8359b78c5f464cb7cf160107ad5925 github.com/containerd/console 9290d21dc56074581f619579c43d970b4514bc08
github.com/containerd/containerd 84bebdd91d347c99069d1705b7d4e6d6f746160c github.com/containerd/containerd 84bebdd91d347c99069d1705b7d4e6d6f746160c
github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
github.com/containerd/go-cni f2d7272f12d045b16ed924f50e91f9f9cecc55a7 github.com/containerd/go-cni 5882530828ecf62032409b298a3e8b19e08b6534
github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd5 github.com/containerd/go-runc f271fa2021de855d4d918dbef83c5fe19db1bdd5
github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788 github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
github.com/containernetworking/cni v0.6.0 github.com/containernetworking/cni v0.6.0
@ -31,15 +31,17 @@ github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55
github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f github.com/hashicorp/go-multierror ed905158d87462226a13fe39ddf685ea65f1c11f
github.com/json-iterator/go 1.0.4 github.com/json-iterator/go f2b4162afba35581b6d4a50d3b8f34e33c144682
github.com/matttproud/golang_protobuf_extensions v1.0.0 github.com/matttproud/golang_protobuf_extensions v1.0.0
github.com/Microsoft/go-winio v0.4.7 github.com/Microsoft/go-winio v0.4.7
github.com/Microsoft/hcsshim v0.6.11 github.com/Microsoft/hcsshim v0.6.11
github.com/modern-go/reflect2 05fbef0ca5da472bbf96c9322b84a53edc03c9fd
github.com/modern-go/concurrent 1.0.3
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.1 github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340 github.com/opencontainers/runc 69663f0bd4b60df09991c08812a60108003fa340
github.com/opencontainers/runtime-spec v1.0.1 github.com/opencontainers/runtime-spec v1.0.1
github.com/opencontainers/runtime-tools 6073aff4ac61897f75895123f7e24135204a404d github.com/opencontainers/runtime-tools v0.6.0
github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206 github.com/opencontainers/selinux 4a2974bf1ee960774ffd517717f1f45325af0206
github.com/pkg/errors v0.8.0 github.com/pkg/errors v0.8.0
github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib v1.0.0
@ -49,12 +51,14 @@ github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
github.com/sirupsen/logrus v1.0.0 github.com/sirupsen/logrus v1.0.0
github.com/spf13/pflag v1.0.0
github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577 github.com/stevvooe/ttrpc d4528379866b0ce7e9d71f3eb96f0582fc374577
github.com/stretchr/testify v1.1.4 github.com/stretchr/testify v1.1.4
github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16 github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
github.com/tchap/go-patricia 5ad6cdb7538b0097d5598c7e57f0a24072adf7dc github.com/tchap/go-patricia 5ad6cdb7538b0097d5598c7e57f0a24072adf7dc
github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
github.com/xeipuuv/gojsonschema 1d523034197ff1f222f6429836dd36a2457a1874
golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067 golang.org/x/crypto 49796115aa4b964c318aad4f3084fdb41e9aa067
golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac golang.org/x/net b3756b4b77d7b13260a0a2ec658753cf48922eac
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
@ -65,9 +69,9 @@ google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
google.golang.org/grpc v1.12.0 google.golang.org/grpc v1.12.0
gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77 gopkg.in/yaml.v2 53feefa2559fb8dfa8d81baad31be332c97d6c77
k8s.io/api 7e796de92438aede7cb5d6bcf6c10f4fa65db560 k8s.io/api 9e5ffd1f1320950b238cfce291b926411f0af722
k8s.io/apimachinery fcb9a12f7875d01f8390b28faedc37dcf2e713b9 k8s.io/apimachinery ed135c5b96450fd24e5e981c708114fbbd950697
k8s.io/apiserver 4a8377c547bbff4576a35b5b5bf4026d9b5aa763 k8s.io/apiserver a90e3a95c2e91b944bfca8225c4e0d12e42a9eb5
k8s.io/client-go b9a0cf870f239c4a4ecfd3feb075a50e7cbe1473 k8s.io/client-go 03bfb9bdcfe5482795b999f39ca3ed9ad42ce5bb
k8s.io/kubernetes v1.10.0 k8s.io/kubernetes v1.11.0
k8s.io/utils 258e2a2fa64568210fbd6267cf1d8fd87c3cb86e k8s.io/utils 733eca437aa39379e4bcc25e726439dfca40fcff


@ -1,7 +1,10 @@
[![Build Status](https://travis-ci.org/containerd/go-cni.svg?branch=master)](https://travis-ci.org/containerd/go-cni)
# go-cni # go-cni
A generic CNI library to provide APIs for CNI plugin interactions. The library provides APIs to: A generic CNI library to provide APIs for CNI plugin interactions. The library provides APIs to:
- Load CNI network config from different sources
- Setup networks for container namespace - Setup networks for container namespace
- Remove networks from container namespace - Remove networks from container namespace
- Query status of CNI network plugin initialization - Query status of CNI network plugin initialization
@ -16,11 +19,17 @@ func main() {
defaultIfName := "eth0" defaultIfName := "eth0"
// Initialize library // Initialize library
l = gocni.New(gocni.WithMinNetworkCount(2), l = gocni.New(gocni.WithMinNetworkCount(2),
gocni.WithLoNetwork(),
gocni.WithPluginConfDir("/etc/mycni/net.d"), gocni.WithPluginConfDir("/etc/mycni/net.d"),
gocni.WithPluginDir([]string{"/opt/mycni/bin", "/opt/cni/bin"}), gocni.WithPluginDir([]string{"/opt/mycni/bin", "/opt/cni/bin"}),
gocni.WithDefaultIfName(defaultIfName)) gocni.WithDefaultIfName(defaultIfName))
// Load the cni configuration
err := l.Load(gocni.WithLoNetwork, gocni.WithDefaultConf)
if err != nil {
log.Errorf("failed to load cni configuration: %v", err)
return
}
// Setup network for namespace. // Setup network for namespace.
labels := map[string]string{ labels := map[string]string{
"K8S_POD_NAMESPACE": "namespace1", "K8S_POD_NAMESPACE": "namespace1",
@ -29,16 +38,10 @@ func main() {
} }
result, err := l.Setup(id, netns, gocni.WithLabels(labels)) result, err := l.Setup(id, netns, gocni.WithLabels(labels))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to setup network for namespace %q: %v", id, err) log.Errorf("failed to setup network for namespace %q: %v",id, err)
return
} }
defer func() {
if retErr != nil {
// Teardown network if an error is returned.
if err := l.Remove(id, netns, gocni.WithLabels(labels)); err != nil {
fmt.Errorf("Failed to destroy network for namespace %q", id)
}
}
}()
// Get IP of the default interface // Get IP of the default interface
IP := result.Interfaces[defaultIfName].IPConfigs[0].IP.String() IP := result.Interfaces[defaultIfName].IPConfigs[0].IP.String()
fmt.Printf("IP of the default interface %s:%s", defaultIfName, IP) fmt.Printf("IP of the default interface %s:%s", defaultIfName, IP)


@ -31,7 +31,7 @@ type CNI interface {
// Remove tears down the network of the namespace. // Remove tears down the network of the namespace.
Remove(id string, path string, opts ...NamespaceOpts) error Remove(id string, path string, opts ...NamespaceOpts) error
// Load loads the cni network config // Load loads the cni network config
Load(opts ...LoadOption) error Load(opts ...CNIOpt) error
// Status checks the status of the cni initialization // Status checks the status of the cni initialization
Status() error Status() error
} }
@ -59,7 +59,7 @@ func defaultCNIConfig() *libcni {
} }
} }
func New(config ...ConfigOption) (CNI, error) { func New(config ...CNIOpt) (CNI, error) {
cni := defaultCNIConfig() cni := defaultCNIConfig()
var err error var err error
for _, c := range config { for _, c := range config {
@ -70,8 +70,10 @@ func New(config ...ConfigOption) (CNI, error) {
return cni, nil return cni, nil
} }
func (c *libcni) Load(opts ...LoadOption) error { func (c *libcni) Load(opts ...CNIOpt) error {
var err error var err error
c.Lock()
defer c.Unlock()
// Reset the networks on a load operation to ensure // Reset the networks on a load operation to ensure
// config happens on a clean slate // config happens on a clean slate
c.reset() c.reset()
@ -81,30 +83,27 @@ func (c *libcni) Load(opts ...LoadOption) error {
return errors.Wrapf(ErrLoad, fmt.Sprintf("cni config load failed: %v", err)) return errors.Wrapf(ErrLoad, fmt.Sprintf("cni config load failed: %v", err))
} }
} }
return c.Status() return nil
} }
func (c *libcni) Status() error { func (c *libcni) Status() error {
c.RLock() c.RLock()
defer c.RUnlock() defer c.RUnlock()
if len(c.networks) < c.networkCount { return c.status()
return ErrCNINotInitialized
}
return nil
} }
// Setup setups the network in the namespace // Setup setups the network in the namespace
func (c *libcni) Setup(id string, path string, opts ...NamespaceOpts) (*CNIResult, error) { func (c *libcni) Setup(id string, path string, opts ...NamespaceOpts) (*CNIResult, error) {
if err:=c.Status();err!=nil{ c.RLock()
return nil,err defer c.RUnlock()
if err := c.status(); err != nil {
return nil, err
} }
ns, err := newNamespace(id, path, opts...) ns, err := newNamespace(id, path, opts...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
var results []*current.Result var results []*current.Result
c.RLock()
defer c.RUnlock()
for _, network := range c.networks { for _, network := range c.networks {
r, err := network.Attach(ns) r, err := network.Attach(ns)
if err != nil { if err != nil {
@ -117,15 +116,15 @@ func (c *libcni) Setup(id string, path string, opts ...NamespaceOpts) (*CNIResul
// Remove removes the network config from the namespace // Remove removes the network config from the namespace
func (c *libcni) Remove(id string, path string, opts ...NamespaceOpts) error { func (c *libcni) Remove(id string, path string, opts ...NamespaceOpts) error {
if err:=c.Status();err!=nil{ c.RLock()
return err defer c.RUnlock()
} if err := c.status(); err != nil {
return err
}
ns, err := newNamespace(id, path, opts...) ns, err := newNamespace(id, path, opts...)
if err != nil { if err != nil {
return err return err
} }
c.RLock()
defer c.RUnlock()
for _, network := range c.networks { for _, network := range c.networks {
if err := network.Remove(ns); err != nil { if err := network.Remove(ns); err != nil {
return err return err
@ -135,7 +134,12 @@ func (c *libcni) Remove(id string, path string, opts ...NamespaceOpts) error {
} }
func (c *libcni) reset() { func (c *libcni) reset() {
c.Lock()
defer c.Unlock()
c.networks = nil c.networks = nil
} }
func (c *libcni) status() error {
if len(c.networks) < c.networkCount {
return ErrCNINotInitialized
}
return nil
}
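A behavioural detail of the reworked Load: it no longer finishes with the Status check (it returns nil once all options are applied), and Setup and Remove now take the read lock and verify readiness internally. A caller that wants the old fail-fast behaviour can ask for it explicitly; a minimal sketch using only the public API shown in this diff:

```go
package main

import (
	"log"

	gocni "github.com/containerd/go-cni"
)

func main() {
	l, err := gocni.New(gocni.WithMinNetworkCount(2))
	if err != nil {
		log.Fatalf("failed to initialize cni library: %v", err)
	}
	// Load only applies the options now; it no longer reports readiness.
	if err := l.Load(gocni.WithLoNetwork, gocni.WithDefaultConf); err != nil {
		log.Fatalf("failed to load cni configuration: %v", err)
	}
	// Check readiness explicitly before setting up pod networks.
	if err := l.Status(); err != nil {
		log.Fatalf("cni not ready: %v", err)
	}
}
```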


@ -24,11 +24,11 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
) )
type ConfigOption func(c *libcni) error type CNIOpt func(c *libcni) error
// WithInterfacePrefix sets the prefix for network interfaces // WithInterfacePrefix sets the prefix for network interfaces
// e.g. eth or wlan // e.g. eth or wlan
func WithInterfacePrefix(prefix string) ConfigOption { func WithInterfacePrefix(prefix string) CNIOpt {
return func(c *libcni) error { return func(c *libcni) error {
c.prefix = prefix c.prefix = prefix
return nil return nil
@ -37,7 +37,7 @@ func WithInterfacePrefix(prefix string) ConfigOption {
// WithPluginDir can be used to set the locations of // WithPluginDir can be used to set the locations of
// the cni plugin binaries // the cni plugin binaries
func WithPluginDir(dirs []string) ConfigOption { func WithPluginDir(dirs []string) CNIOpt {
return func(c *libcni) error { return func(c *libcni) error {
c.pluginDirs = dirs c.pluginDirs = dirs
c.cniConfig = &cnilibrary.CNIConfig{Path: dirs} c.cniConfig = &cnilibrary.CNIConfig{Path: dirs}
@ -47,7 +47,7 @@ func WithPluginDir(dirs []string) ConfigOption {
// WithPluginConfDir can be used to configure the // WithPluginConfDir can be used to configure the
// cni configuration directory. // cni configuration directory.
func WithPluginConfDir(dir string) ConfigOption { func WithPluginConfDir(dir string) CNIOpt {
return func(c *libcni) error { return func(c *libcni) error {
c.pluginConfDir = dir c.pluginConfDir = dir
return nil return nil
@ -57,23 +57,17 @@ func WithPluginConfDir(dir string) ConfigOption {
// WithMinNetworkCount can be used to configure the // WithMinNetworkCount can be used to configure the
// minimum networks to be configured and initalized // minimum networks to be configured and initalized
// for the status to report success. By default its 1. // for the status to report success. By default its 1.
func WithMinNetworkCount(count int) ConfigOption { func WithMinNetworkCount(count int) CNIOpt {
return func(c *libcni) error { return func(c *libcni) error {
c.networkCount = count c.networkCount = count
return nil return nil
} }
} }
// LoadOption can be used with Load API
// to load network configuration from different
// sources.
type LoadOption func(c *libcni) error
// WithLoNetwork can be used to load the loopback // WithLoNetwork can be used to load the loopback
// network config. // network config.
func WithLoNetwork() LoadOption { func WithLoNetwork(c *libcni) error {
return func(c *libcni) error { loConfig, _ := cnilibrary.ConfListFromBytes([]byte(`{
loConfig, _ := cnilibrary.ConfListFromBytes([]byte(`{
"cniVersion": "0.3.1", "cniVersion": "0.3.1",
"name": "cni-loopback", "name": "cni-loopback",
"plugins": [{ "plugins": [{
@ -81,20 +75,17 @@ func WithLoNetwork() LoadOption {
}] }]
}`)) }`))
c.Lock() c.networks = append(c.networks, &Network{
defer c.Unlock() cni: c.cniConfig,
c.networks = append(c.networks,&Network{ config: loConfig,
cni: c.cniConfig, ifName: "lo",
config: loConfig, })
ifName: "lo", return nil
})
return nil
}
} }
// WithConf can be used to load config directly // WithConf can be used to load config directly
// from byte. // from byte.
func WithConf(bytes []byte) LoadOption { func WithConf(bytes []byte) CNIOpt {
return func(c *libcni) error { return func(c *libcni) error {
conf, err := cnilibrary.ConfFromBytes(bytes) conf, err := cnilibrary.ConfFromBytes(bytes)
if err != nil { if err != nil {
@ -104,8 +95,6 @@ func WithConf(bytes []byte) LoadOption {
if err != nil { if err != nil {
return err return err
} }
c.Lock()
defer c.Unlock()
c.networks = append(c.networks, &Network{ c.networks = append(c.networks, &Network{
cni: c.cniConfig, cni: c.cniConfig,
config: confList, config: confList,
@ -118,7 +107,7 @@ func WithConf(bytes []byte) LoadOption {
// WithConfFile can be used to load network config // WithConfFile can be used to load network config
// from an .conf file. Supported with absolute fileName // from an .conf file. Supported with absolute fileName
// with path only. // with path only.
func WithConfFile(fileName string) LoadOption { func WithConfFile(fileName string) CNIOpt {
return func(c *libcni) error { return func(c *libcni) error {
conf, err := cnilibrary.ConfFromFile(fileName) conf, err := cnilibrary.ConfFromFile(fileName)
if err != nil { if err != nil {
@ -129,8 +118,6 @@ func WithConfFile(fileName string) LoadOption {
if err != nil { if err != nil {
return err return err
} }
c.Lock()
defer c.Unlock()
c.networks = append(c.networks, &Network{ c.networks = append(c.networks, &Network{
cni: c.cniConfig, cni: c.cniConfig,
config: confList, config: confList,
@ -143,15 +130,13 @@ func WithConfFile(fileName string) LoadOption {
// WithConfListFile can be used to load network config // WithConfListFile can be used to load network config
// from an .conflist file. Supported with absolute fileName // from an .conflist file. Supported with absolute fileName
// with path only. // with path only.
func WithConfListFile(fileName string) LoadOption { func WithConfListFile(fileName string) CNIOpt {
return func(c *libcni) error { return func(c *libcni) error {
confList, err := cnilibrary.ConfListFromFile(fileName) confList, err := cnilibrary.ConfListFromFile(fileName)
if err != nil { if err != nil {
return err return err
} }
c.Lock() c.networks = append(c.networks, &Network{
defer c.Unlock()
c.networks = append(c.networks,&Network{
cni: c.cniConfig, cni: c.cniConfig,
config: confList, config: confList,
ifName: getIfName(c.prefix, 0), ifName: getIfName(c.prefix, 0),
@ -163,64 +148,60 @@ func WithConfListFile(fileName string) LoadOption {
// WithDefaultConf can be used to detect network config // WithDefaultConf can be used to detect network config
// files from the configured cni config directory and load // files from the configured cni config directory and load
// them. // them.
func WithDefaultConf() LoadOption { func WithDefaultConf(c *libcni) error {
return func(c *libcni) error { files, err := cnilibrary.ConfFiles(c.pluginConfDir, []string{".conf", ".conflist", ".json"})
files, err := cnilibrary.ConfFiles(c.pluginConfDir, []string{".conf", ".conflist", ".json"}) switch {
switch { case err != nil:
case err != nil: return errors.Wrapf(ErrRead, "failed to read config file: %v", err)
return errors.Wrapf(ErrRead, "failed to read config file: %v", err) case len(files) == 0:
case len(files) == 0: return errors.Wrapf(ErrCNINotInitialized, "no network config found in %s", c.pluginConfDir)
return errors.Wrapf(ErrCNINotInitialized, "no network config found in %s", c.pluginConfDir)
}
// files contains the network config files associated with cni network.
// Use lexicographical way as a defined order for network config files.
sort.Strings(files)
// Since the CNI spec does not specify a way to detect default networks,
// the convention chosen is - the first network configuration in the sorted
// list of network conf files as the default network and choose the default
// interface provided during init as the network interface for this default
// network. For every other network use a generated interface id.
i := 0
c.Lock()
defer c.Unlock()
for _, confFile := range files {
var confList *cnilibrary.NetworkConfigList
if strings.HasSuffix(confFile, ".conflist") {
confList, err = cnilibrary.ConfListFromFile(confFile)
if err != nil {
return errors.Wrapf(ErrInvalidConfig, "failed to load CNI config list file %s: %v", confFile, err)
}
} else {
conf, err := cnilibrary.ConfFromFile(confFile)
if err != nil {
return errors.Wrapf(ErrInvalidConfig, "failed to load CNI config file %s: %v", confFile, err)
}
// Ensure the config has a "type" so we know what plugin to run.
// Also catches the case where somebody put a conflist into a conf file.
if conf.Network.Type == "" {
return errors.Wrapf(ErrInvalidConfig, "network type not found in %s", confFile)
}
confList, err = cnilibrary.ConfListFromConf(conf)
if err != nil {
return errors.Wrapf(ErrInvalidConfig, "failed to convert CNI config file %s to list: %v", confFile, err)
}
}
if len(confList.Plugins) == 0 {
return errors.Wrapf(ErrInvalidConfig, "CNI config list %s has no networks, skipping", confFile)
}
c.networks = append(c.networks, &Network{
cni: c.cniConfig,
config: confList,
ifName: getIfName(c.prefix, i),
})
i++
}
if len(c.networks) == 0 {
return errors.Wrapf(ErrCNINotInitialized, "no valid networks found in %s", c.pluginDirs)
}
return nil
} }
// files contains the network config files associated with cni network.
// Use lexicographical way as a defined order for network config files.
sort.Strings(files)
// Since the CNI spec does not specify a way to detect default networks,
// the convention chosen is - the first network configuration in the sorted
// list of network conf files as the default network and choose the default
// interface provided during init as the network interface for this default
// network. For every other network use a generated interface id.
i := 0
for _, confFile := range files {
var confList *cnilibrary.NetworkConfigList
if strings.HasSuffix(confFile, ".conflist") {
confList, err = cnilibrary.ConfListFromFile(confFile)
if err != nil {
return errors.Wrapf(ErrInvalidConfig, "failed to load CNI config list file %s: %v", confFile, err)
}
} else {
conf, err := cnilibrary.ConfFromFile(confFile)
if err != nil {
return errors.Wrapf(ErrInvalidConfig, "failed to load CNI config file %s: %v", confFile, err)
}
// Ensure the config has a "type" so we know what plugin to run.
// Also catches the case where somebody put a conflist into a conf file.
if conf.Network.Type == "" {
return errors.Wrapf(ErrInvalidConfig, "network type not found in %s", confFile)
}
confList, err = cnilibrary.ConfListFromConf(conf)
if err != nil {
return errors.Wrapf(ErrInvalidConfig, "failed to convert CNI config file %s to list: %v", confFile, err)
}
}
if len(confList.Plugins) == 0 {
return errors.Wrapf(ErrInvalidConfig, "CNI config list %s has no networks, skipping", confFile)
}
c.networks = append(c.networks, &Network{
cni: c.cniConfig,
config: confList,
ifName: getIfName(c.prefix, i),
})
i++
}
if len(c.networks) == 0 {
return errors.Wrapf(ErrCNINotInitialized, "no valid networks found in %s", c.pluginDirs)
}
return nil
} }
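The comment above spells out the default-network convention: config files are sorted lexicographically, the first one becomes the default network and receives the default interface, and every other network gets a generated interface name. A minimal usage sketch, assuming the package also exposes a New constructor and a Load method that accepts CNIOpt values (neither appears in this hunk):

package main

import (
	"log"

	gocni "github.com/containerd/go-cni"
)

func main() {
	// Hypothetical constructor; only the CNIOpt-based load options are shown
	// in this diff.
	c, err := gocni.New()
	if err != nil {
		log.Fatal(err)
	}
	// WithDefaultConf is now a plain CNIOpt (passed without calling it): it
	// scans the plugin conf dir, sorts the files, and makes the first config
	// the default network.
	if err := c.Load(gocni.WithDefaultConf); err != nil {
		log.Fatal(err)
	}
}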

View File

@ -8,6 +8,8 @@
A high-performance 100% compatible drop-in replacement of "encoding/json" A high-performance 100% compatible drop-in replacement of "encoding/json"
You can also use thrift-like JSON using [thrift-iterator](https://github.com/thrift-iterator/go)
``` ```
Go developers, please join us at the DiDi Chuxing platform technology department: taowen@didichuxing.com
``` ```
@ -29,6 +31,9 @@ Raw Result (easyjson requires static code generation)
| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | | easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | | jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
Always benchmark with your own workload.
The result depends heavily on the data input.
# Usage # Usage
100% compatibility with standard lib 100% compatibility with standard lib
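For the compatibility claim above, the usual drop-in pattern looks like this (a sketch; ConfigCompatibleWithStandardLibrary is the stdlib-faithful configuration):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Bind a package-level "json" to the stdlib-compatible API and use it exactly
// like encoding/json.
var json = jsoniter.ConfigCompatibleWithStandardLibrary

func main() {
	type user struct {
		Name string `json:"name"`
	}
	out, _ := json.Marshal(user{Name: "gopher"})
	fmt.Println(string(out)) // {"name":"gopher"}

	var u user
	_ = json.Unmarshal(out, &u)
	fmt.Println(u.Name) // gopher
}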

View File

@ -16,15 +16,6 @@ func Unmarshal(data []byte, v interface{}) error {
return ConfigDefault.Unmarshal(data, v) return ConfigDefault.Unmarshal(data, v)
} }
func lastNotSpacePos(data []byte) int {
for i := len(data) - 1; i >= 0; i-- {
if data[i] != ' ' && data[i] != '\t' && data[i] != '\r' && data[i] != '\n' {
return i + 1
}
}
return 0
}
// UnmarshalFromString convenient method to read from string instead of []byte // UnmarshalFromString convenient method to read from string instead of []byte
func UnmarshalFromString(str string, v interface{}) error { func UnmarshalFromString(str string, v interface{}) error {
return ConfigDefault.UnmarshalFromString(str, v) return ConfigDefault.UnmarshalFromString(str, v)
@ -71,6 +62,11 @@ type Decoder struct {
// Decode decode JSON into interface{} // Decode decode JSON into interface{}
func (adapter *Decoder) Decode(obj interface{}) error { func (adapter *Decoder) Decode(obj interface{}) error {
if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
if !adapter.iter.loadMore() {
return io.EOF
}
}
adapter.iter.ReadVal(obj) adapter.iter.ReadVal(obj)
err := adapter.iter.Error err := adapter.iter.Error
if err == io.EOF { if err == io.EOF {
@ -81,7 +77,14 @@ func (adapter *Decoder) Decode(obj interface{}) error {
// More is there more? // More is there more?
func (adapter *Decoder) More() bool { func (adapter *Decoder) More() bool {
return adapter.iter.head != adapter.iter.tail iter := adapter.iter
if iter.Error != nil {
return false
}
if iter.head != iter.tail {
return true
}
return iter.loadMore()
} }
// Buffered remaining buffer // Buffered remaining buffer
@ -90,11 +93,21 @@ func (adapter *Decoder) Buffered() io.Reader {
return bytes.NewReader(remaining) return bytes.NewReader(remaining)
} }
// UseNumber for number JSON element, use float64 or json.NumberValue (alias of string) // UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (adapter *Decoder) UseNumber() { func (adapter *Decoder) UseNumber() {
origCfg := adapter.iter.cfg.configBeforeFrozen cfg := adapter.iter.cfg.configBeforeFrozen
origCfg.UseNumber = true cfg.UseNumber = true
adapter.iter.cfg = origCfg.Froze().(*frozenConfig) adapter.iter.cfg = cfg.frozeWithCacheReuse()
}
// DisallowUnknownFields causes the Decoder to return an error when the destination
// is a struct and the input contains object keys which do not match any
// non-ignored, exported fields in the destination.
func (adapter *Decoder) DisallowUnknownFields() {
cfg := adapter.iter.cfg.configBeforeFrozen
cfg.DisallowUnknownFields = true
adapter.iter.cfg = cfg.frozeWithCacheReuse()
} }
// NewEncoder same as json.NewEncoder // NewEncoder same as json.NewEncoder
@ -117,14 +130,16 @@ func (adapter *Encoder) Encode(val interface{}) error {
// SetIndent set the indention. Prefix is not supported // SetIndent set the indention. Prefix is not supported
func (adapter *Encoder) SetIndent(prefix, indent string) { func (adapter *Encoder) SetIndent(prefix, indent string) {
adapter.stream.cfg.indentionStep = len(indent) config := adapter.stream.cfg.configBeforeFrozen
config.IndentionStep = len(indent)
adapter.stream.cfg = config.frozeWithCacheReuse()
} }
// SetEscapeHTML escape html by default, set to false to disable // SetEscapeHTML escape html by default, set to false to disable
func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
config := adapter.stream.cfg.configBeforeFrozen config := adapter.stream.cfg.configBeforeFrozen
config.EscapeHTML = escapeHTML config.EscapeHTML = escapeHTML
adapter.stream.cfg = config.Froze().(*frozenConfig) adapter.stream.cfg = config.frozeWithCacheReuse()
} }
// Valid reports whether data is a valid JSON encoding. // Valid reports whether data is a valid JSON encoding.
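The adapter changes above make Decode and More consult the underlying reader instead of only the buffer, and add DisallowUnknownFields. A short sketch of how a caller sees them, with behaviour as implied by this hunk:

package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	type cfg struct {
		Name string `json:"name"`
	}

	dec := jsoniter.NewDecoder(strings.NewReader(`{"name":"a","extra":1}`))
	// Added in this update: reject object keys that do not map to a field.
	dec.DisallowUnknownFields()
	var c cfg
	if err := dec.Decode(&c); err != nil {
		fmt.Println("unknown field rejected:", err)
	}

	// More now also pulls from the reader when the buffer is empty.
	dec = jsoniter.NewDecoder(strings.NewReader(`{"name":"a"} {"name":"b"}`))
	for dec.More() {
		var v cfg
		if err := dec.Decode(&v); err != nil {
			break
		}
		fmt.Println(v.Name)
	}
}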

View File

@ -3,8 +3,11 @@ package jsoniter
import ( import (
"errors" "errors"
"fmt" "fmt"
"github.com/modern-go/reflect2"
"io" "io"
"reflect" "reflect"
"strconv"
"unsafe"
) )
// Any generic object representation. // Any generic object representation.
@ -25,7 +28,6 @@ type Any interface {
ToString() string ToString() string
ToVal(val interface{}) ToVal(val interface{})
Get(path ...interface{}) Any Get(path ...interface{}) Any
// TODO: add Set
Size() int Size() int
Keys() []string Keys() []string
GetInterface() interface{} GetInterface() interface{}
@ -35,7 +37,7 @@ type Any interface {
type baseAny struct{} type baseAny struct{}
func (any *baseAny) Get(path ...interface{}) Any { func (any *baseAny) Get(path ...interface{}) Any {
return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
} }
func (any *baseAny) Size() int { func (any *baseAny) Size() int {
@ -89,7 +91,7 @@ func Wrap(val interface{}) Any {
if isAny { if isAny {
return asAny return asAny
} }
typ := reflect.TypeOf(val) typ := reflect2.TypeOf(val)
switch typ.Kind() { switch typ.Kind() {
case reflect.Slice: case reflect.Slice:
return wrapArray(val) return wrapArray(val)
@ -100,6 +102,9 @@ func Wrap(val interface{}) Any {
case reflect.String: case reflect.String:
return WrapString(val.(string)) return WrapString(val.(string))
case reflect.Int: case reflect.Int:
if strconv.IntSize == 32 {
return WrapInt32(int32(val.(int)))
}
return WrapInt64(int64(val.(int))) return WrapInt64(int64(val.(int)))
case reflect.Int8: case reflect.Int8:
return WrapInt32(int32(val.(int8))) return WrapInt32(int32(val.(int8)))
@ -110,7 +115,15 @@ func Wrap(val interface{}) Any {
case reflect.Int64: case reflect.Int64:
return WrapInt64(val.(int64)) return WrapInt64(val.(int64))
case reflect.Uint: case reflect.Uint:
if strconv.IntSize == 32 {
return WrapUint32(uint32(val.(uint)))
}
return WrapUint64(uint64(val.(uint))) return WrapUint64(uint64(val.(uint)))
case reflect.Uintptr:
if ptrSize == 32 {
return WrapUint32(uint32(val.(uintptr)))
}
return WrapUint64(uint64(val.(uintptr)))
case reflect.Uint8: case reflect.Uint8:
return WrapUint32(uint32(val.(uint8))) return WrapUint32(uint32(val.(uint8)))
case reflect.Uint16: case reflect.Uint16:
@ -243,3 +256,66 @@ func locatePath(iter *Iterator, path []interface{}) Any {
} }
return iter.readAny() return iter.readAny()
} }
var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem()
func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder {
if typ == anyType {
return &directAnyCodec{}
}
if typ.Implements(anyType) {
return &anyCodec{
valType: typ,
}
}
return nil
}
func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder {
if typ == anyType {
return &directAnyCodec{}
}
if typ.Implements(anyType) {
return &anyCodec{
valType: typ,
}
}
return nil
}
type anyCodec struct {
valType reflect2.Type
}
func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
panic("not implemented")
}
func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
obj := codec.valType.UnsafeIndirect(ptr)
any := obj.(Any)
any.WriteTo(stream)
}
func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
obj := codec.valType.UnsafeIndirect(ptr)
any := obj.(Any)
return any.Size() == 0
}
type directAnyCodec struct {
}
func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*(*Any)(ptr) = iter.readAny()
}
func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
any := *(*Any)(ptr)
any.WriteTo(stream)
}
func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool {
any := *(*Any)(ptr)
return any.Size() == 0
}
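The new createDecoderOfAny/directAnyCodec wire jsoniter.Any into the reflect2-based decoder, so a field declared as jsoniter.Any is captured lazily; jsoniter.Get offers the same thing ad hoc. A minimal sketch:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"user":{"name":"gopher","age":12}}`)

	// Ad-hoc path lookup returning a lazily parsed Any.
	fmt.Println(jsoniter.Get(data, "user", "name").ToString()) // gopher

	// A struct field of type jsoniter.Any is decoded by directAnyCodec and can
	// be inspected later without spelling out the full schema.
	type envelope struct {
		User jsoniter.Any `json:"user"`
	}
	var e envelope
	if err := jsoniter.Unmarshal(data, &e); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(e.User.Get("age").ToInt()) // 12
}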

View File

@ -1,6 +1,9 @@
package jsoniter package jsoniter
import "unsafe" import (
"io"
"unsafe"
)
type numberLazyAny struct { type numberLazyAny struct {
baseAny baseAny
@ -29,7 +32,9 @@ func (any *numberLazyAny) ToInt() int {
iter := any.cfg.BorrowIterator(any.buf) iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter) defer any.cfg.ReturnIterator(iter)
val := iter.ReadInt() val := iter.ReadInt()
any.err = iter.Error if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val return val
} }
@ -37,7 +42,9 @@ func (any *numberLazyAny) ToInt32() int32 {
iter := any.cfg.BorrowIterator(any.buf) iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter) defer any.cfg.ReturnIterator(iter)
val := iter.ReadInt32() val := iter.ReadInt32()
any.err = iter.Error if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val return val
} }
@ -45,7 +52,9 @@ func (any *numberLazyAny) ToInt64() int64 {
iter := any.cfg.BorrowIterator(any.buf) iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter) defer any.cfg.ReturnIterator(iter)
val := iter.ReadInt64() val := iter.ReadInt64()
any.err = iter.Error if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val return val
} }
@ -53,7 +62,9 @@ func (any *numberLazyAny) ToUint() uint {
iter := any.cfg.BorrowIterator(any.buf) iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter) defer any.cfg.ReturnIterator(iter)
val := iter.ReadUint() val := iter.ReadUint()
any.err = iter.Error if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val return val
} }
@ -61,7 +72,9 @@ func (any *numberLazyAny) ToUint32() uint32 {
iter := any.cfg.BorrowIterator(any.buf) iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter) defer any.cfg.ReturnIterator(iter)
val := iter.ReadUint32() val := iter.ReadUint32()
any.err = iter.Error if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val return val
} }
@ -69,7 +82,9 @@ func (any *numberLazyAny) ToUint64() uint64 {
iter := any.cfg.BorrowIterator(any.buf) iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter) defer any.cfg.ReturnIterator(iter)
val := iter.ReadUint64() val := iter.ReadUint64()
any.err = iter.Error if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val return val
} }
@ -77,7 +92,9 @@ func (any *numberLazyAny) ToFloat32() float32 {
iter := any.cfg.BorrowIterator(any.buf) iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter) defer any.cfg.ReturnIterator(iter)
val := iter.ReadFloat32() val := iter.ReadFloat32()
any.err = iter.Error if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val return val
} }
@ -85,7 +102,9 @@ func (any *numberLazyAny) ToFloat64() float64 {
iter := any.cfg.BorrowIterator(any.buf) iter := any.cfg.BorrowIterator(any.buf)
defer any.cfg.ReturnIterator(iter) defer any.cfg.ReturnIterator(iter)
val := iter.ReadFloat64() val := iter.ReadFloat64()
any.err = iter.Error if iter.Error != nil && iter.Error != io.EOF {
any.err = iter.Error
}
return val return val
} }
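The net effect of the edits above is that an io.EOF produced by reading a number that ends exactly at the end of its buffer is no longer recorded as the Any's error; only real parse failures are kept. A small sketch (LastError is part of the Any interface, though not shown in this excerpt):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	n := jsoniter.Get([]byte(`{"count": 42}`), "count")
	// Before this change, a number sitting at the very end of its buffer could
	// leave io.EOF behind as the Any's error even though the value was fine.
	fmt.Println(n.ToInt())     // 42
	fmt.Println(n.LastError()) // <nil>
}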

View File

@ -14,7 +14,7 @@ func (any *stringAny) Get(path ...interface{}) Any {
if len(path) == 0 { if len(path) == 0 {
return any return any
} }
return &invalidAny{baseAny{}, fmt.Errorf("Get %v from simple value", path)} return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
} }
func (any *stringAny) Parse() *Iterator { func (any *stringAny) Parse() *Iterator {

View File

@ -2,11 +2,13 @@ package jsoniter
import ( import (
"encoding/json" "encoding/json"
"errors"
"io" "io"
"reflect" "reflect"
"sync/atomic" "sync"
"unsafe" "unsafe"
"github.com/modern-go/concurrent"
"github.com/modern-go/reflect2"
) )
// Config customize how the API should behave. // Config customize how the API should behave.
@ -17,21 +19,12 @@ type Config struct {
EscapeHTML bool EscapeHTML bool
SortMapKeys bool SortMapKeys bool
UseNumber bool UseNumber bool
DisallowUnknownFields bool
TagKey string TagKey string
OnlyTaggedField bool
ValidateJsonRawMessage bool ValidateJsonRawMessage bool
ObjectFieldMustBeSimpleString bool ObjectFieldMustBeSimpleString bool
} CaseSensitive bool
type frozenConfig struct {
configBeforeFrozen Config
sortMapKeys bool
indentionStep int
objectFieldMustBeSimpleString bool
decoderCache unsafe.Pointer
encoderCache unsafe.Pointer
extensions []Extension
streamPool chan *Stream
iteratorPool chan *Iterator
} }
// API the public interface of this package. // API the public interface of this package.
@ -48,6 +41,9 @@ type API interface {
NewEncoder(writer io.Writer) *Encoder NewEncoder(writer io.Writer) *Encoder
NewDecoder(reader io.Reader) *Decoder NewDecoder(reader io.Reader) *Decoder
Valid(data []byte) bool Valid(data []byte) bool
RegisterExtension(extension Extension)
DecoderOf(typ reflect2.Type) ValDecoder
EncoderOf(typ reflect2.Type) ValEncoder
} }
// ConfigDefault the default API // ConfigDefault the default API
@ -69,35 +65,120 @@ var ConfigFastest = Config{
ObjectFieldMustBeSimpleString: true, // do not unescape object field ObjectFieldMustBeSimpleString: true, // do not unescape object field
}.Froze() }.Froze()
type frozenConfig struct {
configBeforeFrozen Config
sortMapKeys bool
indentionStep int
objectFieldMustBeSimpleString bool
onlyTaggedField bool
disallowUnknownFields bool
decoderCache *concurrent.Map
encoderCache *concurrent.Map
extensions []Extension
streamPool *sync.Pool
iteratorPool *sync.Pool
caseSensitive bool
}
func (cfg *frozenConfig) initCache() {
cfg.decoderCache = concurrent.NewMap()
cfg.encoderCache = concurrent.NewMap()
}
func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) {
cfg.decoderCache.Store(cacheKey, decoder)
}
func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) {
cfg.encoderCache.Store(cacheKey, encoder)
}
func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder {
decoder, found := cfg.decoderCache.Load(cacheKey)
if found {
return decoder.(ValDecoder)
}
return nil
}
func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder {
encoder, found := cfg.encoderCache.Load(cacheKey)
if found {
return encoder.(ValEncoder)
}
return nil
}
var cfgCache = concurrent.NewMap()
func getFrozenConfigFromCache(cfg Config) *frozenConfig {
obj, found := cfgCache.Load(cfg)
if found {
return obj.(*frozenConfig)
}
return nil
}
func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) {
cfgCache.Store(cfg, frozenConfig)
}
// Froze forge API from config // Froze forge API from config
func (cfg Config) Froze() API { func (cfg Config) Froze() API {
// TODO: cache frozen config api := &frozenConfig{
frozenConfig := &frozenConfig{
sortMapKeys: cfg.SortMapKeys, sortMapKeys: cfg.SortMapKeys,
indentionStep: cfg.IndentionStep, indentionStep: cfg.IndentionStep,
objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString,
streamPool: make(chan *Stream, 16), onlyTaggedField: cfg.OnlyTaggedField,
iteratorPool: make(chan *Iterator, 16), disallowUnknownFields: cfg.DisallowUnknownFields,
caseSensitive: cfg.CaseSensitive,
} }
atomic.StorePointer(&frozenConfig.decoderCache, unsafe.Pointer(&map[string]ValDecoder{})) api.streamPool = &sync.Pool{
atomic.StorePointer(&frozenConfig.encoderCache, unsafe.Pointer(&map[string]ValEncoder{})) New: func() interface{} {
return NewStream(api, nil, 512)
},
}
api.iteratorPool = &sync.Pool{
New: func() interface{} {
return NewIterator(api)
},
}
api.initCache()
encoderExtension := EncoderExtension{}
decoderExtension := DecoderExtension{}
if cfg.MarshalFloatWith6Digits { if cfg.MarshalFloatWith6Digits {
frozenConfig.marshalFloatWith6Digits() api.marshalFloatWith6Digits(encoderExtension)
} }
if cfg.EscapeHTML { if cfg.EscapeHTML {
frozenConfig.escapeHTML() api.escapeHTML(encoderExtension)
} }
if cfg.UseNumber { if cfg.UseNumber {
frozenConfig.useNumber() api.useNumber(decoderExtension)
} }
if cfg.ValidateJsonRawMessage { if cfg.ValidateJsonRawMessage {
frozenConfig.validateJsonRawMessage() api.validateJsonRawMessage(encoderExtension)
} }
frozenConfig.configBeforeFrozen = cfg if len(encoderExtension) > 0 {
return frozenConfig api.extensions = append(api.extensions, encoderExtension)
}
if len(decoderExtension) > 0 {
api.extensions = append(api.extensions, decoderExtension)
}
api.configBeforeFrozen = cfg
return api
} }
func (cfg *frozenConfig) validateJsonRawMessage() { func (cfg Config) frozeWithCacheReuse() *frozenConfig {
api := getFrozenConfigFromCache(cfg)
if api != nil {
return api
}
api = cfg.Froze().(*frozenConfig)
addFrozenConfigToCache(cfg, api)
return api
}
func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
rawMessage := *(*json.RawMessage)(ptr) rawMessage := *(*json.RawMessage)(ptr)
iter := cfg.BorrowIterator([]byte(rawMessage)) iter := cfg.BorrowIterator([]byte(rawMessage))
@ -111,18 +192,23 @@ func (cfg *frozenConfig) validateJsonRawMessage() {
}, func(ptr unsafe.Pointer) bool { }, func(ptr unsafe.Pointer) bool {
return false return false
}} }}
cfg.addEncoderToCache(reflect.TypeOf((*json.RawMessage)(nil)).Elem(), encoder) extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder
cfg.addEncoderToCache(reflect.TypeOf((*RawMessage)(nil)).Elem(), encoder) extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder
} }
func (cfg *frozenConfig) useNumber() { func (cfg *frozenConfig) useNumber(extension DecoderExtension) {
cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
exitingValue := *((*interface{})(ptr))
if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr {
iter.ReadVal(exitingValue)
return
}
if iter.WhatIsNext() == NumberValue { if iter.WhatIsNext() == NumberValue {
*((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) *((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
} else { } else {
*((*interface{})(ptr)) = iter.Read() *((*interface{})(ptr)) = iter.Read()
} }
}}) }}
} }
func (cfg *frozenConfig) getTagKey() string { func (cfg *frozenConfig) getTagKey() string {
tagKey := cfg.configBeforeFrozen.TagKey tagKey := cfg.configBeforeFrozen.TagKey
@ -132,7 +218,7 @@ func (cfg *frozenConfig) getTagKey() string {
return tagKey return tagKey
} }
func (cfg *frozenConfig) registerExtension(extension Extension) { func (cfg *frozenConfig) RegisterExtension(extension Extension) {
cfg.extensions = append(cfg.extensions, extension) cfg.extensions = append(cfg.extensions, extension)
} }
@ -143,10 +229,6 @@ func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteFloat32Lossy(*((*float32)(ptr))) stream.WriteFloat32Lossy(*((*float32)(ptr)))
} }
func (encoder *lossyFloat32Encoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
return *((*float32)(ptr)) == 0 return *((*float32)(ptr)) == 0
} }
@ -158,20 +240,16 @@ func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteFloat64Lossy(*((*float64)(ptr))) stream.WriteFloat64Lossy(*((*float64)(ptr)))
} }
func (encoder *lossyFloat64Encoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
return *((*float64)(ptr)) == 0 return *((*float64)(ptr)) == 0
} }
// EnableLossyFloatMarshalling keeps 10**(-6) precision // EnableLossyFloatMarshalling keeps 10**(-6) precision
// for float variables for better performance. // for float variables for better performance.
func (cfg *frozenConfig) marshalFloatWith6Digits() { func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) {
// for better performance // for better performance
cfg.addEncoderToCache(reflect.TypeOf((*float32)(nil)).Elem(), &lossyFloat32Encoder{}) extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{}
cfg.addEncoderToCache(reflect.TypeOf((*float64)(nil)).Elem(), &lossyFloat64Encoder{}) extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{}
} }
type htmlEscapedStringEncoder struct { type htmlEscapedStringEncoder struct {
@ -182,56 +260,12 @@ func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stre
stream.WriteStringWithHTMLEscaped(str) stream.WriteStringWithHTMLEscaped(str)
} }
func (encoder *htmlEscapedStringEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return *((*string)(ptr)) == "" return *((*string)(ptr)) == ""
} }
func (cfg *frozenConfig) escapeHTML() { func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) {
cfg.addEncoderToCache(reflect.TypeOf((*string)(nil)).Elem(), &htmlEscapedStringEncoder{}) encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{}
}
func (cfg *frozenConfig) addDecoderToCache(cacheKey reflect.Type, decoder ValDecoder) {
done := false
for !done {
ptr := atomic.LoadPointer(&cfg.decoderCache)
cache := *(*map[reflect.Type]ValDecoder)(ptr)
copied := map[reflect.Type]ValDecoder{}
for k, v := range cache {
copied[k] = v
}
copied[cacheKey] = decoder
done = atomic.CompareAndSwapPointer(&cfg.decoderCache, ptr, unsafe.Pointer(&copied))
}
}
func (cfg *frozenConfig) addEncoderToCache(cacheKey reflect.Type, encoder ValEncoder) {
done := false
for !done {
ptr := atomic.LoadPointer(&cfg.encoderCache)
cache := *(*map[reflect.Type]ValEncoder)(ptr)
copied := map[reflect.Type]ValEncoder{}
for k, v := range cache {
copied[k] = v
}
copied[cacheKey] = encoder
done = atomic.CompareAndSwapPointer(&cfg.encoderCache, ptr, unsafe.Pointer(&copied))
}
}
func (cfg *frozenConfig) getDecoderFromCache(cacheKey reflect.Type) ValDecoder {
ptr := atomic.LoadPointer(&cfg.decoderCache)
cache := *(*map[reflect.Type]ValDecoder)(ptr)
return cache[cacheKey]
}
func (cfg *frozenConfig) getEncoderFromCache(cacheKey reflect.Type) ValEncoder {
ptr := atomic.LoadPointer(&cfg.encoderCache)
cache := *(*map[reflect.Type]ValEncoder)(ptr)
return cache[cacheKey]
} }
func (cfg *frozenConfig) cleanDecoders() { func (cfg *frozenConfig) cleanDecoders() {
@ -280,24 +314,22 @@ func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]
} }
newCfg := cfg.configBeforeFrozen newCfg := cfg.configBeforeFrozen
newCfg.IndentionStep = len(indent) newCfg.IndentionStep = len(indent)
return newCfg.Froze().Marshal(v) return newCfg.frozeWithCacheReuse().Marshal(v)
} }
func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
data := []byte(str) data := []byte(str)
data = data[:lastNotSpacePos(data)]
iter := cfg.BorrowIterator(data) iter := cfg.BorrowIterator(data)
defer cfg.ReturnIterator(iter) defer cfg.ReturnIterator(iter)
iter.ReadVal(v) iter.ReadVal(v)
if iter.head == iter.tail { c := iter.nextToken()
iter.loadMore() if c == 0 {
} if iter.Error == io.EOF {
if iter.Error == io.EOF { return nil
return nil }
} return iter.Error
if iter.Error == nil {
iter.ReportError("UnmarshalFromString", "there are bytes left after unmarshal")
} }
iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
return iter.Error return iter.Error
} }
@ -308,24 +340,17 @@ func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
} }
func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
data = data[:lastNotSpacePos(data)]
iter := cfg.BorrowIterator(data) iter := cfg.BorrowIterator(data)
defer cfg.ReturnIterator(iter) defer cfg.ReturnIterator(iter)
typ := reflect.TypeOf(v)
if typ.Kind() != reflect.Ptr {
// return non-pointer error
return errors.New("the second param must be ptr type")
}
iter.ReadVal(v) iter.ReadVal(v)
if iter.head == iter.tail { c := iter.nextToken()
iter.loadMore() if c == 0 {
} if iter.Error == io.EOF {
if iter.Error == io.EOF { return nil
return nil }
} return iter.Error
if iter.Error == nil {
iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
} }
iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
return iter.Error return iter.Error
} }
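With Froze building its extensions up front and frozeWithCacheReuse sharing one frozenConfig per distinct Config value, custom configurations stay cheap to freeze. The sketch below exercises two of the fields added in this update (CaseSensitive, DisallowUnknownFields); the expected output follows from their names and the decoder changes above, so treat it as illustrative:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Identical Config values now reuse the same frozen API internally, so this
// can be frozen once at package level and shared.
var strictJSON = jsoniter.Config{
	CaseSensitive:         true, // added in this update
	DisallowUnknownFields: true, // added in this update
}.Froze()

func main() {
	type user struct {
		Name string `json:"name"`
	}
	var u user
	err := strictJSON.Unmarshal([]byte(`{"Name":"gopher"}`), &u)
	// With CaseSensitive set, "Name" no longer matches the "name" tag, and
	// DisallowUnknownFields then rejects it as an unknown key.
	fmt.Println(u.Name == "", err != nil) // true true
}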

View File

@ -1,31 +0,0 @@
package jsoniter
import (
"encoding/json"
"strconv"
)
type Number string
// String returns the literal text of the number.
func (n Number) String() string { return string(n) }
// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
return strconv.ParseFloat(string(n), 64)
}
// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
return strconv.ParseInt(string(n), 10, 64)
}
func CastJsonNumber(val interface{}) (string, bool) {
switch typedVal := val.(type) {
case json.Number:
return string(typedVal), true
case Number:
return string(typedVal), true
}
return "", false
}
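This deleted file carried jsoniter's own Number alias and the CastJsonNumber helper; the useNumber decoder shown earlier stores a plain json.Number, which remains the portable way to keep full numeric precision. A sketch:

package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	api := jsoniter.Config{UseNumber: true}.Froze()
	var v map[string]interface{}
	if err := api.Unmarshal([]byte(`{"id": 9007199254740993}`), &v); err != nil {
		fmt.Println(err)
		return
	}
	id := v["id"].(json.Number) // kept as text, no float64 rounding
	n, _ := id.Int64()
	fmt.Println(id.String(), n) // 9007199254740993 9007199254740993
}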

View File

@ -1,721 +0,0 @@
package jsoniter
import (
"encoding"
"encoding/json"
"fmt"
"reflect"
"time"
"unsafe"
)
// ValDecoder is an internal type registered to cache as needed.
// Don't confuse jsoniter.ValDecoder with json.Decoder.
// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
//
// Reflection on type to create decoders, which is then cached
// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions
// 1. create instance of new value, for example *int will need a int to be allocated
// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New
// 3. assignment to map, both key and value will be reflect.Value
// For a simple struct binding, it will be reflect.Value free and allocation free
type ValDecoder interface {
Decode(ptr unsafe.Pointer, iter *Iterator)
}
// ValEncoder is an internal type registered to cache as needed.
// Don't confuse jsoniter.ValEncoder with json.Encoder.
// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
type ValEncoder interface {
IsEmpty(ptr unsafe.Pointer) bool
Encode(ptr unsafe.Pointer, stream *Stream)
EncodeInterface(val interface{}, stream *Stream)
}
type checkIsEmpty interface {
IsEmpty(ptr unsafe.Pointer) bool
}
// WriteToStream the default implementation for TypeEncoder method EncodeInterface
func WriteToStream(val interface{}, stream *Stream, encoder ValEncoder) {
e := (*emptyInterface)(unsafe.Pointer(&val))
if e.word == nil {
stream.WriteNil()
return
}
if reflect.TypeOf(val).Kind() == reflect.Ptr {
encoder.Encode(unsafe.Pointer(&e.word), stream)
} else {
encoder.Encode(e.word, stream)
}
}
var jsonNumberType reflect.Type
var jsoniterNumberType reflect.Type
var jsonRawMessageType reflect.Type
var jsoniterRawMessageType reflect.Type
var anyType reflect.Type
var marshalerType reflect.Type
var unmarshalerType reflect.Type
var textMarshalerType reflect.Type
var textUnmarshalerType reflect.Type
func init() {
jsonNumberType = reflect.TypeOf((*json.Number)(nil)).Elem()
jsoniterNumberType = reflect.TypeOf((*Number)(nil)).Elem()
jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem()
jsoniterRawMessageType = reflect.TypeOf((*RawMessage)(nil)).Elem()
anyType = reflect.TypeOf((*Any)(nil)).Elem()
marshalerType = reflect.TypeOf((*json.Marshaler)(nil)).Elem()
unmarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
}
type OptionalDecoder struct {
ValueType reflect.Type
ValueDecoder ValDecoder
}
func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if iter.ReadNil() {
*((*unsafe.Pointer)(ptr)) = nil
} else {
if *((*unsafe.Pointer)(ptr)) == nil {
//pointer to null, we have to allocate memory to hold the value
value := reflect.New(decoder.ValueType)
newPtr := extractInterface(value.Interface()).word
decoder.ValueDecoder.Decode(newPtr, iter)
*((*uintptr)(ptr)) = uintptr(newPtr)
} else {
//reuse existing instance
decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
}
}
}
type deferenceDecoder struct {
// only to deference a pointer
valueType reflect.Type
valueDecoder ValDecoder
}
func (decoder *deferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if *((*unsafe.Pointer)(ptr)) == nil {
//pointer to null, we have to allocate memory to hold the value
value := reflect.New(decoder.valueType)
newPtr := extractInterface(value.Interface()).word
decoder.valueDecoder.Decode(newPtr, iter)
*((*uintptr)(ptr)) = uintptr(newPtr)
} else {
//reuse existing instance
decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
}
}
type OptionalEncoder struct {
ValueEncoder ValEncoder
}
func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
if *((*unsafe.Pointer)(ptr)) == nil {
stream.WriteNil()
} else {
encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
}
}
func (encoder *OptionalEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return *((*unsafe.Pointer)(ptr)) == nil
}
type optionalMapEncoder struct {
valueEncoder ValEncoder
}
func (encoder *optionalMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
if *((*unsafe.Pointer)(ptr)) == nil {
stream.WriteNil()
} else {
encoder.valueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
}
}
func (encoder *optionalMapEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *optionalMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
p := *((*unsafe.Pointer)(ptr))
return p == nil || encoder.valueEncoder.IsEmpty(p)
}
type placeholderEncoder struct {
cfg *frozenConfig
cacheKey reflect.Type
}
func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
encoder.getRealEncoder().Encode(ptr, stream)
}
func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) {
encoder.getRealEncoder().EncodeInterface(val, stream)
}
func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.getRealEncoder().IsEmpty(ptr)
}
func (encoder *placeholderEncoder) getRealEncoder() ValEncoder {
for i := 0; i < 500; i++ {
realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey)
_, isPlaceholder := realDecoder.(*placeholderEncoder)
if isPlaceholder {
time.Sleep(10 * time.Millisecond)
} else {
return realDecoder
}
}
panic(fmt.Sprintf("real encoder not found for cache key: %v", encoder.cacheKey))
}
type placeholderDecoder struct {
cfg *frozenConfig
cacheKey reflect.Type
}
func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
for i := 0; i < 500; i++ {
realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey)
_, isPlaceholder := realDecoder.(*placeholderDecoder)
if isPlaceholder {
time.Sleep(10 * time.Millisecond)
} else {
realDecoder.Decode(ptr, iter)
return
}
}
panic(fmt.Sprintf("real decoder not found for cache key: %v", decoder.cacheKey))
}
// emptyInterface is the header for an interface{} value.
type emptyInterface struct {
typ unsafe.Pointer
word unsafe.Pointer
}
// emptyInterface is the header for an interface with method (not interface{})
type nonEmptyInterface struct {
// see ../runtime/iface.go:/Itab
itab *struct {
ityp unsafe.Pointer // static interface type
typ unsafe.Pointer // dynamic concrete type
link unsafe.Pointer
bad int32
unused int32
fun [100000]unsafe.Pointer // method table
}
word unsafe.Pointer
}
// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal
func (iter *Iterator) ReadVal(obj interface{}) {
typ := reflect.TypeOf(obj)
cacheKey := typ.Elem()
decoder, err := decoderOfType(iter.cfg, cacheKey)
if err != nil {
iter.Error = err
return
}
e := (*emptyInterface)(unsafe.Pointer(&obj))
decoder.Decode(e.word, iter)
}
// WriteVal copy the go interface into underlying JSON, same as json.Marshal
func (stream *Stream) WriteVal(val interface{}) {
if nil == val {
stream.WriteNil()
return
}
typ := reflect.TypeOf(val)
cacheKey := typ
encoder, err := encoderOfType(stream.cfg, cacheKey)
if err != nil {
stream.Error = err
return
}
encoder.EncodeInterface(val, stream)
}
type prefix string
func (p prefix) addToDecoder(decoder ValDecoder, err error) (ValDecoder, error) {
if err != nil {
return nil, fmt.Errorf("%s: %s", p, err.Error())
}
return decoder, err
}
func (p prefix) addToEncoder(encoder ValEncoder, err error) (ValEncoder, error) {
if err != nil {
return nil, fmt.Errorf("%s: %s", p, err.Error())
}
return encoder, err
}
func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
cacheKey := typ
decoder := cfg.getDecoderFromCache(cacheKey)
if decoder != nil {
return decoder, nil
}
decoder = getTypeDecoderFromExtension(typ)
if decoder != nil {
cfg.addDecoderToCache(cacheKey, decoder)
return decoder, nil
}
decoder = &placeholderDecoder{cfg: cfg, cacheKey: cacheKey}
cfg.addDecoderToCache(cacheKey, decoder)
decoder, err := createDecoderOfType(cfg, typ)
for _, extension := range extensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
cfg.addDecoderToCache(cacheKey, decoder)
return decoder, err
}
func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
typeName := typ.String()
if typ == jsonRawMessageType {
return &jsonRawMessageCodec{}, nil
}
if typ == jsoniterRawMessageType {
return &jsoniterRawMessageCodec{}, nil
}
if typ.AssignableTo(jsonNumberType) {
return &jsonNumberCodec{}, nil
}
if typ.AssignableTo(jsoniterNumberType) {
return &jsoniterNumberCodec{}, nil
}
if typ.Implements(unmarshalerType) {
templateInterface := reflect.New(typ).Elem().Interface()
var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)}
if typ.Kind() == reflect.Ptr {
decoder = &OptionalDecoder{typ.Elem(), decoder}
}
return decoder, nil
}
if reflect.PtrTo(typ).Implements(unmarshalerType) {
templateInterface := reflect.New(typ).Interface()
var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)}
return decoder, nil
}
if typ.Implements(textUnmarshalerType) {
templateInterface := reflect.New(typ).Elem().Interface()
var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)}
if typ.Kind() == reflect.Ptr {
decoder = &OptionalDecoder{typ.Elem(), decoder}
}
return decoder, nil
}
if reflect.PtrTo(typ).Implements(textUnmarshalerType) {
templateInterface := reflect.New(typ).Interface()
var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)}
return decoder, nil
}
if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
sliceDecoder, err := prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ))
if err != nil {
return nil, err
}
return &base64Codec{sliceDecoder: sliceDecoder}, nil
}
if typ.Implements(anyType) {
return &anyCodec{}, nil
}
switch typ.Kind() {
case reflect.String:
if typeName != "string" {
return decoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem())
}
return &stringCodec{}, nil
case reflect.Int:
if typeName != "int" {
return decoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem())
}
return &intCodec{}, nil
case reflect.Int8:
if typeName != "int8" {
return decoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem())
}
return &int8Codec{}, nil
case reflect.Int16:
if typeName != "int16" {
return decoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem())
}
return &int16Codec{}, nil
case reflect.Int32:
if typeName != "int32" {
return decoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem())
}
return &int32Codec{}, nil
case reflect.Int64:
if typeName != "int64" {
return decoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem())
}
return &int64Codec{}, nil
case reflect.Uint:
if typeName != "uint" {
return decoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem())
}
return &uintCodec{}, nil
case reflect.Uint8:
if typeName != "uint8" {
return decoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem())
}
return &uint8Codec{}, nil
case reflect.Uint16:
if typeName != "uint16" {
return decoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem())
}
return &uint16Codec{}, nil
case reflect.Uint32:
if typeName != "uint32" {
return decoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem())
}
return &uint32Codec{}, nil
case reflect.Uintptr:
if typeName != "uintptr" {
return decoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem())
}
return &uintptrCodec{}, nil
case reflect.Uint64:
if typeName != "uint64" {
return decoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem())
}
return &uint64Codec{}, nil
case reflect.Float32:
if typeName != "float32" {
return decoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem())
}
return &float32Codec{}, nil
case reflect.Float64:
if typeName != "float64" {
return decoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem())
}
return &float64Codec{}, nil
case reflect.Bool:
if typeName != "bool" {
return decoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem())
}
return &boolCodec{}, nil
case reflect.Interface:
if typ.NumMethod() == 0 {
return &emptyInterfaceCodec{}, nil
}
return &nonEmptyInterfaceCodec{}, nil
case reflect.Struct:
return prefix(fmt.Sprintf("[%s]", typeName)).addToDecoder(decoderOfStruct(cfg, typ))
case reflect.Array:
return prefix("[array]").addToDecoder(decoderOfArray(cfg, typ))
case reflect.Slice:
return prefix("[slice]").addToDecoder(decoderOfSlice(cfg, typ))
case reflect.Map:
return prefix("[map]").addToDecoder(decoderOfMap(cfg, typ))
case reflect.Ptr:
return prefix("[optional]").addToDecoder(decoderOfOptional(cfg, typ))
default:
return nil, fmt.Errorf("unsupported type: %v", typ)
}
}
func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
cacheKey := typ
encoder := cfg.getEncoderFromCache(cacheKey)
if encoder != nil {
return encoder, nil
}
encoder = getTypeEncoderFromExtension(typ)
if encoder != nil {
cfg.addEncoderToCache(cacheKey, encoder)
return encoder, nil
}
encoder = &placeholderEncoder{cfg: cfg, cacheKey: cacheKey}
cfg.addEncoderToCache(cacheKey, encoder)
encoder, err := createEncoderOfType(cfg, typ)
for _, extension := range extensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
cfg.addEncoderToCache(cacheKey, encoder)
return encoder, err
}
func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
if typ == jsonRawMessageType {
return &jsonRawMessageCodec{}, nil
}
if typ == jsoniterRawMessageType {
return &jsoniterRawMessageCodec{}, nil
}
if typ.AssignableTo(jsonNumberType) {
return &jsonNumberCodec{}, nil
}
if typ.AssignableTo(jsoniterNumberType) {
return &jsoniterNumberCodec{}, nil
}
if typ.Implements(marshalerType) {
checkIsEmpty, err := createCheckIsEmpty(typ)
if err != nil {
return nil, err
}
templateInterface := reflect.New(typ).Elem().Interface()
var encoder ValEncoder = &marshalerEncoder{
templateInterface: extractInterface(templateInterface),
checkIsEmpty: checkIsEmpty,
}
if typ.Kind() == reflect.Ptr {
encoder = &OptionalEncoder{encoder}
}
return encoder, nil
}
if reflect.PtrTo(typ).Implements(marshalerType) {
checkIsEmpty, err := createCheckIsEmpty(reflect.PtrTo(typ))
if err != nil {
return nil, err
}
templateInterface := reflect.New(typ).Interface()
var encoder ValEncoder = &marshalerEncoder{
templateInterface: extractInterface(templateInterface),
checkIsEmpty: checkIsEmpty,
}
return encoder, nil
}
if typ.Implements(textMarshalerType) {
checkIsEmpty, err := createCheckIsEmpty(typ)
if err != nil {
return nil, err
}
templateInterface := reflect.New(typ).Elem().Interface()
var encoder ValEncoder = &textMarshalerEncoder{
templateInterface: extractInterface(templateInterface),
checkIsEmpty: checkIsEmpty,
}
if typ.Kind() == reflect.Ptr {
encoder = &OptionalEncoder{encoder}
}
return encoder, nil
}
if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
return &base64Codec{}, nil
}
if typ.Implements(anyType) {
return &anyCodec{}, nil
}
return createEncoderOfSimpleType(cfg, typ)
}
func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) {
kind := typ.Kind()
switch kind {
case reflect.String:
return &stringCodec{}, nil
case reflect.Int:
return &intCodec{}, nil
case reflect.Int8:
return &int8Codec{}, nil
case reflect.Int16:
return &int16Codec{}, nil
case reflect.Int32:
return &int32Codec{}, nil
case reflect.Int64:
return &int64Codec{}, nil
case reflect.Uint:
return &uintCodec{}, nil
case reflect.Uint8:
return &uint8Codec{}, nil
case reflect.Uint16:
return &uint16Codec{}, nil
case reflect.Uint32:
return &uint32Codec{}, nil
case reflect.Uintptr:
return &uintptrCodec{}, nil
case reflect.Uint64:
return &uint64Codec{}, nil
case reflect.Float32:
return &float32Codec{}, nil
case reflect.Float64:
return &float64Codec{}, nil
case reflect.Bool:
return &boolCodec{}, nil
case reflect.Interface:
if typ.NumMethod() == 0 {
return &emptyInterfaceCodec{}, nil
}
return &nonEmptyInterfaceCodec{}, nil
case reflect.Struct:
return &structEncoder{}, nil
case reflect.Array:
return &arrayEncoder{}, nil
case reflect.Slice:
return &sliceEncoder{}, nil
case reflect.Map:
return &mapEncoder{}, nil
case reflect.Ptr:
return &OptionalEncoder{}, nil
default:
return nil, fmt.Errorf("unsupported type: %v", typ)
}
}
func createEncoderOfSimpleType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
typeName := typ.String()
kind := typ.Kind()
switch kind {
case reflect.String:
if typeName != "string" {
return encoderOfType(cfg, reflect.TypeOf((*string)(nil)).Elem())
}
return &stringCodec{}, nil
case reflect.Int:
if typeName != "int" {
return encoderOfType(cfg, reflect.TypeOf((*int)(nil)).Elem())
}
return &intCodec{}, nil
case reflect.Int8:
if typeName != "int8" {
return encoderOfType(cfg, reflect.TypeOf((*int8)(nil)).Elem())
}
return &int8Codec{}, nil
case reflect.Int16:
if typeName != "int16" {
return encoderOfType(cfg, reflect.TypeOf((*int16)(nil)).Elem())
}
return &int16Codec{}, nil
case reflect.Int32:
if typeName != "int32" {
return encoderOfType(cfg, reflect.TypeOf((*int32)(nil)).Elem())
}
return &int32Codec{}, nil
case reflect.Int64:
if typeName != "int64" {
return encoderOfType(cfg, reflect.TypeOf((*int64)(nil)).Elem())
}
return &int64Codec{}, nil
case reflect.Uint:
if typeName != "uint" {
return encoderOfType(cfg, reflect.TypeOf((*uint)(nil)).Elem())
}
return &uintCodec{}, nil
case reflect.Uint8:
if typeName != "uint8" {
return encoderOfType(cfg, reflect.TypeOf((*uint8)(nil)).Elem())
}
return &uint8Codec{}, nil
case reflect.Uint16:
if typeName != "uint16" {
return encoderOfType(cfg, reflect.TypeOf((*uint16)(nil)).Elem())
}
return &uint16Codec{}, nil
case reflect.Uint32:
if typeName != "uint32" {
return encoderOfType(cfg, reflect.TypeOf((*uint32)(nil)).Elem())
}
return &uint32Codec{}, nil
case reflect.Uintptr:
if typeName != "uintptr" {
return encoderOfType(cfg, reflect.TypeOf((*uintptr)(nil)).Elem())
}
return &uintptrCodec{}, nil
case reflect.Uint64:
if typeName != "uint64" {
return encoderOfType(cfg, reflect.TypeOf((*uint64)(nil)).Elem())
}
return &uint64Codec{}, nil
case reflect.Float32:
if typeName != "float32" {
return encoderOfType(cfg, reflect.TypeOf((*float32)(nil)).Elem())
}
return &float32Codec{}, nil
case reflect.Float64:
if typeName != "float64" {
return encoderOfType(cfg, reflect.TypeOf((*float64)(nil)).Elem())
}
return &float64Codec{}, nil
case reflect.Bool:
if typeName != "bool" {
return encoderOfType(cfg, reflect.TypeOf((*bool)(nil)).Elem())
}
return &boolCodec{}, nil
case reflect.Interface:
if typ.NumMethod() == 0 {
return &emptyInterfaceCodec{}, nil
}
return &nonEmptyInterfaceCodec{}, nil
case reflect.Struct:
return prefix(fmt.Sprintf("[%s]", typeName)).addToEncoder(encoderOfStruct(cfg, typ))
case reflect.Array:
return prefix("[array]").addToEncoder(encoderOfArray(cfg, typ))
case reflect.Slice:
return prefix("[slice]").addToEncoder(encoderOfSlice(cfg, typ))
case reflect.Map:
return prefix("[map]").addToEncoder(encoderOfMap(cfg, typ))
case reflect.Ptr:
return prefix("[optional]").addToEncoder(encoderOfOptional(cfg, typ))
default:
return nil, fmt.Errorf("unsupported type: %v", typ)
}
}
func decoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
elemType := typ.Elem()
decoder, err := decoderOfType(cfg, elemType)
if err != nil {
return nil, err
}
return &OptionalDecoder{elemType, decoder}, nil
}
func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
elemType := typ.Elem()
elemEncoder, err := encoderOfType(cfg, elemType)
if err != nil {
return nil, err
}
encoder := &OptionalEncoder{elemEncoder}
if elemType.Kind() == reflect.Map {
encoder = &OptionalEncoder{encoder}
}
return encoder, nil
}
func decoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
decoder, err := decoderOfType(cfg, typ.Elem())
if err != nil {
return nil, err
}
mapInterface := reflect.New(typ).Interface()
return &mapDecoder{typ, typ.Key(), typ.Elem(), decoder, extractInterface(mapInterface)}, nil
}
func extractInterface(val interface{}) emptyInterface {
return *((*emptyInterface)(unsafe.Pointer(&val)))
}
func encoderOfMap(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
elemType := typ.Elem()
encoder, err := encoderOfType(cfg, elemType)
if err != nil {
return nil, err
}
mapInterface := reflect.New(typ).Elem().Interface()
if cfg.sortMapKeys {
return &sortKeysMapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil
}
return &mapEncoder{typ, elemType, encoder, *((*emptyInterface)(unsafe.Pointer(&mapInterface)))}, nil
}
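The deleted createEncoderOfType above documents the lookup order the encoder used: json.Marshaler first, then encoding.TextMarshaler, then the built-in codecs. Assuming the reflect2-based replacement keeps that contract (it is not shown in this diff), the observable behaviour is:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Celsius implements json.Marshaler, so jsoniter uses it instead of the
// default float codec, exactly as the removed createEncoderOfType did.
type Celsius float64

func (c Celsius) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`"%.1f°C"`, float64(c))), nil
}

func main() {
	out, _ := jsoniter.Marshal(map[string]Celsius{"today": 21.5})
	fmt.Println(string(out)) // {"today":"21.5°C"}
}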

View File

@ -1,99 +0,0 @@
package jsoniter
import (
"fmt"
"io"
"reflect"
"unsafe"
)
func decoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
decoder, err := decoderOfType(cfg, typ.Elem())
if err != nil {
return nil, err
}
return &arrayDecoder{typ, typ.Elem(), decoder}, nil
}
func encoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
encoder, err := encoderOfType(cfg, typ.Elem())
if err != nil {
return nil, err
}
if typ.Elem().Kind() == reflect.Map {
encoder = &OptionalEncoder{encoder}
}
return &arrayEncoder{typ, typ.Elem(), encoder}, nil
}
type arrayEncoder struct {
arrayType reflect.Type
elemType reflect.Type
elemEncoder ValEncoder
}
func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteArrayStart()
elemPtr := unsafe.Pointer(ptr)
encoder.elemEncoder.Encode(elemPtr, stream)
for i := 1; i < encoder.arrayType.Len(); i++ {
stream.WriteMore()
elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size())
encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream)
}
stream.WriteArrayEnd()
if stream.Error != nil && stream.Error != io.EOF {
stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
}
}
func (encoder *arrayEncoder) EncodeInterface(val interface{}, stream *Stream) {
// special optimization for interface{}
e := (*emptyInterface)(unsafe.Pointer(&val))
if e.word == nil {
stream.WriteArrayStart()
stream.WriteNil()
stream.WriteArrayEnd()
return
}
elemType := encoder.arrayType.Elem()
if encoder.arrayType.Len() == 1 && (elemType.Kind() == reflect.Ptr || elemType.Kind() == reflect.Map) {
ptr := uintptr(e.word)
e.word = unsafe.Pointer(&ptr)
}
if reflect.TypeOf(val).Kind() == reflect.Ptr {
encoder.Encode(unsafe.Pointer(&e.word), stream)
} else {
encoder.Encode(e.word, stream)
}
}
func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return false
}
type arrayDecoder struct {
arrayType reflect.Type
elemType reflect.Type
elemDecoder ValDecoder
}
func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
decoder.doDecode(ptr, iter)
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
}
}
func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
offset := uintptr(0)
iter.ReadArrayCB(func(iter *Iterator) bool {
if offset < decoder.arrayType.Size() {
decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(ptr)+offset), iter)
offset += decoder.elemType.Size()
} else {
iter.Skip()
}
return true
})
}

View File

@ -1,244 +0,0 @@
package jsoniter
import (
"encoding"
"encoding/json"
"reflect"
"sort"
"strconv"
"unsafe"
)
type mapDecoder struct {
mapType reflect.Type
keyType reflect.Type
elemType reflect.Type
elemDecoder ValDecoder
mapInterface emptyInterface
}
func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
// dark magic to cast unsafe.Pointer back to interface{} using reflect.Type
mapInterface := decoder.mapInterface
mapInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
realVal := reflect.ValueOf(*realInterface).Elem()
if iter.ReadNil() {
realVal.Set(reflect.Zero(decoder.mapType))
return
}
if realVal.IsNil() {
realVal.Set(reflect.MakeMap(realVal.Type()))
}
iter.ReadMapCB(func(iter *Iterator, keyStr string) bool {
elem := reflect.New(decoder.elemType)
decoder.elemDecoder.Decode(unsafe.Pointer(elem.Pointer()), iter)
// to put into map, we have to use reflection
keyType := decoder.keyType
// TODO: remove this from loop
switch {
case keyType.Kind() == reflect.String:
realVal.SetMapIndex(reflect.ValueOf(keyStr).Convert(keyType), elem.Elem())
return true
case keyType.Implements(textUnmarshalerType):
textUnmarshaler := reflect.New(keyType.Elem()).Interface().(encoding.TextUnmarshaler)
err := textUnmarshaler.UnmarshalText([]byte(keyStr))
if err != nil {
iter.ReportError("read map key as TextUnmarshaler", err.Error())
return false
}
realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler), elem.Elem())
return true
case reflect.PtrTo(keyType).Implements(textUnmarshalerType):
textUnmarshaler := reflect.New(keyType).Interface().(encoding.TextUnmarshaler)
err := textUnmarshaler.UnmarshalText([]byte(keyStr))
if err != nil {
iter.ReportError("read map key as TextUnmarshaler", err.Error())
return false
}
realVal.SetMapIndex(reflect.ValueOf(textUnmarshaler).Elem(), elem.Elem())
return true
default:
switch keyType.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
n, err := strconv.ParseInt(keyStr, 10, 64)
if err != nil || reflect.Zero(keyType).OverflowInt(n) {
iter.ReportError("read map key as int64", "read int64 failed")
return false
}
realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem())
return true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
n, err := strconv.ParseUint(keyStr, 10, 64)
if err != nil || reflect.Zero(keyType).OverflowUint(n) {
iter.ReportError("read map key as uint64", "read uint64 failed")
return false
}
realVal.SetMapIndex(reflect.ValueOf(n).Convert(keyType), elem.Elem())
return true
}
}
iter.ReportError("read map key", "unexpected map key type "+keyType.String())
return true
})
}
type mapEncoder struct {
mapType reflect.Type
elemType reflect.Type
elemEncoder ValEncoder
mapInterface emptyInterface
}
func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
mapInterface := encoder.mapInterface
mapInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
realVal := reflect.ValueOf(*realInterface)
stream.WriteObjectStart()
for i, key := range realVal.MapKeys() {
if i != 0 {
stream.WriteMore()
}
encodeMapKey(key, stream)
if stream.indention > 0 {
stream.writeTwoBytes(byte(':'), byte(' '))
} else {
stream.writeByte(':')
}
val := realVal.MapIndex(key).Interface()
encoder.elemEncoder.EncodeInterface(val, stream)
}
stream.WriteObjectEnd()
}
func encodeMapKey(key reflect.Value, stream *Stream) {
if key.Kind() == reflect.String {
stream.WriteString(key.String())
return
}
if tm, ok := key.Interface().(encoding.TextMarshaler); ok {
buf, err := tm.MarshalText()
if err != nil {
stream.Error = err
return
}
stream.writeByte('"')
stream.Write(buf)
stream.writeByte('"')
return
}
switch key.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
stream.writeByte('"')
stream.WriteInt64(key.Int())
stream.writeByte('"')
return
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
stream.writeByte('"')
stream.WriteUint64(key.Uint())
stream.writeByte('"')
return
}
stream.Error = &json.UnsupportedTypeError{Type: key.Type()}
}
func (encoder *mapEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
mapInterface := encoder.mapInterface
mapInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
realVal := reflect.ValueOf(*realInterface)
return realVal.Len() == 0
}
type sortKeysMapEncoder struct {
mapType reflect.Type
elemType reflect.Type
elemEncoder ValEncoder
mapInterface emptyInterface
}
func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
mapInterface := encoder.mapInterface
mapInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
realVal := reflect.ValueOf(*realInterface)
// Extract and sort the keys.
keys := realVal.MapKeys()
sv := stringValues(make([]reflectWithString, len(keys)))
for i, v := range keys {
sv[i].v = v
if err := sv[i].resolve(); err != nil {
stream.Error = err
return
}
}
sort.Sort(sv)
stream.WriteObjectStart()
for i, key := range sv {
if i != 0 {
stream.WriteMore()
}
stream.WriteVal(key.s) // might need html escape, so can not WriteString directly
if stream.indention > 0 {
stream.writeTwoBytes(byte(':'), byte(' '))
} else {
stream.writeByte(':')
}
val := realVal.MapIndex(key.v).Interface()
encoder.elemEncoder.EncodeInterface(val, stream)
}
stream.WriteObjectEnd()
}
// stringValues is a slice of reflectWithString, pairing each map key's reflect.Value
// with its resolved string form. It implements sort.Interface to sort keys by string.
type stringValues []reflectWithString
type reflectWithString struct {
v reflect.Value
s string
}
func (w *reflectWithString) resolve() error {
if w.v.Kind() == reflect.String {
w.s = w.v.String()
return nil
}
if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok {
buf, err := tm.MarshalText()
w.s = string(buf)
return err
}
switch w.v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
w.s = strconv.FormatInt(w.v.Int(), 10)
return nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
w.s = strconv.FormatUint(w.v.Uint(), 10)
return nil
}
return &json.UnsupportedTypeError{Type: w.v.Type()}
}
func (sv stringValues) Len() int { return len(sv) }
func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
func (sv stringValues) Less(i, j int) bool { return sv[i].s < sv[j].s }
func (encoder *sortKeysMapEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
mapInterface := encoder.mapInterface
mapInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&mapInterface))
realVal := reflect.ValueOf(*realInterface)
return realVal.Len() == 0
}
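sortKeysMapEncoder is the encoder selected when key sorting is enabled in the frozen config; keys are resolved to strings first and then sorted. A short usage sketch, assuming only the public Config API (the sample map is made up):

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    json := jsoniter.Config{SortMapKeys: true}.Froze()
    out, _ := json.MarshalToString(map[int]string{3: "c", 1: "a", 2: "b"})
    // Integer keys are written as quoted strings and emitted in sorted order.
    fmt.Println(out) // {"1":"a","2":"b","3":"c"}
}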

View File

@ -1,764 +0,0 @@
package jsoniter
import (
"encoding"
"encoding/base64"
"encoding/json"
"reflect"
"unsafe"
)
type stringCodec struct {
}
func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*string)(ptr)) = iter.ReadString()
}
func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
str := *((*string)(ptr))
stream.WriteString(str)
}
func (codec *stringCodec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*string)(ptr)) == ""
}
type intCodec struct {
}
func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*int)(ptr)) = iter.ReadInt()
}
}
func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt(*((*int)(ptr)))
}
func (codec *intCodec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *intCodec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int)(ptr)) == 0
}
type uintptrCodec struct {
}
func (codec *uintptrCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*uintptr)(ptr)) = uintptr(iter.ReadUint64())
}
}
func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint64(uint64(*((*uintptr)(ptr))))
}
func (codec *uintptrCodec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *uintptrCodec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uintptr)(ptr)) == 0
}
type int8Codec struct {
}
func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*int8)(ptr)) = iter.ReadInt8()
}
}
func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt8(*((*int8)(ptr)))
}
func (codec *int8Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int8)(ptr)) == 0
}
type int16Codec struct {
}
func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*int16)(ptr)) = iter.ReadInt16()
}
}
func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt16(*((*int16)(ptr)))
}
func (codec *int16Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int16)(ptr)) == 0
}
type int32Codec struct {
}
func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*int32)(ptr)) = iter.ReadInt32()
}
}
func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt32(*((*int32)(ptr)))
}
func (codec *int32Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int32)(ptr)) == 0
}
type int64Codec struct {
}
func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*int64)(ptr)) = iter.ReadInt64()
}
}
func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt64(*((*int64)(ptr)))
}
func (codec *int64Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int64)(ptr)) == 0
}
type uintCodec struct {
}
func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*uint)(ptr)) = iter.ReadUint()
return
}
}
func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint(*((*uint)(ptr)))
}
func (codec *uintCodec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *uintCodec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint)(ptr)) == 0
}
type uint8Codec struct {
}
func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*uint8)(ptr)) = iter.ReadUint8()
}
}
func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint8(*((*uint8)(ptr)))
}
func (codec *uint8Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint8)(ptr)) == 0
}
type uint16Codec struct {
}
func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*uint16)(ptr)) = iter.ReadUint16()
}
}
func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint16(*((*uint16)(ptr)))
}
func (codec *uint16Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint16)(ptr)) == 0
}
type uint32Codec struct {
}
func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*uint32)(ptr)) = iter.ReadUint32()
}
}
func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint32(*((*uint32)(ptr)))
}
func (codec *uint32Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint32)(ptr)) == 0
}
type uint64Codec struct {
}
func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*uint64)(ptr)) = iter.ReadUint64()
}
}
func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint64(*((*uint64)(ptr)))
}
func (codec *uint64Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint64)(ptr)) == 0
}
type float32Codec struct {
}
func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*float32)(ptr)) = iter.ReadFloat32()
}
}
func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteFloat32(*((*float32)(ptr)))
}
func (codec *float32Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*float32)(ptr)) == 0
}
type float64Codec struct {
}
func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*float64)(ptr)) = iter.ReadFloat64()
}
}
func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteFloat64(*((*float64)(ptr)))
}
func (codec *float64Codec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*float64)(ptr)) == 0
}
type boolCodec struct {
}
func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*bool)(ptr)) = iter.ReadBool()
}
}
func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteBool(*((*bool)(ptr)))
}
func (codec *boolCodec) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, codec)
}
func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
return !(*((*bool)(ptr)))
}
type emptyInterfaceCodec struct {
}
func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
existing := *((*interface{})(ptr))
// Checking for both typed and untyped nil pointers.
if existing != nil &&
reflect.TypeOf(existing).Kind() == reflect.Ptr &&
!reflect.ValueOf(existing).IsNil() {
var ptrToExisting interface{}
for {
elem := reflect.ValueOf(existing).Elem()
if elem.Kind() != reflect.Ptr || elem.IsNil() {
break
}
ptrToExisting = existing
existing = elem.Interface()
}
if iter.ReadNil() {
if ptrToExisting != nil {
nilPtr := reflect.Zero(reflect.TypeOf(ptrToExisting).Elem())
reflect.ValueOf(ptrToExisting).Elem().Set(nilPtr)
} else {
*((*interface{})(ptr)) = nil
}
} else {
iter.ReadVal(existing)
}
return
}
if iter.ReadNil() {
*((*interface{})(ptr)) = nil
} else {
*((*interface{})(ptr)) = iter.Read()
}
}
func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteVal(*((*interface{})(ptr)))
}
func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) {
stream.WriteVal(val)
}
func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool {
emptyInterface := (*emptyInterface)(ptr)
return emptyInterface.typ == nil
}
type nonEmptyInterfaceCodec struct {
}
func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
nonEmptyInterface := (*nonEmptyInterface)(ptr)
if nonEmptyInterface.itab == nil {
iter.ReportError("read non-empty interface", "do not know which concrete type to decode to")
return
}
var i interface{}
e := (*emptyInterface)(unsafe.Pointer(&i))
e.typ = nonEmptyInterface.itab.typ
e.word = nonEmptyInterface.word
iter.ReadVal(&i)
if e.word == nil {
nonEmptyInterface.itab = nil
}
nonEmptyInterface.word = e.word
}
func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
nonEmptyInterface := (*nonEmptyInterface)(ptr)
var i interface{}
if nonEmptyInterface.itab != nil {
e := (*emptyInterface)(unsafe.Pointer(&i))
e.typ = nonEmptyInterface.itab.typ
e.word = nonEmptyInterface.word
}
stream.WriteVal(i)
}
func (codec *nonEmptyInterfaceCodec) EncodeInterface(val interface{}, stream *Stream) {
stream.WriteVal(val)
}
func (codec *nonEmptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool {
nonEmptyInterface := (*nonEmptyInterface)(ptr)
return nonEmptyInterface.word == nil
}
type anyCodec struct {
}
func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*Any)(ptr)) = iter.ReadAny()
}
func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
(*((*Any)(ptr))).WriteTo(stream)
}
func (codec *anyCodec) EncodeInterface(val interface{}, stream *Stream) {
(val.(Any)).WriteTo(stream)
}
func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
return (*((*Any)(ptr))).Size() == 0
}
type jsonNumberCodec struct {
}
func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
switch iter.WhatIsNext() {
case StringValue:
*((*json.Number)(ptr)) = json.Number(iter.ReadString())
case NilValue:
iter.skipFourBytes('n', 'u', 'l', 'l')
*((*json.Number)(ptr)) = ""
default:
*((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
}
}
func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteRaw(string(*((*json.Number)(ptr))))
}
func (codec *jsonNumberCodec) EncodeInterface(val interface{}, stream *Stream) {
stream.WriteRaw(string(val.(json.Number)))
}
func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*json.Number)(ptr))) == 0
}
type jsoniterNumberCodec struct {
}
func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
switch iter.WhatIsNext() {
case StringValue:
*((*Number)(ptr)) = Number(iter.ReadString())
case NilValue:
iter.skipFourBytes('n', 'u', 'l', 'l')
*((*Number)(ptr)) = ""
default:
*((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
}
}
func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteRaw(string(*((*Number)(ptr))))
}
func (codec *jsoniterNumberCodec) EncodeInterface(val interface{}, stream *Stream) {
stream.WriteRaw(string(val.(Number)))
}
func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*Number)(ptr))) == 0
}
type jsonRawMessageCodec struct {
}
func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes())
}
func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
}
func (codec *jsonRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) {
stream.WriteRaw(string(val.(json.RawMessage)))
}
func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*json.RawMessage)(ptr))) == 0
}
type jsoniterRawMessageCodec struct {
}
func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes())
}
func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteRaw(string(*((*RawMessage)(ptr))))
}
func (codec *jsoniterRawMessageCodec) EncodeInterface(val interface{}, stream *Stream) {
stream.WriteRaw(string(val.(RawMessage)))
}
func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*RawMessage)(ptr))) == 0
}
type base64Codec struct {
sliceDecoder ValDecoder
}
func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if iter.ReadNil() {
ptrSlice := (*sliceHeader)(ptr)
ptrSlice.Len = 0
ptrSlice.Cap = 0
ptrSlice.Data = nil
return
}
switch iter.WhatIsNext() {
case StringValue:
encoding := base64.StdEncoding
src := iter.SkipAndReturnBytes()
src = src[1 : len(src)-1]
decodedLen := encoding.DecodedLen(len(src))
dst := make([]byte, decodedLen)
len, err := encoding.Decode(dst, src)
if err != nil {
iter.ReportError("decode base64", err.Error())
} else {
dst = dst[:len]
dstSlice := (*sliceHeader)(unsafe.Pointer(&dst))
ptrSlice := (*sliceHeader)(ptr)
ptrSlice.Data = dstSlice.Data
ptrSlice.Cap = dstSlice.Cap
ptrSlice.Len = dstSlice.Len
}
case ArrayValue:
codec.sliceDecoder.Decode(ptr, iter)
default:
iter.ReportError("base64Codec", "invalid input")
}
}
func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
src := *((*[]byte)(ptr))
if len(src) == 0 {
stream.WriteNil()
return
}
encoding := base64.StdEncoding
stream.writeByte('"')
toGrow := encoding.EncodedLen(len(src))
stream.ensure(toGrow)
encoding.Encode(stream.buf[stream.n:], src)
stream.n += toGrow
stream.writeByte('"')
}
func (codec *base64Codec) EncodeInterface(val interface{}, stream *Stream) {
ptr := extractInterface(val).word
src := *((*[]byte)(ptr))
if len(src) == 0 {
stream.WriteNil()
return
}
encoding := base64.StdEncoding
stream.writeByte('"')
toGrow := encoding.EncodedLen(len(src))
stream.ensure(toGrow)
encoding.Encode(stream.buf[stream.n:], src)
stream.n += toGrow
stream.writeByte('"')
}
func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*[]byte)(ptr))) == 0
}
type stringModeNumberDecoder struct {
elemDecoder ValDecoder
}
func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
c := iter.nextToken()
if c != '"' {
iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
return
}
decoder.elemDecoder.Decode(ptr, iter)
if iter.Error != nil {
return
}
c = iter.readByte()
if c != '"' {
iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
return
}
}
type stringModeStringDecoder struct {
elemDecoder ValDecoder
cfg *frozenConfig
}
func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
decoder.elemDecoder.Decode(ptr, iter)
str := *((*string)(ptr))
tempIter := decoder.cfg.BorrowIterator([]byte(str))
defer decoder.cfg.ReturnIterator(tempIter)
*((*string)(ptr)) = tempIter.ReadString()
}
type stringModeNumberEncoder struct {
elemEncoder ValEncoder
}
func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.writeByte('"')
encoder.elemEncoder.Encode(ptr, stream)
stream.writeByte('"')
}
func (encoder *stringModeNumberEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.elemEncoder.IsEmpty(ptr)
}
type stringModeStringEncoder struct {
elemEncoder ValEncoder
cfg *frozenConfig
}
func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
tempStream := encoder.cfg.BorrowStream(nil)
defer encoder.cfg.ReturnStream(tempStream)
encoder.elemEncoder.Encode(ptr, tempStream)
stream.WriteString(string(tempStream.Buffer()))
}
func (encoder *stringModeStringEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.elemEncoder.IsEmpty(ptr)
}
type marshalerEncoder struct {
templateInterface emptyInterface
checkIsEmpty checkIsEmpty
}
func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
templateInterface := encoder.templateInterface
templateInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&templateInterface))
marshaler, ok := (*realInterface).(json.Marshaler)
if !ok {
stream.WriteVal(nil)
return
}
bytes, err := marshaler.MarshalJSON()
if err != nil {
stream.Error = err
} else {
stream.Write(bytes)
}
}
func (encoder *marshalerEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.checkIsEmpty.IsEmpty(ptr)
}
type textMarshalerEncoder struct {
templateInterface emptyInterface
checkIsEmpty checkIsEmpty
}
func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
templateInterface := encoder.templateInterface
templateInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&templateInterface))
marshaler := (*realInterface).(encoding.TextMarshaler)
bytes, err := marshaler.MarshalText()
if err != nil {
stream.Error = err
} else {
stream.WriteString(string(bytes))
}
}
func (encoder *textMarshalerEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.checkIsEmpty.IsEmpty(ptr)
}
type unmarshalerDecoder struct {
templateInterface emptyInterface
}
func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
templateInterface := decoder.templateInterface
templateInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&templateInterface))
unmarshaler := (*realInterface).(json.Unmarshaler)
iter.nextToken()
iter.unreadByte() // skip spaces
bytes := iter.SkipAndReturnBytes()
err := unmarshaler.UnmarshalJSON(bytes)
if err != nil {
iter.ReportError("unmarshalerDecoder", err.Error())
}
}
type textUnmarshalerDecoder struct {
templateInterface emptyInterface
}
func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
templateInterface := decoder.templateInterface
templateInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&templateInterface))
unmarshaler := (*realInterface).(encoding.TextUnmarshaler)
str := iter.ReadString()
err := unmarshaler.UnmarshalText([]byte(str))
if err != nil {
iter.ReportError("textUnmarshalerDecoder", err.Error())
}
}
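Together these codecs keep jsoniter compatible with encoding/json for json.Marshaler, encoding.TextMarshaler and []byte values. A small sketch of that observable behavior; the payload struct and sample values are illustrative:

package main

import (
    "fmt"
    "net"

    jsoniter "github.com/json-iterator/go"
)

type payload struct {
    Addr net.IP `json:"addr"` // implements encoding.TextMarshaler
    Blob []byte `json:"blob"` // handled by base64Codec
}

func main() {
    out, _ := jsoniter.Marshal(payload{Addr: net.ParseIP("10.0.0.1"), Blob: []byte("hi")})
    fmt.Println(string(out)) // {"addr":"10.0.0.1","blob":"aGk="}
}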

View File

@ -1,147 +0,0 @@
package jsoniter
import (
"fmt"
"io"
"reflect"
"unsafe"
)
func decoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
decoder, err := decoderOfType(cfg, typ.Elem())
if err != nil {
return nil, err
}
return &sliceDecoder{typ, typ.Elem(), decoder}, nil
}
func encoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
encoder, err := encoderOfType(cfg, typ.Elem())
if err != nil {
return nil, err
}
if typ.Elem().Kind() == reflect.Map {
encoder = &OptionalEncoder{encoder}
}
return &sliceEncoder{typ, typ.Elem(), encoder}, nil
}
type sliceEncoder struct {
sliceType reflect.Type
elemType reflect.Type
elemEncoder ValEncoder
}
func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
slice := (*sliceHeader)(ptr)
if slice.Data == nil {
stream.WriteNil()
return
}
if slice.Len == 0 {
stream.WriteEmptyArray()
return
}
stream.WriteArrayStart()
elemPtr := unsafe.Pointer(slice.Data)
encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream)
for i := 1; i < slice.Len; i++ {
stream.WriteMore()
elemPtr = unsafe.Pointer(uintptr(elemPtr) + encoder.elemType.Size())
encoder.elemEncoder.Encode(unsafe.Pointer(elemPtr), stream)
}
stream.WriteArrayEnd()
if stream.Error != nil && stream.Error != io.EOF {
stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
}
}
func (encoder *sliceEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
slice := (*sliceHeader)(ptr)
return slice.Len == 0
}
type sliceDecoder struct {
sliceType reflect.Type
elemType reflect.Type
elemDecoder ValDecoder
}
// sliceHeader is a safe version of SliceHeader used within this package.
type sliceHeader struct {
Data unsafe.Pointer
Len int
Cap int
}
func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
decoder.doDecode(ptr, iter)
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
}
}
func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
slice := (*sliceHeader)(ptr)
if iter.ReadNil() {
slice.Len = 0
slice.Cap = 0
slice.Data = nil
return
}
reuseSlice(slice, decoder.sliceType, 4)
slice.Len = 0
offset := uintptr(0)
iter.ReadArrayCB(func(iter *Iterator) bool {
growOne(slice, decoder.sliceType, decoder.elemType)
decoder.elemDecoder.Decode(unsafe.Pointer(uintptr(slice.Data)+offset), iter)
offset += decoder.elemType.Size()
return true
})
}
// growOne grows the slice so that it can hold one more value, allocating
// a larger backing array and copying the existing elements if needed.
func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Type) {
newLen := slice.Len + 1
if newLen <= slice.Cap {
slice.Len = newLen
return
}
newCap := slice.Cap
if newCap == 0 {
newCap = 1
} else {
for newCap < newLen {
if slice.Len < 1024 {
newCap += newCap
} else {
newCap += newCap / 4
}
}
}
newVal := reflect.MakeSlice(sliceType, newLen, newCap)
dst := unsafe.Pointer(newVal.Pointer())
// copy old array into new array
originalBytesCount := slice.Len * int(elementType.Size())
srcSliceHeader := (unsafe.Pointer)(&sliceHeader{slice.Data, originalBytesCount, originalBytesCount})
dstSliceHeader := (unsafe.Pointer)(&sliceHeader{dst, originalBytesCount, originalBytesCount})
copy(*(*[]byte)(dstSliceHeader), *(*[]byte)(srcSliceHeader))
slice.Data = dst
slice.Len = newLen
slice.Cap = newCap
}
func reuseSlice(slice *sliceHeader, sliceType reflect.Type, expectedCap int) {
if expectedCap <= slice.Cap {
return
}
newVal := reflect.MakeSlice(sliceType, 0, expectedCap)
dst := unsafe.Pointer(newVal.Pointer())
slice.Data = dst
slice.Cap = expectedCap
}
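The decoder above resets the destination slice and reuses its backing array when the capacity is sufficient, growing it geometrically otherwise. A round-trip sketch (the input is made up):

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    nums := make([]int, 0, 8) // existing capacity can be reused by the decoder
    if err := jsoniter.Unmarshal([]byte(`[1,2,3,4,5]`), &nums); err != nil {
        panic(err)
    }
    fmt.Println(nums) // [1 2 3 4 5]
}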

View File

@ -1,320 +0,0 @@
package jsoniter
var digits []uint32
func init() {
digits = make([]uint32, 1000)
for i := uint32(0); i < 1000; i++ {
digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
if i < 10 {
digits[i] += 2 << 24
} else if i < 100 {
digits[i] += 1 << 24
}
}
}
func writeFirstBuf(buf []byte, v uint32, n int) int {
start := v >> 24
if start == 0 {
buf[n] = byte(v >> 16)
n++
buf[n] = byte(v >> 8)
n++
} else if start == 1 {
buf[n] = byte(v >> 8)
n++
}
buf[n] = byte(v)
n++
return n
}
func writeBuf(buf []byte, v uint32, n int) {
buf[n] = byte(v >> 16)
buf[n+1] = byte(v >> 8)
buf[n+2] = byte(v)
}
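Each entry of the digits table packs one value in 0..999: the low three bytes hold the ASCII digits and the top byte records how many leading digits to skip (2 for one-digit values, 1 for two-digit, 0 for three-digit), which is exactly what writeFirstBuf consults. A standalone sketch of the same packing, for illustration only:

package main

import "fmt"

func main() {
    pack := func(i uint32) uint32 {
        v := (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
        if i < 10 {
            v += 2 << 24
        } else if i < 100 {
            v += 1 << 24
        }
        return v
    }
    v := pack(42)
    // skip=1, digits='0' '4' '2': writeFirstBuf drops one digit and emits "42".
    fmt.Printf("skip=%d digits=%q %q %q\n", v>>24, byte(v>>16), byte(v>>8), byte(v))
}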
// WriteUint8 writes a uint8 to the stream
func (stream *Stream) WriteUint8(val uint8) {
stream.ensure(3)
stream.n = writeFirstBuf(stream.buf, digits[val], stream.n)
}
// WriteInt8 writes an int8 to the stream
func (stream *Stream) WriteInt8(nval int8) {
stream.ensure(4)
n := stream.n
var val uint8
if nval < 0 {
val = uint8(-nval)
stream.buf[n] = '-'
n++
} else {
val = uint8(nval)
}
stream.n = writeFirstBuf(stream.buf, digits[val], n)
}
// WriteUint16 writes a uint16 to the stream
func (stream *Stream) WriteUint16(val uint16) {
stream.ensure(5)
q1 := val / 1000
if q1 == 0 {
stream.n = writeFirstBuf(stream.buf, digits[val], stream.n)
return
}
r1 := val - q1*1000
n := writeFirstBuf(stream.buf, digits[q1], stream.n)
writeBuf(stream.buf, digits[r1], n)
stream.n = n + 3
return
}
// WriteInt16 writes an int16 to the stream
func (stream *Stream) WriteInt16(nval int16) {
stream.ensure(6)
n := stream.n
var val uint16
if nval < 0 {
val = uint16(-nval)
stream.buf[n] = '-'
n++
} else {
val = uint16(nval)
}
q1 := val / 1000
if q1 == 0 {
stream.n = writeFirstBuf(stream.buf, digits[val], n)
return
}
r1 := val - q1*1000
n = writeFirstBuf(stream.buf, digits[q1], n)
writeBuf(stream.buf, digits[r1], n)
stream.n = n + 3
return
}
// WriteUint32 writes a uint32 to the stream
func (stream *Stream) WriteUint32(val uint32) {
stream.ensure(10)
n := stream.n
q1 := val / 1000
if q1 == 0 {
stream.n = writeFirstBuf(stream.buf, digits[val], n)
return
}
r1 := val - q1*1000
q2 := q1 / 1000
if q2 == 0 {
n := writeFirstBuf(stream.buf, digits[q1], n)
writeBuf(stream.buf, digits[r1], n)
stream.n = n + 3
return
}
r2 := q1 - q2*1000
q3 := q2 / 1000
if q3 == 0 {
n = writeFirstBuf(stream.buf, digits[q2], n)
} else {
r3 := q2 - q3*1000
stream.buf[n] = byte(q3 + '0')
n++
writeBuf(stream.buf, digits[r3], n)
n += 3
}
writeBuf(stream.buf, digits[r2], n)
writeBuf(stream.buf, digits[r1], n+3)
stream.n = n + 6
}
// WriteInt32 writes an int32 to the stream
func (stream *Stream) WriteInt32(nval int32) {
stream.ensure(11)
n := stream.n
var val uint32
if nval < 0 {
val = uint32(-nval)
stream.buf[n] = '-'
n++
} else {
val = uint32(nval)
}
q1 := val / 1000
if q1 == 0 {
stream.n = writeFirstBuf(stream.buf, digits[val], n)
return
}
r1 := val - q1*1000
q2 := q1 / 1000
if q2 == 0 {
n := writeFirstBuf(stream.buf, digits[q1], n)
writeBuf(stream.buf, digits[r1], n)
stream.n = n + 3
return
}
r2 := q1 - q2*1000
q3 := q2 / 1000
if q3 == 0 {
n = writeFirstBuf(stream.buf, digits[q2], n)
} else {
r3 := q2 - q3*1000
stream.buf[n] = byte(q3 + '0')
n++
writeBuf(stream.buf, digits[r3], n)
n += 3
}
writeBuf(stream.buf, digits[r2], n)
writeBuf(stream.buf, digits[r1], n+3)
stream.n = n + 6
}
// WriteUint64 writes a uint64 to the stream
func (stream *Stream) WriteUint64(val uint64) {
stream.ensure(20)
n := stream.n
q1 := val / 1000
if q1 == 0 {
stream.n = writeFirstBuf(stream.buf, digits[val], n)
return
}
r1 := val - q1*1000
q2 := q1 / 1000
if q2 == 0 {
n := writeFirstBuf(stream.buf, digits[q1], n)
writeBuf(stream.buf, digits[r1], n)
stream.n = n + 3
return
}
r2 := q1 - q2*1000
q3 := q2 / 1000
if q3 == 0 {
n = writeFirstBuf(stream.buf, digits[q2], n)
writeBuf(stream.buf, digits[r2], n)
writeBuf(stream.buf, digits[r1], n+3)
stream.n = n + 6
return
}
r3 := q2 - q3*1000
q4 := q3 / 1000
if q4 == 0 {
n = writeFirstBuf(stream.buf, digits[q3], n)
writeBuf(stream.buf, digits[r3], n)
writeBuf(stream.buf, digits[r2], n+3)
writeBuf(stream.buf, digits[r1], n+6)
stream.n = n + 9
return
}
r4 := q3 - q4*1000
q5 := q4 / 1000
if q5 == 0 {
n = writeFirstBuf(stream.buf, digits[q4], n)
writeBuf(stream.buf, digits[r4], n)
writeBuf(stream.buf, digits[r3], n+3)
writeBuf(stream.buf, digits[r2], n+6)
writeBuf(stream.buf, digits[r1], n+9)
stream.n = n + 12
return
}
r5 := q4 - q5*1000
q6 := q5 / 1000
if q6 == 0 {
n = writeFirstBuf(stream.buf, digits[q5], n)
} else {
n = writeFirstBuf(stream.buf, digits[q6], n)
r6 := q5 - q6*1000
writeBuf(stream.buf, digits[r6], n)
n += 3
}
writeBuf(stream.buf, digits[r5], n)
writeBuf(stream.buf, digits[r4], n+3)
writeBuf(stream.buf, digits[r3], n+6)
writeBuf(stream.buf, digits[r2], n+9)
writeBuf(stream.buf, digits[r1], n+12)
stream.n = n + 15
}
// WriteInt64 writes an int64 to the stream
func (stream *Stream) WriteInt64(nval int64) {
stream.ensure(20)
n := stream.n
var val uint64
if nval < 0 {
val = uint64(-nval)
stream.buf[n] = '-'
n++
} else {
val = uint64(nval)
}
q1 := val / 1000
if q1 == 0 {
stream.n = writeFirstBuf(stream.buf, digits[val], n)
return
}
r1 := val - q1*1000
q2 := q1 / 1000
if q2 == 0 {
n := writeFirstBuf(stream.buf, digits[q1], n)
writeBuf(stream.buf, digits[r1], n)
stream.n = n + 3
return
}
r2 := q1 - q2*1000
q3 := q2 / 1000
if q3 == 0 {
n = writeFirstBuf(stream.buf, digits[q2], n)
writeBuf(stream.buf, digits[r2], n)
writeBuf(stream.buf, digits[r1], n+3)
stream.n = n + 6
return
}
r3 := q2 - q3*1000
q4 := q3 / 1000
if q4 == 0 {
n = writeFirstBuf(stream.buf, digits[q3], n)
writeBuf(stream.buf, digits[r3], n)
writeBuf(stream.buf, digits[r2], n+3)
writeBuf(stream.buf, digits[r1], n+6)
stream.n = n + 9
return
}
r4 := q3 - q4*1000
q5 := q4 / 1000
if q5 == 0 {
n = writeFirstBuf(stream.buf, digits[q4], n)
writeBuf(stream.buf, digits[r4], n)
writeBuf(stream.buf, digits[r3], n+3)
writeBuf(stream.buf, digits[r2], n+6)
writeBuf(stream.buf, digits[r1], n+9)
stream.n = n + 12
return
}
r5 := q4 - q5*1000
q6 := q5 / 1000
if q6 == 0 {
n = writeFirstBuf(stream.buf, digits[q5], n)
} else {
stream.buf[n] = byte(q6 + '0')
n++
r6 := q5 - q6*1000
writeBuf(stream.buf, digits[r6], n)
n += 3
}
writeBuf(stream.buf, digits[r5], n)
writeBuf(stream.buf, digits[r4], n+3)
writeBuf(stream.buf, digits[r3], n+6)
writeBuf(stream.buf, digits[r2], n+9)
writeBuf(stream.buf, digits[r1], n+12)
stream.n = n + 15
}
// WriteInt writes an int to the stream
func (stream *Stream) WriteInt(val int) {
stream.WriteInt64(int64(val))
}
// WriteUint writes a uint to the stream
func (stream *Stream) WriteUint(val uint) {
stream.WriteUint64(uint64(val))
}
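These writers are normally reached through a Stream borrowed from a config rather than called in isolation. A usage sketch against the public Stream API (the values are arbitrary):

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    stream := jsoniter.ConfigDefault.BorrowStream(nil)
    defer jsoniter.ConfigDefault.ReturnStream(stream)

    stream.WriteArrayStart()
    stream.WriteInt64(-1234567)
    stream.WriteMore()
    stream.WriteUint64(18446744073709551615)
    stream.WriteArrayEnd()

    fmt.Println(string(stream.Buffer())) // [-1234567,18446744073709551615]
}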

View File

@ -1,6 +1,7 @@
package jsoniter
import (
"encoding/json"
"io"
"math/big"
"strconv"
@ -339,3 +340,8 @@ func validateFloat(str string) string {
}
return ""
}
// ReadNumber reads a json.Number
func (iter *Iterator) ReadNumber() (ret json.Number) {
return json.Number(iter.readNumberAsString())
}
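A quick sketch of the newly added ReadNumber, which keeps the literal as a json.Number instead of forcing a float conversion (the input is made up):

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    iter := jsoniter.ConfigDefault.BorrowIterator([]byte(`12345678901234567890`))
    defer jsoniter.ConfigDefault.ReturnIterator(iter)

    n := iter.ReadNumber() // no float64 rounding of the 20-digit literal
    fmt.Println(n.String())
}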

View File

@ -22,11 +22,17 @@ func init() {
// ReadUint reads a uint
func (iter *Iterator) ReadUint() uint {
if strconv.IntSize == 32 {
return uint(iter.ReadUint32())
}
return uint(iter.ReadUint64())
}
// ReadInt reads an int
func (iter *Iterator) ReadInt() int {
if strconv.IntSize == 32 {
return int(iter.ReadInt32())
}
return int(iter.ReadInt64())
}
@ -113,13 +119,9 @@ func (iter *Iterator) ReadUint32() (ret uint32) {
} }
func (iter *Iterator) readUint32(c byte) (ret uint32) { func (iter *Iterator) readUint32(c byte) (ret uint32) {
defer func() {
if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' {
iter.ReportError("readUint32", "can not decode float as int")
}
}()
ind := intDigits[c] ind := intDigits[c]
if ind == 0 { if ind == 0 {
iter.assertInteger()
return 0 // single zero return 0 // single zero
} }
if ind == invalidCharForNumber { if ind == invalidCharForNumber {
@ -132,12 +134,14 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) {
ind2 := intDigits[iter.buf[i]] ind2 := intDigits[iter.buf[i]]
if ind2 == invalidCharForNumber { if ind2 == invalidCharForNumber {
iter.head = i iter.head = i
iter.assertInteger()
return value return value
} }
i++ i++
ind3 := intDigits[iter.buf[i]] ind3 := intDigits[iter.buf[i]]
if ind3 == invalidCharForNumber { if ind3 == invalidCharForNumber {
iter.head = i iter.head = i
iter.assertInteger()
return value*10 + uint32(ind2) return value*10 + uint32(ind2)
} }
//iter.head = i + 1 //iter.head = i + 1
@ -146,30 +150,35 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) {
ind4 := intDigits[iter.buf[i]] ind4 := intDigits[iter.buf[i]]
if ind4 == invalidCharForNumber { if ind4 == invalidCharForNumber {
iter.head = i iter.head = i
iter.assertInteger()
return value*100 + uint32(ind2)*10 + uint32(ind3) return value*100 + uint32(ind2)*10 + uint32(ind3)
} }
i++ i++
ind5 := intDigits[iter.buf[i]] ind5 := intDigits[iter.buf[i]]
if ind5 == invalidCharForNumber { if ind5 == invalidCharForNumber {
iter.head = i iter.head = i
iter.assertInteger()
return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
} }
i++ i++
ind6 := intDigits[iter.buf[i]] ind6 := intDigits[iter.buf[i]]
if ind6 == invalidCharForNumber { if ind6 == invalidCharForNumber {
iter.head = i iter.head = i
iter.assertInteger()
return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
} }
i++ i++
ind7 := intDigits[iter.buf[i]] ind7 := intDigits[iter.buf[i]]
if ind7 == invalidCharForNumber { if ind7 == invalidCharForNumber {
iter.head = i iter.head = i
iter.assertInteger()
return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
} }
i++ i++
ind8 := intDigits[iter.buf[i]] ind8 := intDigits[iter.buf[i]]
if ind8 == invalidCharForNumber { if ind8 == invalidCharForNumber {
iter.head = i iter.head = i
iter.assertInteger()
return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
} }
i++ i++
@ -177,6 +186,7 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) {
value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
iter.head = i iter.head = i
if ind9 == invalidCharForNumber { if ind9 == invalidCharForNumber {
iter.assertInteger()
return value return value
} }
} }
@ -185,6 +195,7 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) {
ind = intDigits[iter.buf[i]] ind = intDigits[iter.buf[i]]
if ind == invalidCharForNumber { if ind == invalidCharForNumber {
iter.head = i iter.head = i
iter.assertInteger()
return value return value
} }
if value > uint32SafeToMultiply10 { if value > uint32SafeToMultiply10 {
@ -199,6 +210,7 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) {
value = (value << 3) + (value << 1) + uint32(ind) value = (value << 3) + (value << 1) + uint32(ind)
} }
if !iter.loadMore() { if !iter.loadMore() {
iter.assertInteger()
return value return value
} }
} }
@ -229,13 +241,9 @@ func (iter *Iterator) ReadUint64() uint64 {
} }
func (iter *Iterator) readUint64(c byte) (ret uint64) { func (iter *Iterator) readUint64(c byte) (ret uint64) {
defer func() {
if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' {
iter.ReportError("readUint64", "can not decode float as int")
}
}()
ind := intDigits[c] ind := intDigits[c]
if ind == 0 { if ind == 0 {
iter.assertInteger()
return 0 // single zero return 0 // single zero
} }
if ind == invalidCharForNumber { if ind == invalidCharForNumber {
@ -243,11 +251,73 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) {
return return
} }
value := uint64(ind) value := uint64(ind)
if iter.tail-iter.head > 10 {
i := iter.head
ind2 := intDigits[iter.buf[i]]
if ind2 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value
}
i++
ind3 := intDigits[iter.buf[i]]
if ind3 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*10 + uint64(ind2)
}
//iter.head = i + 1
//value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
i++
ind4 := intDigits[iter.buf[i]]
if ind4 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*100 + uint64(ind2)*10 + uint64(ind3)
}
i++
ind5 := intDigits[iter.buf[i]]
if ind5 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4)
}
i++
ind6 := intDigits[iter.buf[i]]
if ind6 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5)
}
i++
ind7 := intDigits[iter.buf[i]]
if ind7 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6)
}
i++
ind8 := intDigits[iter.buf[i]]
if ind8 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7)
}
i++
ind9 := intDigits[iter.buf[i]]
value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8)
iter.head = i
if ind9 == invalidCharForNumber {
iter.assertInteger()
return value
}
}
for { for {
for i := iter.head; i < iter.tail; i++ { for i := iter.head; i < iter.tail; i++ {
ind = intDigits[iter.buf[i]] ind = intDigits[iter.buf[i]]
if ind == invalidCharForNumber { if ind == invalidCharForNumber {
iter.head = i iter.head = i
iter.assertInteger()
return value return value
} }
if value > uint64SafeToMultiple10 { if value > uint64SafeToMultiple10 {
@ -262,7 +332,14 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) {
value = (value << 3) + (value << 1) + uint64(ind) value = (value << 3) + (value << 1) + uint64(ind)
} }
if !iter.loadMore() { if !iter.loadMore() {
iter.assertInteger()
return value return value
} }
} }
} }
func (iter *Iterator) assertInteger() {
if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' {
iter.ReportError("assertInteger", "can not decode float as int")
}
}
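assertInteger centralizes the check that the old per-call defer blocks performed: an integer read that stops at a '.' is rejected. At the public level the effect looks like this sketch (the struct and input are illustrative):

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    var v struct {
        N int `json:"n"`
    }
    err := jsoniter.Unmarshal([]byte(`{"n":3.14}`), &v)
    fmt.Println(err) // reports "can not decode float as int"
}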

View File

@ -3,7 +3,6 @@ package jsoniter
import ( import (
"fmt" "fmt"
"unicode" "unicode"
"unsafe"
) )
// ReadObject read one field from object. // ReadObject read one field from object.
@ -19,26 +18,6 @@ func (iter *Iterator) ReadObject() (ret string) {
c = iter.nextToken() c = iter.nextToken()
if c == '"' { if c == '"' {
iter.unreadByte() iter.unreadByte()
if iter.cfg.objectFieldMustBeSimpleString {
return string(iter.readObjectFieldAsBytes())
} else {
field := iter.ReadString()
c = iter.nextToken()
if c != ':' {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
return field
}
}
if c == '}' {
return "" // end of object
}
iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
return
case ',':
if iter.cfg.objectFieldMustBeSimpleString {
return string(iter.readObjectFieldAsBytes())
} else {
field := iter.ReadString() field := iter.ReadString()
c = iter.nextToken() c = iter.nextToken()
if c != ':' { if c != ':' {
@ -46,6 +25,18 @@ func (iter *Iterator) ReadObject() (ret string) {
} }
return field return field
} }
if c == '}' {
return "" // end of object
}
iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
return
case ',':
field := iter.ReadString()
c = iter.nextToken()
if c != ':' {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
return field
case '}': case '}':
return "" // end of object return "" // end of object
default: default:
@ -54,97 +45,91 @@ func (iter *Iterator) ReadObject() (ret string) {
} }
} }
func (iter *Iterator) readFieldHash() int32 { // CaseInsensitive
func (iter *Iterator) readFieldHash() int64 {
hash := int64(0x811c9dc5) hash := int64(0x811c9dc5)
c := iter.nextToken() c := iter.nextToken()
if c == '"' { if c != '"' {
for { iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
for i := iter.head; i < iter.tail; i++ { return 0
// require ascii string and no escape }
b := iter.buf[i] for {
if !iter.cfg.objectFieldMustBeSimpleString && b == '\\' { for i := iter.head; i < iter.tail; i++ {
iter.head = i // require ascii string and no escape
for _, b := range iter.readStringSlowPath() { b := iter.buf[i]
if 'A' <= b && b <= 'Z' { if b == '\\' {
b += 'a' - 'A' iter.head = i
} for _, b := range iter.readStringSlowPath() {
hash ^= int64(b) if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
hash *= 0x1000193 b += 'a' - 'A'
} }
c = iter.nextToken() hash ^= int64(b)
if c != ':' { hash *= 0x1000193
iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
return 0
}
return int32(hash)
} }
if b == '"' { c = iter.nextToken()
iter.head = i + 1 if c != ':' {
c = iter.nextToken() iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
if c != ':' { return 0
iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
return 0
}
return int32(hash)
} }
if 'A' <= b && b <= 'Z' { return hash
b += 'a' - 'A'
}
hash ^= int64(b)
hash *= 0x1000193
} }
if !iter.loadMore() { if b == '"' {
iter.ReportError("readFieldHash", `incomplete field name`) iter.head = i + 1
return 0 c = iter.nextToken()
if c != ':' {
iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
return 0
}
return hash
} }
if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
b += 'a' - 'A'
}
hash ^= int64(b)
hash *= 0x1000193
}
if !iter.loadMore() {
iter.ReportError("readFieldHash", `incomplete field name`)
return 0
} }
} }
iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
return 0
} }
func calcHash(str string) int32 { func calcHash(str string, caseSensitive bool) int64 {
hash := int64(0x811c9dc5) hash := int64(0x811c9dc5)
for _, b := range str { for _, b := range str {
hash ^= int64(unicode.ToLower(b)) if caseSensitive {
hash ^= int64(b)
} else {
hash ^= int64(unicode.ToLower(b))
}
hash *= 0x1000193 hash *= 0x1000193
} }
return int32(hash) return int64(hash)
} }
// ReadObjectCB read object with callback, the key is ascii only and field name not copied // ReadObjectCB read object with callback, the key is ascii only and field name not copied
func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
c := iter.nextToken() c := iter.nextToken()
var fieldBytes []byte
var field string var field string
if c == '{' { if c == '{' {
c = iter.nextToken() c = iter.nextToken()
if c == '"' { if c == '"' {
iter.unreadByte() iter.unreadByte()
if iter.cfg.objectFieldMustBeSimpleString { field = iter.ReadString()
fieldBytes = iter.readObjectFieldAsBytes() c = iter.nextToken()
field = *(*string)(unsafe.Pointer(&fieldBytes)) if c != ':' {
} else { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
field = iter.ReadString()
c = iter.nextToken()
if c != ':' {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
} }
if !callback(iter, field) { if !callback(iter, field) {
return false return false
} }
c = iter.nextToken() c = iter.nextToken()
for c == ',' { for c == ',' {
if iter.cfg.objectFieldMustBeSimpleString { field = iter.ReadString()
fieldBytes = iter.readObjectFieldAsBytes() c = iter.nextToken()
field = *(*string)(unsafe.Pointer(&fieldBytes)) if c != ':' {
} else { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
field = iter.ReadString()
c = iter.nextToken()
if c != ':' {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
} }
if !callback(iter, field) { if !callback(iter, field) {
return false return false

View File

@ -17,43 +17,26 @@ type StreamPool interface {
}
func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
- select {
- case stream := <-cfg.streamPool:
- stream.Reset(writer)
- return stream
- default:
- return NewStream(cfg, writer, 512)
- }
+ stream := cfg.streamPool.Get().(*Stream)
+ stream.Reset(writer)
+ return stream
}
func (cfg *frozenConfig) ReturnStream(stream *Stream) {
+ stream.out = nil
stream.Error = nil
stream.Attachment = nil
- select {
- case cfg.streamPool <- stream:
- return
- default:
- return
- }
+ cfg.streamPool.Put(stream)
}
func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
- select {
- case iter := <-cfg.iteratorPool:
- iter.ResetBytes(data)
- return iter
- default:
- return ParseBytes(cfg, data)
- }
+ iter := cfg.iteratorPool.Get().(*Iterator)
+ iter.ResetBytes(data)
+ return iter
}
func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
iter.Error = nil
iter.Attachment = nil
- select {
- case cfg.iteratorPool <- iter:
- return
- default:
- return
- }
+ cfg.iteratorPool.Put(iter)
}
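The pools moved from bounded channels to sync.Pool, so idle streams and iterators can be collected by the GC instead of staying pinned in a channel. A minimal sketch of the same pattern in isolation (the pooled bytes.Buffer stands in for Stream/Iterator):

package main

import (
    "bytes"
    "fmt"
    "sync"
)

func main() {
    pool := sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

    buf := pool.Get().(*bytes.Buffer)
    buf.Reset() // reset before reuse, like Stream.Reset / Iterator.ResetBytes
    buf.WriteString("pooled")
    fmt.Println(buf.String())
    pool.Put(buf) // unlike a buffered channel, idle objects may be dropped by the GC
}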

330
vendor/github.com/json-iterator/go/reflect.go generated vendored Normal file
View File

@ -0,0 +1,330 @@
package jsoniter
import (
"fmt"
"reflect"
"unsafe"
"github.com/modern-go/reflect2"
)
// ValDecoder is an internal type registered to the cache as needed.
// Don't confuse jsoniter.ValDecoder with json.Decoder.
// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
//
// Reflection on the type is used to create decoders, which are then cached.
// Reflection on values is avoided as much as possible, because reflect.Value itself allocates, with the following exceptions:
// 1. creating a new value, for example *int needs an int to be allocated
// 2. appending to a slice, if the existing capacity is not enough, allocation is done via reflect.New
// 3. assigning to a map, where both key and value must be reflect.Value
// For a simple struct binding, decoding is reflect.Value free and allocation free.
type ValDecoder interface {
Decode(ptr unsafe.Pointer, iter *Iterator)
}
// ValEncoder is an internal type registered to the cache as needed.
// Don't confuse jsoniter.ValEncoder with json.Encoder.
// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
type ValEncoder interface {
IsEmpty(ptr unsafe.Pointer) bool
Encode(ptr unsafe.Pointer, stream *Stream)
}
type checkIsEmpty interface {
IsEmpty(ptr unsafe.Pointer) bool
}
type ctx struct {
*frozenConfig
prefix string
encoders map[reflect2.Type]ValEncoder
decoders map[reflect2.Type]ValDecoder
}
func (b *ctx) caseSensitive() bool {
if b.frozenConfig == nil {
// default is case-insensitive
return false
}
return b.frozenConfig.caseSensitive
}
func (b *ctx) append(prefix string) *ctx {
return &ctx{
frozenConfig: b.frozenConfig,
prefix: b.prefix + " " + prefix,
encoders: b.encoders,
decoders: b.decoders,
}
}
// ReadVal copies the underlying JSON into a Go value, same as json.Unmarshal
func (iter *Iterator) ReadVal(obj interface{}) {
cacheKey := reflect2.RTypeOf(obj)
decoder := iter.cfg.getDecoderFromCache(cacheKey)
if decoder == nil {
typ := reflect2.TypeOf(obj)
if typ.Kind() != reflect.Ptr {
iter.ReportError("ReadVal", "can only unmarshal into pointer")
return
}
decoder = iter.cfg.DecoderOf(typ)
}
ptr := reflect2.PtrOf(obj)
if ptr == nil {
iter.ReportError("ReadVal", "can not read into nil pointer")
return
}
decoder.Decode(ptr, iter)
}
// WriteVal copies the Go value into the underlying JSON, same as json.Marshal
func (stream *Stream) WriteVal(val interface{}) {
if nil == val {
stream.WriteNil()
return
}
cacheKey := reflect2.RTypeOf(val)
encoder := stream.cfg.getEncoderFromCache(cacheKey)
if encoder == nil {
typ := reflect2.TypeOf(val)
encoder = stream.cfg.EncoderOf(typ)
}
encoder.Encode(reflect2.PtrOf(val), stream)
}
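ReadVal and WriteVal are the reflection entry points behind Unmarshal and Marshal: decoders and encoders are looked up by rtype and cached, and decoding requires a non-nil pointer. A short sketch at the public level (the struct and JSON are made up):

package main

import (
    "fmt"

    jsoniter "github.com/json-iterator/go"
)

func main() {
    iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"name":"cri","port":10010}`)
    var cfg struct {
        Name string `json:"name"`
        Port int    `json:"port"`
    }
    iter.ReadVal(&cfg) // passing a non-pointer would make ReadVal report an error
    fmt.Println(cfg.Name, cfg.Port) // cri 10010
}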
func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder {
cacheKey := typ.RType()
decoder := cfg.getDecoderFromCache(cacheKey)
if decoder != nil {
return decoder
}
ctx := &ctx{
frozenConfig: cfg,
prefix: "",
decoders: map[reflect2.Type]ValDecoder{},
encoders: map[reflect2.Type]ValEncoder{},
}
ptrType := typ.(*reflect2.UnsafePtrType)
decoder = decoderOfType(ctx, ptrType.Elem())
cfg.addDecoderToCache(cacheKey, decoder)
return decoder
}
func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
decoder := getTypeDecoderFromExtension(ctx, typ)
if decoder != nil {
return decoder
}
decoder = createDecoderOfType(ctx, typ)
for _, extension := range extensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
for _, extension := range ctx.extensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
return decoder
}
func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
decoder := ctx.decoders[typ]
if decoder != nil {
return decoder
}
placeholder := &placeholderDecoder{}
ctx.decoders[typ] = placeholder
decoder = _createDecoderOfType(ctx, typ)
placeholder.decoder = decoder
return decoder
}
func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
decoder := createDecoderOfJsonRawMessage(ctx, typ)
if decoder != nil {
return decoder
}
decoder = createDecoderOfJsonNumber(ctx, typ)
if decoder != nil {
return decoder
}
decoder = createDecoderOfMarshaler(ctx, typ)
if decoder != nil {
return decoder
}
decoder = createDecoderOfAny(ctx, typ)
if decoder != nil {
return decoder
}
decoder = createDecoderOfNative(ctx, typ)
if decoder != nil {
return decoder
}
switch typ.Kind() {
case reflect.Interface:
ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType)
if isIFace {
return &ifaceDecoder{valType: ifaceType}
}
return &efaceDecoder{}
case reflect.Struct:
return decoderOfStruct(ctx, typ)
case reflect.Array:
return decoderOfArray(ctx, typ)
case reflect.Slice:
return decoderOfSlice(ctx, typ)
case reflect.Map:
return decoderOfMap(ctx, typ)
case reflect.Ptr:
return decoderOfOptional(ctx, typ)
default:
return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
}
}
func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder {
cacheKey := typ.RType()
encoder := cfg.getEncoderFromCache(cacheKey)
if encoder != nil {
return encoder
}
ctx := &ctx{
frozenConfig: cfg,
prefix: "",
decoders: map[reflect2.Type]ValDecoder{},
encoders: map[reflect2.Type]ValEncoder{},
}
encoder = encoderOfType(ctx, typ)
if typ.LikePtr() {
encoder = &onePtrEncoder{encoder}
}
cfg.addEncoderToCache(cacheKey, encoder)
return encoder
}
type onePtrEncoder struct {
encoder ValEncoder
}
func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
}
func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
}
func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
encoder := getTypeEncoderFromExtension(ctx, typ)
if encoder != nil {
return encoder
}
encoder = createEncoderOfType(ctx, typ)
for _, extension := range extensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
for _, extension := range ctx.extensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
return encoder
}
func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
encoder := ctx.encoders[typ]
if encoder != nil {
return encoder
}
placeholder := &placeholderEncoder{}
ctx.encoders[typ] = placeholder
encoder = _createEncoderOfType(ctx, typ)
placeholder.encoder = encoder
return encoder
}
func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
encoder := createEncoderOfJsonRawMessage(ctx, typ)
if encoder != nil {
return encoder
}
encoder = createEncoderOfJsonNumber(ctx, typ)
if encoder != nil {
return encoder
}
encoder = createEncoderOfMarshaler(ctx, typ)
if encoder != nil {
return encoder
}
encoder = createEncoderOfAny(ctx, typ)
if encoder != nil {
return encoder
}
encoder = createEncoderOfNative(ctx, typ)
if encoder != nil {
return encoder
}
kind := typ.Kind()
switch kind {
case reflect.Interface:
return &dynamicEncoder{typ}
case reflect.Struct:
return encoderOfStruct(ctx, typ)
case reflect.Array:
return encoderOfArray(ctx, typ)
case reflect.Slice:
return encoderOfSlice(ctx, typ)
case reflect.Map:
return encoderOfMap(ctx, typ)
case reflect.Ptr:
return encoderOfOptional(ctx, typ)
default:
return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
}
}
type lazyErrorDecoder struct {
err error
}
func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if iter.WhatIsNext() != NilValue {
if iter.Error == nil {
iter.Error = decoder.err
}
} else {
iter.Skip()
}
}
type lazyErrorEncoder struct {
err error
}
func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
if ptr == nil {
stream.WriteNil()
} else if stream.Error == nil {
stream.Error = encoder.err
}
}
func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return false
}
type placeholderDecoder struct {
decoder ValDecoder
}
func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
decoder.decoder.Decode(ptr, iter)
}
type placeholderEncoder struct {
encoder ValEncoder
}
func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
encoder.encoder.Encode(ptr, stream)
}
func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.encoder.IsEmpty(ptr)
}

104
vendor/github.com/json-iterator/go/reflect_array.go generated vendored Normal file
View File

@ -0,0 +1,104 @@
package jsoniter
import (
"fmt"
"github.com/modern-go/reflect2"
"io"
"unsafe"
)
func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder {
arrayType := typ.(*reflect2.UnsafeArrayType)
decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
return &arrayDecoder{arrayType, decoder}
}
func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder {
arrayType := typ.(*reflect2.UnsafeArrayType)
if arrayType.Len() == 0 {
return emptyArrayEncoder{}
}
encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
return &arrayEncoder{arrayType, encoder}
}
type emptyArrayEncoder struct{}
func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteEmptyArray()
}
func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return true
}
type arrayEncoder struct {
arrayType *reflect2.UnsafeArrayType
elemEncoder ValEncoder
}
func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteArrayStart()
elemPtr := unsafe.Pointer(ptr)
encoder.elemEncoder.Encode(elemPtr, stream)
for i := 1; i < encoder.arrayType.Len(); i++ {
stream.WriteMore()
elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i)
encoder.elemEncoder.Encode(elemPtr, stream)
}
stream.WriteArrayEnd()
if stream.Error != nil && stream.Error != io.EOF {
stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
}
}
func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return false
}
type arrayDecoder struct {
arrayType *reflect2.UnsafeArrayType
elemDecoder ValDecoder
}
func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
decoder.doDecode(ptr, iter)
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
}
}
func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
c := iter.nextToken()
arrayType := decoder.arrayType
if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l')
return
}
if c != '[' {
iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c}))
return
}
c = iter.nextToken()
if c == ']' {
return
}
iter.unreadByte()
elemPtr := arrayType.UnsafeGetIndex(ptr, 0)
decoder.elemDecoder.Decode(elemPtr, iter)
length := 1
for c = iter.nextToken(); c == ','; c = iter.nextToken() {
if length >= arrayType.Len() {
iter.Skip()
continue
}
idx := length
length += 1
elemPtr = arrayType.UnsafeGetIndex(ptr, idx)
decoder.elemDecoder.Decode(elemPtr, iter)
}
if c != ']' {
iter.ReportError("decode array", "expect ], but found "+string([]byte{c}))
return
}
}

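A small usage sketch for the array codec above (illustrative only; jsoniter.Unmarshal is the library's public drop-in entry point, not something introduced by this diff): extra JSON elements beyond the array length are skipped, mirroring encoding/json.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// arrayDecoder fills the fixed-size array and skips any surplus elements.
	var window [2]int
	if err := jsoniter.Unmarshal([]byte(`[1, 2, 3, 4]`), &window); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(window) // [1 2]
}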
vendor/github.com/json-iterator/go/reflect_dynamic.go generated vendored Normal file

@ -0,0 +1,70 @@
package jsoniter
import (
"github.com/modern-go/reflect2"
"reflect"
"unsafe"
)
type dynamicEncoder struct {
valType reflect2.Type
}
func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
obj := encoder.valType.UnsafeIndirect(ptr)
stream.WriteVal(obj)
}
func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.valType.UnsafeIndirect(ptr) == nil
}
type efaceDecoder struct {
}
func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
pObj := (*interface{})(ptr)
obj := *pObj
if obj == nil {
*pObj = iter.Read()
return
}
typ := reflect2.TypeOf(obj)
if typ.Kind() != reflect.Ptr {
*pObj = iter.Read()
return
}
ptrType := typ.(*reflect2.UnsafePtrType)
ptrElemType := ptrType.Elem()
if iter.WhatIsNext() == NilValue {
if ptrElemType.Kind() != reflect.Ptr {
iter.skipFourBytes('n', 'u', 'l', 'l')
*pObj = nil
return
}
}
if reflect2.IsNil(obj) {
obj := ptrElemType.New()
iter.ReadVal(obj)
*pObj = obj
return
}
iter.ReadVal(obj)
}
type ifaceDecoder struct {
valType *reflect2.UnsafeIFaceType
}
func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if iter.ReadNil() {
decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew())
return
}
obj := decoder.valType.UnsafeIndirect(ptr)
if reflect2.IsNil(obj) {
iter.ReportError("decode non empty interface", "can not unmarshal into nil")
return
}
iter.ReadVal(obj)
}

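An illustrative sketch of the dynamic (interface{}) decoding above, assuming jsoniter's standard Unmarshal entry point: an empty interface receives the generic representation, while an interface that already holds a non-nil pointer is decoded in place.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Empty interface: efaceDecoder falls back to iter.Read and stores a generic value.
	var anything interface{}
	_ = jsoniter.Unmarshal([]byte(`{"a":1}`), &anything)
	fmt.Printf("%T\n", anything) // map[string]interface {}

	// Interface holding a non-nil pointer: the pointed-to value is decoded in place.
	n := 0
	var target interface{} = &n
	_ = jsoniter.Unmarshal([]byte(`42`), &target)
	fmt.Println(n) // 42
}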
vendor/github.com/json-iterator/go/reflect_extension.go

@ -2,6 +2,7 @@ package jsoniter
import (
"fmt"
+ "github.com/modern-go/reflect2"
"reflect"
"sort"
"strings"
@ -17,17 +18,15 @@ var extensions = []Extension{}
// StructDescriptor describe how should we encode/decode the struct
type StructDescriptor struct {
- onePtrEmbedded bool
- onePtrOptimization bool
- Type reflect.Type
- Fields []*Binding
+ Type reflect2.Type
+ Fields []*Binding
}
// GetField get one field from the descriptor by its name.
// Can not use map here to keep field orders.
func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
for _, binding := range structDescriptor.Fields {
- if binding.Field.Name == fieldName {
+ if binding.Field.Name() == fieldName {
return binding
}
}
@ -37,7 +36,7 @@ func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
// Binding describe how should we encode/decode the struct field
type Binding struct {
levels []int
- Field *reflect.StructField
+ Field reflect2.StructField
FromNames []string
ToNames []string
Encoder ValEncoder
@ -48,10 +47,12 @@ type Binding struct {
// Can also rename fields by UpdateStructDescriptor.
type Extension interface {
UpdateStructDescriptor(structDescriptor *StructDescriptor)
- CreateDecoder(typ reflect.Type) ValDecoder
- CreateEncoder(typ reflect.Type) ValEncoder
- DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder
- DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder
+ CreateMapKeyDecoder(typ reflect2.Type) ValDecoder
+ CreateMapKeyEncoder(typ reflect2.Type) ValEncoder
+ CreateDecoder(typ reflect2.Type) ValDecoder
+ CreateEncoder(typ reflect2.Type) ValEncoder
+ DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder
+ DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder
}
// DummyExtension embed this type get dummy implementation for all methods of Extension
@ -62,23 +63,105 @@ type DummyExtension struct {
func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
}
+ // CreateMapKeyDecoder No-op
+ func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+ }
+ // CreateMapKeyEncoder No-op
+ func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+ }
// CreateDecoder No-op
- func (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder {
+ func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
return nil
}
// CreateEncoder No-op
- func (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder {
+ func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
return nil
}
// DecorateDecoder No-op
- func (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder {
+ func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
return decoder
}
// DecorateEncoder No-op
- func (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder {
+ func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+ }
+ type EncoderExtension map[reflect2.Type]ValEncoder
+ // UpdateStructDescriptor No-op
+ func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+ }
+ // CreateDecoder No-op
+ func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+ }
+ // CreateEncoder get encoder from map
+ func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return extension[typ]
+ }
+ // CreateMapKeyDecoder No-op
+ func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+ }
+ // CreateMapKeyEncoder No-op
+ func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+ }
+ // DecorateDecoder No-op
+ func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+ }
+ // DecorateEncoder No-op
+ func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+ }
+ type DecoderExtension map[reflect2.Type]ValDecoder
+ // UpdateStructDescriptor No-op
+ func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+ }
+ // CreateMapKeyDecoder No-op
+ func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+ }
+ // CreateMapKeyEncoder No-op
+ func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+ }
+ // CreateDecoder get decoder from map
+ func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return extension[typ]
+ }
+ // CreateEncoder No-op
+ func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+ }
+ // DecorateDecoder No-op
+ func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+ }
+ // DecorateEncoder No-op
+ func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
return encoder
}
@ -99,10 +182,6 @@ func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
encoder.fun(ptr, stream)
}
- func (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) {
- WriteToStream(val, stream, encoder)
- }
func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {
if encoder.isEmptyFunc == nil {
return false
@ -161,60 +240,80 @@ func RegisterExtension(extension Extension) {
extensions = append(extensions, extension)
}
- func getTypeDecoderFromExtension(typ reflect.Type) ValDecoder {
- decoder := _getTypeDecoderFromExtension(typ)
+ func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := _getTypeDecoderFromExtension(ctx, typ)
if decoder != nil {
for _, extension := range extensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
+ for _, extension := range ctx.extensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
}
return decoder
}
- func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder {
+ func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
for _, extension := range extensions {
decoder := extension.CreateDecoder(typ)
if decoder != nil {
return decoder
}
}
+ for _, extension := range ctx.extensions {
+ decoder := extension.CreateDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ }
typeName := typ.String()
decoder := typeDecoders[typeName]
if decoder != nil {
return decoder
}
if typ.Kind() == reflect.Ptr {
- decoder := typeDecoders[typ.Elem().String()]
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ decoder := typeDecoders[ptrType.Elem().String()]
if decoder != nil {
- return &OptionalDecoder{typ.Elem(), decoder}
+ return &OptionalDecoder{ptrType.Elem(), decoder}
}
}
return nil
}
- func getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {
- encoder := _getTypeEncoderFromExtension(typ)
+ func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := _getTypeEncoderFromExtension(ctx, typ)
if encoder != nil {
for _, extension := range extensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
+ for _, extension := range ctx.extensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
}
return encoder
}
- func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {
+ func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
for _, extension := range extensions {
encoder := extension.CreateEncoder(typ)
if encoder != nil {
return encoder
}
}
+ for _, extension := range ctx.extensions {
+ encoder := extension.CreateEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ }
typeName := typ.String()
encoder := typeEncoders[typeName]
if encoder != nil {
return encoder
}
if typ.Kind() == reflect.Ptr {
- encoder := typeEncoders[typ.Elem().String()]
+ typePtr := typ.(*reflect2.UnsafePtrType)
+ encoder := typeEncoders[typePtr.Elem().String()]
if encoder != nil {
return &OptionalEncoder{encoder}
}
@ -222,72 +321,60 @@ func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {
return nil
}
- func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, error) {
+ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
+ structType := typ.(*reflect2.UnsafeStructType)
embeddedBindings := []*Binding{}
bindings := []*Binding{}
- for i := 0; i < typ.NumField(); i++ {
- field := typ.Field(i)
- tag := field.Tag.Get(cfg.getTagKey())
+ for i := 0; i < structType.NumField(); i++ {
+ field := structType.Field(i)
+ tag, hastag := field.Tag().Lookup(ctx.getTagKey())
+ if ctx.onlyTaggedField && !hastag {
+ continue
+ }
tagParts := strings.Split(tag, ",")
if tag == "-" {
continue
}
- if field.Anonymous && (tag == "" || tagParts[0] == "") {
- if field.Type.Kind() == reflect.Struct {
- structDescriptor, err := describeStruct(cfg, field.Type)
- if err != nil {
- return nil, err
- }
+ if field.Anonymous() && (tag == "" || tagParts[0] == "") {
+ if field.Type().Kind() == reflect.Struct {
+ structDescriptor := describeStruct(ctx, field.Type())
for _, binding := range structDescriptor.Fields {
binding.levels = append([]int{i}, binding.levels...)
omitempty := binding.Encoder.(*structFieldEncoder).omitempty
- binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty}
- binding.Decoder = &structFieldDecoder{&field, binding.Decoder}
+ binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+ binding.Decoder = &structFieldDecoder{field, binding.Decoder}
embeddedBindings = append(embeddedBindings, binding)
}
continue
- } else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct {
- structDescriptor, err := describeStruct(cfg, field.Type.Elem())
- if err != nil {
- return nil, err
- }
- for _, binding := range structDescriptor.Fields {
- binding.levels = append([]int{i}, binding.levels...)
- omitempty := binding.Encoder.(*structFieldEncoder).omitempty
- binding.Encoder = &OptionalEncoder{binding.Encoder}
- binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty}
- binding.Decoder = &deferenceDecoder{field.Type.Elem(), binding.Decoder}
- binding.Decoder = &structFieldDecoder{&field, binding.Decoder}
- embeddedBindings = append(embeddedBindings, binding)
- }
- continue
+ } else if field.Type().Kind() == reflect.Ptr {
+ ptrType := field.Type().(*reflect2.UnsafePtrType)
+ if ptrType.Elem().Kind() == reflect.Struct {
+ structDescriptor := describeStruct(ctx, ptrType.Elem())
+ for _, binding := range structDescriptor.Fields {
+ binding.levels = append([]int{i}, binding.levels...)
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty
+ binding.Encoder = &dereferenceEncoder{binding.Encoder}
+ binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+ binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder}
+ binding.Decoder = &structFieldDecoder{field, binding.Decoder}
+ embeddedBindings = append(embeddedBindings, binding)
+ }
+ continue
+ }
}
}
- fieldNames := calcFieldNames(field.Name, tagParts[0], tag)
- fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name)
+ fieldNames := calcFieldNames(field.Name(), tagParts[0], tag)
+ fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name())
decoder := fieldDecoders[fieldCacheKey]
if decoder == nil {
- var err error
- decoder, err = decoderOfType(cfg, field.Type)
- if len(fieldNames) > 0 && err != nil {
- return nil, err
- }
+ decoder = decoderOfType(ctx.append(field.Name()), field.Type())
}
encoder := fieldEncoders[fieldCacheKey]
if encoder == nil {
- var err error
- encoder, err = encoderOfType(cfg, field.Type)
- if len(fieldNames) > 0 && err != nil {
- return nil, err
- }
- // map is stored as pointer in the struct,
- // and treat nil or empty map as empty field
- if encoder != nil && field.Type.Kind() == reflect.Map {
- encoder = &optionalMapEncoder{encoder}
- }
+ encoder = encoderOfType(ctx.append(field.Name()), field.Type())
}
binding := &Binding{
- Field: &field,
+ Field: field,
FromNames: fieldNames,
ToNames: fieldNames,
Decoder: decoder,
@ -296,35 +383,20 @@ func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, err
binding.levels = []int{i}
bindings = append(bindings, binding)
}
- return createStructDescriptor(cfg, typ, bindings, embeddedBindings), nil
+ return createStructDescriptor(ctx, typ, bindings, embeddedBindings)
}
- func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
- onePtrEmbedded := false
- onePtrOptimization := false
- if typ.NumField() == 1 {
- firstField := typ.Field(0)
- switch firstField.Type.Kind() {
- case reflect.Ptr:
- if firstField.Anonymous && firstField.Type.Elem().Kind() == reflect.Struct {
- onePtrEmbedded = true
- }
- fallthrough
- case reflect.Map:
- onePtrOptimization = true
- case reflect.Struct:
- onePtrOptimization = isStructOnePtr(firstField.Type)
- }
- }
+ func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
structDescriptor := &StructDescriptor{
- onePtrEmbedded: onePtrEmbedded,
- onePtrOptimization: onePtrOptimization,
- Type: typ,
- Fields: bindings,
+ Type: typ,
+ Fields: bindings,
}
for _, extension := range extensions {
extension.UpdateStructDescriptor(structDescriptor)
}
- processTags(structDescriptor, cfg)
+ for _, extension := range ctx.extensions {
+ extension.UpdateStructDescriptor(structDescriptor)
+ }
+ processTags(structDescriptor, ctx.frozenConfig)
// merge normal & embedded bindings & sort with original order
allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))
sort.Sort(allBindings)
@ -332,21 +404,6 @@ func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Bin
return structDescriptor
}
- func isStructOnePtr(typ reflect.Type) bool {
- if typ.NumField() == 1 {
- firstField := typ.Field(0)
- switch firstField.Type.Kind() {
- case reflect.Ptr:
- return true
- case reflect.Map:
- return true
- case reflect.Struct:
- return isStructOnePtr(firstField.Type)
- }
- }
- return false
- }
type sortableBindings []*Binding
func (bindings sortableBindings) Len() int {
@ -374,12 +431,12 @@ func (bindings sortableBindings) Swap(i, j int) {
func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {
for _, binding := range structDescriptor.Fields {
shouldOmitEmpty := false
- tagParts := strings.Split(binding.Field.Tag.Get(cfg.getTagKey()), ",")
+ tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",")
for _, tagPart := range tagParts[1:] {
if tagPart == "omitempty" {
shouldOmitEmpty = true
} else if tagPart == "string" {
- if binding.Field.Type.Kind() == reflect.String {
+ if binding.Field.Type().Kind() == reflect.String {
binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}
binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}
} else {

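A short, hypothetical extension (the name upperCaseExtension and its renaming policy are illustrative, not from this diff) showing how the reflect2-based Extension/StructDescriptor API above is typically used: embed DummyExtension, override UpdateStructDescriptor, and register the extension globally.

package main

import (
	"fmt"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

// upperCaseExtension renames every struct field to its upper-case form for both encoding and decoding.
type upperCaseExtension struct {
	jsoniter.DummyExtension
}

func (e *upperCaseExtension) UpdateStructDescriptor(sd *jsoniter.StructDescriptor) {
	for _, binding := range sd.Fields {
		name := strings.ToUpper(binding.Field.Name())
		binding.ToNames = []string{name}
		binding.FromNames = []string{name}
	}
}

func main() {
	jsoniter.RegisterExtension(&upperCaseExtension{})
	out, _ := jsoniter.Marshal(struct{ Hello string }{"world"})
	fmt.Println(string(out)) // {"HELLO":"world"}
}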
vendor/github.com/json-iterator/go/reflect_json_number.go

@ -0,0 +1,112 @@
package jsoniter
import (
"encoding/json"
"github.com/modern-go/reflect2"
"strconv"
"unsafe"
)
type Number string
// String returns the literal text of the number.
func (n Number) String() string { return string(n) }
// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
return strconv.ParseFloat(string(n), 64)
}
// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
return strconv.ParseInt(string(n), 10, 64)
}
func CastJsonNumber(val interface{}) (string, bool) {
switch typedVal := val.(type) {
case json.Number:
return string(typedVal), true
case Number:
return string(typedVal), true
}
return "", false
}
var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem()
var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem()
func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder {
if typ.AssignableTo(jsonNumberType) {
return &jsonNumberCodec{}
}
if typ.AssignableTo(jsoniterNumberType) {
return &jsoniterNumberCodec{}
}
return nil
}
func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder {
if typ.AssignableTo(jsonNumberType) {
return &jsonNumberCodec{}
}
if typ.AssignableTo(jsoniterNumberType) {
return &jsoniterNumberCodec{}
}
return nil
}
type jsonNumberCodec struct {
}
func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
switch iter.WhatIsNext() {
case StringValue:
*((*json.Number)(ptr)) = json.Number(iter.ReadString())
case NilValue:
iter.skipFourBytes('n', 'u', 'l', 'l')
*((*json.Number)(ptr)) = ""
default:
*((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
}
}
func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
number := *((*json.Number)(ptr))
if len(number) == 0 {
stream.writeByte('0')
} else {
stream.WriteRaw(string(number))
}
}
func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*json.Number)(ptr))) == 0
}
type jsoniterNumberCodec struct {
}
func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
switch iter.WhatIsNext() {
case StringValue:
*((*Number)(ptr)) = Number(iter.ReadString())
case NilValue:
iter.skipFourBytes('n', 'u', 'l', 'l')
*((*Number)(ptr)) = ""
default:
*((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
}
}
func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
number := *((*Number)(ptr))
if len(number) == 0 {
stream.writeByte('0')
} else {
stream.WriteRaw(string(number))
}
}
func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*Number)(ptr))) == 0
}

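A usage sketch for the Number codecs above (assuming jsoniter's public Config and package-level API, which are not part of this diff): UseNumber keeps untyped numbers as json.Number, and the codec also accepts numbers quoted as strings.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// With UseNumber, untyped JSON numbers decode to json.Number instead of float64.
	api := jsoniter.Config{UseNumber: true}.Froze()
	var v interface{}
	_ = api.Unmarshal([]byte(`123456789012345678`), &v)
	fmt.Printf("%T %v\n", v, v) // json.Number 123456789012345678

	// jsoniterNumberCodec accepts a quoted number as well.
	var n jsoniter.Number
	_ = jsoniter.Unmarshal([]byte(`"3.14"`), &n)
	f, _ := n.Float64()
	fmt.Println(f) // 3.14
}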
vendor/github.com/json-iterator/go/reflect_json_raw_message.go

@ -0,0 +1,60 @@
package jsoniter
import (
"encoding/json"
"github.com/modern-go/reflect2"
"unsafe"
)
var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()
var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()
func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder {
if typ == jsonRawMessageType {
return &jsonRawMessageCodec{}
}
if typ == jsoniterRawMessageType {
return &jsoniterRawMessageCodec{}
}
return nil
}
func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder {
if typ == jsonRawMessageType {
return &jsonRawMessageCodec{}
}
if typ == jsoniterRawMessageType {
return &jsoniterRawMessageCodec{}
}
return nil
}
type jsonRawMessageCodec struct {
}
func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes())
}
func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
}
func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*json.RawMessage)(ptr))) == 0
}
type jsoniterRawMessageCodec struct {
}
func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes())
}
func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteRaw(string(*((*RawMessage)(ptr))))
}
func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*RawMessage)(ptr))) == 0
}

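An illustrative round trip through the raw-message codecs above (the envelope struct is invented; jsoniter.Marshal/Unmarshal are the assumed public entry points): the payload bytes are passed through untouched in both directions.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type envelope struct {
	Kind    string              `json:"kind"`
	Payload jsoniter.RawMessage `json:"payload"`
}

func main() {
	var e envelope
	_ = jsoniter.Unmarshal([]byte(`{"kind":"ping","payload":{"seq":7}}`), &e)
	fmt.Println(string(e.Payload)) // {"seq":7}

	out, _ := jsoniter.Marshal(e)
	fmt.Println(string(out)) // {"kind":"ping","payload":{"seq":7}}
}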
vendor/github.com/json-iterator/go/reflect_map.go generated vendored Normal file

@ -0,0 +1,318 @@
package jsoniter
import (
"fmt"
"github.com/modern-go/reflect2"
"io"
"reflect"
"sort"
"unsafe"
)
func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder {
mapType := typ.(*reflect2.UnsafeMapType)
keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key())
elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem())
return &mapDecoder{
mapType: mapType,
keyType: mapType.Key(),
elemType: mapType.Elem(),
keyDecoder: keyDecoder,
elemDecoder: elemDecoder,
}
}
func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder {
mapType := typ.(*reflect2.UnsafeMapType)
if ctx.sortMapKeys {
return &sortKeysMapEncoder{
mapType: mapType,
keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
}
}
return &mapEncoder{
mapType: mapType,
keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
}
}
func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
for _, extension := range ctx.extensions {
decoder := extension.CreateMapKeyDecoder(typ)
if decoder != nil {
return decoder
}
}
switch typ.Kind() {
case reflect.String:
return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
case reflect.Bool,
reflect.Uint8, reflect.Int8,
reflect.Uint16, reflect.Int16,
reflect.Uint32, reflect.Int32,
reflect.Uint64, reflect.Int64,
reflect.Uint, reflect.Int,
reflect.Float32, reflect.Float64,
reflect.Uintptr:
typ = reflect2.DefaultTypeOfKind(typ.Kind())
return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
default:
ptrType := reflect2.PtrTo(typ)
if ptrType.Implements(textMarshalerType) {
return &referenceDecoder{
&textUnmarshalerDecoder{
valType: ptrType,
},
}
}
if typ.Implements(textMarshalerType) {
return &textUnmarshalerDecoder{
valType: typ,
}
}
return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
}
}
func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
for _, extension := range ctx.extensions {
encoder := extension.CreateMapKeyEncoder(typ)
if encoder != nil {
return encoder
}
}
switch typ.Kind() {
case reflect.String:
return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
case reflect.Bool,
reflect.Uint8, reflect.Int8,
reflect.Uint16, reflect.Int16,
reflect.Uint32, reflect.Int32,
reflect.Uint64, reflect.Int64,
reflect.Uint, reflect.Int,
reflect.Float32, reflect.Float64,
reflect.Uintptr:
typ = reflect2.DefaultTypeOfKind(typ.Kind())
return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
default:
if typ == textMarshalerType {
return &directTextMarshalerEncoder{
stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
}
}
if typ.Implements(textMarshalerType) {
return &textMarshalerEncoder{
valType: typ,
stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
}
}
if typ.Kind() == reflect.Interface {
return &dynamicMapKeyEncoder{ctx, typ}
}
return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
}
}
type mapDecoder struct {
mapType *reflect2.UnsafeMapType
keyType reflect2.Type
elemType reflect2.Type
keyDecoder ValDecoder
elemDecoder ValDecoder
}
func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
mapType := decoder.mapType
c := iter.nextToken()
if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l')
*(*unsafe.Pointer)(ptr) = nil
mapType.UnsafeSet(ptr, mapType.UnsafeNew())
return
}
if mapType.UnsafeIsNil(ptr) {
mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0))
}
if c != '{' {
iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
return
}
c = iter.nextToken()
if c == '}' {
return
}
if c != '"' {
iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
return
}
iter.unreadByte()
key := decoder.keyType.UnsafeNew()
decoder.keyDecoder.Decode(key, iter)
c = iter.nextToken()
if c != ':' {
iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
return
}
elem := decoder.elemType.UnsafeNew()
decoder.elemDecoder.Decode(elem, iter)
decoder.mapType.UnsafeSetIndex(ptr, key, elem)
for c = iter.nextToken(); c == ','; c = iter.nextToken() {
key := decoder.keyType.UnsafeNew()
decoder.keyDecoder.Decode(key, iter)
c = iter.nextToken()
if c != ':' {
iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
return
}
elem := decoder.elemType.UnsafeNew()
decoder.elemDecoder.Decode(elem, iter)
decoder.mapType.UnsafeSetIndex(ptr, key, elem)
}
if c != '}' {
iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c}))
}
}
type numericMapKeyDecoder struct {
decoder ValDecoder
}
func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
c := iter.nextToken()
if c != '"' {
iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
return
}
decoder.decoder.Decode(ptr, iter)
c = iter.nextToken()
if c != '"' {
iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
return
}
}
type numericMapKeyEncoder struct {
encoder ValEncoder
}
func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.writeByte('"')
encoder.encoder.Encode(ptr, stream)
stream.writeByte('"')
}
func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return false
}
type dynamicMapKeyEncoder struct {
ctx *ctx
valType reflect2.Type
}
func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
obj := encoder.valType.UnsafeIndirect(ptr)
encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream)
}
func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
obj := encoder.valType.UnsafeIndirect(ptr)
return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj))
}
type mapEncoder struct {
mapType *reflect2.UnsafeMapType
keyEncoder ValEncoder
elemEncoder ValEncoder
}
func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteObjectStart()
iter := encoder.mapType.UnsafeIterate(ptr)
for i := 0; iter.HasNext(); i++ {
if i != 0 {
stream.WriteMore()
}
key, elem := iter.UnsafeNext()
encoder.keyEncoder.Encode(key, stream)
if stream.indention > 0 {
stream.writeTwoBytes(byte(':'), byte(' '))
} else {
stream.writeByte(':')
}
encoder.elemEncoder.Encode(elem, stream)
}
stream.WriteObjectEnd()
}
func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
iter := encoder.mapType.UnsafeIterate(ptr)
return !iter.HasNext()
}
type sortKeysMapEncoder struct {
mapType *reflect2.UnsafeMapType
keyEncoder ValEncoder
elemEncoder ValEncoder
}
func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
if *(*unsafe.Pointer)(ptr) == nil {
stream.WriteNil()
return
}
stream.WriteObjectStart()
mapIter := encoder.mapType.UnsafeIterate(ptr)
subStream := stream.cfg.BorrowStream(nil)
subIter := stream.cfg.BorrowIterator(nil)
keyValues := encodedKeyValues{}
for mapIter.HasNext() {
subStream.buf = make([]byte, 0, 64)
key, elem := mapIter.UnsafeNext()
encoder.keyEncoder.Encode(key, subStream)
if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
stream.Error = subStream.Error
}
encodedKey := subStream.Buffer()
subIter.ResetBytes(encodedKey)
decodedKey := subIter.ReadString()
if stream.indention > 0 {
subStream.writeTwoBytes(byte(':'), byte(' '))
} else {
subStream.writeByte(':')
}
encoder.elemEncoder.Encode(elem, subStream)
keyValues = append(keyValues, encodedKV{
key: decodedKey,
keyValue: subStream.Buffer(),
})
}
sort.Sort(keyValues)
for i, keyValue := range keyValues {
if i != 0 {
stream.WriteMore()
}
stream.Write(keyValue.keyValue)
}
stream.WriteObjectEnd()
stream.cfg.ReturnStream(subStream)
stream.cfg.ReturnIterator(subIter)
}
func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
iter := encoder.mapType.UnsafeIterate(ptr)
return !iter.HasNext()
}
type encodedKeyValues []encodedKV
type encodedKV struct {
key string
keyValue []byte
}
func (sv encodedKeyValues) Len() int { return len(sv) }
func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key }

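A usage sketch for the map encoders above, assuming jsoniter's public Config API: SortMapKeys selects sortKeysMapEncoder for deterministic output, and numeric keys are quoted via numericMapKeyEncoder.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	api := jsoniter.Config{SortMapKeys: true}.Froze()

	// Keys are emitted in sorted order instead of Go's randomized map iteration order.
	out, _ := api.MarshalToString(map[string]int{"b": 2, "a": 1, "c": 3})
	fmt.Println(out) // {"a":1,"b":2,"c":3}

	// Integer keys are wrapped in quotes so they remain valid JSON object keys.
	out, _ = api.MarshalToString(map[int]string{2: "two", 1: "one"})
	fmt.Println(out) // {"1":"one","2":"two"}
}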
vendor/github.com/json-iterator/go/reflect_marshaler.go generated vendored Normal file

@ -0,0 +1,218 @@
package jsoniter
import (
"encoding"
"encoding/json"
"github.com/modern-go/reflect2"
"unsafe"
)
var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem()
var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem()
var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem()
func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder {
ptrType := reflect2.PtrTo(typ)
if ptrType.Implements(unmarshalerType) {
return &referenceDecoder{
&unmarshalerDecoder{ptrType},
}
}
if ptrType.Implements(textUnmarshalerType) {
return &referenceDecoder{
&textUnmarshalerDecoder{ptrType},
}
}
return nil
}
func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder {
if typ == marshalerType {
checkIsEmpty := createCheckIsEmpty(ctx, typ)
var encoder ValEncoder = &directMarshalerEncoder{
checkIsEmpty: checkIsEmpty,
}
return encoder
}
if typ.Implements(marshalerType) {
checkIsEmpty := createCheckIsEmpty(ctx, typ)
var encoder ValEncoder = &marshalerEncoder{
valType: typ,
checkIsEmpty: checkIsEmpty,
}
return encoder
}
ptrType := reflect2.PtrTo(typ)
if ctx.prefix != "" && ptrType.Implements(marshalerType) {
checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
var encoder ValEncoder = &marshalerEncoder{
valType: ptrType,
checkIsEmpty: checkIsEmpty,
}
return &referenceEncoder{encoder}
}
if typ == textMarshalerType {
checkIsEmpty := createCheckIsEmpty(ctx, typ)
var encoder ValEncoder = &directTextMarshalerEncoder{
checkIsEmpty: checkIsEmpty,
stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
}
return encoder
}
if typ.Implements(textMarshalerType) {
checkIsEmpty := createCheckIsEmpty(ctx, typ)
var encoder ValEncoder = &textMarshalerEncoder{
valType: typ,
stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
checkIsEmpty: checkIsEmpty,
}
return encoder
}
// if prefix is empty, the type is the root type
if ctx.prefix != "" && ptrType.Implements(textMarshalerType) {
checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
var encoder ValEncoder = &textMarshalerEncoder{
valType: ptrType,
stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
checkIsEmpty: checkIsEmpty,
}
return &referenceEncoder{encoder}
}
return nil
}
type marshalerEncoder struct {
checkIsEmpty checkIsEmpty
valType reflect2.Type
}
func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
obj := encoder.valType.UnsafeIndirect(ptr)
if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
stream.WriteNil()
return
}
marshaler := obj.(json.Marshaler)
bytes, err := marshaler.MarshalJSON()
if err != nil {
stream.Error = err
} else {
stream.Write(bytes)
}
}
func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.checkIsEmpty.IsEmpty(ptr)
}
type directMarshalerEncoder struct {
checkIsEmpty checkIsEmpty
}
func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
marshaler := *(*json.Marshaler)(ptr)
if marshaler == nil {
stream.WriteNil()
return
}
bytes, err := marshaler.MarshalJSON()
if err != nil {
stream.Error = err
} else {
stream.Write(bytes)
}
}
func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.checkIsEmpty.IsEmpty(ptr)
}
type textMarshalerEncoder struct {
valType reflect2.Type
stringEncoder ValEncoder
checkIsEmpty checkIsEmpty
}
func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
obj := encoder.valType.UnsafeIndirect(ptr)
if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
stream.WriteNil()
return
}
marshaler := (obj).(encoding.TextMarshaler)
bytes, err := marshaler.MarshalText()
if err != nil {
stream.Error = err
} else {
str := string(bytes)
encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
}
}
func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.checkIsEmpty.IsEmpty(ptr)
}
type directTextMarshalerEncoder struct {
stringEncoder ValEncoder
checkIsEmpty checkIsEmpty
}
func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
marshaler := *(*encoding.TextMarshaler)(ptr)
if marshaler == nil {
stream.WriteNil()
return
}
bytes, err := marshaler.MarshalText()
if err != nil {
stream.Error = err
} else {
str := string(bytes)
encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
}
}
func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.checkIsEmpty.IsEmpty(ptr)
}
type unmarshalerDecoder struct {
valType reflect2.Type
}
func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
valType := decoder.valType
obj := valType.UnsafeIndirect(ptr)
unmarshaler := obj.(json.Unmarshaler)
iter.nextToken()
iter.unreadByte() // skip spaces
bytes := iter.SkipAndReturnBytes()
err := unmarshaler.UnmarshalJSON(bytes)
if err != nil {
iter.ReportError("unmarshalerDecoder", err.Error())
}
}
type textUnmarshalerDecoder struct {
valType reflect2.Type
}
func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
valType := decoder.valType
obj := valType.UnsafeIndirect(ptr)
if reflect2.IsNil(obj) {
ptrType := valType.(*reflect2.UnsafePtrType)
elemType := ptrType.Elem()
elem := elemType.UnsafeNew()
ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem))
obj = valType.UnsafeIndirect(ptr)
}
unmarshaler := (obj).(encoding.TextUnmarshaler)
str := iter.ReadString()
err := unmarshaler.UnmarshalText([]byte(str))
if err != nil {
iter.ReportError("textUnmarshalerDecoder", err.Error())
}
}

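A sketch of how the marshaler support above is typically exercised (the Celsius type is invented for illustration): a type implementing json.Marshaler and json.Unmarshaler is routed through marshalerEncoder and unmarshalerDecoder.

package main

import (
	"fmt"
	"strconv"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

// Celsius renders itself as a quoted string with a unit suffix.
type Celsius float64

func (c Celsius) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`"%.1fC"`, float64(c))), nil
}

func (c *Celsius) UnmarshalJSON(data []byte) error {
	f, err := strconv.ParseFloat(strings.Trim(string(data), `"C`), 64)
	if err != nil {
		return err
	}
	*c = Celsius(f)
	return nil
}

func main() {
	out, _ := jsoniter.Marshal(Celsius(21.5))
	fmt.Println(string(out)) // "21.5C"

	var c Celsius
	_ = jsoniter.Unmarshal(out, &c)
	fmt.Println(float64(c)) // 21.5
}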
vendor/github.com/json-iterator/go/reflect_native.go generated vendored Normal file

@ -0,0 +1,451 @@
package jsoniter
import (
"encoding/base64"
"reflect"
"strconv"
"unsafe"
"github.com/modern-go/reflect2"
)
const ptrSize = 32 << uintptr(^uintptr(0)>>63)
func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder {
if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
sliceDecoder := decoderOfSlice(ctx, typ)
return &base64Codec{sliceDecoder: sliceDecoder}
}
typeName := typ.String()
kind := typ.Kind()
switch kind {
case reflect.String:
if typeName != "string" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
}
return &stringCodec{}
case reflect.Int:
if typeName != "int" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
}
if strconv.IntSize == 32 {
return &int32Codec{}
}
return &int64Codec{}
case reflect.Int8:
if typeName != "int8" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
}
return &int8Codec{}
case reflect.Int16:
if typeName != "int16" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
}
return &int16Codec{}
case reflect.Int32:
if typeName != "int32" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
}
return &int32Codec{}
case reflect.Int64:
if typeName != "int64" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
}
return &int64Codec{}
case reflect.Uint:
if typeName != "uint" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
}
if strconv.IntSize == 32 {
return &uint32Codec{}
}
return &uint64Codec{}
case reflect.Uint8:
if typeName != "uint8" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
}
return &uint8Codec{}
case reflect.Uint16:
if typeName != "uint16" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
}
return &uint16Codec{}
case reflect.Uint32:
if typeName != "uint32" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
}
return &uint32Codec{}
case reflect.Uintptr:
if typeName != "uintptr" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
}
if ptrSize == 32 {
return &uint32Codec{}
}
return &uint64Codec{}
case reflect.Uint64:
if typeName != "uint64" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
}
return &uint64Codec{}
case reflect.Float32:
if typeName != "float32" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
}
return &float32Codec{}
case reflect.Float64:
if typeName != "float64" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
}
return &float64Codec{}
case reflect.Bool:
if typeName != "bool" {
return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
}
return &boolCodec{}
}
return nil
}
func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder {
if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
sliceDecoder := decoderOfSlice(ctx, typ)
return &base64Codec{sliceDecoder: sliceDecoder}
}
typeName := typ.String()
switch typ.Kind() {
case reflect.String:
if typeName != "string" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
}
return &stringCodec{}
case reflect.Int:
if typeName != "int" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
}
if strconv.IntSize == 32 {
return &int32Codec{}
}
return &int64Codec{}
case reflect.Int8:
if typeName != "int8" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
}
return &int8Codec{}
case reflect.Int16:
if typeName != "int16" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
}
return &int16Codec{}
case reflect.Int32:
if typeName != "int32" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
}
return &int32Codec{}
case reflect.Int64:
if typeName != "int64" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
}
return &int64Codec{}
case reflect.Uint:
if typeName != "uint" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
}
if strconv.IntSize == 32 {
return &uint32Codec{}
}
return &uint64Codec{}
case reflect.Uint8:
if typeName != "uint8" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
}
return &uint8Codec{}
case reflect.Uint16:
if typeName != "uint16" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
}
return &uint16Codec{}
case reflect.Uint32:
if typeName != "uint32" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
}
return &uint32Codec{}
case reflect.Uintptr:
if typeName != "uintptr" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
}
if ptrSize == 32 {
return &uint32Codec{}
}
return &uint64Codec{}
case reflect.Uint64:
if typeName != "uint64" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
}
return &uint64Codec{}
case reflect.Float32:
if typeName != "float32" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
}
return &float32Codec{}
case reflect.Float64:
if typeName != "float64" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
}
return &float64Codec{}
case reflect.Bool:
if typeName != "bool" {
return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
}
return &boolCodec{}
}
return nil
}
type stringCodec struct {
}
func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*string)(ptr)) = iter.ReadString()
}
func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
str := *((*string)(ptr))
stream.WriteString(str)
}
func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*string)(ptr)) == ""
}
type int8Codec struct {
}
func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*int8)(ptr)) = iter.ReadInt8()
}
}
func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt8(*((*int8)(ptr)))
}
func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int8)(ptr)) == 0
}
type int16Codec struct {
}
func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*int16)(ptr)) = iter.ReadInt16()
}
}
func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt16(*((*int16)(ptr)))
}
func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int16)(ptr)) == 0
}
type int32Codec struct {
}
func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*int32)(ptr)) = iter.ReadInt32()
}
}
func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt32(*((*int32)(ptr)))
}
func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int32)(ptr)) == 0
}
type int64Codec struct {
}
func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*int64)(ptr)) = iter.ReadInt64()
}
}
func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteInt64(*((*int64)(ptr)))
}
func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*int64)(ptr)) == 0
}
type uint8Codec struct {
}
func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*uint8)(ptr)) = iter.ReadUint8()
}
}
func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint8(*((*uint8)(ptr)))
}
func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint8)(ptr)) == 0
}
type uint16Codec struct {
}
func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*uint16)(ptr)) = iter.ReadUint16()
}
}
func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint16(*((*uint16)(ptr)))
}
func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint16)(ptr)) == 0
}
type uint32Codec struct {
}
func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*uint32)(ptr)) = iter.ReadUint32()
}
}
func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint32(*((*uint32)(ptr)))
}
func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint32)(ptr)) == 0
}
type uint64Codec struct {
}
func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*uint64)(ptr)) = iter.ReadUint64()
}
}
func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteUint64(*((*uint64)(ptr)))
}
func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*uint64)(ptr)) == 0
}
type float32Codec struct {
}
func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*float32)(ptr)) = iter.ReadFloat32()
}
}
func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteFloat32(*((*float32)(ptr)))
}
func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*float32)(ptr)) == 0
}
type float64Codec struct {
}
func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*float64)(ptr)) = iter.ReadFloat64()
}
}
func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteFloat64(*((*float64)(ptr)))
}
func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
return *((*float64)(ptr)) == 0
}
type boolCodec struct {
}
func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.ReadNil() {
*((*bool)(ptr)) = iter.ReadBool()
}
}
func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteBool(*((*bool)(ptr)))
}
func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
return !(*((*bool)(ptr)))
}
type base64Codec struct {
sliceType *reflect2.UnsafeSliceType
sliceDecoder ValDecoder
}
func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
if iter.ReadNil() {
codec.sliceType.UnsafeSetNil(ptr)
return
}
switch iter.WhatIsNext() {
case StringValue:
src := iter.ReadString()
dst, err := base64.StdEncoding.DecodeString(src)
if err != nil {
iter.ReportError("decode base64", err.Error())
} else {
codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst))
}
case ArrayValue:
codec.sliceDecoder.Decode(ptr, iter)
default:
iter.ReportError("base64Codec", "invalid input")
}
}
func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
src := *((*[]byte)(ptr))
if len(src) == 0 {
stream.WriteNil()
return
}
encoding := base64.StdEncoding
stream.writeByte('"')
size := encoding.EncodedLen(len(src))
buf := make([]byte, size)
encoding.Encode(buf, src)
stream.buf = append(stream.buf, buf...)
stream.writeByte('"')
}
func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
return len(*((*[]byte)(ptr))) == 0
}

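An illustrative sketch of the []byte handling above: base64Codec encodes byte slices as base64 strings and accepts either a base64 string or a plain JSON array when decoding (jsoniter's package-level API is assumed).

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	out, _ := jsoniter.Marshal([]byte("hello"))
	fmt.Println(string(out)) // "aGVsbG8="

	var b []byte
	_ = jsoniter.Unmarshal([]byte(`"aGVsbG8="`), &b)
	fmt.Println(string(b)) // hello

	// A JSON array of numbers is routed to the slice decoder instead.
	_ = jsoniter.Unmarshal([]byte(`[104,105]`), &b)
	fmt.Println(string(b)) // hi
}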
vendor/github.com/json-iterator/go/reflect_optional.go generated vendored Normal file

@ -0,0 +1,133 @@
package jsoniter
import (
"github.com/modern-go/reflect2"
"reflect"
"unsafe"
)
func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
ptrType := typ.(*reflect2.UnsafePtrType)
elemType := ptrType.Elem()
decoder := decoderOfType(ctx, elemType)
if ctx.prefix == "" && elemType.Kind() == reflect.Ptr {
return &dereferenceDecoder{elemType, decoder}
}
return &OptionalDecoder{elemType, decoder}
}
func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder {
ptrType := typ.(*reflect2.UnsafePtrType)
elemType := ptrType.Elem()
elemEncoder := encoderOfType(ctx, elemType)
encoder := &OptionalEncoder{elemEncoder}
return encoder
}
type OptionalDecoder struct {
ValueType reflect2.Type
ValueDecoder ValDecoder
}
func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if iter.ReadNil() {
*((*unsafe.Pointer)(ptr)) = nil
} else {
if *((*unsafe.Pointer)(ptr)) == nil {
//pointer to null, we have to allocate memory to hold the value
newPtr := decoder.ValueType.UnsafeNew()
decoder.ValueDecoder.Decode(newPtr, iter)
*((*unsafe.Pointer)(ptr)) = newPtr
} else {
//reuse existing instance
decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
}
}
}
type dereferenceDecoder struct {
// only to deference a pointer
valueType reflect2.Type
valueDecoder ValDecoder
}
func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if *((*unsafe.Pointer)(ptr)) == nil {
//pointer to null, we have to allocate memory to hold the value
newPtr := decoder.valueType.UnsafeNew()
decoder.valueDecoder.Decode(newPtr, iter)
*((*unsafe.Pointer)(ptr)) = newPtr
} else {
//reuse existing instance
decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
}
}
type OptionalEncoder struct {
ValueEncoder ValEncoder
}
func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
if *((*unsafe.Pointer)(ptr)) == nil {
stream.WriteNil()
} else {
encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
}
}
func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return *((*unsafe.Pointer)(ptr)) == nil
}
type dereferenceEncoder struct {
ValueEncoder ValEncoder
}
func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
if *((*unsafe.Pointer)(ptr)) == nil {
stream.WriteNil()
} else {
encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
}
}
func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
dePtr := *((*unsafe.Pointer)(ptr))
if dePtr == nil {
return true
}
return encoder.ValueEncoder.IsEmpty(dePtr)
}
func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
deReferenced := *((*unsafe.Pointer)(ptr))
if deReferenced == nil {
return true
}
isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil)
if !converted {
return false
}
fieldPtr := unsafe.Pointer(deReferenced)
return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
}
type referenceEncoder struct {
encoder ValEncoder
}
func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
}
func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
}
type referenceDecoder struct {
decoder ValDecoder
}
func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
decoder.decoder.Decode(unsafe.Pointer(&ptr), iter)
}

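A usage sketch for the pointer (optional) codecs above, with an invented patch struct: null resets a pointer field to nil, any other value allocates and fills it, and nil pointers encode back to null.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type patch struct {
	Name *string `json:"name"`
	Age  *int    `json:"age"`
}

func main() {
	var p patch
	_ = jsoniter.Unmarshal([]byte(`{"name":"bob","age":null}`), &p)
	fmt.Println(*p.Name, p.Age == nil) // bob true

	out, _ := jsoniter.Marshal(p)
	fmt.Println(string(out)) // {"name":"bob","age":null}
}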
vendor/github.com/json-iterator/go/reflect_slice.go generated vendored Normal file

@ -0,0 +1,99 @@
package jsoniter
import (
"fmt"
"github.com/modern-go/reflect2"
"io"
"unsafe"
)
func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder {
sliceType := typ.(*reflect2.UnsafeSliceType)
decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
return &sliceDecoder{sliceType, decoder}
}
func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder {
sliceType := typ.(*reflect2.UnsafeSliceType)
encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
return &sliceEncoder{sliceType, encoder}
}
type sliceEncoder struct {
sliceType *reflect2.UnsafeSliceType
elemEncoder ValEncoder
}
func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
if encoder.sliceType.UnsafeIsNil(ptr) {
stream.WriteNil()
return
}
length := encoder.sliceType.UnsafeLengthOf(ptr)
if length == 0 {
stream.WriteEmptyArray()
return
}
stream.WriteArrayStart()
encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream)
for i := 1; i < length; i++ {
stream.WriteMore()
elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i)
encoder.elemEncoder.Encode(elemPtr, stream)
}
stream.WriteArrayEnd()
if stream.Error != nil && stream.Error != io.EOF {
stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
}
}
func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.sliceType.UnsafeLengthOf(ptr) == 0
}
type sliceDecoder struct {
sliceType *reflect2.UnsafeSliceType
elemDecoder ValDecoder
}
func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
decoder.doDecode(ptr, iter)
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
}
}
func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
c := iter.nextToken()
sliceType := decoder.sliceType
if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l')
sliceType.UnsafeSetNil(ptr)
return
}
if c != '[' {
iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c}))
return
}
c = iter.nextToken()
if c == ']' {
sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0))
return
}
iter.unreadByte()
sliceType.UnsafeGrow(ptr, 1)
elemPtr := sliceType.UnsafeGetIndex(ptr, 0)
decoder.elemDecoder.Decode(elemPtr, iter)
length := 1
for c = iter.nextToken(); c == ','; c = iter.nextToken() {
idx := length
length += 1
sliceType.UnsafeGrow(ptr, length)
elemPtr = sliceType.UnsafeGetIndex(ptr, idx)
decoder.elemDecoder.Decode(elemPtr, iter)
}
if c != ']' {
iter.ReportError("decode slice", "expect ], but found "+string([]byte{c}))
return
}
}

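A quick sketch of the slice decoder semantics above (using jsoniter's assumed drop-in API): an explicit empty array yields an empty non-nil slice, while null leaves the slice nil.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var a []int
	_ = jsoniter.Unmarshal([]byte(`[]`), &a)
	fmt.Println(a == nil, len(a)) // false 0

	var b []int
	_ = jsoniter.Unmarshal([]byte(`null`), &b)
	fmt.Println(b == nil) // true

	var c []int
	_ = jsoniter.Unmarshal([]byte(`[3,1,2]`), &c)
	fmt.Println(c) // [3 1 2]
}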
vendor/github.com/json-iterator/go/reflect_struct_decoder.go

@ -3,38 +3,78 @@ package jsoniter
import (
"fmt"
"io"
- "reflect"
"strings"
"unsafe"
+ "github.com/modern-go/reflect2"
)
- func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder) (ValDecoder, error) {
- knownHash := map[int32]struct{}{
+ func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder {
+ bindings := map[string]*Binding{}
+ structDescriptor := describeStruct(ctx, typ)
+ for _, binding := range structDescriptor.Fields {
+ for _, fromName := range binding.FromNames {
+ old := bindings[fromName]
+ if old == nil {
+ bindings[fromName] = binding
+ continue
+ }
+ ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding)
+ if ignoreOld {
+ delete(bindings, fromName)
+ }
+ if !ignoreNew {
+ bindings[fromName] = binding
+ }
+ }
+ }
+ fields := map[string]*structFieldDecoder{}
+ for k, binding := range bindings {
+ fields[k] = binding.Decoder.(*structFieldDecoder)
+ }
+ if !ctx.caseSensitive() {
+ for k, binding := range bindings {
+ if _, found := fields[strings.ToLower(k)]; !found {
+ fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
+ }
+ }
+ }
+ return createStructDecoder(ctx, typ, fields)
+ }
+ func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder {
+ if ctx.disallowUnknownFields {
+ return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true}
+ }
+ knownHash := map[int64]struct{}{
0: {},
}
switch len(fields) {
case 0:
- return &skipObjectDecoder{typ}, nil
+ return &skipObjectDecoder{typ}
case 1:
for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName)
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
_, known := knownHash[fieldHash]
if known {
- return &generalStructDecoder{typ, fields}, nil
+ return &generalStructDecoder{typ, fields, false}
}
knownHash[fieldHash] = struct{}{}
- return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}, nil
+ return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}
}
case 2:
- var fieldHash1 int32
- var fieldHash2 int32
+ var fieldHash1 int64
+ var fieldHash2 int64
var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder
for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName)
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
_, known := knownHash[fieldHash]
if known {
- return &generalStructDecoder{typ, fields}, nil
+ return &generalStructDecoder{typ, fields, false}
}
knownHash[fieldHash] = struct{}{}
if fieldHash1 == 0 {
@ -45,19 +85,19 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder
fieldDecoder2 = fieldDecoder
}
}
- return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}, nil
+ return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}
case 3:
- var fieldName1 int32
- var fieldName2 int32
- var fieldName3 int32
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder
for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName)
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
_, known := knownHash[fieldHash]
if known {
- return &generalStructDecoder{typ, fields}, nil
+ return &generalStructDecoder{typ, fields, false}
}
knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 {
@ -72,21 +112,23 @@
}
}
return &threeFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3}, nil
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3}
case 4:
- var fieldName1 int32
- var fieldName2 int32
- var fieldName3 int32
- var fieldName4 int32
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder
var fieldDecoder4 *structFieldDecoder
for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName)
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
_, known := knownHash[fieldHash]
if known {
- return &generalStructDecoder{typ, fields}, nil
+ return &generalStructDecoder{typ, fields, false}
}
knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 {
@ -104,24 +146,26 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder
}
}
return &fourFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4}, nil
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
fieldName3, fieldDecoder3,
fieldName4, fieldDecoder4}
case 5: case 5:
var fieldName1 int32 var fieldName1 int64
var fieldName2 int32 var fieldName2 int64
var fieldName3 int32 var fieldName3 int64
var fieldName4 int32 var fieldName4 int64
var fieldName5 int32 var fieldName5 int64
var fieldDecoder1 *structFieldDecoder var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder var fieldDecoder3 *structFieldDecoder
var fieldDecoder4 *structFieldDecoder var fieldDecoder4 *structFieldDecoder
var fieldDecoder5 *structFieldDecoder var fieldDecoder5 *structFieldDecoder
for fieldName, fieldDecoder := range fields { for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName) fieldHash := calcHash(fieldName, ctx.caseSensitive())
_, known := knownHash[fieldHash] _, known := knownHash[fieldHash]
if known { if known {
return &generalStructDecoder{typ, fields}, nil return &generalStructDecoder{typ, fields, false}
} }
knownHash[fieldHash] = struct{}{} knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 { if fieldName1 == 0 {
@ -142,15 +186,18 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder
} }
} }
return &fiveFieldsStructDecoder{typ, return &fiveFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, fieldName1, fieldDecoder1,
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5}, nil fieldName2, fieldDecoder2,
fieldName3, fieldDecoder3,
fieldName4, fieldDecoder4,
fieldName5, fieldDecoder5}
case 6: case 6:
var fieldName1 int32 var fieldName1 int64
var fieldName2 int32 var fieldName2 int64
var fieldName3 int32 var fieldName3 int64
var fieldName4 int32 var fieldName4 int64
var fieldName5 int32 var fieldName5 int64
var fieldName6 int32 var fieldName6 int64
var fieldDecoder1 *structFieldDecoder var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder var fieldDecoder3 *structFieldDecoder
@ -158,10 +205,10 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder
var fieldDecoder5 *structFieldDecoder var fieldDecoder5 *structFieldDecoder
var fieldDecoder6 *structFieldDecoder var fieldDecoder6 *structFieldDecoder
for fieldName, fieldDecoder := range fields { for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName) fieldHash := calcHash(fieldName, ctx.caseSensitive())
_, known := knownHash[fieldHash] _, known := knownHash[fieldHash]
if known { if known {
return &generalStructDecoder{typ, fields}, nil return &generalStructDecoder{typ, fields, false}
} }
knownHash[fieldHash] = struct{}{} knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 { if fieldName1 == 0 {
@ -185,16 +232,20 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder
} }
} }
return &sixFieldsStructDecoder{typ, return &sixFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, fieldName1, fieldDecoder1,
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6}, nil fieldName2, fieldDecoder2,
fieldName3, fieldDecoder3,
fieldName4, fieldDecoder4,
fieldName5, fieldDecoder5,
fieldName6, fieldDecoder6}
case 7: case 7:
var fieldName1 int32 var fieldName1 int64
var fieldName2 int32 var fieldName2 int64
var fieldName3 int32 var fieldName3 int64
var fieldName4 int32 var fieldName4 int64
var fieldName5 int32 var fieldName5 int64
var fieldName6 int32 var fieldName6 int64
var fieldName7 int32 var fieldName7 int64
var fieldDecoder1 *structFieldDecoder var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder var fieldDecoder3 *structFieldDecoder
@ -203,10 +254,10 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder
var fieldDecoder6 *structFieldDecoder var fieldDecoder6 *structFieldDecoder
var fieldDecoder7 *structFieldDecoder var fieldDecoder7 *structFieldDecoder
for fieldName, fieldDecoder := range fields { for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName) fieldHash := calcHash(fieldName, ctx.caseSensitive())
_, known := knownHash[fieldHash] _, known := knownHash[fieldHash]
if known { if known {
return &generalStructDecoder{typ, fields}, nil return &generalStructDecoder{typ, fields, false}
} }
knownHash[fieldHash] = struct{}{} knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 { if fieldName1 == 0 {
@ -233,18 +284,22 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder
} }
} }
return &sevenFieldsStructDecoder{typ, return &sevenFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, fieldName1, fieldDecoder1,
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, fieldName2, fieldDecoder2,
fieldName7, fieldDecoder7}, nil fieldName3, fieldDecoder3,
fieldName4, fieldDecoder4,
fieldName5, fieldDecoder5,
fieldName6, fieldDecoder6,
fieldName7, fieldDecoder7}
case 8: case 8:
var fieldName1 int32 var fieldName1 int64
var fieldName2 int32 var fieldName2 int64
var fieldName3 int32 var fieldName3 int64
var fieldName4 int32 var fieldName4 int64
var fieldName5 int32 var fieldName5 int64
var fieldName6 int32 var fieldName6 int64
var fieldName7 int32 var fieldName7 int64
var fieldName8 int32 var fieldName8 int64
var fieldDecoder1 *structFieldDecoder var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder var fieldDecoder3 *structFieldDecoder
@ -254,10 +309,10 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder
var fieldDecoder7 *structFieldDecoder var fieldDecoder7 *structFieldDecoder
var fieldDecoder8 *structFieldDecoder var fieldDecoder8 *structFieldDecoder
for fieldName, fieldDecoder := range fields { for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName) fieldHash := calcHash(fieldName, ctx.caseSensitive())
_, known := knownHash[fieldHash] _, known := knownHash[fieldHash]
if known { if known {
return &generalStructDecoder{typ, fields}, nil return &generalStructDecoder{typ, fields, false}
} }
knownHash[fieldHash] = struct{}{} knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 { if fieldName1 == 0 {
@ -287,19 +342,24 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder
} }
} }
return &eightFieldsStructDecoder{typ, return &eightFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, fieldName1, fieldDecoder1,
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, fieldName2, fieldDecoder2,
fieldName7, fieldDecoder7, fieldName8, fieldDecoder8}, nil fieldName3, fieldDecoder3,
fieldName4, fieldDecoder4,
fieldName5, fieldDecoder5,
fieldName6, fieldDecoder6,
fieldName7, fieldDecoder7,
fieldName8, fieldDecoder8}
case 9: case 9:
var fieldName1 int32 var fieldName1 int64
var fieldName2 int32 var fieldName2 int64
var fieldName3 int32 var fieldName3 int64
var fieldName4 int32 var fieldName4 int64
var fieldName5 int32 var fieldName5 int64
var fieldName6 int32 var fieldName6 int64
var fieldName7 int32 var fieldName7 int64
var fieldName8 int32 var fieldName8 int64
var fieldName9 int32 var fieldName9 int64
var fieldDecoder1 *structFieldDecoder var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder var fieldDecoder3 *structFieldDecoder
@ -310,10 +370,10 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder
var fieldDecoder8 *structFieldDecoder var fieldDecoder8 *structFieldDecoder
var fieldDecoder9 *structFieldDecoder var fieldDecoder9 *structFieldDecoder
for fieldName, fieldDecoder := range fields { for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName) fieldHash := calcHash(fieldName, ctx.caseSensitive())
_, known := knownHash[fieldHash] _, known := knownHash[fieldHash]
if known { if known {
return &generalStructDecoder{typ, fields}, nil return &generalStructDecoder{typ, fields, false}
} }
knownHash[fieldHash] = struct{}{} knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 { if fieldName1 == 0 {
@ -346,20 +406,26 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder
} }
} }
return &nineFieldsStructDecoder{typ, return &nineFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, fieldName1, fieldDecoder1,
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, fieldName2, fieldDecoder2,
fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9}, nil fieldName3, fieldDecoder3,
fieldName4, fieldDecoder4,
fieldName5, fieldDecoder5,
fieldName6, fieldDecoder6,
fieldName7, fieldDecoder7,
fieldName8, fieldDecoder8,
fieldName9, fieldDecoder9}
case 10: case 10:
var fieldName1 int32 var fieldName1 int64
var fieldName2 int32 var fieldName2 int64
var fieldName3 int32 var fieldName3 int64
var fieldName4 int32 var fieldName4 int64
var fieldName5 int32 var fieldName5 int64
var fieldName6 int32 var fieldName6 int64
var fieldName7 int32 var fieldName7 int64
var fieldName8 int32 var fieldName8 int64
var fieldName9 int32 var fieldName9 int64
var fieldName10 int32 var fieldName10 int64
var fieldDecoder1 *structFieldDecoder var fieldDecoder1 *structFieldDecoder
var fieldDecoder2 *structFieldDecoder var fieldDecoder2 *structFieldDecoder
var fieldDecoder3 *structFieldDecoder var fieldDecoder3 *structFieldDecoder
@ -371,10 +437,10 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder
var fieldDecoder9 *structFieldDecoder var fieldDecoder9 *structFieldDecoder
var fieldDecoder10 *structFieldDecoder var fieldDecoder10 *structFieldDecoder
for fieldName, fieldDecoder := range fields { for fieldName, fieldDecoder := range fields {
fieldHash := calcHash(fieldName) fieldHash := calcHash(fieldName, ctx.caseSensitive())
_, known := knownHash[fieldHash] _, known := knownHash[fieldHash]
if known { if known {
return &generalStructDecoder{typ, fields}, nil return &generalStructDecoder{typ, fields, false}
} }
knownHash[fieldHash] = struct{}{} knownHash[fieldHash] = struct{}{}
if fieldName1 == 0 { if fieldName1 == 0 {
@ -410,66 +476,80 @@ func createStructDecoder(typ reflect.Type, fields map[string]*structFieldDecoder
} }
} }
return &tenFieldsStructDecoder{typ, return &tenFieldsStructDecoder{typ,
fieldName1, fieldDecoder1, fieldName2, fieldDecoder2, fieldName3, fieldDecoder3, fieldName1, fieldDecoder1,
fieldName4, fieldDecoder4, fieldName5, fieldDecoder5, fieldName6, fieldDecoder6, fieldName2, fieldDecoder2,
fieldName7, fieldDecoder7, fieldName8, fieldDecoder8, fieldName9, fieldDecoder9, fieldName3, fieldDecoder3,
fieldName10, fieldDecoder10}, nil fieldName4, fieldDecoder4,
fieldName5, fieldDecoder5,
fieldName6, fieldDecoder6,
fieldName7, fieldDecoder7,
fieldName8, fieldDecoder8,
fieldName9, fieldDecoder9,
fieldName10, fieldDecoder10}
} }
return &generalStructDecoder{typ, fields}, nil return &generalStructDecoder{typ, fields, false}
} }
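
All of the specialised one- to ten-field decoders above compare 64-bit hashes of the incoming field name, which is what the int32 → int64 change throughout this hunk is about. calcHash itself is not part of this diff; purely as an illustration of the idea, a case-folding FNV-1a hash over the field name would behave like this (an assumption, not the library's exact implementation):

package main

import "fmt"

// illustrativeHash is a stand-in for jsoniter's calcHash: FNV-1a over the
// (optionally lower-cased) field name. The real implementation may differ.
func illustrativeHash(s string, caseSensitive bool) int64 {
	h := uint64(0xcbf29ce484222325) // FNV-1a 64-bit offset basis
	for i := 0; i < len(s); i++ {
		c := s[i]
		if !caseSensitive && c >= 'A' && c <= 'Z' {
			c += 'a' - 'A'
		}
		h ^= uint64(c)
		h *= 0x100000001b3 // FNV-1a 64-bit prime
	}
	return int64(h)
}

func main() {
	// With case folding, "Name" and "name" collapse to the same hash, which is
	// why createStructDecoder falls back to generalStructDecoder on collisions.
	fmt.Println(illustrativeHash("Name", false) == illustrativeHash("name", false)) // true
	fmt.Println(illustrativeHash("Name", true) == illustrativeHash("name", true))   // false
}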
type generalStructDecoder struct { type generalStructDecoder struct {
typ reflect.Type typ reflect2.Type
fields map[string]*structFieldDecoder fields map[string]*structFieldDecoder
disallowUnknownFields bool
} }
func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if !iter.readObjectStart() { if !iter.readObjectStart() {
return return
} }
var fieldBytes []byte var c byte
for c = ','; c == ','; c = iter.nextToken() {
decoder.decodeOneField(ptr, iter)
}
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
}
if c != '}' {
iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
}
}
func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
var field string var field string
var fieldDecoder *structFieldDecoder
if iter.cfg.objectFieldMustBeSimpleString { if iter.cfg.objectFieldMustBeSimpleString {
fieldBytes = iter.readObjectFieldAsBytes() fieldBytes := iter.ReadStringAsSlice()
field = *(*string)(unsafe.Pointer(&fieldBytes)) field = *(*string)(unsafe.Pointer(&fieldBytes))
fieldDecoder = decoder.fields[field]
if fieldDecoder == nil && !iter.cfg.caseSensitive {
fieldDecoder = decoder.fields[strings.ToLower(field)]
}
} else { } else {
field = iter.ReadString() field = iter.ReadString()
fieldDecoder = decoder.fields[field]
if fieldDecoder == nil && !iter.cfg.caseSensitive {
fieldDecoder = decoder.fields[strings.ToLower(field)]
}
}
if fieldDecoder == nil {
msg := "found unknown field: " + field
if decoder.disallowUnknownFields {
iter.ReportError("ReadObject", msg)
}
c := iter.nextToken() c := iter.nextToken()
if c != ':' { if c != ':' {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
} }
}
fieldDecoder := decoder.fields[strings.ToLower(field)]
if fieldDecoder == nil {
iter.Skip() iter.Skip()
} else { return
fieldDecoder.Decode(ptr, iter)
} }
for iter.nextToken() == ',' { c := iter.nextToken()
if iter.cfg.objectFieldMustBeSimpleString { if c != ':' {
fieldBytes := iter.readObjectFieldAsBytes() iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
field = *(*string)(unsafe.Pointer(&fieldBytes))
} else {
field = iter.ReadString()
c := iter.nextToken()
if c != ':' {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
}
fieldDecoder = decoder.fields[strings.ToLower(field)]
if fieldDecoder == nil {
iter.Skip()
} else {
fieldDecoder.Decode(ptr, iter)
}
}
if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error())
} }
fieldDecoder.Decode(ptr, iter)
} }
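
decodeOneField now reports an error for unknown keys when disallowUnknownFields is set, instead of silently skipping them. A sketch of the corresponding public option, assuming it is exposed as Config.DisallowUnknownFields (only the internal flag appears in this diff):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type pod struct {
	Name string `json:"name"`
}

func main() {
	// Assumed public counterpart of the internal disallowUnknownFields flag.
	strict := jsoniter.Config{DisallowUnknownFields: true}.Froze()

	var p pod
	// "image" has no matching struct field, so the strict config rejects it.
	err := strict.Unmarshal([]byte(`{"name":"web","image":"nginx"}`), &p)
	fmt.Println(err != nil) // true

	// The default config skips the unknown key, as before.
	fmt.Println(jsoniter.Unmarshal([]byte(`{"name":"web","image":"nginx"}`), &p)) // <nil>
}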
type skipObjectDecoder struct { type skipObjectDecoder struct {
typ reflect.Type typ reflect2.Type
} }
func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
@ -482,8 +562,8 @@ func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
} }
type oneFieldStructDecoder struct { type oneFieldStructDecoder struct {
typ reflect.Type typ reflect2.Type
fieldHash int32 fieldHash int64
fieldDecoder *structFieldDecoder fieldDecoder *structFieldDecoder
} }
@ -502,15 +582,15 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
} }
} }
if iter.Error != nil && iter.Error != io.EOF { if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
} }
} }
type twoFieldsStructDecoder struct { type twoFieldsStructDecoder struct {
typ reflect.Type typ reflect2.Type
fieldHash1 int32 fieldHash1 int64
fieldDecoder1 *structFieldDecoder fieldDecoder1 *structFieldDecoder
fieldHash2 int32 fieldHash2 int64
fieldDecoder2 *structFieldDecoder fieldDecoder2 *structFieldDecoder
} }
@ -532,17 +612,17 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
} }
} }
if iter.Error != nil && iter.Error != io.EOF { if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
} }
} }
type threeFieldsStructDecoder struct { type threeFieldsStructDecoder struct {
typ reflect.Type typ reflect2.Type
fieldHash1 int32 fieldHash1 int64
fieldDecoder1 *structFieldDecoder fieldDecoder1 *structFieldDecoder
fieldHash2 int32 fieldHash2 int64
fieldDecoder2 *structFieldDecoder fieldDecoder2 *structFieldDecoder
fieldHash3 int32 fieldHash3 int64
fieldDecoder3 *structFieldDecoder fieldDecoder3 *structFieldDecoder
} }
@ -566,19 +646,19 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
} }
} }
if iter.Error != nil && iter.Error != io.EOF { if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
} }
} }
type fourFieldsStructDecoder struct { type fourFieldsStructDecoder struct {
typ reflect.Type typ reflect2.Type
fieldHash1 int32 fieldHash1 int64
fieldDecoder1 *structFieldDecoder fieldDecoder1 *structFieldDecoder
fieldHash2 int32 fieldHash2 int64
fieldDecoder2 *structFieldDecoder fieldDecoder2 *structFieldDecoder
fieldHash3 int32 fieldHash3 int64
fieldDecoder3 *structFieldDecoder fieldDecoder3 *structFieldDecoder
fieldHash4 int32 fieldHash4 int64
fieldDecoder4 *structFieldDecoder fieldDecoder4 *structFieldDecoder
} }
@ -604,21 +684,21 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
} }
} }
if iter.Error != nil && iter.Error != io.EOF { if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
} }
} }
type fiveFieldsStructDecoder struct { type fiveFieldsStructDecoder struct {
typ reflect.Type typ reflect2.Type
fieldHash1 int32 fieldHash1 int64
fieldDecoder1 *structFieldDecoder fieldDecoder1 *structFieldDecoder
fieldHash2 int32 fieldHash2 int64
fieldDecoder2 *structFieldDecoder fieldDecoder2 *structFieldDecoder
fieldHash3 int32 fieldHash3 int64
fieldDecoder3 *structFieldDecoder fieldDecoder3 *structFieldDecoder
fieldHash4 int32 fieldHash4 int64
fieldDecoder4 *structFieldDecoder fieldDecoder4 *structFieldDecoder
fieldHash5 int32 fieldHash5 int64
fieldDecoder5 *structFieldDecoder fieldDecoder5 *structFieldDecoder
} }
@ -646,23 +726,23 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
} }
} }
if iter.Error != nil && iter.Error != io.EOF { if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
} }
} }
type sixFieldsStructDecoder struct { type sixFieldsStructDecoder struct {
typ reflect.Type typ reflect2.Type
fieldHash1 int32 fieldHash1 int64
fieldDecoder1 *structFieldDecoder fieldDecoder1 *structFieldDecoder
fieldHash2 int32 fieldHash2 int64
fieldDecoder2 *structFieldDecoder fieldDecoder2 *structFieldDecoder
fieldHash3 int32 fieldHash3 int64
fieldDecoder3 *structFieldDecoder fieldDecoder3 *structFieldDecoder
fieldHash4 int32 fieldHash4 int64
fieldDecoder4 *structFieldDecoder fieldDecoder4 *structFieldDecoder
fieldHash5 int32 fieldHash5 int64
fieldDecoder5 *structFieldDecoder fieldDecoder5 *structFieldDecoder
fieldHash6 int32 fieldHash6 int64
fieldDecoder6 *structFieldDecoder fieldDecoder6 *structFieldDecoder
} }
@ -692,25 +772,25 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
} }
} }
if iter.Error != nil && iter.Error != io.EOF { if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
} }
} }
type sevenFieldsStructDecoder struct { type sevenFieldsStructDecoder struct {
typ reflect.Type typ reflect2.Type
fieldHash1 int32 fieldHash1 int64
fieldDecoder1 *structFieldDecoder fieldDecoder1 *structFieldDecoder
fieldHash2 int32 fieldHash2 int64
fieldDecoder2 *structFieldDecoder fieldDecoder2 *structFieldDecoder
fieldHash3 int32 fieldHash3 int64
fieldDecoder3 *structFieldDecoder fieldDecoder3 *structFieldDecoder
fieldHash4 int32 fieldHash4 int64
fieldDecoder4 *structFieldDecoder fieldDecoder4 *structFieldDecoder
fieldHash5 int32 fieldHash5 int64
fieldDecoder5 *structFieldDecoder fieldDecoder5 *structFieldDecoder
fieldHash6 int32 fieldHash6 int64
fieldDecoder6 *structFieldDecoder fieldDecoder6 *structFieldDecoder
fieldHash7 int32 fieldHash7 int64
fieldDecoder7 *structFieldDecoder fieldDecoder7 *structFieldDecoder
} }
@ -742,27 +822,27 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
} }
} }
if iter.Error != nil && iter.Error != io.EOF { if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
} }
} }
type eightFieldsStructDecoder struct { type eightFieldsStructDecoder struct {
typ reflect.Type typ reflect2.Type
fieldHash1 int32 fieldHash1 int64
fieldDecoder1 *structFieldDecoder fieldDecoder1 *structFieldDecoder
fieldHash2 int32 fieldHash2 int64
fieldDecoder2 *structFieldDecoder fieldDecoder2 *structFieldDecoder
fieldHash3 int32 fieldHash3 int64
fieldDecoder3 *structFieldDecoder fieldDecoder3 *structFieldDecoder
fieldHash4 int32 fieldHash4 int64
fieldDecoder4 *structFieldDecoder fieldDecoder4 *structFieldDecoder
fieldHash5 int32 fieldHash5 int64
fieldDecoder5 *structFieldDecoder fieldDecoder5 *structFieldDecoder
fieldHash6 int32 fieldHash6 int64
fieldDecoder6 *structFieldDecoder fieldDecoder6 *structFieldDecoder
fieldHash7 int32 fieldHash7 int64
fieldDecoder7 *structFieldDecoder fieldDecoder7 *structFieldDecoder
fieldHash8 int32 fieldHash8 int64
fieldDecoder8 *structFieldDecoder fieldDecoder8 *structFieldDecoder
} }
@ -796,29 +876,29 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat
} }
} }
if iter.Error != nil && iter.Error != io.EOF { if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
} }
} }
type nineFieldsStructDecoder struct { type nineFieldsStructDecoder struct {
typ reflect.Type typ reflect2.Type
fieldHash1 int32 fieldHash1 int64
fieldDecoder1 *structFieldDecoder fieldDecoder1 *structFieldDecoder
fieldHash2 int32 fieldHash2 int64
fieldDecoder2 *structFieldDecoder fieldDecoder2 *structFieldDecoder
fieldHash3 int32 fieldHash3 int64
fieldDecoder3 *structFieldDecoder fieldDecoder3 *structFieldDecoder
fieldHash4 int32 fieldHash4 int64
fieldDecoder4 *structFieldDecoder fieldDecoder4 *structFieldDecoder
fieldHash5 int32 fieldHash5 int64
fieldDecoder5 *structFieldDecoder fieldDecoder5 *structFieldDecoder
fieldHash6 int32 fieldHash6 int64
fieldDecoder6 *structFieldDecoder fieldDecoder6 *structFieldDecoder
fieldHash7 int32 fieldHash7 int64
fieldDecoder7 *structFieldDecoder fieldDecoder7 *structFieldDecoder
fieldHash8 int32 fieldHash8 int64
fieldDecoder8 *structFieldDecoder fieldDecoder8 *structFieldDecoder
fieldHash9 int32 fieldHash9 int64
fieldDecoder9 *structFieldDecoder fieldDecoder9 *structFieldDecoder
} }
@ -854,31 +934,31 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
} }
} }
if iter.Error != nil && iter.Error != io.EOF { if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
} }
} }
type tenFieldsStructDecoder struct { type tenFieldsStructDecoder struct {
typ reflect.Type typ reflect2.Type
fieldHash1 int32 fieldHash1 int64
fieldDecoder1 *structFieldDecoder fieldDecoder1 *structFieldDecoder
fieldHash2 int32 fieldHash2 int64
fieldDecoder2 *structFieldDecoder fieldDecoder2 *structFieldDecoder
fieldHash3 int32 fieldHash3 int64
fieldDecoder3 *structFieldDecoder fieldDecoder3 *structFieldDecoder
fieldHash4 int32 fieldHash4 int64
fieldDecoder4 *structFieldDecoder fieldDecoder4 *structFieldDecoder
fieldHash5 int32 fieldHash5 int64
fieldDecoder5 *structFieldDecoder fieldDecoder5 *structFieldDecoder
fieldHash6 int32 fieldHash6 int64
fieldDecoder6 *structFieldDecoder fieldDecoder6 *structFieldDecoder
fieldHash7 int32 fieldHash7 int64
fieldDecoder7 *structFieldDecoder fieldDecoder7 *structFieldDecoder
fieldHash8 int32 fieldHash8 int64
fieldDecoder8 *structFieldDecoder fieldDecoder8 *structFieldDecoder
fieldHash9 int32 fieldHash9 int64
fieldDecoder9 *structFieldDecoder fieldDecoder9 *structFieldDecoder
fieldHash10 int32 fieldHash10 int64
fieldDecoder10 *structFieldDecoder fieldDecoder10 *structFieldDecoder
} }
@ -916,19 +996,53 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator
} }
} }
if iter.Error != nil && iter.Error != io.EOF { if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%v: %s", decoder.typ, iter.Error.Error()) iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
} }
} }
type structFieldDecoder struct { type structFieldDecoder struct {
field *reflect.StructField field reflect2.StructField
fieldDecoder ValDecoder fieldDecoder ValDecoder
} }
func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
fieldPtr := unsafe.Pointer(uintptr(ptr) + decoder.field.Offset) fieldPtr := decoder.field.UnsafeGet(ptr)
decoder.fieldDecoder.Decode(fieldPtr, iter) decoder.fieldDecoder.Decode(fieldPtr, iter)
if iter.Error != nil && iter.Error != io.EOF { if iter.Error != nil && iter.Error != io.EOF {
iter.Error = fmt.Errorf("%s: %s", decoder.field.Name, iter.Error.Error()) iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error())
}
}
type stringModeStringDecoder struct {
elemDecoder ValDecoder
cfg *frozenConfig
}
func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
decoder.elemDecoder.Decode(ptr, iter)
str := *((*string)(ptr))
tempIter := decoder.cfg.BorrowIterator([]byte(str))
defer decoder.cfg.ReturnIterator(tempIter)
*((*string)(ptr)) = tempIter.ReadString()
}
type stringModeNumberDecoder struct {
elemDecoder ValDecoder
}
func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
c := iter.nextToken()
if c != '"' {
iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
return
}
decoder.elemDecoder.Decode(ptr, iter)
if iter.Error != nil {
return
}
c = iter.readByte()
if c != '"' {
iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
return
} }
} }
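
stringModeNumberDecoder and stringModeStringDecoder implement the `,string` struct-tag option on the decode side: the value is expected to arrive wrapped in quotes and is then handed to the underlying element decoder. A small sketch, mirroring the encoding/json semantics that jsoniter follows here:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type resource struct {
	// The ",string" option tells the decoder to expect the number inside a JSON string.
	ID int64 `json:"id,string"`
}

func main() {
	var r resource
	if err := jsoniter.Unmarshal([]byte(`{"id":"42"}`), &r); err != nil {
		panic(err)
	}
	fmt.Println(r.ID) // 42

	// Without the quotes, stringModeNumberDecoder reports an error.
	fmt.Println(jsoniter.Unmarshal([]byte(`{"id":42}`), &r) != nil) // true
}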

View File

@ -2,23 +2,20 @@ package jsoniter
import ( import (
"fmt" "fmt"
"github.com/modern-go/reflect2"
"io" "io"
"reflect" "reflect"
"strings"
"unsafe" "unsafe"
) )
func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) { func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder {
type bindingTo struct { type bindingTo struct {
binding *Binding binding *Binding
toName string toName string
ignored bool ignored bool
} }
orderedBindings := []*bindingTo{} orderedBindings := []*bindingTo{}
structDescriptor, err := describeStruct(cfg, typ) structDescriptor := describeStruct(ctx, typ)
if err != nil {
return nil, err
}
for _, binding := range structDescriptor.Fields { for _, binding := range structDescriptor.Fields {
for _, toName := range binding.ToNames { for _, toName := range binding.ToNames {
new := &bindingTo{ new := &bindingTo{
@ -29,13 +26,13 @@ func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
if old.toName != toName { if old.toName != toName {
continue continue
} }
old.ignored, new.ignored = resolveConflictBinding(cfg, old.binding, new.binding) old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding)
} }
orderedBindings = append(orderedBindings, new) orderedBindings = append(orderedBindings, new)
} }
} }
if len(orderedBindings) == 0 { if len(orderedBindings) == 0 {
return &emptyStructEncoder{}, nil return &emptyStructEncoder{}
} }
finalOrderedFields := []structFieldTo{} finalOrderedFields := []structFieldTo{}
for _, bindingTo := range orderedBindings { for _, bindingTo := range orderedBindings {
@ -46,12 +43,36 @@ func encoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
}) })
} }
} }
return &structEncoder{structDescriptor.onePtrEmbedded, structDescriptor.onePtrOptimization, finalOrderedFields}, nil return &structEncoder{typ, finalOrderedFields}
}
func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty {
encoder := createEncoderOfNative(ctx, typ)
if encoder != nil {
return encoder
}
kind := typ.Kind()
switch kind {
case reflect.Interface:
return &dynamicEncoder{typ}
case reflect.Struct:
return &structEncoder{typ: typ}
case reflect.Array:
return &arrayEncoder{}
case reflect.Slice:
return &sliceEncoder{}
case reflect.Map:
return encoderOfMap(ctx, typ)
case reflect.Ptr:
return &OptionalEncoder{}
default:
return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)}
}
} }
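
createCheckIsEmpty picks an encoder solely for its IsEmpty method, which is what drives the omitempty tag during encoding. A brief illustration of the behaviour it backs:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type status struct {
	Reason string `json:"reason,omitempty"`
	Count  int    `json:"count,omitempty"`
}

func main() {
	// Zero values are considered "empty" and are dropped from the output.
	out, _ := jsoniter.Marshal(status{})
	fmt.Println(string(out)) // {}

	out, _ = jsoniter.Marshal(status{Reason: "ok", Count: 3})
	fmt.Println(string(out)) // {"reason":"ok","count":3}
}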
func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
newTagged := new.Field.Tag.Get(cfg.getTagKey()) != "" newTagged := new.Field.Tag().Get(cfg.getTagKey()) != ""
oldTagged := old.Field.Tag.Get(cfg.getTagKey()) != "" oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != ""
if newTagged { if newTagged {
if oldTagged { if oldTagged {
if len(old.levels) > len(new.levels) { if len(old.levels) > len(new.levels) {
@ -78,62 +99,41 @@ func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ig
} }
} }
func decoderOfStruct(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
bindings := map[string]*Binding{}
structDescriptor, err := describeStruct(cfg, typ)
if err != nil {
return nil, err
}
for _, binding := range structDescriptor.Fields {
for _, fromName := range binding.FromNames {
old := bindings[fromName]
if old == nil {
bindings[fromName] = binding
continue
}
ignoreOld, ignoreNew := resolveConflictBinding(cfg, old, binding)
if ignoreOld {
delete(bindings, fromName)
}
if !ignoreNew {
bindings[fromName] = binding
}
}
}
fields := map[string]*structFieldDecoder{}
for k, binding := range bindings {
fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
}
return createStructDecoder(typ, fields)
}
type structFieldEncoder struct { type structFieldEncoder struct {
field *reflect.StructField field reflect2.StructField
fieldEncoder ValEncoder fieldEncoder ValEncoder
omitempty bool omitempty bool
} }
func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) fieldPtr := encoder.field.UnsafeGet(ptr)
encoder.fieldEncoder.Encode(fieldPtr, stream) encoder.fieldEncoder.Encode(fieldPtr, stream)
if stream.Error != nil && stream.Error != io.EOF { if stream.Error != nil && stream.Error != io.EOF {
stream.Error = fmt.Errorf("%s: %s", encoder.field.Name, stream.Error.Error()) stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error())
} }
} }
func (encoder *structFieldEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
fieldPtr := unsafe.Pointer(uintptr(ptr) + encoder.field.Offset) fieldPtr := encoder.field.UnsafeGet(ptr)
return encoder.fieldEncoder.IsEmpty(fieldPtr) return encoder.fieldEncoder.IsEmpty(fieldPtr)
} }
func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil)
if !converted {
return false
}
fieldPtr := encoder.field.UnsafeGet(ptr)
return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
}
type IsEmbeddedPtrNil interface {
IsEmbeddedPtrNil(ptr unsafe.Pointer) bool
}
type structEncoder struct { type structEncoder struct {
onePtrEmbedded bool typ reflect2.Type
onePtrOptimization bool fields []structFieldTo
fields []structFieldTo
} }
type structFieldTo struct { type structFieldTo struct {
@ -148,6 +148,9 @@ func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
continue continue
} }
if field.encoder.IsEmbeddedPtrNil(ptr) {
continue
}
if isNotFirst { if isNotFirst {
stream.WriteMore() stream.WriteMore()
} }
@ -156,23 +159,8 @@ func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
isNotFirst = true isNotFirst = true
} }
stream.WriteObjectEnd() stream.WriteObjectEnd()
} if stream.Error != nil && stream.Error != io.EOF {
stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error())
func (encoder *structEncoder) EncodeInterface(val interface{}, stream *Stream) {
e := (*emptyInterface)(unsafe.Pointer(&val))
if encoder.onePtrOptimization {
if e.word == nil && encoder.onePtrEmbedded {
stream.WriteObjectStart()
stream.WriteObjectEnd()
return
}
ptr := uintptr(e.word)
e.word = unsafe.Pointer(&ptr)
}
if reflect.TypeOf(val).Kind() == reflect.Ptr {
encoder.Encode(unsafe.Pointer(&e.word), stream)
} else {
encoder.Encode(e.word, stream)
} }
} }
@ -187,10 +175,36 @@ func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.WriteEmptyObject() stream.WriteEmptyObject()
} }
func (encoder *emptyStructEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return false return false
} }
type stringModeNumberEncoder struct {
elemEncoder ValEncoder
}
func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
stream.writeByte('"')
encoder.elemEncoder.Encode(ptr, stream)
stream.writeByte('"')
}
func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.elemEncoder.IsEmpty(ptr)
}
type stringModeStringEncoder struct {
elemEncoder ValEncoder
cfg *frozenConfig
}
func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
tempStream := encoder.cfg.BorrowStream(nil)
defer encoder.cfg.ReturnStream(tempStream)
encoder.elemEncoder.Encode(ptr, tempStream)
stream.WriteString(string(tempStream.Buffer()))
}
func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return encoder.elemEncoder.IsEmpty(ptr)
}
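
The new stringModeNumberEncoder and stringModeStringEncoder are the encode-side counterparts of the `,string` option: numbers are wrapped in quotes, and string values are re-encoded through a temporary stream. A brief sketch of the numeric case:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type resource struct {
	ID int64 `json:"id,string"`
}

func main() {
	out, _ := jsoniter.Marshal(resource{ID: 42})
	fmt.Println(string(out)) // {"id":"42"}
}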

View File

@ -10,7 +10,6 @@ type Stream struct {
cfg *frozenConfig cfg *frozenConfig
out io.Writer out io.Writer
buf []byte buf []byte
n int
Error error Error error
indention int indention int
Attachment interface{} // open for customized encoder Attachment interface{} // open for customized encoder
@ -24,8 +23,7 @@ func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
return &Stream{ return &Stream{
cfg: cfg.(*frozenConfig), cfg: cfg.(*frozenConfig),
out: out, out: out,
buf: make([]byte, bufSize), buf: make([]byte, 0, bufSize),
n: 0,
Error: nil, Error: nil,
indention: 0, indention: 0,
} }
@ -39,22 +37,27 @@ func (stream *Stream) Pool() StreamPool {
// Reset reuse this stream instance by assign a new writer // Reset reuse this stream instance by assign a new writer
func (stream *Stream) Reset(out io.Writer) { func (stream *Stream) Reset(out io.Writer) {
stream.out = out stream.out = out
stream.n = 0 stream.buf = stream.buf[:0]
} }
// Available returns how many bytes are unused in the buffer. // Available returns how many bytes are unused in the buffer.
func (stream *Stream) Available() int { func (stream *Stream) Available() int {
return len(stream.buf) - stream.n return cap(stream.buf) - len(stream.buf)
} }
// Buffered returns the number of bytes that have been written into the current buffer. // Buffered returns the number of bytes that have been written into the current buffer.
func (stream *Stream) Buffered() int { func (stream *Stream) Buffered() int {
return stream.n return len(stream.buf)
} }
// Buffer if writer is nil, use this method to take the result // Buffer if writer is nil, use this method to take the result
func (stream *Stream) Buffer() []byte { func (stream *Stream) Buffer() []byte {
return stream.buf[:stream.n] return stream.buf
}
// SetBuffer allows to append to the internal buffer directly
func (stream *Stream) SetBuffer(buf []byte) {
stream.buf = buf
} }
// Write writes the contents of p into the buffer. // Write writes the contents of p into the buffer.
@ -62,97 +65,34 @@ func (stream *Stream) Buffer() []byte {
// If nn < len(p), it also returns an error explaining // If nn < len(p), it also returns an error explaining
// why the write is short. // why the write is short.
func (stream *Stream) Write(p []byte) (nn int, err error) { func (stream *Stream) Write(p []byte) (nn int, err error) {
for len(p) > stream.Available() && stream.Error == nil { stream.buf = append(stream.buf, p...)
if stream.out == nil { if stream.out != nil {
stream.growAtLeast(len(p)) nn, err = stream.out.Write(stream.buf)
} else { stream.buf = stream.buf[nn:]
var n int return
if stream.Buffered() == 0 {
// Large write, empty buffer.
// Write directly from p to avoid copy.
n, stream.Error = stream.out.Write(p)
} else {
n = copy(stream.buf[stream.n:], p)
stream.n += n
stream.Flush()
}
nn += n
p = p[n:]
}
} }
if stream.Error != nil { return len(p), nil
return nn, stream.Error
}
n := copy(stream.buf[stream.n:], p)
stream.n += n
nn += n
return nn, nil
} }
// WriteByte writes a single byte. // WriteByte writes a single byte.
func (stream *Stream) writeByte(c byte) { func (stream *Stream) writeByte(c byte) {
if stream.Error != nil { stream.buf = append(stream.buf, c)
return
}
if stream.Available() < 1 {
stream.growAtLeast(1)
}
stream.buf[stream.n] = c
stream.n++
} }
func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
if stream.Error != nil { stream.buf = append(stream.buf, c1, c2)
return
}
if stream.Available() < 2 {
stream.growAtLeast(2)
}
stream.buf[stream.n] = c1
stream.buf[stream.n+1] = c2
stream.n += 2
} }
func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
if stream.Error != nil { stream.buf = append(stream.buf, c1, c2, c3)
return
}
if stream.Available() < 3 {
stream.growAtLeast(3)
}
stream.buf[stream.n] = c1
stream.buf[stream.n+1] = c2
stream.buf[stream.n+2] = c3
stream.n += 3
} }
func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
if stream.Error != nil { stream.buf = append(stream.buf, c1, c2, c3, c4)
return
}
if stream.Available() < 4 {
stream.growAtLeast(4)
}
stream.buf[stream.n] = c1
stream.buf[stream.n+1] = c2
stream.buf[stream.n+2] = c3
stream.buf[stream.n+3] = c4
stream.n += 4
} }
func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
if stream.Error != nil { stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
return
}
if stream.Available() < 5 {
stream.growAtLeast(5)
}
stream.buf[stream.n] = c1
stream.buf[stream.n+1] = c2
stream.buf[stream.n+2] = c3
stream.buf[stream.n+3] = c4
stream.buf[stream.n+4] = c5
stream.n += 5
} }
// Flush writes any buffered data to the underlying io.Writer. // Flush writes any buffered data to the underlying io.Writer.
@ -163,56 +103,20 @@ func (stream *Stream) Flush() error {
if stream.Error != nil { if stream.Error != nil {
return stream.Error return stream.Error
} }
if stream.n == 0 { n, err := stream.out.Write(stream.buf)
return nil
}
n, err := stream.out.Write(stream.buf[0:stream.n])
if n < stream.n && err == nil {
err = io.ErrShortWrite
}
if err != nil { if err != nil {
if n > 0 && n < stream.n { if stream.Error == nil {
copy(stream.buf[0:stream.n-n], stream.buf[n:stream.n]) stream.Error = err
} }
stream.n -= n
stream.Error = err
return err return err
} }
stream.n = 0 stream.buf = stream.buf[n:]
return nil return nil
} }
func (stream *Stream) ensure(minimal int) {
available := stream.Available()
if available < minimal {
stream.growAtLeast(minimal)
}
}
func (stream *Stream) growAtLeast(minimal int) {
if stream.out != nil {
stream.Flush()
if stream.Available() >= minimal {
return
}
}
toGrow := len(stream.buf)
if toGrow < minimal {
toGrow = minimal
}
newBuf := make([]byte, len(stream.buf)+toGrow)
copy(newBuf, stream.Buffer())
stream.buf = newBuf
}
// WriteRaw write string out without quotes, just like []byte // WriteRaw write string out without quotes, just like []byte
func (stream *Stream) WriteRaw(s string) { func (stream *Stream) WriteRaw(s string) {
stream.ensure(len(s)) stream.buf = append(stream.buf, s...)
if stream.Error != nil {
return
}
n := copy(stream.buf[stream.n:], s)
stream.n += n
} }
// WriteNil write null to stream // WriteNil write null to stream
@ -273,6 +177,7 @@ func (stream *Stream) WriteEmptyObject() {
func (stream *Stream) WriteMore() { func (stream *Stream) WriteMore() {
stream.writeByte(',') stream.writeByte(',')
stream.writeIndention(0) stream.writeIndention(0)
stream.Flush()
} }
// WriteArrayStart write [ with possible indention // WriteArrayStart write [ with possible indention
@ -300,9 +205,7 @@ func (stream *Stream) writeIndention(delta int) {
} }
stream.writeByte('\n') stream.writeByte('\n')
toWrite := stream.indention - delta toWrite := stream.indention - delta
stream.ensure(toWrite) for i := 0; i < toWrite; i++ {
for i := 0; i < toWrite && stream.n < len(stream.buf); i++ { stream.buf = append(stream.buf, ' ')
stream.buf[stream.n] = ' '
stream.n++
} }
} }
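
The Stream rewrite above replaces the fixed buffer plus write index (buf, n) with a plain append-grown slice, so Buffer() now simply returns stream.buf. A usage sketch of the public Stream API, which is unchanged by the refactor:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	cfg := jsoniter.ConfigDefault
	stream := cfg.BorrowStream(nil) // nil writer: the result stays in the buffer
	defer cfg.ReturnStream(stream)

	stream.WriteObjectStart()
	stream.WriteObjectField("answer")
	stream.WriteInt(42)
	stream.WriteObjectEnd()

	// With no underlying writer, Buffer() holds everything written so far.
	fmt.Println(string(stream.Buffer())) // {"answer":42}
}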

View File

@ -21,7 +21,7 @@ func (stream *Stream) WriteFloat32(val float32) {
fmt = 'e' fmt = 'e'
} }
} }
stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 32)) stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
} }
// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster // WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster
@ -43,13 +43,12 @@ func (stream *Stream) WriteFloat32Lossy(val float32) {
return return
} }
stream.writeByte('.') stream.writeByte('.')
stream.ensure(10)
for p := precision - 1; p > 0 && fval < pow10[p]; p-- { for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
stream.writeByte('0') stream.writeByte('0')
} }
stream.WriteUint64(fval) stream.WriteUint64(fval)
for stream.buf[stream.n-1] == '0' { for stream.buf[len(stream.buf)-1] == '0' {
stream.n-- stream.buf = stream.buf[:len(stream.buf)-1]
} }
} }
@ -63,7 +62,7 @@ func (stream *Stream) WriteFloat64(val float64) {
fmt = 'e' fmt = 'e'
} }
} }
stream.WriteRaw(strconv.FormatFloat(float64(val), fmt, -1, 64)) stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64)
} }
// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster // WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster
@ -85,12 +84,11 @@ func (stream *Stream) WriteFloat64Lossy(val float64) {
return return
} }
stream.writeByte('.') stream.writeByte('.')
stream.ensure(10)
for p := precision - 1; p > 0 && fval < pow10[p]; p-- { for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
stream.writeByte('0') stream.writeByte('0')
} }
stream.WriteUint64(fval) stream.WriteUint64(fval)
for stream.buf[stream.n-1] == '0' { for stream.buf[len(stream.buf)-1] == '0' {
stream.n-- stream.buf = stream.buf[:len(stream.buf)-1]
} }
} }
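
WriteFloat32/64 now append via strconv.AppendFloat directly into the stream buffer, while the Lossy variants keep the hand-rolled six-digit fast path and trim trailing zeros in place. For comparison:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	cfg := jsoniter.ConfigDefault
	stream := cfg.BorrowStream(nil)
	defer cfg.ReturnStream(stream)

	stream.WriteFloat64(1.0 / 3.0) // exact shortest round-trip representation
	stream.WriteRaw(" ")
	stream.WriteFloat64Lossy(1.0 / 3.0) // at most 6 fractional digits

	fmt.Println(string(stream.Buffer())) // 0.3333333333333333 0.333333
}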

190 vendor/github.com/json-iterator/go/stream_int.go generated vendored Normal file
View File

@ -0,0 +1,190 @@
package jsoniter
var digits []uint32
func init() {
digits = make([]uint32, 1000)
for i := uint32(0); i < 1000; i++ {
digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
if i < 10 {
digits[i] += 2 << 24
} else if i < 100 {
digits[i] += 1 << 24
}
}
}
func writeFirstBuf(space []byte, v uint32) []byte {
start := v >> 24
if start == 0 {
space = append(space, byte(v>>16), byte(v>>8))
} else if start == 1 {
space = append(space, byte(v>>8))
}
space = append(space, byte(v))
return space
}
func writeBuf(buf []byte, v uint32) []byte {
return append(buf, byte(v>>16), byte(v>>8), byte(v))
}
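
The new stream_int.go writes integers three decimal digits at a time. Each digits[i] entry packs the three ASCII digits of i into the low 24 bits and, in the top byte, the number of leading zero bytes that writeFirstBuf should drop for values below 10 or 100. A self-contained rendering of the same packing:

package main

import "fmt"

func main() {
	// pack mirrors the init loop above: three ASCII digits plus a skip count.
	pack := func(i uint32) uint32 {
		v := ((i/100 + '0') << 16) | ((i/10%10 + '0') << 8) | (i%10 + '0')
		if i < 10 {
			v += 2 << 24 // drop two leading '0' bytes
		} else if i < 100 {
			v += 1 << 24 // drop one leading '0' byte
		}
		return v
	}

	// 7 encodes as "007" with a skip count of 2, so writeFirstBuf emits just "7".
	v := pack(7)
	fmt.Printf("skip=%d digits=%q\n", v>>24, []byte{byte(v >> 16), byte(v >> 8), byte(v)})
	// skip=2 digits="007"

	// 812 needs no skipping; writeBuf always emits all three bytes.
	v = pack(812)
	fmt.Printf("skip=%d digits=%q\n", v>>24, []byte{byte(v >> 16), byte(v >> 8), byte(v)})
	// skip=0 digits="812"
}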
// WriteUint8 write uint8 to stream
func (stream *Stream) WriteUint8(val uint8) {
stream.buf = writeFirstBuf(stream.buf, digits[val])
}
// WriteInt8 write int8 to stream
func (stream *Stream) WriteInt8(nval int8) {
var val uint8
if nval < 0 {
val = uint8(-nval)
stream.buf = append(stream.buf, '-')
} else {
val = uint8(nval)
}
stream.buf = writeFirstBuf(stream.buf, digits[val])
}
// WriteUint16 write uint16 to stream
func (stream *Stream) WriteUint16(val uint16) {
q1 := val / 1000
if q1 == 0 {
stream.buf = writeFirstBuf(stream.buf, digits[val])
return
}
r1 := val - q1*1000
stream.buf = writeFirstBuf(stream.buf, digits[q1])
stream.buf = writeBuf(stream.buf, digits[r1])
return
}
// WriteInt16 write int16 to stream
func (stream *Stream) WriteInt16(nval int16) {
var val uint16
if nval < 0 {
val = uint16(-nval)
stream.buf = append(stream.buf, '-')
} else {
val = uint16(nval)
}
stream.WriteUint16(val)
}
// WriteUint32 write uint32 to stream
func (stream *Stream) WriteUint32(val uint32) {
q1 := val / 1000
if q1 == 0 {
stream.buf = writeFirstBuf(stream.buf, digits[val])
return
}
r1 := val - q1*1000
q2 := q1 / 1000
if q2 == 0 {
stream.buf = writeFirstBuf(stream.buf, digits[q1])
stream.buf = writeBuf(stream.buf, digits[r1])
return
}
r2 := q1 - q2*1000
q3 := q2 / 1000
if q3 == 0 {
stream.buf = writeFirstBuf(stream.buf, digits[q2])
} else {
r3 := q2 - q3*1000
stream.buf = append(stream.buf, byte(q3+'0'))
stream.buf = writeBuf(stream.buf, digits[r3])
}
stream.buf = writeBuf(stream.buf, digits[r2])
stream.buf = writeBuf(stream.buf, digits[r1])
}
// WriteInt32 write int32 to stream
func (stream *Stream) WriteInt32(nval int32) {
var val uint32
if nval < 0 {
val = uint32(-nval)
stream.buf = append(stream.buf, '-')
} else {
val = uint32(nval)
}
stream.WriteUint32(val)
}
// WriteUint64 write uint64 to stream
func (stream *Stream) WriteUint64(val uint64) {
q1 := val / 1000
if q1 == 0 {
stream.buf = writeFirstBuf(stream.buf, digits[val])
return
}
r1 := val - q1*1000
q2 := q1 / 1000
if q2 == 0 {
stream.buf = writeFirstBuf(stream.buf, digits[q1])
stream.buf = writeBuf(stream.buf, digits[r1])
return
}
r2 := q1 - q2*1000
q3 := q2 / 1000
if q3 == 0 {
stream.buf = writeFirstBuf(stream.buf, digits[q2])
stream.buf = writeBuf(stream.buf, digits[r2])
stream.buf = writeBuf(stream.buf, digits[r1])
return
}
r3 := q2 - q3*1000
q4 := q3 / 1000
if q4 == 0 {
stream.buf = writeFirstBuf(stream.buf, digits[q3])
stream.buf = writeBuf(stream.buf, digits[r3])
stream.buf = writeBuf(stream.buf, digits[r2])
stream.buf = writeBuf(stream.buf, digits[r1])
return
}
r4 := q3 - q4*1000
q5 := q4 / 1000
if q5 == 0 {
stream.buf = writeFirstBuf(stream.buf, digits[q4])
stream.buf = writeBuf(stream.buf, digits[r4])
stream.buf = writeBuf(stream.buf, digits[r3])
stream.buf = writeBuf(stream.buf, digits[r2])
stream.buf = writeBuf(stream.buf, digits[r1])
return
}
r5 := q4 - q5*1000
q6 := q5 / 1000
if q6 == 0 {
stream.buf = writeFirstBuf(stream.buf, digits[q5])
} else {
stream.buf = writeFirstBuf(stream.buf, digits[q6])
r6 := q5 - q6*1000
stream.buf = writeBuf(stream.buf, digits[r6])
}
stream.buf = writeBuf(stream.buf, digits[r5])
stream.buf = writeBuf(stream.buf, digits[r4])
stream.buf = writeBuf(stream.buf, digits[r3])
stream.buf = writeBuf(stream.buf, digits[r2])
stream.buf = writeBuf(stream.buf, digits[r1])
}
// WriteInt64 write int64 to stream
func (stream *Stream) WriteInt64(nval int64) {
var val uint64
if nval < 0 {
val = uint64(-nval)
stream.buf = append(stream.buf, '-')
} else {
val = uint64(nval)
}
stream.WriteUint64(val)
}
// WriteInt write int to stream
func (stream *Stream) WriteInt(val int) {
stream.WriteInt64(int64(val))
}
// WriteUint write uint to stream
func (stream *Stream) WriteUint(val uint) {
stream.WriteUint64(uint64(val))
}

View File

@ -219,34 +219,22 @@ var hex = "0123456789abcdef"
// WriteStringWithHTMLEscaped write string to stream with html special characters escaped // WriteStringWithHTMLEscaped write string to stream with html special characters escaped
func (stream *Stream) WriteStringWithHTMLEscaped(s string) { func (stream *Stream) WriteStringWithHTMLEscaped(s string) {
stream.ensure(32)
valLen := len(s) valLen := len(s)
toWriteLen := valLen stream.buf = append(stream.buf, '"')
bufLengthMinusTwo := len(stream.buf) - 2 // make room for the quotes
if stream.n+toWriteLen > bufLengthMinusTwo {
toWriteLen = bufLengthMinusTwo - stream.n
}
n := stream.n
stream.buf[n] = '"'
n++
// write string, the fast path, without utf8 and escape support // write string, the fast path, without utf8 and escape support
i := 0 i := 0
for ; i < toWriteLen; i++ { for ; i < valLen; i++ {
c := s[i] c := s[i]
if c < utf8.RuneSelf && htmlSafeSet[c] { if c < utf8.RuneSelf && htmlSafeSet[c] {
stream.buf[n] = c stream.buf = append(stream.buf, c)
n++
} else { } else {
break break
} }
} }
if i == valLen { if i == valLen {
stream.buf[n] = '"' stream.buf = append(stream.buf, '"')
n++
stream.n = n
return return
} }
stream.n = n
writeStringSlowPathWithHTMLEscaped(stream, i, s, valLen) writeStringSlowPathWithHTMLEscaped(stream, i, s, valLen)
} }
@ -321,34 +309,22 @@ func writeStringSlowPathWithHTMLEscaped(stream *Stream, i int, s string, valLen
// WriteString write string to stream without html escape // WriteString write string to stream without html escape
func (stream *Stream) WriteString(s string) { func (stream *Stream) WriteString(s string) {
stream.ensure(32)
valLen := len(s) valLen := len(s)
toWriteLen := valLen stream.buf = append(stream.buf, '"')
bufLengthMinusTwo := len(stream.buf) - 2 // make room for the quotes
if stream.n+toWriteLen > bufLengthMinusTwo {
toWriteLen = bufLengthMinusTwo - stream.n
}
n := stream.n
stream.buf[n] = '"'
n++
// write string, the fast path, without utf8 and escape support // write string, the fast path, without utf8 and escape support
i := 0 i := 0
for ; i < toWriteLen; i++ { for ; i < valLen; i++ {
c := s[i] c := s[i]
if c > 31 && c != '"' && c != '\\' { if c > 31 && c != '"' && c != '\\' {
stream.buf[n] = c stream.buf = append(stream.buf, c)
n++
} else { } else {
break break
} }
} }
if i == valLen { if i == valLen {
stream.buf[n] = '"' stream.buf = append(stream.buf, '"')
n++
stream.n = n
return return
} }
stream.n = n
writeStringSlowPath(stream, i, s, valLen) writeStringSlowPath(stream, i, s, valLen)
} }
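
Both string writers keep the fast-path/slow-path split; only the buffer bookkeeping moved to append-based writes. WriteStringWithHTMLEscaped is the variant used when HTML escaping is enabled, mirroring encoding/json's escaping of <, > and &:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// ConfigFastest disables HTML escaping, so the plain WriteString fast path is used.
	fast, _ := jsoniter.ConfigFastest.Marshal("<a>")
	fmt.Println(string(fast)) // "<a>"

	// The standard-library-compatible config escapes HTML-significant characters.
	std, _ := jsoniter.ConfigCompatibleWithStandardLibrary.Marshal("<a>")
	fmt.Println(string(std)) // "\u003ca\u003e"
}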

201 vendor/github.com/modern-go/concurrent/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/modern-go/concurrent/README.md generated vendored Normal file

@ -0,0 +1,49 @@
# concurrent
[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/concurrent/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/concurrent?badge)
[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/concurrent)
[![Build Status](https://travis-ci.org/modern-go/concurrent.svg?branch=master)](https://travis-ci.org/modern-go/concurrent)
[![codecov](https://codecov.io/gh/modern-go/concurrent/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/concurrent)
[![rcard](https://goreportcard.com/badge/github.com/modern-go/concurrent)](https://goreportcard.com/report/github.com/modern-go/concurrent)
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/concurrent/master/LICENSE)
* concurrent.Map: a backport of sync.Map for Go below 1.9
* concurrent.Executor: goroutines with explicit ownership and cancellation
# concurrent.Map
Because sync.Map is only available in Go 1.9 and later, concurrent.Map can be used to keep code portable:
```go
m := concurrent.NewMap()
m.Store("hello", "world")
elem, found := m.Load("hello")
// elem will be "world"
// found will be true
```
# concurrent.Executor
```go
executor := concurrent.NewUnboundedExecutor()
executor.Go(func(ctx context.Context) {
everyMillisecond := time.NewTicker(time.Millisecond)
for {
select {
case <-ctx.Done():
fmt.Println("goroutine exited")
return
case <-everyMillisecond.C:
// do something
}
}
})
time.Sleep(time.Second)
executor.StopAndWaitForever()
fmt.Println("executor stopped")
```
Attaching goroutines to an executor instance means we can
* cancel them by stopping the executor with Stop/StopAndWait/StopAndWaitForever
* handle panics via a callback, so the default behavior no longer crashes your application (see the sketch below)
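For illustration only (not part of the upstream README), a minimal sketch of overriding the panic callback on a single executor; everything it uses — `NewUnboundedExecutor`, the `HandlePanic` field, `Go`, and `StopAndWaitForever` — is defined in this package:
```go
package main

import (
	"context"
	"fmt"

	"github.com/modern-go/concurrent"
)

func main() {
	executor := concurrent.NewUnboundedExecutor()
	// Override the panic callback for this executor only; if left nil,
	// the global concurrent.HandlePanic logs to concurrent.ErrorLogger.
	executor.HandlePanic = func(recovered interface{}, funcName string) {
		fmt.Printf("recovered %v from %s, process keeps running\n", recovered, funcName)
	}
	executor.Go(func(ctx context.Context) {
		panic("boom") // recovered by the callback above instead of crashing
	})
	executor.StopAndWaitForever()
}
```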

vendor/github.com/modern-go/concurrent/executor.go generated vendored Normal file

@ -0,0 +1,14 @@
package concurrent
import "context"
// Executor replaces the go keyword to start a new goroutine.
// The goroutine should cancel itself if the context passed in has been cancelled.
// The goroutines started by the executor are owned by the executor;
// we can cancel all of them just by stopping the executor itself.
// However, the Executor interface does not have a Stop method: the party starting and owning
// the executor should use the concrete executor type instead of this interface.
type Executor interface {
// Go starts a new goroutine controlled by the context
Go(handler func(ctx context.Context))
}

vendor/github.com/modern-go/concurrent/go_above_19.go generated vendored Normal file

@ -0,0 +1,15 @@
//+build go1.9
package concurrent
import "sync"
// Map is a wrapper for sync.Map introduced in go1.9
type Map struct {
sync.Map
}
// NewMap creates a thread safe Map
func NewMap() *Map {
return &Map{}
}

vendor/github.com/modern-go/concurrent/go_below_19.go generated vendored Normal file

@ -0,0 +1,33 @@
//+build !go1.9
package concurrent
import "sync"
// Map implements a thread safe map for go version below 1.9 using mutex
type Map struct {
lock sync.RWMutex
data map[interface{}]interface{}
}
// NewMap creates a thread safe map
func NewMap() *Map {
return &Map{
data: make(map[interface{}]interface{}, 32),
}
}
// Load is the same as sync.Map Load
func (m *Map) Load(key interface{}) (elem interface{}, found bool) {
m.lock.RLock()
elem, found = m.data[key]
m.lock.RUnlock()
return
}
// Store is the same as sync.Map Store
func (m *Map) Store(key interface{}, elem interface{}) {
m.lock.Lock()
m.data[key] = elem
m.lock.Unlock()
}

vendor/github.com/modern-go/concurrent/log.go generated vendored Normal file

@ -0,0 +1,13 @@
package concurrent
import (
"os"
"log"
"io/ioutil"
)
// ErrorLogger is used to print out errors; it can be set to a writer other than stderr.
var ErrorLogger = log.New(os.Stderr, "", 0)
// InfoLogger is used to print informational messages; it defaults to off.
var InfoLogger = log.New(ioutil.Discard, "", 0)
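As a small usage sketch (not from the package itself; the prefixes and flags are arbitrary choices), both loggers are plain `*log.Logger` variables and can simply be reassigned:
```go
package main

import (
	"log"
	"os"

	"github.com/modern-go/concurrent"
)

func main() {
	// Send error output to stderr with a prefix and timestamps, and enable
	// the informational messages that are discarded by default.
	concurrent.ErrorLogger = log.New(os.Stderr, "[concurrent] ", log.LstdFlags)
	concurrent.InfoLogger = log.New(os.Stdout, "[concurrent] ", log.LstdFlags)
}
```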

vendor/github.com/modern-go/concurrent/unbounded_executor.go generated vendored Normal file

@ -0,0 +1,119 @@
package concurrent
import (
"context"
"fmt"
"runtime"
"runtime/debug"
"sync"
"time"
"reflect"
)
// HandlePanic logs goroutine panic by default
var HandlePanic = func(recovered interface{}, funcName string) {
ErrorLogger.Println(fmt.Sprintf("%s panic: %v", funcName, recovered))
ErrorLogger.Println(string(debug.Stack()))
}
// UnboundedExecutor is an executor without limits on the number of alive goroutines;
// it tracks the goroutines it starts and can cancel them on shutdown.
type UnboundedExecutor struct {
ctx context.Context
cancel context.CancelFunc
activeGoroutinesMutex *sync.Mutex
activeGoroutines map[string]int
HandlePanic func(recovered interface{}, funcName string)
}
// GlobalUnboundedExecutor has the life cycle of the program itself.
// Any goroutine that should be shut down before main exits can be started from this executor.
// GlobalUnboundedExecutor expects the main function to call Stop;
// it does not magically know when the main function exits.
var GlobalUnboundedExecutor = NewUnboundedExecutor()
// NewUnboundedExecutor creates a new UnboundedExecutor;
// an UnboundedExecutor cannot be created via &UnboundedExecutor{}.
// HandlePanic can be set with a callback to override the global HandlePanic.
func NewUnboundedExecutor() *UnboundedExecutor {
ctx, cancel := context.WithCancel(context.TODO())
return &UnboundedExecutor{
ctx: ctx,
cancel: cancel,
activeGoroutinesMutex: &sync.Mutex{},
activeGoroutines: map[string]int{},
}
}
// Go starts a new goroutine and tracks its lifecycle.
// Panic will be recovered and logged automatically, except for StopSignal
func (executor *UnboundedExecutor) Go(handler func(ctx context.Context)) {
pc := reflect.ValueOf(handler).Pointer()
f := runtime.FuncForPC(pc)
funcName := f.Name()
file, line := f.FileLine(pc)
executor.activeGoroutinesMutex.Lock()
defer executor.activeGoroutinesMutex.Unlock()
startFrom := fmt.Sprintf("%s:%d", file, line)
executor.activeGoroutines[startFrom] += 1
go func() {
defer func() {
recovered := recover()
// if you want to quit a goroutine without triggering HandlePanic,
// use runtime.Goexit() to quit
if recovered != nil {
if executor.HandlePanic == nil {
HandlePanic(recovered, funcName)
} else {
executor.HandlePanic(recovered, funcName)
}
}
executor.activeGoroutinesMutex.Lock()
executor.activeGoroutines[startFrom] -= 1
executor.activeGoroutinesMutex.Unlock()
}()
handler(executor.ctx)
}()
}
// Stop cancels all goroutines started by this executor without waiting.
func (executor *UnboundedExecutor) Stop() {
executor.cancel()
}
// StopAndWaitForever cancels all goroutines started by this executor and
// waits until all of them have exited.
func (executor *UnboundedExecutor) StopAndWaitForever() {
executor.StopAndWait(context.Background())
}
// StopAndWait cancels all goroutines started by this executor and waits.
// The wait can be cancelled by the context passed in.
func (executor *UnboundedExecutor) StopAndWait(ctx context.Context) {
executor.cancel()
for {
oneHundredMilliseconds := time.NewTimer(time.Millisecond * 100)
select {
case <-oneHundredMilliseconds.C:
if executor.checkNoActiveGoroutines() {
return
}
case <-ctx.Done():
return
}
}
}
func (executor *UnboundedExecutor) checkNoActiveGoroutines() bool {
executor.activeGoroutinesMutex.Lock()
defer executor.activeGoroutinesMutex.Unlock()
for startFrom, count := range executor.activeGoroutines {
if count > 0 {
InfoLogger.Println("UnboundedExecutor is still waiting goroutines to quit",
"startFrom", startFrom,
"count", count)
return false
}
}
return true
}
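The StopAndWait comment above notes that the wait itself honors the caller's context; as a hedged illustration (not part of the vendored file), a bounded shutdown might look like this:
```go
package main

import (
	"context"
	"time"

	"github.com/modern-go/concurrent"
)

func main() {
	executor := concurrent.NewUnboundedExecutor()
	executor.Go(func(ctx context.Context) {
		<-ctx.Done() // exit as soon as the executor is stopped
	})

	// Cancel every goroutine owned by the executor, then wait at most five
	// seconds for them to exit before giving up on the wait itself.
	shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	executor.StopAndWait(shutdownCtx)
}
```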

vendor/github.com/modern-go/reflect2/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/modern-go/reflect2/README.md generated vendored Normal file

@ -0,0 +1,71 @@
# reflect2
[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/reflect2/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/reflect2?badge)
[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/reflect2)
[![Build Status](https://travis-ci.org/modern-go/reflect2.svg?branch=master)](https://travis-ci.org/modern-go/reflect2)
[![codecov](https://codecov.io/gh/modern-go/reflect2/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/reflect2)
[![rcard](https://goreportcard.com/badge/github.com/modern-go/reflect2)](https://goreportcard.com/report/github.com/modern-go/reflect2)
[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/reflect2/master/LICENSE)
A reflect API that avoids the runtime reflect.Value cost:
* reflect get/set interface{}, with type checking
* reflect get/set unsafe.Pointer, without type checking
* `reflect2.TypeByName` works like `Class.forName` in Java
[json-iterator](https://github.com/json-iterator/go) uses this package to save runtime dispatching cost.
This package is designed for low-level libraries to optimize reflection performance.
General applications should still use the reflect standard library.
# reflect2.TypeByName
```go
// given package is github.com/your/awesome-package
type MyStruct struct {
// ...
}
// will return the type
reflect2.TypeByName("awesome-package.MyStruct")
// however, if the type has not been used,
// it will be eliminated by the compiler, so we cannot get it at runtime
```
# reflect2 get/set interface{}
```go
valType := reflect2.TypeOf(1)
i := 1
j := 10
valType.Set(&i, &j)
// i will be 10
```
to get/set a `type`, always use its pointer `*type`
# reflect2 get/set unsafe.Pointer
```go
valType := reflect2.TypeOf(1)
i := 1
j := 10
valType.UnsafeSet(unsafe.Pointer(&i), unsafe.Pointer(&j))
// i will be 10
```
to get/set a `type`, always use its pointer `*type`
# benchmark
Benchmarks are not necessary for this package, since it does very little by itself:
it is just a thin wrapper that makes parts of the Go runtime public.
Both `reflect2` and `reflect` call the same functions
provided by the `runtime` package of the Go language.
# unsafe safety
Instead of casting `[]byte` to `sliceHeader` in your application using unsafe,
we can use reflect2 instead. This way, if `sliceHeader` changes in the future,
only reflect2 needs to be upgraded.
reflect2 tries its best to keep its implementation the same as reflect (verified by testing).
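As a hedged sketch of that point (not from the upstream README), one can lean on the `UnsafeCastString` helper defined in this package rather than building a `reflect.SliceHeader` by hand:
```go
package main

import (
	"fmt"

	"github.com/modern-go/reflect2"
)

func main() {
	// Zero-copy view of the string's bytes; reflect2 owns the header layout,
	// so only reflect2 has to change if sliceHeader ever changes.
	// The returned slice must be treated as read-only.
	b := reflect2.UnsafeCastString("hello")
	fmt.Println(len(b), string(b)) // 5 hello
}
```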

vendor/github.com/modern-go/reflect2/go_above_17.go generated vendored Normal file

@ -0,0 +1,8 @@
//+build go1.7
package reflect2
import "unsafe"
//go:linkname resolveTypeOff reflect.resolveTypeOff
func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer

vendor/github.com/modern-go/reflect2/go_above_19.go generated vendored Normal file

@ -0,0 +1,14 @@
//+build go1.9
package reflect2
import (
"unsafe"
)
//go:linkname makemap reflect.makemap
func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer)
func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer {
return makemap(rtype, cap)
}

vendor/github.com/modern-go/reflect2/go_below_17.go generated vendored Normal file

@ -0,0 +1,9 @@
//+build !go1.7
package reflect2
import "unsafe"
func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
return nil
}

vendor/github.com/modern-go/reflect2/go_below_19.go generated vendored Normal file

@ -0,0 +1,14 @@
//+build !go1.9
package reflect2
import (
"unsafe"
)
//go:linkname makemap reflect.makemap
func makemap(rtype unsafe.Pointer) (m unsafe.Pointer)
func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer {
return makemap(rtype)
}

vendor/github.com/modern-go/reflect2/reflect2.go generated vendored Normal file

@ -0,0 +1,295 @@
package reflect2
import (
"github.com/modern-go/concurrent"
"reflect"
"unsafe"
)
type Type interface {
Kind() reflect.Kind
// New returns a pointer to data of this type
New() interface{}
// UnsafeNew returns the allocated space pointed to by an unsafe.Pointer
UnsafeNew() unsafe.Pointer
// PackEFace casts an unsafe pointer into an object-represented pointer
PackEFace(ptr unsafe.Pointer) interface{}
// Indirect dereferences an object-represented pointer to this type
Indirect(obj interface{}) interface{}
// UnsafeIndirect dereferences a pointer to this type
UnsafeIndirect(ptr unsafe.Pointer) interface{}
// Type1 returns reflect.Type
Type1() reflect.Type
Implements(thatType Type) bool
String() string
RType() uintptr
// LikePtr reports whether interface{} of this type has pointer-like behavior
LikePtr() bool
IsNullable() bool
IsNil(obj interface{}) bool
UnsafeIsNil(ptr unsafe.Pointer) bool
Set(obj interface{}, val interface{})
UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer)
AssignableTo(anotherType Type) bool
}
type ListType interface {
Type
Elem() Type
SetIndex(obj interface{}, index int, elem interface{})
UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer)
GetIndex(obj interface{}, index int) interface{}
UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer
}
type ArrayType interface {
ListType
Len() int
}
type SliceType interface {
ListType
MakeSlice(length int, cap int) interface{}
UnsafeMakeSlice(length int, cap int) unsafe.Pointer
Grow(obj interface{}, newLength int)
UnsafeGrow(ptr unsafe.Pointer, newLength int)
Append(obj interface{}, elem interface{})
UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer)
LengthOf(obj interface{}) int
UnsafeLengthOf(ptr unsafe.Pointer) int
SetNil(obj interface{})
UnsafeSetNil(ptr unsafe.Pointer)
Cap(obj interface{}) int
UnsafeCap(ptr unsafe.Pointer) int
}
type StructType interface {
Type
NumField() int
Field(i int) StructField
FieldByName(name string) StructField
FieldByIndex(index []int) StructField
FieldByNameFunc(match func(string) bool) StructField
}
type StructField interface {
Offset() uintptr
Name() string
PkgPath() string
Type() Type
Tag() reflect.StructTag
Index() []int
Anonymous() bool
Set(obj interface{}, value interface{})
UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer)
Get(obj interface{}) interface{}
UnsafeGet(obj unsafe.Pointer) unsafe.Pointer
}
type MapType interface {
Type
Key() Type
Elem() Type
MakeMap(cap int) interface{}
UnsafeMakeMap(cap int) unsafe.Pointer
SetIndex(obj interface{}, key interface{}, elem interface{})
UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer)
TryGetIndex(obj interface{}, key interface{}) (interface{}, bool)
GetIndex(obj interface{}, key interface{}) interface{}
UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
Iterate(obj interface{}) MapIterator
UnsafeIterate(obj unsafe.Pointer) MapIterator
}
type MapIterator interface {
HasNext() bool
Next() (key interface{}, elem interface{})
UnsafeNext() (key unsafe.Pointer, elem unsafe.Pointer)
}
type PtrType interface {
Type
Elem() Type
}
type InterfaceType interface {
NumMethod() int
}
type Config struct {
UseSafeImplementation bool
}
type API interface {
TypeOf(obj interface{}) Type
Type2(type1 reflect.Type) Type
}
var ConfigUnsafe = Config{UseSafeImplementation: false}.Froze()
var ConfigSafe = Config{UseSafeImplementation: true}.Froze()
type frozenConfig struct {
useSafeImplementation bool
cache *concurrent.Map
}
func (cfg Config) Froze() *frozenConfig {
return &frozenConfig{
useSafeImplementation: cfg.UseSafeImplementation,
cache: concurrent.NewMap(),
}
}
func (cfg *frozenConfig) TypeOf(obj interface{}) Type {
cacheKey := uintptr(unpackEFace(obj).rtype)
typeObj, found := cfg.cache.Load(cacheKey)
if found {
return typeObj.(Type)
}
return cfg.Type2(reflect.TypeOf(obj))
}
func (cfg *frozenConfig) Type2(type1 reflect.Type) Type {
cacheKey := uintptr(unpackEFace(type1).data)
typeObj, found := cfg.cache.Load(cacheKey)
if found {
return typeObj.(Type)
}
type2 := cfg.wrapType(type1)
cfg.cache.Store(cacheKey, type2)
return type2
}
func (cfg *frozenConfig) wrapType(type1 reflect.Type) Type {
safeType := safeType{Type: type1, cfg: cfg}
switch type1.Kind() {
case reflect.Struct:
if cfg.useSafeImplementation {
return &safeStructType{safeType}
}
return newUnsafeStructType(cfg, type1)
case reflect.Array:
if cfg.useSafeImplementation {
return &safeSliceType{safeType}
}
return newUnsafeArrayType(cfg, type1)
case reflect.Slice:
if cfg.useSafeImplementation {
return &safeSliceType{safeType}
}
return newUnsafeSliceType(cfg, type1)
case reflect.Map:
if cfg.useSafeImplementation {
return &safeMapType{safeType}
}
return newUnsafeMapType(cfg, type1)
case reflect.Ptr, reflect.Chan, reflect.Func:
if cfg.useSafeImplementation {
return &safeMapType{safeType}
}
return newUnsafePtrType(cfg, type1)
case reflect.Interface:
if cfg.useSafeImplementation {
return &safeMapType{safeType}
}
if type1.NumMethod() == 0 {
return newUnsafeEFaceType(cfg, type1)
}
return newUnsafeIFaceType(cfg, type1)
default:
if cfg.useSafeImplementation {
return &safeType
}
return newUnsafeType(cfg, type1)
}
}
func TypeOf(obj interface{}) Type {
return ConfigUnsafe.TypeOf(obj)
}
func TypeOfPtr(obj interface{}) PtrType {
return TypeOf(obj).(PtrType)
}
func Type2(type1 reflect.Type) Type {
if type1 == nil {
return nil
}
return ConfigUnsafe.Type2(type1)
}
func PtrTo(typ Type) Type {
return Type2(reflect.PtrTo(typ.Type1()))
}
func PtrOf(obj interface{}) unsafe.Pointer {
return unpackEFace(obj).data
}
func RTypeOf(obj interface{}) uintptr {
return uintptr(unpackEFace(obj).rtype)
}
func IsNil(obj interface{}) bool {
if obj == nil {
return true
}
return unpackEFace(obj).data == nil
}
func IsNullable(kind reflect.Kind) bool {
switch kind {
case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func, reflect.Slice, reflect.Interface:
return true
}
return false
}
func likePtrKind(kind reflect.Kind) bool {
switch kind {
case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func:
return true
}
return false
}
func likePtrType(typ reflect.Type) bool {
if likePtrKind(typ.Kind()) {
return true
}
if typ.Kind() == reflect.Struct {
if typ.NumField() != 1 {
return false
}
return likePtrType(typ.Field(0).Type)
}
if typ.Kind() == reflect.Array {
if typ.Len() != 1 {
return false
}
return likePtrType(typ.Elem())
}
return false
}
// NoEscape hides a pointer from escape analysis. noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input. noescape is inlined and currently
// compiles down to zero instructions.
// USE CAREFULLY!
//go:nosplit
func NoEscape(p unsafe.Pointer) unsafe.Pointer {
x := uintptr(p)
return unsafe.Pointer(x ^ 0)
}
func UnsafeCastString(str string) []byte {
stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str))
sliceHeader := &reflect.SliceHeader{
Data: stringHeader.Data,
Cap: stringHeader.Len,
Len: stringHeader.Len,
}
return *(*[]byte)(unsafe.Pointer(sliceHeader))
}


vendor/github.com/modern-go/reflect2/reflect2_kind.go generated vendored Normal file

@ -0,0 +1,30 @@
package reflect2
import (
"reflect"
"unsafe"
)
// DefaultTypeOfKind returns the non-aliased default type for the kind
func DefaultTypeOfKind(kind reflect.Kind) Type {
return kindTypes[kind]
}
var kindTypes = map[reflect.Kind]Type{
reflect.Bool: TypeOf(true),
reflect.Uint8: TypeOf(uint8(0)),
reflect.Int8: TypeOf(int8(0)),
reflect.Uint16: TypeOf(uint16(0)),
reflect.Int16: TypeOf(int16(0)),
reflect.Uint32: TypeOf(uint32(0)),
reflect.Int32: TypeOf(int32(0)),
reflect.Uint64: TypeOf(uint64(0)),
reflect.Int64: TypeOf(int64(0)),
reflect.Uint: TypeOf(uint(0)),
reflect.Int: TypeOf(int(0)),
reflect.Float32: TypeOf(float32(0)),
reflect.Float64: TypeOf(float64(0)),
reflect.Uintptr: TypeOf(uintptr(0)),
reflect.String: TypeOf(""),
reflect.UnsafePointer: TypeOf(unsafe.Pointer(nil)),
}
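A small usage sketch (illustrative only), assuming nothing beyond what this file and reflect2.go export:
```go
package main

import (
	"fmt"
	"reflect"

	"github.com/modern-go/reflect2"
)

func main() {
	// Look up the canonical, non-aliased Type for a kind: reflect.Int32
	// maps to int32 itself, even if the caller works with an alias like rune.
	typ := reflect2.DefaultTypeOfKind(reflect.Int32)
	fmt.Println(typ.String()) // int32
}
```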

vendor/github.com/modern-go/reflect2/relfect2_386.s generated vendored Normal file

vendor/github.com/modern-go/reflect2/relfect2_arm.s generated vendored Normal file

vendor/github.com/modern-go/reflect2/safe_field.go generated vendored Normal file

@ -0,0 +1,58 @@
package reflect2
import (
"reflect"
"unsafe"
)
type safeField struct {
reflect.StructField
}
func (field *safeField) Offset() uintptr {
return field.StructField.Offset
}
func (field *safeField) Name() string {
return field.StructField.Name
}
func (field *safeField) PkgPath() string {
return field.StructField.PkgPath
}
func (field *safeField) Type() Type {
panic("not implemented")
}
func (field *safeField) Tag() reflect.StructTag {
return field.StructField.Tag
}
func (field *safeField) Index() []int {
return field.StructField.Index
}
func (field *safeField) Anonymous() bool {
return field.StructField.Anonymous
}
func (field *safeField) Set(obj interface{}, value interface{}) {
val := reflect.ValueOf(obj).Elem()
val.FieldByIndex(field.Index()).Set(reflect.ValueOf(value).Elem())
}
func (field *safeField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) {
panic("unsafe operation is not supported")
}
func (field *safeField) Get(obj interface{}) interface{} {
val := reflect.ValueOf(obj).Elem().FieldByIndex(field.Index())
ptr := reflect.New(val.Type())
ptr.Elem().Set(val)
return ptr.Interface()
}
func (field *safeField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer {
panic("does not support unsafe operation")
}

vendor/github.com/modern-go/reflect2/safe_map.go generated vendored Normal file

@ -0,0 +1,101 @@
package reflect2
import (
"reflect"
"unsafe"
)
type safeMapType struct {
safeType
}
func (type2 *safeMapType) Key() Type {
return type2.safeType.cfg.Type2(type2.Type.Key())
}
func (type2 *safeMapType) MakeMap(cap int) interface{} {
ptr := reflect.New(type2.Type)
ptr.Elem().Set(reflect.MakeMap(type2.Type))
return ptr.Interface()
}
func (type2 *safeMapType) UnsafeMakeMap(cap int) unsafe.Pointer {
panic("does not support unsafe operation")
}
func (type2 *safeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) {
keyVal := reflect.ValueOf(key)
elemVal := reflect.ValueOf(elem)
val := reflect.ValueOf(obj)
val.Elem().SetMapIndex(keyVal.Elem(), elemVal.Elem())
}
func (type2 *safeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) {
panic("does not support unsafe operation")
}
func (type2 *safeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) {
keyVal := reflect.ValueOf(key)
if key == nil {
keyVal = reflect.New(type2.Type.Key()).Elem()
}
val := reflect.ValueOf(obj).MapIndex(keyVal)
if !val.IsValid() {
return nil, false
}
return val.Interface(), true
}
func (type2 *safeMapType) GetIndex(obj interface{}, key interface{}) interface{} {
val := reflect.ValueOf(obj).Elem()
keyVal := reflect.ValueOf(key).Elem()
elemVal := val.MapIndex(keyVal)
if !elemVal.IsValid() {
ptr := reflect.New(reflect.PtrTo(val.Type().Elem()))
return ptr.Elem().Interface()
}
ptr := reflect.New(elemVal.Type())
ptr.Elem().Set(elemVal)
return ptr.Interface()
}
func (type2 *safeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer {
panic("does not support unsafe operation")
}
func (type2 *safeMapType) Iterate(obj interface{}) MapIterator {
m := reflect.ValueOf(obj).Elem()
return &safeMapIterator{
m: m,
keys: m.MapKeys(),
}
}
func (type2 *safeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
panic("does not support unsafe operation")
}
type safeMapIterator struct {
i int
m reflect.Value
keys []reflect.Value
}
func (iter *safeMapIterator) HasNext() bool {
return iter.i != len(iter.keys)
}
func (iter *safeMapIterator) Next() (interface{}, interface{}) {
key := iter.keys[iter.i]
elem := iter.m.MapIndex(key)
iter.i += 1
keyPtr := reflect.New(key.Type())
keyPtr.Elem().Set(key)
elemPtr := reflect.New(elem.Type())
elemPtr.Elem().Set(elem)
return keyPtr.Interface(), elemPtr.Interface()
}
func (iter *safeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) {
panic("does not support unsafe operation")
}

vendor/github.com/modern-go/reflect2/safe_slice.go generated vendored Normal file

@ -0,0 +1,92 @@
package reflect2
import (
"reflect"
"unsafe"
)
type safeSliceType struct {
safeType
}
func (type2 *safeSliceType) SetIndex(obj interface{}, index int, value interface{}) {
val := reflect.ValueOf(obj).Elem()
elem := reflect.ValueOf(value).Elem()
val.Index(index).Set(elem)
}
func (type2 *safeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, value unsafe.Pointer) {
panic("does not support unsafe operation")
}
func (type2 *safeSliceType) GetIndex(obj interface{}, index int) interface{} {
val := reflect.ValueOf(obj).Elem()
elem := val.Index(index)
ptr := reflect.New(elem.Type())
ptr.Elem().Set(elem)
return ptr.Interface()
}
func (type2 *safeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
panic("does not support unsafe operation")
}
func (type2 *safeSliceType) MakeSlice(length int, cap int) interface{} {
val := reflect.MakeSlice(type2.Type, length, cap)
ptr := reflect.New(val.Type())
ptr.Elem().Set(val)
return ptr.Interface()
}
func (type2 *safeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer {
panic("does not support unsafe operation")
}
func (type2 *safeSliceType) Grow(obj interface{}, newLength int) {
oldCap := type2.Cap(obj)
oldSlice := reflect.ValueOf(obj).Elem()
delta := newLength - oldCap
deltaVals := make([]reflect.Value, delta)
newSlice := reflect.Append(oldSlice, deltaVals...)
oldSlice.Set(newSlice)
}
func (type2 *safeSliceType) UnsafeGrow(ptr unsafe.Pointer, newLength int) {
panic("does not support unsafe operation")
}
func (type2 *safeSliceType) Append(obj interface{}, elem interface{}) {
val := reflect.ValueOf(obj).Elem()
elemVal := reflect.ValueOf(elem).Elem()
newVal := reflect.Append(val, elemVal)
val.Set(newVal)
}
func (type2 *safeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) {
panic("does not support unsafe operation")
}
func (type2 *safeSliceType) SetNil(obj interface{}) {
val := reflect.ValueOf(obj).Elem()
val.Set(reflect.Zero(val.Type()))
}
func (type2 *safeSliceType) UnsafeSetNil(ptr unsafe.Pointer) {
panic("does not support unsafe operation")
}
func (type2 *safeSliceType) LengthOf(obj interface{}) int {
return reflect.ValueOf(obj).Elem().Len()
}
func (type2 *safeSliceType) UnsafeLengthOf(ptr unsafe.Pointer) int {
panic("does not support unsafe operation")
}
func (type2 *safeSliceType) Cap(obj interface{}) int {
return reflect.ValueOf(obj).Elem().Cap()
}
func (type2 *safeSliceType) UnsafeCap(ptr unsafe.Pointer) int {
panic("does not support unsafe operation")
}

vendor/github.com/modern-go/reflect2/safe_struct.go generated vendored Normal file

@ -0,0 +1,29 @@
package reflect2
type safeStructType struct {
safeType
}
func (type2 *safeStructType) FieldByName(name string) StructField {
field, found := type2.Type.FieldByName(name)
if !found {
panic("field " + name + " not found")
}
return &safeField{StructField: field}
}
func (type2 *safeStructType) Field(i int) StructField {
return &safeField{StructField: type2.Type.Field(i)}
}
func (type2 *safeStructType) FieldByIndex(index []int) StructField {
return &safeField{StructField: type2.Type.FieldByIndex(index)}
}
func (type2 *safeStructType) FieldByNameFunc(match func(string) bool) StructField {
field, found := type2.Type.FieldByNameFunc(match)
if !found {
panic("field match condition not found in " + type2.Type.String())
}
return &safeField{StructField: field}
}

vendor/github.com/modern-go/reflect2/safe_type.go generated vendored Normal file

@ -0,0 +1,78 @@
package reflect2
import (
"reflect"
"unsafe"
)
type safeType struct {
reflect.Type
cfg *frozenConfig
}
func (type2 *safeType) New() interface{} {
return reflect.New(type2.Type).Interface()
}
func (type2 *safeType) UnsafeNew() unsafe.Pointer {
panic("does not support unsafe operation")
}
func (type2 *safeType) Elem() Type {
return type2.cfg.Type2(type2.Type.Elem())
}
func (type2 *safeType) Type1() reflect.Type {
return type2.Type
}
func (type2 *safeType) PackEFace(ptr unsafe.Pointer) interface{} {
panic("does not support unsafe operation")
}
func (type2 *safeType) Implements(thatType Type) bool {
return type2.Type.Implements(thatType.Type1())
}
func (type2 *safeType) RType() uintptr {
panic("does not support unsafe operation")
}
func (type2 *safeType) Indirect(obj interface{}) interface{} {
return reflect.Indirect(reflect.ValueOf(obj)).Interface()
}
func (type2 *safeType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
panic("does not support unsafe operation")
}
func (type2 *safeType) LikePtr() bool {
panic("does not support unsafe operation")
}
func (type2 *safeType) IsNullable() bool {
return IsNullable(type2.Kind())
}
func (type2 *safeType) IsNil(obj interface{}) bool {
if obj == nil {
return true
}
return reflect.ValueOf(obj).Elem().IsNil()
}
func (type2 *safeType) UnsafeIsNil(ptr unsafe.Pointer) bool {
panic("does not support unsafe operation")
}
func (type2 *safeType) Set(obj interface{}, val interface{}) {
reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(val).Elem())
}
func (type2 *safeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
panic("does not support unsafe operation")
}
func (type2 *safeType) AssignableTo(anotherType Type) bool {
return type2.Type1().AssignableTo(anotherType.Type1())
}

vendor/github.com/modern-go/reflect2/type_map.go generated vendored Normal file

@ -0,0 +1,103 @@
package reflect2
import (
"reflect"
"runtime"
"strings"
"unsafe"
)
// typelinks1 for 1.5 ~ 1.6
//go:linkname typelinks1 reflect.typelinks
func typelinks1() [][]unsafe.Pointer
// typelinks2 for 1.7 ~
//go:linkname typelinks2 reflect.typelinks
func typelinks2() (sections []unsafe.Pointer, offset [][]int32)
var types = map[string]reflect.Type{}
var packages = map[string]map[string]reflect.Type{}
func init() {
ver := runtime.Version()
if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
loadGo15Types()
} else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") {
loadGo15Types()
} else {
loadGo17Types()
}
}
func loadGo15Types() {
var obj interface{} = reflect.TypeOf(0)
typePtrss := typelinks1()
for _, typePtrs := range typePtrss {
for _, typePtr := range typePtrs {
(*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr
typ := obj.(reflect.Type)
if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
loadedType := typ.Elem()
pkgTypes := packages[loadedType.PkgPath()]
if pkgTypes == nil {
pkgTypes = map[string]reflect.Type{}
packages[loadedType.PkgPath()] = pkgTypes
}
types[loadedType.String()] = loadedType
pkgTypes[loadedType.Name()] = loadedType
}
if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr &&
typ.Elem().Elem().Kind() == reflect.Struct {
loadedType := typ.Elem().Elem()
pkgTypes := packages[loadedType.PkgPath()]
if pkgTypes == nil {
pkgTypes = map[string]reflect.Type{}
packages[loadedType.PkgPath()] = pkgTypes
}
types[loadedType.String()] = loadedType
pkgTypes[loadedType.Name()] = loadedType
}
}
}
}
func loadGo17Types() {
var obj interface{} = reflect.TypeOf(0)
sections, offset := typelinks2()
for i, offs := range offset {
rodata := sections[i]
for _, off := range offs {
(*emptyInterface)(unsafe.Pointer(&obj)).word = resolveTypeOff(unsafe.Pointer(rodata), off)
typ := obj.(reflect.Type)
if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
loadedType := typ.Elem()
pkgTypes := packages[loadedType.PkgPath()]
if pkgTypes == nil {
pkgTypes = map[string]reflect.Type{}
packages[loadedType.PkgPath()] = pkgTypes
}
types[loadedType.String()] = loadedType
pkgTypes[loadedType.Name()] = loadedType
}
}
}
}
type emptyInterface struct {
typ unsafe.Pointer
word unsafe.Pointer
}
// TypeByName returns the type by its name, just like Class.forName in Java
func TypeByName(typeName string) Type {
return Type2(types[typeName])
}
// TypeByPackageName returns the type by its package path and name
func TypeByPackageName(pkgPath string, name string) Type {
pkgTypes := packages[pkgPath]
if pkgTypes == nil {
return nil
}
return Type2(pkgTypes[name])
}

Some files were not shown because too many files have changed in this diff.