chore: migrate to gitea
This commit is contained in:
7
vendor/github.com/ugorji/go/codec/0_importpath.go
generated
vendored
Normal file
7
vendor/github.com/ugorji/go/codec/0_importpath.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec // import "github.com/ugorji/go/codec"
|
||||
|
||||
// This establishes that this package must be imported as github.com/ugorji/go/codec.
|
||||
// It makes forking easier, and plays well with pre-module releases of go.
|
||||
22
vendor/github.com/ugorji/go/codec/LICENSE
generated
vendored
Normal file
22
vendor/github.com/ugorji/go/codec/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2012-2020 Ugorji Nwoke.
|
||||
All rights reserved.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
300
vendor/github.com/ugorji/go/codec/README.md
generated
vendored
Normal file
300
vendor/github.com/ugorji/go/codec/README.md
generated
vendored
Normal file
@@ -0,0 +1,300 @@
|
||||
# Package Documentation for github.com/ugorji/go/codec
|
||||
|
||||
Package codec provides a High Performance, Feature-Rich Idiomatic Go
|
||||
codec/encoding library for binc, msgpack, cbor, json.
|
||||
|
||||
Supported Serialization formats are:
|
||||
|
||||
- msgpack: https://github.com/msgpack/msgpack
|
||||
- binc: http://github.com/ugorji/binc
|
||||
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
|
||||
- json: http://json.org http://tools.ietf.org/html/rfc7159
|
||||
- simple: (unpublished)
|
||||
|
||||
For detailed usage information, read the primer at
|
||||
http://ugorji.net/blog/go-codec-primer .
|
||||
|
||||
The idiomatic Go support is as seen in other encoding packages in the standard
|
||||
library (ie json, xml, gob, etc).
|
||||
|
||||
Rich Feature Set includes:
|
||||
|
||||
- Simple but extremely powerful and feature-rich API
|
||||
- Support for go 1.21 and above, selectively using newer APIs for later releases
|
||||
- Excellent code coverage ( ~ 85-90% )
|
||||
- Very High Performance, significantly outperforming libraries for Gob, Json, Bson, etc
|
||||
- Careful selected use of 'unsafe' for targeted performance gains.
|
||||
- 100% safe mode supported, where 'unsafe' is not used at all.
|
||||
- Lock-free (sans mutex) concurrency for scaling to 100's of cores
|
||||
- In-place updates during decode, with option to zero value in maps and slices prior to decode
|
||||
- Coerce types where appropriate e.g. decode an int in the stream into a
|
||||
float, decode numbers from formatted strings, etc
|
||||
- Corner Cases: Overflows, nil maps/slices, nil values in streams are handled correctly
|
||||
- Standard field renaming via tags
|
||||
- Support for omitting empty fields during an encoding
|
||||
- Encoding from any value and decoding into pointer to any value (struct,
|
||||
slice, map, primitives, pointers, interface{}, etc)
|
||||
- Extensions to support efficient encoding/decoding of any named types
|
||||
- Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
|
||||
- Support using existence of `IsZero() bool` to determine if a zero value
|
||||
- Decoding without a schema (into a interface{}). Includes Options to
|
||||
configure what specific map or slice type to use when decoding an encoded
|
||||
list or map into a nil interface{}
|
||||
- Mapping a non-interface type to an interface, so we can decode appropriately
|
||||
into any interface type with a correctly configured non-interface value.
|
||||
- Encode a struct as an array, and decode struct from an array in the data stream
|
||||
- Option to encode struct keys as numbers (instead of strings) (to support
|
||||
structured streams with fields encoded as numeric codes)
|
||||
- Comprehensive support for anonymous fields
|
||||
- Fast (no-reflection) encoding/decoding of common maps and slices
|
||||
- Code-generation for faster performance, supported in go 1.6+
|
||||
- Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
|
||||
- Support indefinite-length formats to enable true streaming (for formats
|
||||
which support it e.g. json, cbor)
|
||||
- Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
|
||||
This mostly applies to maps, where iteration order is non-deterministic.
|
||||
- NIL in data stream decoded as zero value
|
||||
- Never silently skip data when decoding. User decides whether to return an
|
||||
error or silently skip data when keys or indexes in the data stream do not
|
||||
map to fields in the struct.
|
||||
- Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
|
||||
- Encode/Decode from/to chan types (for iterative streaming support)
|
||||
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
|
||||
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
|
||||
- Handle unique idiosyncrasies of codecs e.g. For messagepack,
|
||||
configure how ambiguities in handling raw bytes are resolved and provide
|
||||
rpc server/client codec to support msgpack-rpc protocol defined at:
|
||||
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
|
||||
|
||||
# Supported build tags
|
||||
|
||||
We gain performance by code-generating fast-paths for slices and maps of built-in types,
|
||||
and monomorphizing generic code explicitly so we gain inlining and de-virtualization benefits.
|
||||
|
||||
The results are 20-50% performance improvements over v1.2.
|
||||
|
||||
Building and running is configured using build tags as below.
|
||||
|
||||
At runtime:
|
||||
|
||||
- codec.safe: run in safe mode (not using unsafe optimizations)
|
||||
- codec.notmono: use generics code (bypassing performance-boosting monomorphized code)
|
||||
- codec.notfastpath: skip fast path code for slices and maps of built-in types (number, bool, string, bytes)
|
||||
|
||||
Each of these "runtime" tags have a convenience synonym i.e. safe, notmono, notfastpath.
|
||||
Pls use these mostly during development - use codec.XXX in your go files.
|
||||
|
||||
Build only:
|
||||
|
||||
- codec.build: used to generate fastpath and monomorphization code
|
||||
|
||||
Test only:
|
||||
|
||||
- codec.notmammoth: skip the mammoth generated tests
|
||||
|
||||
# Extension Support
|
||||
|
||||
Users can register a function to handle the encoding or decoding of their custom
|
||||
types.
|
||||
|
||||
There are no restrictions on what the custom type can be. Some examples:
|
||||
|
||||
```go
|
||||
type BisSet []int
|
||||
type BitSet64 uint64
|
||||
type UUID string
|
||||
type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
|
||||
type GifImage struct { ... }
|
||||
```
|
||||
|
||||
As an illustration, MyStructWithUnexportedFields would normally be encoded as
|
||||
an empty map because it has no exported fields, while UUID would be encoded as a
|
||||
string. However, with extension support, you can encode any of these however you
|
||||
like.
|
||||
|
||||
There is also seamless support provided for registering an extension (with a
|
||||
tag) but letting the encoding mechanism default to the standard way.
|
||||
|
||||
# Custom Encoding and Decoding
|
||||
|
||||
This package maintains symmetry in the encoding and decoding halfs. We determine
|
||||
how to encode or decode by walking this decision tree
|
||||
|
||||
- is there an extension registered for the type?
|
||||
- is type a codec.Selfer?
|
||||
- is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler?
|
||||
- is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler?
|
||||
- is format text-based, and type an encoding.TextMarshaler and TextUnmarshaler?
|
||||
- else use a pair of functions based on the "kind" of the type e.g. map, slice, int64
|
||||
|
||||
This symmetry is important to reduce chances of issues happening because the
|
||||
encoding and decoding sides are out of sync e.g. decoded via very specific
|
||||
encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
|
||||
|
||||
Consequently, if a type only defines one-half of the symmetry (e.g.
|
||||
it implements UnmarshalJSON() but not MarshalJSON() ), then that type doesn't
|
||||
satisfy the check and we will continue walking down the decision tree.
|
||||
|
||||
# RPC
|
||||
|
||||
RPC Client and Server Codecs are implemented, so the codecs can be used with the
|
||||
standard net/rpc package.
|
||||
|
||||
# Usage
|
||||
|
||||
The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent
|
||||
modification.
|
||||
|
||||
The Encoder and Decoder are NOT safe for concurrent use.
|
||||
|
||||
Consequently, the usage model is basically:
|
||||
|
||||
- Create and initialize the Handle before any use. Once created, DO NOT modify it.
|
||||
- Multiple Encoders or Decoders can now use the Handle concurrently.
|
||||
They only read information off the Handle (never write).
|
||||
- However, each Encoder or Decoder MUST not be used concurrently
|
||||
- To re-use an Encoder/Decoder, call Reset(...) on it first. This allows you
|
||||
use state maintained on the Encoder/Decoder.
|
||||
|
||||
Sample usage model:
|
||||
|
||||
```go
|
||||
// create and configure Handle
|
||||
var (
|
||||
bh codec.BincHandle
|
||||
mh codec.MsgpackHandle
|
||||
ch codec.CborHandle
|
||||
)
|
||||
|
||||
mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
|
||||
|
||||
// configure extensions
|
||||
// e.g. for msgpack, define functions and enable Time support for tag 1
|
||||
// mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
|
||||
|
||||
// create and use decoder/encoder
|
||||
var (
|
||||
r io.Reader
|
||||
w io.Writer
|
||||
b []byte
|
||||
h = &bh // or mh to use msgpack
|
||||
)
|
||||
|
||||
dec = codec.NewDecoder(r, h)
|
||||
dec = codec.NewDecoderBytes(b, h)
|
||||
err = dec.Decode(&v)
|
||||
|
||||
enc = codec.NewEncoder(w, h)
|
||||
enc = codec.NewEncoderBytes(&b, h)
|
||||
err = enc.Encode(v)
|
||||
|
||||
//RPC Server
|
||||
go func() {
|
||||
for {
|
||||
conn, err := listener.Accept()
|
||||
rpcCodec := codec.GoRpc.ServerCodec(conn, h)
|
||||
//OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
|
||||
rpc.ServeCodec(rpcCodec)
|
||||
}
|
||||
}()
|
||||
|
||||
//RPC Communication (client side)
|
||||
conn, err = net.Dial("tcp", "localhost:5555")
|
||||
rpcCodec := codec.GoRpc.ClientCodec(conn, h)
|
||||
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
|
||||
client := rpc.NewClientWithCodec(rpcCodec)
|
||||
```
|
||||
|
||||
# Running Tests
|
||||
|
||||
To run tests, use the following:
|
||||
|
||||
```
|
||||
go test
|
||||
```
|
||||
|
||||
To run the full suite of tests, use the following:
|
||||
|
||||
```
|
||||
go test -tags codec.alltests -run Suite
|
||||
```
|
||||
|
||||
You can run the tag 'codec.safe' to run tests or build in safe mode. e.g.
|
||||
|
||||
```
|
||||
go test -tags codec.safe -run Json
|
||||
go test -tags "codec.alltests codec.safe" -run Suite
|
||||
```
|
||||
|
||||
You can run the tag 'codec.notmono' to build bypassing the monomorphized code e.g.
|
||||
|
||||
```
|
||||
go test -tags codec.notmono -run Json
|
||||
```
|
||||
|
||||
# Running Benchmarks
|
||||
|
||||
```
|
||||
cd bench
|
||||
go test -bench . -benchmem -benchtime 1s
|
||||
```
|
||||
|
||||
Please see http://github.com/ugorji/go-codec-bench .
|
||||
|
||||
# Caveats
|
||||
|
||||
Struct fields matching the following are ignored during encoding and decoding
|
||||
|
||||
- struct tag value set to -
|
||||
- func, complex numbers, unsafe pointers
|
||||
- unexported and not embedded
|
||||
- unexported and embedded and not struct kind
|
||||
- unexported and embedded pointers (from go1.10)
|
||||
|
||||
Every other field in a struct will be encoded/decoded.
|
||||
|
||||
Embedded fields are encoded as if they exist in the top-level struct, with some
|
||||
caveats. See Encode documentation.
|
||||
|
||||
## Exported Package API
|
||||
|
||||
```go
|
||||
var SelfExt = &extFailWrapper{}
|
||||
var GoRpc goRpc
|
||||
var MsgpackSpecRpc msgpackSpecRpc
|
||||
|
||||
type TypeInfos struct{ ... }
|
||||
func NewTypeInfos(tags []string) *TypeInfos
|
||||
|
||||
type Handle interface{ ... }
|
||||
type BasicHandle struct{ ... }
|
||||
type DecodeOptions struct{ ... }
|
||||
type EncodeOptions struct{ ... }
|
||||
|
||||
type Decoder struct{ ... }
|
||||
func NewDecoder(r io.Reader, h Handle) *Decoder
|
||||
func NewDecoderBytes(in []byte, h Handle) *Decoder
|
||||
func NewDecoderString(s string, h Handle) *Decoder
|
||||
type Encoder struct{ ... }
|
||||
func NewEncoder(w io.Writer, h Handle) *Encoder
|
||||
func NewEncoderBytes(out *[]byte, h Handle) *Encoder
|
||||
|
||||
type Ext interface{ ... }
|
||||
type InterfaceExt interface{ ... }
|
||||
type BytesExt interface{ ... }
|
||||
|
||||
type BincHandle struct{ ... }
|
||||
type CborHandle struct{ ... }
|
||||
type JsonHandle struct{ ... }
|
||||
type MsgpackHandle struct{ ... }
|
||||
type SimpleHandle struct{ ... }
|
||||
|
||||
type MapBySlice interface{ ... }
|
||||
type MissingFielder interface{ ... }
|
||||
type MsgpackSpecRpcMultiArgs []interface{}
|
||||
type RPCOptions struct{ ... }
|
||||
type Raw []byte
|
||||
type RawExt struct{ ... }
|
||||
type Rpc interface{ ... }
|
||||
type Selfer interface{ ... }
|
||||
```
|
||||
259
vendor/github.com/ugorji/go/codec/base.fastpath.generated.go
generated
vendored
Normal file
259
vendor/github.com/ugorji/go/codec/base.fastpath.generated.go
generated
vendored
Normal file
@@ -0,0 +1,259 @@
|
||||
//go:build !notfastpath && !codec.notfastpath
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
// Code generated from fastpath.go.tmpl - DO NOT EDIT.
|
||||
|
||||
package codec
|
||||
|
||||
// Fast path functions try to create a fast path encode or decode implementation
|
||||
// for common maps and slices.
|
||||
//
|
||||
// We define the functions and register them in this single file
|
||||
// so as not to pollute the encode.go and decode.go, and create a dependency in there.
|
||||
// This file can be omitted without causing a build failure.
|
||||
//
|
||||
// The advantage of fast paths is:
|
||||
// - Many calls bypass reflection altogether
|
||||
//
|
||||
// Currently support
|
||||
// - slice of all builtin types (numeric, bool, string, []byte)
|
||||
// - maps of builtin types to builtin or interface{} type, EXCEPT FOR
|
||||
// keys of type uintptr, int8/16/32, uint16/32, float32/64, bool, interface{}
|
||||
// AND values of type type int8/16/32, uint16/32
|
||||
// This should provide adequate "typical" implementations.
|
||||
//
|
||||
// Note that fast track decode functions must handle values for which an address cannot be obtained.
|
||||
// For example:
|
||||
// m2 := map[string]int{}
|
||||
// p2 := []interface{}{m2}
|
||||
// // decoding into p2 will bomb if fast track functions do not treat like unaddressable.
|
||||
//
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"slices"
|
||||
"sort"
|
||||
)
|
||||
|
||||
const fastpathEnabled = true
|
||||
|
||||
type fastpathARtid [56]uintptr
|
||||
|
||||
type fastpathRtRtid struct {
|
||||
rtid uintptr
|
||||
rt reflect.Type
|
||||
}
|
||||
type fastpathARtRtid [56]fastpathRtRtid
|
||||
|
||||
var (
|
||||
fastpathAvRtidArr fastpathARtid
|
||||
fastpathAvRtRtidArr fastpathARtRtid
|
||||
fastpathAvRtid = fastpathAvRtidArr[:]
|
||||
fastpathAvRtRtid = fastpathAvRtRtidArr[:]
|
||||
)
|
||||
|
||||
func fastpathAvIndex(rtid uintptr) (i uint, ok bool) {
|
||||
return searchRtids(fastpathAvRtid, rtid)
|
||||
}
|
||||
|
||||
func init() {
|
||||
var i uint = 0
|
||||
fn := func(v interface{}) {
|
||||
xrt := reflect.TypeOf(v)
|
||||
xrtid := rt2id(xrt)
|
||||
xptrtid := rt2id(reflect.PointerTo(xrt))
|
||||
fastpathAvRtid[i] = xrtid
|
||||
fastpathAvRtRtid[i] = fastpathRtRtid{rtid: xrtid, rt: xrt}
|
||||
encBuiltinRtids = append(encBuiltinRtids, xrtid, xptrtid)
|
||||
decBuiltinRtids = append(decBuiltinRtids, xrtid, xptrtid)
|
||||
i++
|
||||
}
|
||||
|
||||
fn([]interface{}(nil))
|
||||
fn([]string(nil))
|
||||
fn([][]byte(nil))
|
||||
fn([]float32(nil))
|
||||
fn([]float64(nil))
|
||||
fn([]uint8(nil))
|
||||
fn([]uint64(nil))
|
||||
fn([]int(nil))
|
||||
fn([]int32(nil))
|
||||
fn([]int64(nil))
|
||||
fn([]bool(nil))
|
||||
|
||||
fn(map[string]interface{}(nil))
|
||||
fn(map[string]string(nil))
|
||||
fn(map[string][]byte(nil))
|
||||
fn(map[string]uint8(nil))
|
||||
fn(map[string]uint64(nil))
|
||||
fn(map[string]int(nil))
|
||||
fn(map[string]int32(nil))
|
||||
fn(map[string]float64(nil))
|
||||
fn(map[string]bool(nil))
|
||||
fn(map[uint8]interface{}(nil))
|
||||
fn(map[uint8]string(nil))
|
||||
fn(map[uint8][]byte(nil))
|
||||
fn(map[uint8]uint8(nil))
|
||||
fn(map[uint8]uint64(nil))
|
||||
fn(map[uint8]int(nil))
|
||||
fn(map[uint8]int32(nil))
|
||||
fn(map[uint8]float64(nil))
|
||||
fn(map[uint8]bool(nil))
|
||||
fn(map[uint64]interface{}(nil))
|
||||
fn(map[uint64]string(nil))
|
||||
fn(map[uint64][]byte(nil))
|
||||
fn(map[uint64]uint8(nil))
|
||||
fn(map[uint64]uint64(nil))
|
||||
fn(map[uint64]int(nil))
|
||||
fn(map[uint64]int32(nil))
|
||||
fn(map[uint64]float64(nil))
|
||||
fn(map[uint64]bool(nil))
|
||||
fn(map[int]interface{}(nil))
|
||||
fn(map[int]string(nil))
|
||||
fn(map[int][]byte(nil))
|
||||
fn(map[int]uint8(nil))
|
||||
fn(map[int]uint64(nil))
|
||||
fn(map[int]int(nil))
|
||||
fn(map[int]int32(nil))
|
||||
fn(map[int]float64(nil))
|
||||
fn(map[int]bool(nil))
|
||||
fn(map[int32]interface{}(nil))
|
||||
fn(map[int32]string(nil))
|
||||
fn(map[int32][]byte(nil))
|
||||
fn(map[int32]uint8(nil))
|
||||
fn(map[int32]uint64(nil))
|
||||
fn(map[int32]int(nil))
|
||||
fn(map[int32]int32(nil))
|
||||
fn(map[int32]float64(nil))
|
||||
fn(map[int32]bool(nil))
|
||||
|
||||
sort.Slice(fastpathAvRtid, func(i, j int) bool { return fastpathAvRtid[i] < fastpathAvRtid[j] })
|
||||
sort.Slice(fastpathAvRtRtid, func(i, j int) bool { return fastpathAvRtRtid[i].rtid < fastpathAvRtRtid[j].rtid })
|
||||
slices.Sort(encBuiltinRtids)
|
||||
slices.Sort(decBuiltinRtids)
|
||||
}
|
||||
|
||||
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
|
||||
switch v := iv.(type) {
|
||||
case *[]interface{}:
|
||||
*v = nil
|
||||
case *[]string:
|
||||
*v = nil
|
||||
case *[][]byte:
|
||||
*v = nil
|
||||
case *[]float32:
|
||||
*v = nil
|
||||
case *[]float64:
|
||||
*v = nil
|
||||
case *[]uint8:
|
||||
*v = nil
|
||||
case *[]uint64:
|
||||
*v = nil
|
||||
case *[]int:
|
||||
*v = nil
|
||||
case *[]int32:
|
||||
*v = nil
|
||||
case *[]int64:
|
||||
*v = nil
|
||||
case *[]bool:
|
||||
*v = nil
|
||||
|
||||
case *map[string]interface{}:
|
||||
*v = nil
|
||||
case *map[string]string:
|
||||
*v = nil
|
||||
case *map[string][]byte:
|
||||
*v = nil
|
||||
case *map[string]uint8:
|
||||
*v = nil
|
||||
case *map[string]uint64:
|
||||
*v = nil
|
||||
case *map[string]int:
|
||||
*v = nil
|
||||
case *map[string]int32:
|
||||
*v = nil
|
||||
case *map[string]float64:
|
||||
*v = nil
|
||||
case *map[string]bool:
|
||||
*v = nil
|
||||
case *map[uint8]interface{}:
|
||||
*v = nil
|
||||
case *map[uint8]string:
|
||||
*v = nil
|
||||
case *map[uint8][]byte:
|
||||
*v = nil
|
||||
case *map[uint8]uint8:
|
||||
*v = nil
|
||||
case *map[uint8]uint64:
|
||||
*v = nil
|
||||
case *map[uint8]int:
|
||||
*v = nil
|
||||
case *map[uint8]int32:
|
||||
*v = nil
|
||||
case *map[uint8]float64:
|
||||
*v = nil
|
||||
case *map[uint8]bool:
|
||||
*v = nil
|
||||
case *map[uint64]interface{}:
|
||||
*v = nil
|
||||
case *map[uint64]string:
|
||||
*v = nil
|
||||
case *map[uint64][]byte:
|
||||
*v = nil
|
||||
case *map[uint64]uint8:
|
||||
*v = nil
|
||||
case *map[uint64]uint64:
|
||||
*v = nil
|
||||
case *map[uint64]int:
|
||||
*v = nil
|
||||
case *map[uint64]int32:
|
||||
*v = nil
|
||||
case *map[uint64]float64:
|
||||
*v = nil
|
||||
case *map[uint64]bool:
|
||||
*v = nil
|
||||
case *map[int]interface{}:
|
||||
*v = nil
|
||||
case *map[int]string:
|
||||
*v = nil
|
||||
case *map[int][]byte:
|
||||
*v = nil
|
||||
case *map[int]uint8:
|
||||
*v = nil
|
||||
case *map[int]uint64:
|
||||
*v = nil
|
||||
case *map[int]int:
|
||||
*v = nil
|
||||
case *map[int]int32:
|
||||
*v = nil
|
||||
case *map[int]float64:
|
||||
*v = nil
|
||||
case *map[int]bool:
|
||||
*v = nil
|
||||
case *map[int32]interface{}:
|
||||
*v = nil
|
||||
case *map[int32]string:
|
||||
*v = nil
|
||||
case *map[int32][]byte:
|
||||
*v = nil
|
||||
case *map[int32]uint8:
|
||||
*v = nil
|
||||
case *map[int32]uint64:
|
||||
*v = nil
|
||||
case *map[int32]int:
|
||||
*v = nil
|
||||
case *map[int32]int32:
|
||||
*v = nil
|
||||
case *map[int32]float64:
|
||||
*v = nil
|
||||
case *map[int32]bool:
|
||||
*v = nil
|
||||
|
||||
default:
|
||||
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
6259
vendor/github.com/ugorji/go/codec/base.fastpath.notmono.generated.go
generated
vendored
Normal file
6259
vendor/github.com/ugorji/go/codec/base.fastpath.notmono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
34
vendor/github.com/ugorji/go/codec/base.notfastpath.go
generated
vendored
Normal file
34
vendor/github.com/ugorji/go/codec/base.notfastpath.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
//go:build notfastpath || codec.notfastpath
|
||||
|
||||
package codec
|
||||
|
||||
import "reflect"
|
||||
|
||||
const fastpathEnabled = false
|
||||
|
||||
// The generated fast-path code is very large, and adds a few seconds to the build time.
|
||||
// This causes test execution, execution of small tools which use codec, etc
|
||||
// to take a long time.
|
||||
//
|
||||
// To mitigate, we now support the notfastpath tag.
|
||||
// This tag disables fastpath during build, allowing for faster build, test execution,
|
||||
// short-program runs, etc.
|
||||
|
||||
// func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false }
|
||||
// func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false }
|
||||
|
||||
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false }
|
||||
|
||||
func fastpathAvIndex(rtid uintptr) (uint, bool) { return 0, false }
|
||||
|
||||
type fastpathRtRtid struct {
|
||||
rtid uintptr
|
||||
rt reflect.Type
|
||||
}
|
||||
|
||||
type fastpathARtRtid [0]fastpathRtRtid
|
||||
|
||||
var fastpathAvRtRtid fastpathARtRtid
|
||||
26
vendor/github.com/ugorji/go/codec/base.notfastpath.notmono.go
generated
vendored
Normal file
26
vendor/github.com/ugorji/go/codec/base.notfastpath.notmono.go
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
//go:build notfastpath || (codec.notfastpath && (notmono || codec.notmono))
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import "reflect"
|
||||
|
||||
// type fastpathT struct{}
|
||||
type fastpathE[T encDriver] struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoder[T], *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathD[T decDriver] struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoder[T], *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEs[T encDriver] [0]fastpathE[T]
|
||||
type fastpathDs[T decDriver] [0]fastpathD[T]
|
||||
|
||||
func (helperEncDriver[T]) fastpathEncodeTypeSwitch(iv interface{}, e *encoder[T]) bool { return false }
|
||||
func (helperDecDriver[T]) fastpathDecodeTypeSwitch(iv interface{}, d *decoder[T]) bool { return false }
|
||||
|
||||
func (helperEncDriver[T]) fastpathEList() (v *fastpathEs[T]) { return }
|
||||
func (helperDecDriver[T]) fastpathDList() (v *fastpathDs[T]) { return }
|
||||
194
vendor/github.com/ugorji/go/codec/binc.base.go
generated
vendored
Normal file
194
vendor/github.com/ugorji/go/codec/binc.base.go
generated
vendored
Normal file
@@ -0,0 +1,194 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Symbol management:
|
||||
// - symbols are stored in a symbol map during encoding and decoding.
|
||||
// - the symbols persist until the (En|De)coder ResetXXX method is called.
|
||||
|
||||
const bincDoPrune = true
|
||||
|
||||
// vd as low 4 bits (there are 16 slots)
|
||||
const (
|
||||
bincVdSpecial byte = iota
|
||||
bincVdPosInt
|
||||
bincVdNegInt
|
||||
bincVdFloat
|
||||
|
||||
bincVdString
|
||||
bincVdByteArray
|
||||
bincVdArray
|
||||
bincVdMap
|
||||
|
||||
bincVdTimestamp
|
||||
bincVdSmallInt
|
||||
_ // bincVdUnicodeOther
|
||||
bincVdSymbol
|
||||
|
||||
_ // bincVdDecimal
|
||||
_ // open slot
|
||||
_ // open slot
|
||||
bincVdCustomExt = 0x0f
|
||||
)
|
||||
|
||||
const (
|
||||
bincSpNil byte = iota
|
||||
bincSpFalse
|
||||
bincSpTrue
|
||||
bincSpNan
|
||||
bincSpPosInf
|
||||
bincSpNegInf
|
||||
bincSpZeroFloat
|
||||
bincSpZero
|
||||
bincSpNegOne
|
||||
)
|
||||
|
||||
const (
|
||||
_ byte = iota // bincFlBin16
|
||||
bincFlBin32
|
||||
_ // bincFlBin32e
|
||||
bincFlBin64
|
||||
_ // bincFlBin64e
|
||||
// others not currently supported
|
||||
)
|
||||
|
||||
const bincBdNil = 0 // bincVdSpecial<<4 | bincSpNil // staticcheck barfs on this (SA4016)
|
||||
|
||||
var (
|
||||
bincdescSpecialVsNames = map[byte]string{
|
||||
bincSpNil: "nil",
|
||||
bincSpFalse: "false",
|
||||
bincSpTrue: "true",
|
||||
bincSpNan: "float",
|
||||
bincSpPosInf: "float",
|
||||
bincSpNegInf: "float",
|
||||
bincSpZeroFloat: "float",
|
||||
bincSpZero: "uint",
|
||||
bincSpNegOne: "int",
|
||||
}
|
||||
bincdescVdNames = map[byte]string{
|
||||
bincVdSpecial: "special",
|
||||
bincVdSmallInt: "uint",
|
||||
bincVdPosInt: "uint",
|
||||
bincVdFloat: "float",
|
||||
bincVdSymbol: "string",
|
||||
bincVdString: "string",
|
||||
bincVdByteArray: "bytes",
|
||||
bincVdTimestamp: "time",
|
||||
bincVdCustomExt: "ext",
|
||||
bincVdArray: "array",
|
||||
bincVdMap: "map",
|
||||
}
|
||||
)
|
||||
|
||||
func bincdescbd(bd byte) (s string) {
|
||||
return bincdesc(bd>>4, bd&0x0f)
|
||||
}
|
||||
|
||||
func bincdesc(vd, vs byte) (s string) {
|
||||
if vd == bincVdSpecial {
|
||||
s = bincdescSpecialVsNames[vs]
|
||||
} else {
|
||||
s = bincdescVdNames[vd]
|
||||
}
|
||||
if s == "" {
|
||||
s = "unknown"
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type bincEncState struct {
|
||||
m map[string]uint16 // symbols
|
||||
}
|
||||
|
||||
// func (e *bincEncState) restoreState(v interface{}) { e.m = v.(map[string]uint16) }
|
||||
// func (e bincEncState) captureState() interface{} { return e.m }
|
||||
// func (e *bincEncState) resetState() { e.m = nil }
|
||||
// func (e *bincEncState) reset() { e.resetState() }
|
||||
func (e *bincEncState) reset() { e.m = nil }
|
||||
|
||||
type bincDecState struct {
|
||||
bdRead bool
|
||||
bd byte
|
||||
vd byte
|
||||
vs byte
|
||||
|
||||
_ bool
|
||||
// MARKER: consider using binary search here instead of a map (ie bincDecSymbol)
|
||||
s map[uint16][]byte
|
||||
}
|
||||
|
||||
// func (x bincDecState) captureState() interface{} { return x }
|
||||
// func (x *bincDecState) resetState() { *x = bincDecState{} }
|
||||
// func (x *bincDecState) reset() { x.resetState() }
|
||||
// func (x *bincDecState) restoreState(v interface{}) { *x = v.(bincDecState) }
|
||||
func (x *bincDecState) reset() { *x = bincDecState{} }
|
||||
|
||||
//------------------------------------
|
||||
|
||||
// BincHandle is a Handle for the Binc Schema-Free Encoding Format
|
||||
// defined at https://github.com/ugorji/binc .
|
||||
//
|
||||
// BincHandle currently supports all Binc features with the following EXCEPTIONS:
|
||||
// - only integers up to 64 bits of precision are supported.
|
||||
// big integers are unsupported.
|
||||
// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types).
|
||||
// extended precision and decimal IEEE 754 floats are unsupported.
|
||||
// - Only UTF-8 strings supported.
|
||||
// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
|
||||
//
|
||||
// Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
|
||||
type BincHandle struct {
|
||||
binaryEncodingType
|
||||
notJsonType
|
||||
// noElemSeparators
|
||||
BasicHandle
|
||||
|
||||
// AsSymbols defines what should be encoded as symbols.
|
||||
//
|
||||
// Encoding as symbols can reduce the encoded size significantly.
|
||||
//
|
||||
// However, during decoding, each string to be encoded as a symbol must
|
||||
// be checked to see if it has been seen before. Consequently, encoding time
|
||||
// will increase if using symbols, because string comparisons has a clear cost.
|
||||
//
|
||||
// Values:
|
||||
// - 0: default: library uses best judgement
|
||||
// - 1: use symbols
|
||||
// - 2: do not use symbols
|
||||
AsSymbols uint8
|
||||
|
||||
// AsSymbols: may later on introduce more options ...
|
||||
// - m: map keys
|
||||
// - s: struct fields
|
||||
// - n: none
|
||||
// - a: all: same as m, s, ...
|
||||
|
||||
// _ [7]uint64 // padding (cache-aligned)
|
||||
}
|
||||
|
||||
// Name returns the name of the handle: binc
|
||||
func (h *BincHandle) Name() string { return "binc" }
|
||||
|
||||
func (h *BincHandle) desc(bd byte) string { return bincdesc(bd>>4, bd&0x0f) }
|
||||
|
||||
// SetBytesExt sets an extension
|
||||
func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
|
||||
return h.SetExt(rt, tag, makeExt(ext))
|
||||
}
|
||||
|
||||
// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
|
||||
|
||||
func bincEncodeTime(t time.Time) []byte {
|
||||
return customEncodeTime(t)
|
||||
}
|
||||
|
||||
func bincDecodeTime(bs []byte) (tt time.Time, err error) {
|
||||
return customDecodeTime(bs)
|
||||
}
|
||||
12482
vendor/github.com/ugorji/go/codec/binc.fastpath.mono.generated.go
generated
vendored
Normal file
12482
vendor/github.com/ugorji/go/codec/binc.fastpath.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1066
vendor/github.com/ugorji/go/codec/binc.go
generated
vendored
Normal file
1066
vendor/github.com/ugorji/go/codec/binc.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
8158
vendor/github.com/ugorji/go/codec/binc.mono.generated.go
generated
vendored
Normal file
8158
vendor/github.com/ugorji/go/codec/binc.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
52
vendor/github.com/ugorji/go/codec/binc.notfastpath.mono.generated.go
generated
vendored
Normal file
52
vendor/github.com/ugorji/go/codec/binc.notfastpath.mono.generated.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath)
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type fastpathEBincBytes struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderBincBytes, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDBincBytes struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderBincBytes, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsBincBytes [0]fastpathEBincBytes
|
||||
type fastpathDsBincBytes [0]fastpathDBincBytes
|
||||
|
||||
func (helperEncDriverBincBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderBincBytes) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverBincBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderBincBytes) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverBincBytes) fastpathEList() (v *fastpathEsBincBytes) { return }
|
||||
func (helperDecDriverBincBytes) fastpathDList() (v *fastpathDsBincBytes) { return }
|
||||
|
||||
type fastpathEBincIO struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderBincIO, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDBincIO struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderBincIO, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsBincIO [0]fastpathEBincIO
|
||||
type fastpathDsBincIO [0]fastpathDBincIO
|
||||
|
||||
func (helperEncDriverBincIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderBincIO) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverBincIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderBincIO) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverBincIO) fastpathEList() (v *fastpathEsBincIO) { return }
|
||||
func (helperDecDriverBincIO) fastpathDList() (v *fastpathDsBincIO) { return }
|
||||
219
vendor/github.com/ugorji/go/codec/build.sh
generated
vendored
Normal file
219
vendor/github.com/ugorji/go/codec/build.sh
generated
vendored
Normal file
@@ -0,0 +1,219 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Build and Run the different test permutations.
|
||||
# This helps validate that nothing gets broken.
|
||||
|
||||
_build_proceed() {
|
||||
# return success (0) if we should, and 1 (fail) if not
|
||||
if [[ "${zforce}" ]]; then return 0; fi
|
||||
for a in "fastpath.generated.go" "json.mono.generated.go"; do
|
||||
if [[ ! -e "$a" ]]; then return 0; fi
|
||||
for b in `ls -1 *.go.tmpl gen.go gen_mono.go values_test.go`; do
|
||||
if [[ "$a" -ot "$b" ]]; then return 0; fi
|
||||
done
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
# _build generates fastpath.go
|
||||
_build() {
|
||||
# if ! [[ "${zforce}" || $(_ng "fastpath.generated.go") || $(_ng "json.mono.generated.go") ]]; then return 0; fi
|
||||
_build_proceed
|
||||
if [ $? -eq 1 ]; then return 0; fi
|
||||
if [ "${zbak}" ]; then
|
||||
_zts=`date '+%m%d%Y_%H%M%S'`
|
||||
_gg=".generated.go"
|
||||
[ -e "fastpath${_gg}" ] && mv fastpath${_gg} fastpath${_gg}__${_zts}.bak
|
||||
[ -e "gen${_gg}" ] && mv gen${_gg} gen${_gg}__${_zts}.bak
|
||||
fi
|
||||
|
||||
rm -f fast*path.generated.go *mono*generated.go *_generated_test.go gen-from-tmpl*.generated.go
|
||||
|
||||
local btags="codec.build codec.notmono codec.safe codec.notfastpath"
|
||||
|
||||
cat > gen-from-tmpl.codec.generated.go <<EOF
|
||||
package codec
|
||||
func GenTmplRun2Go(in, out string) { genTmplRun2Go(in, out) }
|
||||
func GenMonoAll() { genMonoAll() }
|
||||
EOF
|
||||
|
||||
cat > gen-from-tmpl.generated.go <<EOF
|
||||
//go:build ignore
|
||||
package main
|
||||
import "${zpkg}"
|
||||
func main() {
|
||||
codec.GenTmplRun2Go("fastpath.go.tmpl", "base.fastpath.generated.go")
|
||||
codec.GenTmplRun2Go("fastpath.notmono.go.tmpl", "base.fastpath.notmono.generated.go")
|
||||
codec.GenTmplRun2Go("mammoth_test.go.tmpl", "mammoth_generated_test.go")
|
||||
codec.GenMonoAll()
|
||||
}
|
||||
EOF
|
||||
|
||||
# explicitly return 0 if this passes, else return 1
|
||||
${gocmd} run -tags "$btags" gen-from-tmpl.generated.go || return 1
|
||||
rm -f gen-from-tmpl*.generated.go
|
||||
return 0
|
||||
}
|
||||
|
||||
_prebuild() {
|
||||
local d="$PWD"
|
||||
local zfin="test_values.generated.go"
|
||||
local zfin2="test_values_flex.generated.go"
|
||||
local zpkg="github.com/ugorji/go/codec"
|
||||
local returncode=1
|
||||
|
||||
# zpkg=${d##*/src/}
|
||||
# zgobase=${d%%/src/*}
|
||||
# rm -f *_generated_test.go
|
||||
# if [[ $zforce ]]; then ${gocmd} install ${zargs[*]} .; fi &&
|
||||
true &&
|
||||
_build &&
|
||||
cp $d/values_test.go $d/$zfin &&
|
||||
cp $d/values_flex_test.go $d/$zfin2 &&
|
||||
if [[ "$(type -t _codegenerators_external )" = "function" ]]; then _codegenerators_external ; fi &&
|
||||
returncode=0 &&
|
||||
echo "prebuild done successfully"
|
||||
rm -f $d/$zfin $d/$zfin2
|
||||
return $returncode
|
||||
# unset zfin zfin2 zpkg
|
||||
}
|
||||
|
||||
_make() {
|
||||
_prebuild && ${gocmd} install ${zargs[*]} .
|
||||
}
|
||||
|
||||
_clean() {
|
||||
rm -f \
|
||||
gen-from-tmpl.*generated.go \
|
||||
test_values.generated.go test_values_flex.generated.go
|
||||
}
|
||||
|
||||
_tests_run_one() {
|
||||
local tt="alltests $i"
|
||||
local rr="TestCodecSuite"
|
||||
if [[ "x$i" == "xx" ]]; then tt="codec.notmono codec.notfastpath x"; rr='Test.*X$'; fi
|
||||
local g=( ${zargs[*]} ${ztestargs[*]} -count $nc -cpu $cpus -vet "$vet" -tags "$tt" -run "$rr" )
|
||||
[[ "$zcover" == "1" ]] && g+=( -cover )
|
||||
# g+=( -ti "$k" )
|
||||
g+=( -tdiff )
|
||||
[[ "$zcover" == "1" ]] && g+=( -test.gocoverdir $covdir )
|
||||
local -
|
||||
set -x
|
||||
${gocmd} test "${g[@]}" &
|
||||
}
|
||||
|
||||
_tests() {
|
||||
local vet="" # TODO: make it off
|
||||
local gover=$( ${gocmd} version | cut -f 3 -d ' ' )
|
||||
# go tool cover is not supported for gccgo, gollvm, other non-standard go compilers
|
||||
[[ $( ${gocmd} version ) == *"gccgo"* ]] && zcover=0
|
||||
[[ $( ${gocmd} version ) == *"gollvm"* ]] && zcover=0
|
||||
case $gover in
|
||||
go1.2[0-9]*|go2.*|devel*) true ;;
|
||||
*) return 1
|
||||
esac
|
||||
# we test the following permutations wnich all execute different code paths as below.
|
||||
echo "TestCodecSuite: (fastpath/unsafe), (!fastpath/unsafe), (fastpath/!unsafe), (!fastpath/!unsafe)"
|
||||
local nc=2 # count
|
||||
local cpus="1,$(nproc)"
|
||||
# if using the race detector, then set nc to
|
||||
if [[ " ${zargs[@]} " =~ "-race" ]]; then
|
||||
cpus="$(nproc)"
|
||||
fi
|
||||
local covdir=""
|
||||
local a=( "" "codec.safe" "codec.notfastpath" "codec.safe codec.notfastpath"
|
||||
"codec.notmono" "codec.notmono codec.safe"
|
||||
"codec.notmono codec.notfastpath" "codec.notmono codec.safe codec.notfastpath" )
|
||||
[[ "$zextra" == "1" ]] && a+=( "x" )
|
||||
[[ "$zcover" == "1" ]] && covdir=`mktemp -d`
|
||||
${gocmd} vet -printfuncs "errorf" "$@" || return 1
|
||||
for i in "${a[@]}"; do
|
||||
local j=${i:-default}; j="${j// /-}"; j="${j//codec./}"
|
||||
[[ "$zwait" == "1" ]] && echo ">>>> TAGS: 'alltests $i'; RUN: 'TestCodecSuite'"
|
||||
_tests_run_one
|
||||
[[ "$zwait" == "1" ]] && wait
|
||||
# if [[ "$?" != 0 ]]; then return 1; fi
|
||||
done
|
||||
wait
|
||||
[[ "$zcover" == "1" ]] &&
|
||||
echo "go tool covdata output" &&
|
||||
${gocmd} tool covdata percent -i $covdir &&
|
||||
${gocmd} tool covdata textfmt -i $covdir -o __cov.out &&
|
||||
${gocmd} tool cover -html=__cov.out
|
||||
}
|
||||
|
||||
_usage() {
|
||||
# hidden args:
|
||||
# -pf [p=prebuild (f=force)]
|
||||
|
||||
cat <<EOF
|
||||
primary usage: $0
|
||||
-t[esow] -> t=tests [e=extra, s=short, o=cover, w=wait]
|
||||
-[md] -> [m=make, d=race detector]
|
||||
-v -> v=verbose (more v's to increase verbose level)
|
||||
EOF
|
||||
if [[ "$(type -t _usage_run)" = "function" ]]; then _usage_run ; fi
|
||||
}
|
||||
|
||||
_main() {
|
||||
if [[ -z "$1" ]]; then _usage; return 1; fi
|
||||
local x # determines the main action to run in this build
|
||||
local zforce # force
|
||||
local zcover # generate cover profile and show in browser when done
|
||||
local zwait # run tests in sequence, not parallel ie wait for one to finish before starting another
|
||||
local zextra # means run extra (python based tests, etc) during testing
|
||||
|
||||
local ztestargs=()
|
||||
local zargs=()
|
||||
local zverbose=()
|
||||
local zbenchflags=""
|
||||
|
||||
local gocmd=${MYGOCMD:-go}
|
||||
|
||||
OPTIND=1
|
||||
while getopts ":cetmnrgpfvldsowikxyz" flag
|
||||
do
|
||||
case "x$flag" in
|
||||
'xw') zwait=1 ;;
|
||||
'xv') zverbose+=(1) ;;
|
||||
'xo') zcover=1 ;;
|
||||
'xe') zextra=1 ;;
|
||||
'xf') zforce=1 ;;
|
||||
'xs') ztestargs+=("-short") ;;
|
||||
'xl') zargs+=("-gcflags"); zargs+=("-l=4") ;;
|
||||
'xn') zargs+=("-gcflags"); zargs+=("-m=2") ;;
|
||||
'xd') zargs+=("-race") ;;
|
||||
# 'xi') x='i'; zbenchflags=${OPTARG} ;;
|
||||
x\?) _usage; return 1 ;;
|
||||
*) x=$flag ;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND-1))
|
||||
# echo ">>>> _main: extra args: $@"
|
||||
case "x$x" in
|
||||
'xt') _tests "$@" ;;
|
||||
'xm') _make "$@" ;;
|
||||
'xr') _release "$@" ;;
|
||||
'xg') _go ;;
|
||||
'xp') _prebuild "$@" ;;
|
||||
'xc') _clean "$@" ;;
|
||||
esac
|
||||
|
||||
# handle from local run.sh
|
||||
case "x$x" in
|
||||
'xi') _check_inlining_one "$@" ;;
|
||||
'xk') _go_compiler_validation_suite ;;
|
||||
'xx') _analyze_checks "$@" ;;
|
||||
'xy') _analyze_debug_types "$@" ;;
|
||||
'xz') _analyze_do_inlining_and_more "$@" ;;
|
||||
esac
|
||||
# unset zforce zargs zbenchflags
|
||||
}
|
||||
|
||||
[ "." = `dirname $0` ] && _main "$@"
|
||||
|
||||
# _xtrace() {
|
||||
# local -
|
||||
# set -x
|
||||
# "${@}"
|
||||
# }
|
||||
160
vendor/github.com/ugorji/go/codec/cbor.base.go
generated
vendored
Normal file
160
vendor/github.com/ugorji/go/codec/cbor.base.go
generated
vendored
Normal file
@@ -0,0 +1,160 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// major
|
||||
const (
|
||||
cborMajorUint byte = iota
|
||||
cborMajorNegInt
|
||||
cborMajorBytes
|
||||
cborMajorString
|
||||
cborMajorArray
|
||||
cborMajorMap
|
||||
cborMajorTag
|
||||
cborMajorSimpleOrFloat
|
||||
)
|
||||
|
||||
// simple
|
||||
const (
|
||||
cborBdFalse byte = 0xf4 + iota
|
||||
cborBdTrue
|
||||
cborBdNil
|
||||
cborBdUndefined
|
||||
cborBdExt
|
||||
cborBdFloat16
|
||||
cborBdFloat32
|
||||
cborBdFloat64
|
||||
)
|
||||
|
||||
// indefinite
|
||||
const (
|
||||
cborBdIndefiniteBytes byte = 0x5f
|
||||
cborBdIndefiniteString byte = 0x7f
|
||||
cborBdIndefiniteArray byte = 0x9f
|
||||
cborBdIndefiniteMap byte = 0xbf
|
||||
cborBdBreak byte = 0xff
|
||||
)
|
||||
|
||||
// These define some in-stream descriptors for
|
||||
// manual encoding e.g. when doing explicit indefinite-length
|
||||
const (
|
||||
CborStreamBytes byte = 0x5f
|
||||
CborStreamString byte = 0x7f
|
||||
CborStreamArray byte = 0x9f
|
||||
CborStreamMap byte = 0xbf
|
||||
CborStreamBreak byte = 0xff
|
||||
)
|
||||
|
||||
// base values
|
||||
const (
|
||||
cborBaseUint byte = 0x00
|
||||
cborBaseNegInt byte = 0x20
|
||||
cborBaseBytes byte = 0x40
|
||||
cborBaseString byte = 0x60
|
||||
cborBaseArray byte = 0x80
|
||||
cborBaseMap byte = 0xa0
|
||||
cborBaseTag byte = 0xc0
|
||||
cborBaseSimple byte = 0xe0
|
||||
)
|
||||
|
||||
// const (
|
||||
// cborSelfDesrTag byte = 0xd9
|
||||
// cborSelfDesrTag2 byte = 0xd9
|
||||
// cborSelfDesrTag3 byte = 0xf7
|
||||
// )
|
||||
|
||||
var (
|
||||
cbordescSimpleNames = map[byte]string{
|
||||
cborBdNil: "nil",
|
||||
cborBdFalse: "false",
|
||||
cborBdTrue: "true",
|
||||
cborBdFloat16: "float",
|
||||
cborBdFloat32: "float",
|
||||
cborBdFloat64: "float",
|
||||
cborBdBreak: "break",
|
||||
}
|
||||
cbordescIndefNames = map[byte]string{
|
||||
cborBdIndefiniteBytes: "bytes*",
|
||||
cborBdIndefiniteString: "string*",
|
||||
cborBdIndefiniteArray: "array*",
|
||||
cborBdIndefiniteMap: "map*",
|
||||
}
|
||||
cbordescMajorNames = map[byte]string{
|
||||
cborMajorUint: "(u)int",
|
||||
cborMajorNegInt: "int",
|
||||
cborMajorBytes: "bytes",
|
||||
cborMajorString: "string",
|
||||
cborMajorArray: "array",
|
||||
cborMajorMap: "map",
|
||||
cborMajorTag: "tag",
|
||||
cborMajorSimpleOrFloat: "simple",
|
||||
}
|
||||
)
|
||||
|
||||
func cbordesc(bd byte) (s string) {
|
||||
bm := bd >> 5
|
||||
if bm == cborMajorSimpleOrFloat {
|
||||
s = cbordescSimpleNames[bd]
|
||||
} else {
|
||||
s = cbordescMajorNames[bm]
|
||||
if s == "" {
|
||||
s = cbordescIndefNames[bd]
|
||||
}
|
||||
}
|
||||
if s == "" {
|
||||
s = "unknown"
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// -------------------------
|
||||
|
||||
// CborHandle is a Handle for the CBOR encoding format,
|
||||
// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io .
|
||||
//
|
||||
// CBOR is comprehensively supported, including support for:
|
||||
// - indefinite-length arrays/maps/bytes/strings
|
||||
// - (extension) tags in range 0..0xffff (0 .. 65535)
|
||||
// - half, single and double-precision floats
|
||||
// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers)
|
||||
// - nil, true, false, ...
|
||||
// - arrays and maps, bytes and text strings
|
||||
//
|
||||
// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box.
|
||||
// Users can implement them as needed (using SetExt), including spec-documented ones:
|
||||
// - timestamp, BigNum, BigFloat, Decimals,
|
||||
// - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc.
|
||||
type CborHandle struct {
|
||||
binaryEncodingType
|
||||
notJsonType
|
||||
// noElemSeparators
|
||||
BasicHandle
|
||||
|
||||
// IndefiniteLength=true, means that we encode using indefinitelength
|
||||
IndefiniteLength bool
|
||||
|
||||
// TimeRFC3339 says to encode time.Time using RFC3339 format.
|
||||
// If unset, we encode time.Time using seconds past epoch.
|
||||
TimeRFC3339 bool
|
||||
|
||||
// SkipUnexpectedTags says to skip over any tags for which extensions are
|
||||
// not defined. This is in keeping with the cbor spec on "Optional Tagging of Items".
|
||||
//
|
||||
// Furthermore, this allows the skipping over of the Self Describing Tag 0xd9d9f7.
|
||||
SkipUnexpectedTags bool
|
||||
}
|
||||
|
||||
// Name returns the name of the handle: cbor
|
||||
func (h *CborHandle) Name() string { return "cbor" }
|
||||
|
||||
func (h *CborHandle) desc(bd byte) string { return cbordesc(bd) }
|
||||
|
||||
// SetInterfaceExt sets an extension
|
||||
func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
|
||||
return h.SetExt(rt, tag, makeExt(ext))
|
||||
}
|
||||
12482
vendor/github.com/ugorji/go/codec/cbor.fastpath.mono.generated.go
generated
vendored
Normal file
12482
vendor/github.com/ugorji/go/codec/cbor.fastpath.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
995
vendor/github.com/ugorji/go/codec/cbor.go
generated
vendored
Normal file
995
vendor/github.com/ugorji/go/codec/cbor.go
generated
vendored
Normal file
@@ -0,0 +1,995 @@
|
||||
//go:build notmono || codec.notmono
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"io"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// -------------------
|
||||
|
||||
type cborEncDriver[T encWriter] struct {
|
||||
noBuiltInTypes
|
||||
encDriverNoState
|
||||
encDriverNoopContainerWriter
|
||||
encDriverContainerNoTrackerT
|
||||
|
||||
h *CborHandle
|
||||
e *encoderBase
|
||||
w T
|
||||
enc encoderI
|
||||
|
||||
// scratch buffer for: encode time, numbers, etc
|
||||
//
|
||||
// RFC3339Nano uses 35 chars: 2006-01-02T15:04:05.999999999Z07:00
|
||||
b [40]byte
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) EncodeNil() {
|
||||
e.w.writen1(cborBdNil)
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) EncodeBool(b bool) {
|
||||
if b {
|
||||
e.w.writen1(cborBdTrue)
|
||||
} else {
|
||||
e.w.writen1(cborBdFalse)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) EncodeFloat32(f float32) {
|
||||
b := math.Float32bits(f)
|
||||
if e.h.OptimumSize {
|
||||
if h := floatToHalfFloatBits(b); halfFloatToFloatBits(h) == b {
|
||||
e.w.writen1(cborBdFloat16)
|
||||
e.w.writen2(bigen.PutUint16(h))
|
||||
return
|
||||
}
|
||||
}
|
||||
e.w.writen1(cborBdFloat32)
|
||||
e.w.writen4(bigen.PutUint32(b))
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) EncodeFloat64(f float64) {
|
||||
if e.h.OptimumSize {
|
||||
if f32 := float32(f); float64(f32) == f {
|
||||
e.EncodeFloat32(f32)
|
||||
return
|
||||
}
|
||||
}
|
||||
e.w.writen1(cborBdFloat64)
|
||||
e.w.writen8(bigen.PutUint64(math.Float64bits(f)))
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) encUint(v uint64, bd byte) {
|
||||
if v <= 0x17 {
|
||||
e.w.writen1(byte(v) + bd)
|
||||
} else if v <= math.MaxUint8 {
|
||||
e.w.writen2(bd+0x18, uint8(v))
|
||||
} else if v <= math.MaxUint16 {
|
||||
e.w.writen1(bd + 0x19)
|
||||
e.w.writen2(bigen.PutUint16(uint16(v)))
|
||||
} else if v <= math.MaxUint32 {
|
||||
e.w.writen1(bd + 0x1a)
|
||||
e.w.writen4(bigen.PutUint32(uint32(v)))
|
||||
} else { // if v <= math.MaxUint64 {
|
||||
e.w.writen1(bd + 0x1b)
|
||||
e.w.writen8(bigen.PutUint64(v))
|
||||
}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) EncodeInt(v int64) {
|
||||
if v < 0 {
|
||||
e.encUint(uint64(-1-v), cborBaseNegInt)
|
||||
} else {
|
||||
e.encUint(uint64(v), cborBaseUint)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) EncodeUint(v uint64) {
|
||||
e.encUint(v, cborBaseUint)
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) encLen(bd byte, length int) {
|
||||
e.encUint(uint64(length), bd)
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) EncodeTime(t time.Time) {
|
||||
if t.IsZero() {
|
||||
e.EncodeNil()
|
||||
} else if e.h.TimeRFC3339 {
|
||||
e.encUint(0, cborBaseTag)
|
||||
e.encStringBytesS(cborBaseString, stringView(t.AppendFormat(e.b[:0], time.RFC3339Nano)))
|
||||
} else {
|
||||
e.encUint(1, cborBaseTag)
|
||||
t = t.UTC().Round(time.Microsecond)
|
||||
sec, nsec := t.Unix(), uint64(t.Nanosecond())
|
||||
if nsec == 0 {
|
||||
e.EncodeInt(sec)
|
||||
} else {
|
||||
e.EncodeFloat64(float64(sec) + float64(nsec)/1e9)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) EncodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
|
||||
e.encUint(uint64(xtag), cborBaseTag)
|
||||
if ext == SelfExt {
|
||||
e.enc.encodeAs(rv, basetype, false)
|
||||
} else if v := ext.ConvertExt(rv); v == nil {
|
||||
e.writeNilBytes()
|
||||
} else {
|
||||
e.enc.encodeI(v)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) EncodeRawExt(re *RawExt) {
|
||||
e.encUint(uint64(re.Tag), cborBaseTag)
|
||||
if re.Data != nil {
|
||||
e.w.writeb(re.Data)
|
||||
} else if re.Value != nil {
|
||||
e.enc.encodeI(re.Value)
|
||||
} else {
|
||||
e.EncodeNil()
|
||||
}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) WriteArrayEmpty() {
|
||||
if e.h.IndefiniteLength {
|
||||
e.w.writen2(cborBdIndefiniteArray, cborBdBreak)
|
||||
} else {
|
||||
e.w.writen1(cborBaseArray)
|
||||
// e.encLen(cborBaseArray, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) WriteMapEmpty() {
|
||||
if e.h.IndefiniteLength {
|
||||
e.w.writen2(cborBdIndefiniteMap, cborBdBreak)
|
||||
} else {
|
||||
e.w.writen1(cborBaseMap)
|
||||
// e.encLen(cborBaseMap, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) WriteArrayStart(length int) {
|
||||
if e.h.IndefiniteLength {
|
||||
e.w.writen1(cborBdIndefiniteArray)
|
||||
} else {
|
||||
e.encLen(cborBaseArray, length)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) WriteMapStart(length int) {
|
||||
if e.h.IndefiniteLength {
|
||||
e.w.writen1(cborBdIndefiniteMap)
|
||||
} else {
|
||||
e.encLen(cborBaseMap, length)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) WriteMapEnd() {
|
||||
if e.h.IndefiniteLength {
|
||||
e.w.writen1(cborBdBreak)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) WriteArrayEnd() {
|
||||
if e.h.IndefiniteLength {
|
||||
e.w.writen1(cborBdBreak)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) EncodeString(v string) {
|
||||
bb := cborBaseString
|
||||
if e.h.StringToRaw {
|
||||
bb = cborBaseBytes
|
||||
}
|
||||
e.encStringBytesS(bb, v)
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) }
|
||||
|
||||
func (e *cborEncDriver[T]) EncodeStringBytesRaw(v []byte) {
|
||||
e.encStringBytesS(cborBaseBytes, stringView(v))
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) encStringBytesS(bb byte, v string) {
|
||||
if e.h.IndefiniteLength {
|
||||
if bb == cborBaseBytes {
|
||||
e.w.writen1(cborBdIndefiniteBytes)
|
||||
} else {
|
||||
e.w.writen1(cborBdIndefiniteString)
|
||||
}
|
||||
vlen := uint(len(v))
|
||||
n := max(4, min(vlen/4, 1024))
|
||||
for i := uint(0); i < vlen; {
|
||||
i2 := i + n
|
||||
if i2 >= vlen {
|
||||
i2 = vlen
|
||||
}
|
||||
v2 := v[i:i2]
|
||||
e.encLen(bb, len(v2))
|
||||
e.w.writestr(v2)
|
||||
i = i2
|
||||
}
|
||||
e.w.writen1(cborBdBreak)
|
||||
} else {
|
||||
e.encLen(bb, len(v))
|
||||
e.w.writestr(v)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) EncodeBytes(v []byte) {
|
||||
if v == nil {
|
||||
e.writeNilBytes()
|
||||
return
|
||||
}
|
||||
e.EncodeStringBytesRaw(v)
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) writeNilOr(v byte) {
|
||||
if !e.h.NilCollectionToZeroLength {
|
||||
v = cborBdNil
|
||||
}
|
||||
e.w.writen1(v)
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) writeNilArray() {
|
||||
e.writeNilOr(cborBaseArray)
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) writeNilMap() {
|
||||
e.writeNilOr(cborBaseMap)
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) writeNilBytes() {
|
||||
e.writeNilOr(cborBaseBytes)
|
||||
}
|
||||
|
||||
// ----------------------
|
||||
|
||||
type cborDecDriver[T decReader] struct {
|
||||
decDriverNoopContainerReader
|
||||
// decDriverNoopNumberHelper
|
||||
noBuiltInTypes
|
||||
|
||||
h *CborHandle
|
||||
d *decoderBase
|
||||
r T
|
||||
dec decoderI
|
||||
bdAndBdread
|
||||
// st bool // skip tags
|
||||
// bytes bool
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) readNextBd() {
|
||||
d.bd = d.r.readn1()
|
||||
d.bdRead = true
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) advanceNil() (null bool) {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
if d.bd == cborBdNil || d.bd == cborBdUndefined {
|
||||
d.bdRead = false
|
||||
return true // null = true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) TryNil() bool {
|
||||
return d.advanceNil()
|
||||
}
|
||||
|
||||
// skipTags is called to skip any tags in the stream.
|
||||
//
|
||||
// Since any value can be tagged, then we should call skipTags
|
||||
// before any value is decoded.
|
||||
//
|
||||
// By definition, skipTags should not be called before
|
||||
// checking for break, or nil or undefined.
|
||||
func (d *cborDecDriver[T]) skipTags() {
|
||||
for d.bd>>5 == cborMajorTag {
|
||||
d.decUint()
|
||||
d.bd = d.r.readn1()
|
||||
}
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) ContainerType() (vt valueType) {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
if d.h.SkipUnexpectedTags {
|
||||
d.skipTags()
|
||||
}
|
||||
if d.bd == cborBdNil {
|
||||
d.bdRead = false // always consume nil after seeing it in container type
|
||||
return valueTypeNil
|
||||
}
|
||||
major := d.bd >> 5
|
||||
if major == cborMajorBytes {
|
||||
return valueTypeBytes
|
||||
} else if major == cborMajorString {
|
||||
return valueTypeString
|
||||
} else if major == cborMajorArray {
|
||||
return valueTypeArray
|
||||
} else if major == cborMajorMap {
|
||||
return valueTypeMap
|
||||
}
|
||||
return valueTypeUnset
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) CheckBreak() (v bool) {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
if d.bd == cborBdBreak {
|
||||
d.bdRead = false
|
||||
v = true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) decUint() (ui uint64) {
	v := d.bd & 0x1f
	if v <= 0x17 {
		ui = uint64(v)
	} else if v == 0x18 {
		ui = uint64(d.r.readn1())
	} else if v == 0x19 {
		ui = uint64(bigen.Uint16(d.r.readn2()))
	} else if v == 0x1a {
		ui = uint64(bigen.Uint32(d.r.readn4()))
	} else if v == 0x1b {
		ui = uint64(bigen.Uint64(d.r.readn8()))
	} else {
		halt.errorf("invalid descriptor decoding uint: %x/%s (%x)", d.bd, cbordesc(d.bd), v)
	}
	return
}
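
// Illustrative sketch (not part of the original file): every CBOR item begins with
// an initial byte whose top 3 bits are the major type and whose low 5 bits are the
// "additional information" that decUint above interprets: values 0..23 are the
// number itself, while 0x18..0x1b mean the number follows in 1, 2, 4 or 8 bytes.
func exampleSplitInitialByte(bd byte) (major, addl byte) {
	// e.g. bd == 0x19 is major type 0 (unsigned int) with a uint16 payload following.
	return bd >> 5, bd & 0x1f
}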
|
||||
|
||||
func (d *cborDecDriver[T]) decLen() int {
|
||||
return int(d.decUint())
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) decFloat() (f float64, ok bool) {
|
||||
ok = true
|
||||
switch d.bd {
|
||||
case cborBdFloat16:
|
||||
f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readn2()))))
|
||||
case cborBdFloat32:
|
||||
f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4())))
|
||||
case cborBdFloat64:
|
||||
f = math.Float64frombits(bigen.Uint64(d.r.readn8()))
|
||||
default:
|
||||
if d.bd>>5 == cborMajorTag {
|
||||
// extension tag for bignum/decimal
|
||||
switch d.bd & 0x1f { // tag
|
||||
case 2:
|
||||
f = d.decTagBigIntAsFloat(false)
|
||||
case 3:
|
||||
f = d.decTagBigIntAsFloat(true)
|
||||
case 4:
|
||||
f = d.decTagBigFloatAsFloat(true)
|
||||
case 5:
|
||||
f = d.decTagBigFloatAsFloat(false)
|
||||
default:
|
||||
ok = false
|
||||
}
|
||||
} else {
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
return
|
||||
}
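
// Illustrative sketch (not part of the original file): the cborBdFloat16 branch of
// decFloat widens IEEE-754 half-precision values. The half-precision bit pattern
// 0x3c00 is 1.0, so the three-byte item f9 3c 00 should decode to 1.0:
func exampleDecodeHalfFloat() (f float64, err error) {
	var h CborHandle
	err = NewDecoderBytes([]byte{0xf9, 0x3c, 0x00}, &h).Decode(&f)
	return
}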
|
||||
|
||||
func (d *cborDecDriver[T]) decInteger() (ui uint64, neg, ok bool) {
|
||||
ok = true
|
||||
switch d.bd >> 5 {
|
||||
case cborMajorUint:
|
||||
ui = d.decUint()
|
||||
case cborMajorNegInt:
|
||||
ui = d.decUint()
|
||||
neg = true
|
||||
default:
|
||||
ok = false
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) DecodeInt64() (i int64) {
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
if d.h.SkipUnexpectedTags {
|
||||
d.skipTags()
|
||||
}
|
||||
v1, v2, v3 := d.decInteger()
|
||||
i = decNegintPosintFloatNumberHelper{d}.int64(v1, v2, v3, true)
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) DecodeUint64() (ui uint64) {
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
if d.h.SkipUnexpectedTags {
|
||||
d.skipTags()
|
||||
}
|
||||
ui = decNegintPosintFloatNumberHelper{d}.uint64(d.decInteger())
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) DecodeFloat64() (f float64) {
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
if d.h.SkipUnexpectedTags {
|
||||
d.skipTags()
|
||||
}
|
||||
v1, v2 := d.decFloat()
|
||||
f = decNegintPosintFloatNumberHelper{d}.float64(v1, v2, true)
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
// bool can be decoded from bool only (single byte).
|
||||
func (d *cborDecDriver[T]) DecodeBool() (b bool) {
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
if d.h.SkipUnexpectedTags {
|
||||
d.skipTags()
|
||||
}
|
||||
if d.bd == cborBdTrue {
|
||||
b = true
|
||||
} else if d.bd == cborBdFalse {
|
||||
} else {
|
||||
halt.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd))
|
||||
}
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) ReadMapStart() (length int) {
|
||||
if d.advanceNil() {
|
||||
return containerLenNil
|
||||
}
|
||||
if d.h.SkipUnexpectedTags {
|
||||
d.skipTags()
|
||||
}
|
||||
d.bdRead = false
|
||||
if d.bd == cborBdIndefiniteMap {
|
||||
return containerLenUnknown
|
||||
}
|
||||
if d.bd>>5 != cborMajorMap {
|
||||
halt.errorf("error reading map; got major type: %x, expected %x/%s", d.bd>>5, cborMajorMap, cbordesc(d.bd))
|
||||
}
|
||||
return d.decLen()
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) ReadArrayStart() (length int) {
|
||||
if d.advanceNil() {
|
||||
return containerLenNil
|
||||
}
|
||||
if d.h.SkipUnexpectedTags {
|
||||
d.skipTags()
|
||||
}
|
||||
d.bdRead = false
|
||||
if d.bd == cborBdIndefiniteArray {
|
||||
return containerLenUnknown
|
||||
}
|
||||
if d.bd>>5 != cborMajorArray {
|
||||
halt.errorf("invalid array; got major type: %x, expect: %x/%s", d.bd>>5, cborMajorArray, cbordesc(d.bd))
|
||||
}
|
||||
return d.decLen()
|
||||
}
|
||||
|
||||
// MARKER d.d.buf is ONLY used within DecodeBytes.
|
||||
// Safe to use freely here only.
|
||||
|
||||
func (d *cborDecDriver[T]) DecodeBytes() (bs []byte, state dBytesAttachState) {
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
if d.h.SkipUnexpectedTags {
|
||||
d.skipTags()
|
||||
}
|
||||
fnEnsureNonNilBytes := func() {
|
||||
// buf is nil at first. Ensure a non-nil value is returned.
|
||||
if bs == nil {
|
||||
bs = zeroByteSlice
|
||||
state = dBytesDetach
|
||||
}
|
||||
}
|
||||
if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
|
||||
major := d.bd >> 5
|
||||
val4str := d.h.ValidateUnicode && major == cborMajorString
|
||||
bs = d.d.buf[:0]
|
||||
d.bdRead = false
|
||||
for !d.CheckBreak() {
|
||||
if d.bd>>5 != major {
|
||||
const msg = "malformed indefinite string/bytes %x (%s); " +
|
||||
"contains chunk with major type %v, expected %v"
|
||||
halt.errorf(msg, d.bd, cbordesc(d.bd), d.bd>>5, major)
|
||||
}
|
||||
n := uint(d.decLen())
|
||||
bs = append(bs, d.r.readx(n)...)
|
||||
d.bdRead = false
|
||||
if val4str && !utf8.Valid(bs[len(bs)-int(n):]) {
|
||||
const msg = "indefinite-length text string contains chunk " +
|
||||
"that is not a valid utf-8 sequence: 0x%x"
|
||||
halt.errorf(msg, bs[len(bs)-int(n):])
|
||||
}
|
||||
}
|
||||
d.bdRead = false
|
||||
d.d.buf = bs
|
||||
state = dBytesAttachBuffer
|
||||
fnEnsureNonNilBytes()
|
||||
return
|
||||
}
|
||||
if d.bd == cborBdIndefiniteArray {
|
||||
d.bdRead = false
|
||||
bs = d.d.buf[:0]
|
||||
for !d.CheckBreak() {
|
||||
bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8)))
|
||||
}
|
||||
d.d.buf = bs
|
||||
state = dBytesAttachBuffer
|
||||
fnEnsureNonNilBytes()
|
||||
return
|
||||
}
|
||||
var cond bool
|
||||
if d.bd>>5 == cborMajorArray {
|
||||
d.bdRead = false
|
||||
slen := d.decLen()
|
||||
bs, cond = usableByteSlice(d.d.buf, slen)
|
||||
for i := 0; i < len(bs); i++ {
|
||||
bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8))
|
||||
}
|
||||
for i := len(bs); i < slen; i++ {
|
||||
bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8)))
|
||||
}
|
||||
if cond {
|
||||
d.d.buf = bs
|
||||
}
|
||||
state = dBytesAttachBuffer
|
||||
fnEnsureNonNilBytes()
|
||||
return
|
||||
}
|
||||
clen := d.decLen()
|
||||
d.bdRead = false
|
||||
bs, cond = d.r.readxb(uint(clen))
|
||||
state = d.d.attachState(cond)
|
||||
return
|
||||
}
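
// Illustrative sketch (not part of the original file): as the cborMajorArray branch
// of DecodeBytes shows, a []byte destination also accepts a CBOR array of small
// unsigned integers. Here 0x83 0x01 0x02 0x03 is the array [1, 2, 3]:
func exampleBytesFromIntArray() (bs []byte, err error) {
	var h CborHandle
	err = NewDecoderBytes([]byte{0x83, 0x01, 0x02, 0x03}, &h).Decode(&bs)
	return // expected: bs == []byte{1, 2, 3}
}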
|
||||
|
||||
func (d *cborDecDriver[T]) DecodeStringAsBytes() (out []byte, state dBytesAttachState) {
|
||||
out, state = d.DecodeBytes()
|
||||
if d.h.ValidateUnicode && !utf8.Valid(out) {
|
||||
halt.errorf("DecodeStringAsBytes: invalid UTF-8: %s", out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) DecodeTime() (t time.Time) {
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
if d.bd>>5 != cborMajorTag {
|
||||
halt.errorf("error reading tag; expected major type: %x, got: %x", cborMajorTag, d.bd>>5)
|
||||
}
|
||||
xtag := d.decUint()
|
||||
d.bdRead = false
|
||||
return d.decodeTime(xtag)
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) decodeTime(xtag uint64) (t time.Time) {
	switch xtag {
	case 0:
		var err error
		t, err = time.Parse(time.RFC3339, stringView(bytesOKs(d.DecodeStringAsBytes())))
		halt.onerror(err)
	case 1:
		f1, f2 := math.Modf(d.DecodeFloat64())
		t = time.Unix(int64(f1), int64(f2*1e9))
	default:
		halt.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag)
	}
	t = t.UTC().Round(time.Microsecond)
	return
}
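
// Illustrative sketch (not part of the original file): decodeTime above handles the
// standard CBOR time tags - tag 0 (an RFC 3339 text string) and tag 1 (epoch
// seconds as an integer or float). The two bytes c1 00 are tag 1 over the unsigned
// integer 0, which should decode to the Unix epoch:
func exampleDecodeEpochTime() (t time.Time, err error) {
	var h CborHandle
	err = NewDecoderBytes([]byte{0xc1, 0x00}, &h).Decode(&t)
	return // expected: t.Equal(time.Unix(0, 0))
}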
|
||||
|
||||
func (d *cborDecDriver[T]) preDecodeExt(checkTag bool, xtag uint64) (realxtag uint64, ok bool) {
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
if d.bd>>5 != cborMajorTag {
|
||||
halt.errorf("error reading tag; expected major type: %x, got: %x", cborMajorTag, d.bd>>5)
|
||||
}
|
||||
realxtag = d.decUint()
|
||||
d.bdRead = false
|
||||
if checkTag && xtag != realxtag {
|
||||
halt.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag)
|
||||
}
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) DecodeRawExt(re *RawExt) {
|
||||
if realxtag, ok := d.preDecodeExt(false, 0); ok {
|
||||
re.Tag = realxtag
|
||||
d.dec.decode(&re.Value)
|
||||
d.bdRead = false
|
||||
}
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
|
||||
if _, ok := d.preDecodeExt(true, xtag); ok {
|
||||
if ext == SelfExt {
|
||||
d.dec.decodeAs(rv, basetype, false)
|
||||
} else {
|
||||
d.dec.interfaceExtConvertAndDecode(rv, ext)
|
||||
}
|
||||
d.bdRead = false
|
||||
}
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) decTagBigIntAsFloat(neg bool) (f float64) {
|
||||
bs, _ := d.DecodeBytes()
|
||||
bi := new(big.Int).SetBytes(bs)
|
||||
if neg { // neg big.Int
|
||||
bi0 := bi
|
||||
bi = new(big.Int).Sub(big.NewInt(-1), bi0)
|
||||
}
|
||||
f, _ = bi.Float64()
|
||||
return
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) decTagBigFloatAsFloat(decimal bool) (f float64) {
|
||||
if nn := d.r.readn1(); nn != 0x82 {
|
||||
halt.errorf("(%d) decoding decimal/big.Float: expected 2 numbers", nn)
|
||||
}
|
||||
exp := d.DecodeInt64()
|
||||
mant := d.DecodeInt64()
|
||||
if decimal { // m*(10**e)
|
||||
// MARKER: if precision/other issues crop, consider using big.Float on base 10.
|
||||
// The logic is more convoluted, which is why we leverage readFloatResult for now.
|
||||
rf := readFloatResult{exp: int8(exp)}
|
||||
if mant >= 0 {
|
||||
rf.mantissa = uint64(mant)
|
||||
} else {
|
||||
rf.neg = true
|
||||
rf.mantissa = uint64(-mant)
|
||||
}
|
||||
f, _ = parseFloat64_reader(rf)
|
||||
// f = float64(mant) * math.Pow10(exp)
|
||||
} else { // m*(2**e)
|
||||
// f = float64(mant) * math.Pow(2, exp)
|
||||
bfm := new(big.Float).SetPrec(64).SetInt64(mant)
|
||||
bf := new(big.Float).SetPrec(64).SetMantExp(bfm, int(exp))
|
||||
f, _ = bf.Float64()
|
||||
}
|
||||
return
|
||||
}
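
// Illustrative sketch (not part of the original file): tags 4 (decimal fraction) and
// 5 (bigfloat) wrap an array of [exponent, mantissa], which is why the helper above
// first expects the array(2) header 0x82. The RFC 7049 example 273.15 is tag 4 over
// [-2, 27315], i.e. the bytes c4 82 21 19 6a b3:
func exampleDecodeDecimalFraction() (f float64, err error) {
	var h CborHandle
	err = NewDecoderBytes([]byte{0xc4, 0x82, 0x21, 0x19, 0x6a, 0xb3}, &h).Decode(&f)
	return // expected: f == 273.15
}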
|
||||
|
||||
func (d *cborDecDriver[T]) DecodeNaked() {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
|
||||
n := d.d.naked()
|
||||
var decodeFurther bool
|
||||
switch d.bd >> 5 {
|
||||
case cborMajorUint:
|
||||
if d.h.SignedInteger {
|
||||
n.v = valueTypeInt
|
||||
n.i = d.DecodeInt64()
|
||||
} else {
|
||||
n.v = valueTypeUint
|
||||
n.u = d.DecodeUint64()
|
||||
}
|
||||
case cborMajorNegInt:
|
||||
n.v = valueTypeInt
|
||||
n.i = d.DecodeInt64()
|
||||
case cborMajorBytes:
|
||||
d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) //, d.h.ZeroCopy)
|
||||
case cborMajorString:
|
||||
n.v = valueTypeString
|
||||
n.s = d.d.detach2Str(d.DecodeStringAsBytes())
|
||||
case cborMajorArray:
|
||||
n.v = valueTypeArray
|
||||
decodeFurther = true
|
||||
case cborMajorMap:
|
||||
n.v = valueTypeMap
|
||||
decodeFurther = true
|
||||
case cborMajorTag:
|
||||
n.v = valueTypeExt
|
||||
n.u = d.decUint()
|
||||
d.bdRead = false
|
||||
n.l = nil
|
||||
xx := d.h.getExtForTag(n.u)
|
||||
if xx == nil {
|
||||
switch n.u {
|
||||
case 0, 1:
|
||||
n.v = valueTypeTime
|
||||
n.t = d.decodeTime(n.u)
|
||||
case 2:
|
||||
n.f = d.decTagBigIntAsFloat(false)
|
||||
n.v = valueTypeFloat
|
||||
case 3:
|
||||
n.f = d.decTagBigIntAsFloat(true)
|
||||
n.v = valueTypeFloat
|
||||
case 4:
|
||||
n.f = d.decTagBigFloatAsFloat(true)
|
||||
n.v = valueTypeFloat
|
||||
case 5:
|
||||
n.f = d.decTagBigFloatAsFloat(false)
|
||||
n.v = valueTypeFloat
|
||||
case 55799: // skip
|
||||
d.DecodeNaked()
|
||||
default:
|
||||
if d.h.SkipUnexpectedTags {
|
||||
d.DecodeNaked()
|
||||
}
|
||||
// else we will use standard mode to decode ext e.g. into a RawExt
|
||||
}
|
||||
return
|
||||
}
|
||||
// if n.u == 0 || n.u == 1 {
|
||||
// d.bdRead = false
|
||||
// n.v = valueTypeTime
|
||||
// n.t = d.decodeTime(n.u)
|
||||
// } else if d.h.SkipUnexpectedTags && d.h.getExtForTag(n.u) == nil {
|
||||
// // d.skipTags() // no need to call this - tags already skipped
|
||||
// d.bdRead = false
|
||||
// d.DecodeNaked()
|
||||
// return // return when done (as true recursive function)
|
||||
// }
|
||||
case cborMajorSimpleOrFloat:
|
||||
switch d.bd {
|
||||
case cborBdNil, cborBdUndefined:
|
||||
n.v = valueTypeNil
|
||||
case cborBdFalse:
|
||||
n.v = valueTypeBool
|
||||
n.b = false
|
||||
case cborBdTrue:
|
||||
n.v = valueTypeBool
|
||||
n.b = true
|
||||
case cborBdFloat16, cborBdFloat32, cborBdFloat64:
|
||||
n.v = valueTypeFloat
|
||||
n.f = d.DecodeFloat64()
|
||||
default:
|
||||
halt.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
|
||||
}
|
||||
default: // should never happen
|
||||
halt.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd)
|
||||
}
|
||||
if !decodeFurther {
|
||||
d.bdRead = false
|
||||
}
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) uintBytes() (v []byte, ui uint64) {
|
||||
// this is only used by nextValueBytes, so it's ok to
|
||||
// use readx and bigenstd here.
|
||||
switch vv := d.bd & 0x1f; vv {
|
||||
case 0x18:
|
||||
v = d.r.readx(1)
|
||||
ui = uint64(v[0])
|
||||
case 0x19:
|
||||
v = d.r.readx(2)
|
||||
ui = uint64(bigenstd.Uint16(v))
|
||||
case 0x1a:
|
||||
v = d.r.readx(4)
|
||||
ui = uint64(bigenstd.Uint32(v))
|
||||
case 0x1b:
|
||||
v = d.r.readx(8)
|
||||
ui = uint64(bigenstd.Uint64(v))
|
||||
default:
|
||||
if vv > 0x1b {
|
||||
halt.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd))
|
||||
}
|
||||
ui = uint64(vv)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) nextValueBytes() (v []byte) {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
d.r.startRecording()
|
||||
d.nextValueBytesBdReadR()
|
||||
v = d.r.stopRecording()
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
// func (d *cborDecDriver[T]) nextValueBytesR(v0 []byte) (v []byte) {
|
||||
// d.readNextBd()
|
||||
// v0 = append(v0, d.bd)
|
||||
// d.r.startRecording(v0)
|
||||
// d.nextValueBytesBdReadR()
|
||||
// v = d.r.stopRecording()
|
||||
// return
|
||||
// }
|
||||
|
||||
func (d *cborDecDriver[T]) nextValueBytesBdReadR() {
|
||||
// var bs []byte
|
||||
var ui uint64
|
||||
|
||||
switch d.bd >> 5 {
|
||||
case cborMajorUint, cborMajorNegInt:
|
||||
d.uintBytes()
|
||||
case cborMajorString, cborMajorBytes:
|
||||
if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString {
|
||||
for {
|
||||
d.readNextBd()
|
||||
if d.bd == cborBdBreak {
|
||||
break
|
||||
}
|
||||
_, ui = d.uintBytes()
|
||||
d.r.skip(uint(ui))
|
||||
}
|
||||
} else {
|
||||
_, ui = d.uintBytes()
|
||||
d.r.skip(uint(ui))
|
||||
}
|
||||
case cborMajorArray:
|
||||
if d.bd == cborBdIndefiniteArray {
|
||||
for {
|
||||
d.readNextBd()
|
||||
if d.bd == cborBdBreak {
|
||||
break
|
||||
}
|
||||
d.nextValueBytesBdReadR()
|
||||
}
|
||||
} else {
|
||||
_, ui = d.uintBytes()
|
||||
for i := uint64(0); i < ui; i++ {
|
||||
d.readNextBd()
|
||||
d.nextValueBytesBdReadR()
|
||||
}
|
||||
}
|
||||
case cborMajorMap:
|
||||
if d.bd == cborBdIndefiniteMap {
|
||||
for {
|
||||
d.readNextBd()
|
||||
if d.bd == cborBdBreak {
|
||||
break
|
||||
}
|
||||
d.nextValueBytesBdReadR()
|
||||
d.readNextBd()
|
||||
d.nextValueBytesBdReadR()
|
||||
}
|
||||
} else {
|
||||
_, ui = d.uintBytes()
|
||||
for i := uint64(0); i < ui; i++ {
|
||||
d.readNextBd()
|
||||
d.nextValueBytesBdReadR()
|
||||
d.readNextBd()
|
||||
d.nextValueBytesBdReadR()
|
||||
}
|
||||
}
|
||||
case cborMajorTag:
|
||||
d.uintBytes()
|
||||
d.readNextBd()
|
||||
d.nextValueBytesBdReadR()
|
||||
case cborMajorSimpleOrFloat:
|
||||
switch d.bd {
|
||||
case cborBdNil, cborBdUndefined, cborBdFalse, cborBdTrue: // pass
|
||||
case cborBdFloat16:
|
||||
d.r.skip(2)
|
||||
case cborBdFloat32:
|
||||
d.r.skip(4)
|
||||
case cborBdFloat64:
|
||||
d.r.skip(8)
|
||||
default:
|
||||
halt.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd)
|
||||
}
|
||||
default: // should never happen
|
||||
halt.errorf("nextValueBytes: Unrecognized d.bd: 0x%x", d.bd)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) reset() {
|
||||
d.bdAndBdread.reset()
|
||||
// d.st = d.h.SkipUnexpectedTags
|
||||
}
|
||||
|
||||
// ----
|
||||
//
|
||||
// The following below are similar across all format files (except for the format name).
|
||||
//
|
||||
// We keep them together here, so that we can easily copy and compare.
|
||||
|
||||
// ----
|
||||
|
||||
func (d *cborEncDriver[T]) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) {
|
||||
callMake(&d.w)
|
||||
d.h = hh.(*CborHandle)
|
||||
d.e = shared
|
||||
if shared.bytes {
|
||||
fp = cborFpEncBytes
|
||||
} else {
|
||||
fp = cborFpEncIO
|
||||
}
|
||||
// d.w.init()
|
||||
d.init2(enc)
|
||||
return
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) writeBytesAsis(b []byte) { e.w.writeb(b) }
|
||||
|
||||
// func (e *cborEncDriver[T]) writeStringAsisDblQuoted(v string) { e.w.writeqstr(v) }
|
||||
|
||||
func (e *cborEncDriver[T]) writerEnd() { e.w.end() }
|
||||
|
||||
func (e *cborEncDriver[T]) resetOutBytes(out *[]byte) {
|
||||
e.w.resetBytes(*out, out)
|
||||
}
|
||||
|
||||
func (e *cborEncDriver[T]) resetOutIO(out io.Writer) {
|
||||
e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist)
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
func (d *cborDecDriver[T]) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) {
|
||||
callMake(&d.r)
|
||||
d.h = hh.(*CborHandle)
|
||||
d.d = shared
|
||||
if shared.bytes {
|
||||
fp = cborFpDecBytes
|
||||
} else {
|
||||
fp = cborFpDecIO
|
||||
}
|
||||
// d.r.init()
|
||||
d.init2(dec)
|
||||
return
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) NumBytesRead() int {
|
||||
return int(d.r.numread())
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) resetInBytes(in []byte) {
|
||||
d.r.resetBytes(in)
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) resetInIO(r io.Reader) {
|
||||
d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist)
|
||||
}
|
||||
|
||||
// ---- (custom stanza)
|
||||
|
||||
func (d *cborDecDriver[T]) descBd() string {
|
||||
return sprintf("%v (%s)", d.bd, cbordesc(d.bd))
|
||||
}
|
||||
|
||||
func (d *cborDecDriver[T]) DecodeFloat32() (f float32) {
|
||||
return float32(chkOvf.Float32V(d.DecodeFloat64()))
|
||||
}
|
||||
|
||||
func (d *cborEncDriver[T]) init2(enc encoderI) {
	d.enc = enc
}

func (d *cborDecDriver[T]) init2(dec decoderI) {
	d.dec = dec
	// d.d.cbor = true
}
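
// Illustrative sketch (not part of the original file): a minimal round trip through
// the public Encoder/Decoder front ends that drive cborEncDriver and cborDecDriver:
func exampleCborRoundTrip() (out map[string]int, err error) {
	var h CborHandle
	in := map[string]int{"a": 1, "b": 2}
	var buf []byte
	if err = NewEncoderBytes(&buf, &h).Encode(in); err != nil {
		return
	}
	err = NewDecoderBytes(buf, &h).Decode(&out)
	return
}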
|
||||
7985
vendor/github.com/ugorji/go/codec/cbor.mono.generated.go
generated
vendored
Normal file
7985
vendor/github.com/ugorji/go/codec/cbor.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
52
vendor/github.com/ugorji/go/codec/cbor.notfastpath.mono.generated.go
generated
vendored
Normal file
52
vendor/github.com/ugorji/go/codec/cbor.notfastpath.mono.generated.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath)
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type fastpathECborBytes struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderCborBytes, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDCborBytes struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderCborBytes, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsCborBytes [0]fastpathECborBytes
|
||||
type fastpathDsCborBytes [0]fastpathDCborBytes
|
||||
|
||||
func (helperEncDriverCborBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderCborBytes) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverCborBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderCborBytes) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverCborBytes) fastpathEList() (v *fastpathEsCborBytes) { return }
|
||||
func (helperDecDriverCborBytes) fastpathDList() (v *fastpathDsCborBytes) { return }
|
||||
|
||||
type fastpathECborIO struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderCborIO, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDCborIO struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderCborIO, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsCborIO [0]fastpathECborIO
|
||||
type fastpathDsCborIO [0]fastpathDCborIO
|
||||
|
||||
func (helperEncDriverCborIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderCborIO) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverCborIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderCborIO) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverCborIO) fastpathEList() (v *fastpathEsCborIO) { return }
|
||||
func (helperDecDriverCborIO) fastpathDList() (v *fastpathDsCborIO) { return }
|
||||
191
vendor/github.com/ugorji/go/codec/custom_time.go
generated
vendored
Normal file
191
vendor/github.com/ugorji/go/codec/custom_time.go
generated
vendored
Normal file
@@ -0,0 +1,191 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
)
|
||||
|
||||
// EncodeTime encodes a time.Time as a []byte, including
|
||||
// information on the instant in time and UTC offset.
|
||||
//
|
||||
// Format Description
|
||||
//
|
||||
// A timestamp is composed of 3 components:
|
||||
//
|
||||
// - secs: signed integer representing seconds since unix epoch
|
||||
// - nsecs: unsigned integer representing fractional seconds as a
|
||||
// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
|
||||
// - tz: signed integer representing timezone offset in minutes east of UTC,
|
||||
// and a dst (daylight savings time) flag
|
||||
//
|
||||
// When encoding a timestamp, the first byte is the descriptor, which
|
||||
// defines which components are encoded and how many bytes are used to
|
||||
// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
|
||||
// is not encoded in the byte array explicitly*.
|
||||
//
|
||||
// Descriptor 8 bits are of the form `A B C DDD EE`:
|
||||
// A: Is secs component encoded? 1 = true
|
||||
// B: Is nsecs component encoded? 1 = true
|
||||
// C: Is tz component encoded? 1 = true
|
||||
// DDD: Number of extra bytes for secs (range 0-7).
|
||||
// If A = 1, secs encoded in DDD+1 bytes.
|
||||
// If A = 0, secs is not encoded, and is assumed to be 0.
|
||||
// If A = 1, then we need at least 1 byte to encode secs.
|
||||
// DDD says the number of extra bytes beyond that 1.
|
||||
// E.g. if DDD=0, then secs is represented in 1 byte.
|
||||
// if DDD=2, then secs is represented in 3 bytes.
|
||||
// EE: Number of extra bytes for nsecs (range 0-3).
|
||||
// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
|
||||
//
|
||||
// Following the descriptor bytes, subsequent bytes are:
|
||||
//
|
||||
// secs component encoded in `DDD + 1` bytes (if A == 1)
|
||||
// nsecs component encoded in `EE + 1` bytes (if B == 1)
|
||||
// tz component encoded in 2 bytes (if C == 1)
|
||||
//
|
||||
// secs and nsecs components are integers encoded in a BigEndian
|
||||
// 2-complement encoding format.
|
||||
//
|
||||
// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
|
||||
// Least significant bit 0 are described below:
|
||||
//
|
||||
// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
|
||||
// Bit 15 = have_dst: set to 1 if we set the dst flag.
// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
|
||||
// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
|
||||
func customEncodeTime(t time.Time) []byte {
|
||||
// t := rv2i(rv).(time.Time)
|
||||
tsecs, tnsecs := t.Unix(), t.Nanosecond()
|
||||
var (
|
||||
bd byte
|
||||
bs [16]byte
|
||||
i int = 1
|
||||
)
|
||||
l := t.Location()
|
||||
if l == time.UTC {
|
||||
l = nil
|
||||
}
|
||||
if tsecs != 0 {
|
||||
bd = bd | 0x80
|
||||
btmp := bigen.PutUint64(uint64(tsecs))
|
||||
f := pruneSignExt(btmp[:], tsecs >= 0)
|
||||
bd = bd | (byte(7-f) << 2)
|
||||
copy(bs[i:], btmp[f:])
|
||||
i = i + (8 - f)
|
||||
}
|
||||
if tnsecs != 0 {
|
||||
bd = bd | 0x40
|
||||
btmp := bigen.PutUint32(uint32(tnsecs))
|
||||
f := pruneSignExt(btmp[:4], true)
|
||||
bd = bd | byte(3-f)
|
||||
copy(bs[i:], btmp[f:4])
|
||||
i = i + (4 - f)
|
||||
}
|
||||
if l != nil {
|
||||
bd = bd | 0x20
|
||||
// Note that Go Libs do not give access to dst flag.
|
||||
_, zoneOffset := t.Zone()
|
||||
// zoneName, zoneOffset := t.Zone()
|
||||
zoneOffset /= 60
|
||||
z := uint16(zoneOffset)
|
||||
btmp0, btmp1 := bigen.PutUint16(z)
|
||||
// clear dst flags
|
||||
bs[i] = btmp0 & 0x3f
|
||||
bs[i+1] = btmp1
|
||||
i = i + 2
|
||||
}
|
||||
bs[0] = bd
|
||||
return bs[0:i]
|
||||
}
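
// Illustrative sketch (not part of the original file): a round trip through the two
// helpers in this file. For a UTC time with non-zero seconds and nanoseconds, the
// descriptor byte has bits A (0x80) and B (0x40) set and no tz component follows:
func exampleCustomTimeRoundTrip() (ok bool, err error) {
	t := time.Date(2020, 1, 2, 3, 4, 5, 600000000, time.UTC)
	var t2 time.Time
	t2, err = customDecodeTime(customEncodeTime(t))
	ok = err == nil && t2.Equal(t)
	return // expected: ok == true
}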
|
||||
|
||||
// customDecodeTime decodes a []byte into a time.Time.
|
||||
func customDecodeTime(bs []byte) (tt time.Time, err error) {
|
||||
bd := bs[0]
|
||||
var (
|
||||
tsec int64
|
||||
tnsec uint32
|
||||
tz uint16
|
||||
i byte = 1
|
||||
i2 byte
|
||||
n byte
|
||||
)
|
||||
if bd&(1<<7) != 0 {
|
||||
var btmp [8]byte
|
||||
n = ((bd >> 2) & 0x7) + 1
|
||||
i2 = i + n
|
||||
copy(btmp[8-n:], bs[i:i2])
|
||||
// if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
|
||||
if bs[i]&(1<<7) != 0 {
|
||||
copy(btmp[0:8-n], bsAll0xff)
|
||||
}
|
||||
i = i2
|
||||
tsec = int64(bigen.Uint64(btmp))
|
||||
}
|
||||
if bd&(1<<6) != 0 {
|
||||
var btmp [4]byte
|
||||
n = (bd & 0x3) + 1
|
||||
i2 = i + n
|
||||
copy(btmp[4-n:], bs[i:i2])
|
||||
i = i2
|
||||
tnsec = bigen.Uint32(btmp)
|
||||
}
|
||||
if bd&(1<<5) == 0 {
|
||||
tt = time.Unix(tsec, int64(tnsec)).UTC()
|
||||
return
|
||||
}
|
||||
// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
|
||||
// However, we need the name here, so it can be shown when the time is printed.
|
||||
// Zone name is in form: UTC-08:00.
|
||||
// Note that Go Libs do not give access to dst flag, so we ignore dst bits
|
||||
|
||||
tz = bigen.Uint16([2]byte{bs[i], bs[i+1]})
|
||||
// sign extend sign bit into top 2 MSB (which were dst bits):
|
||||
if tz&(1<<13) == 0 { // positive
|
||||
tz = tz & 0x3fff //clear 2 MSBs: dst bits
|
||||
} else { // negative
|
||||
tz = tz | 0xc000 //set 2 MSBs: dst bits
|
||||
}
|
||||
tzint := int16(tz)
|
||||
if tzint == 0 {
|
||||
tt = time.Unix(tsec, int64(tnsec)).UTC()
|
||||
} else {
|
||||
// For Go Time, do not use a descriptive timezone.
|
||||
// It's unnecessary, and makes it harder to do a reflect.DeepEqual.
|
||||
// The Offset already tells what the offset should be, if not on UTC and unknown zone name.
|
||||
// var zoneName = timeLocUTCName(tzint)
|
||||
tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// customEncodeTimeAsNum encodes time.Time exactly as cbor does.
|
||||
func customEncodeTimeAsNum(t time.Time) (r interface{}) {
|
||||
t = t.UTC().Round(time.Microsecond)
|
||||
sec, nsec := t.Unix(), uint64(t.Nanosecond())
|
||||
if nsec == 0 {
|
||||
r = sec
|
||||
} else {
|
||||
r = float64(sec) + float64(nsec)/1e9
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// customDecodeTimeAsNum decodes time.Time exactly as cbor does.
|
||||
func customDecodeTimeAsNum(v interface{}) (t time.Time) {
|
||||
switch vv := v.(type) {
|
||||
case int64:
|
||||
t = time.Unix(vv, 0)
|
||||
case uint64:
|
||||
t = time.Unix((int64)(vv), 0)
|
||||
case float64:
|
||||
f1, f2 := math.Modf(vv)
|
||||
t = time.Unix(int64(f1), int64(f2*1e9))
|
||||
default:
|
||||
halt.errorf("expect int64/float64 for time.Time ext: got %T", v)
|
||||
}
|
||||
t = t.UTC().Round(time.Microsecond)
|
||||
return
|
||||
}
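
// Illustrative sketch (not part of the original file): the float64 branch above uses
// math.Modf to split fractional epoch seconds into whole seconds and nanoseconds:
func exampleSplitEpoch(epoch float64) (sec, nsec int64) {
	f1, f2 := math.Modf(epoch) // e.g. 1.5 -> 1.0 and 0.5
	return int64(f1), int64(f2 * 1e9)
}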
|
||||
514
vendor/github.com/ugorji/go/codec/decimal.go
generated
vendored
Normal file
514
vendor/github.com/ugorji/go/codec/decimal.go
generated
vendored
Normal file
@@ -0,0 +1,514 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type readFloatResult struct {
|
||||
mantissa uint64
|
||||
exp int8
|
||||
neg bool
|
||||
trunc bool
|
||||
bad bool // bad decimal string
|
||||
hardexp bool // exponent is hard to handle (> 2 digits, etc)
|
||||
ok bool
|
||||
// sawdot bool
|
||||
// sawexp bool
|
||||
//_ [2]bool // padding
|
||||
}
|
||||
|
||||
// Per go spec, floats are represented in memory as
|
||||
// IEEE single or double precision floating point values.
|
||||
//
|
||||
// We also looked at the source for stdlib math/modf.go,
|
||||
// reviewed https://github.com/chewxy/math32
|
||||
// and read wikipedia documents describing the formats.
|
||||
//
|
||||
// It became clear that we could easily look at the bits to determine
|
||||
// whether any fraction exists.
|
||||
|
||||
func parseFloat32(b []byte) (f float32, err error) {
|
||||
return parseFloat32_custom(b)
|
||||
}
|
||||
|
||||
func parseFloat64(b []byte) (f float64, err error) {
|
||||
return parseFloat64_custom(b)
|
||||
}
|
||||
|
||||
func parseFloat32_strconv(b []byte) (f float32, err error) {
|
||||
f64, err := strconv.ParseFloat(stringView(b), 32)
|
||||
f = float32(f64)
|
||||
return
|
||||
}
|
||||
|
||||
func parseFloat64_strconv(b []byte) (f float64, err error) {
|
||||
return strconv.ParseFloat(stringView(b), 64)
|
||||
}
|
||||
|
||||
// ------ parseFloat custom below --------
|
||||
|
||||
// JSON really supports decimal numbers in base 10 notation, with exponent support.
|
||||
//
|
||||
// We assume the following:
|
||||
// - a lot of floating point numbers in json files will have defined precision
|
||||
// (in terms of number of digits after decimal point), etc.
|
||||
// - these (referenced above) can be written in exact format.
|
||||
//
|
||||
// strconv.ParseFloat has some unnecessary overhead which we can do without
|
||||
// for the common case:
|
||||
//
|
||||
// - expensive char-by-char check to see if underscores are in right place
|
||||
// - testing for and skipping underscores
|
||||
// - check if the string matches ignorecase +/- inf, +/- infinity, nan
|
||||
// - support for base 16 (0xFFFF...)
|
||||
//
|
||||
// The functions below will try a fast-path for floats which can be decoded
|
||||
// without any loss of precision, meaning they:
|
||||
//
|
||||
// - fits within the significand bits of the 32-bits or 64-bits
|
||||
// - exponent fits within the exponent value
|
||||
// - there is no truncation (any extra numbers are all trailing zeros)
|
||||
//
|
||||
// To figure out what the values are for maxMantDigits, use this idea below:
|
||||
//
|
||||
// 2^23 = 838 8608 (between 10^ 6 and 10^ 7) (significand bits of uint32)
|
||||
// 2^32 = 42 9496 7296 (between 10^ 9 and 10^10) (full uint32)
|
||||
// 2^52 = 4503 5996 2737 0496 (between 10^15 and 10^16) (significand bits of uint64)
|
||||
// 2^64 = 1844 6744 0737 0955 1616 (between 10^19 and 10^20) (full uint64)
|
||||
//
|
||||
// Note: we only allow for up to what can comfortably fit into the significand
|
||||
// ignoring the exponent, and we only try to parse iff significand fits.
|
||||
|
||||
const (
|
||||
fMaxMultiplierForExactPow10_64 = 1e15
|
||||
fMaxMultiplierForExactPow10_32 = 1e7
|
||||
|
||||
fUint64Cutoff = (1<<64-1)/10 + 1
|
||||
// fUint32Cutoff = (1<<32-1)/10 + 1
|
||||
|
||||
fBase = 10
|
||||
)
|
||||
|
||||
const (
|
||||
thousand = 1000
|
||||
million = thousand * thousand
|
||||
billion = thousand * million
|
||||
trillion = thousand * billion
|
||||
quadrillion = thousand * trillion
|
||||
quintillion = thousand * quadrillion
|
||||
)
|
||||
|
||||
// Exact powers of 10.
|
||||
var uint64pow10 = [...]uint64{
|
||||
1, 10, 100,
|
||||
1 * thousand, 10 * thousand, 100 * thousand,
|
||||
1 * million, 10 * million, 100 * million,
|
||||
1 * billion, 10 * billion, 100 * billion,
|
||||
1 * trillion, 10 * trillion, 100 * trillion,
|
||||
1 * quadrillion, 10 * quadrillion, 100 * quadrillion,
|
||||
1 * quintillion, 10 * quintillion,
|
||||
}
|
||||
var float64pow10 = [...]float64{
|
||||
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
|
||||
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
|
||||
1e20, 1e21, 1e22,
|
||||
}
|
||||
var float32pow10 = [...]float32{
|
||||
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10,
|
||||
}
|
||||
|
||||
type floatinfo struct {
|
||||
mantbits uint8
|
||||
|
||||
// expbits uint8 // (unused)
|
||||
// bias int16 // (unused)
|
||||
// is32bit bool // (unused)
|
||||
|
||||
exactPow10 int8 // Exact powers of ten are <= 10^N (32: 10, 64: 22)
|
||||
|
||||
exactInts int8 // Exact integers are <= 10^N (for non-float, set to 0)
|
||||
|
||||
// maxMantDigits int8 // 10^19 fits in uint64, while 10^9 fits in uint32
|
||||
|
||||
mantCutoffIsUint64Cutoff bool
|
||||
|
||||
mantCutoff uint64
|
||||
}
|
||||
|
||||
var fi32 = floatinfo{23, 10, 7, false, 1<<23 - 1}
|
||||
var fi64 = floatinfo{52, 22, 15, false, 1<<52 - 1}
|
||||
|
||||
var fi64u = floatinfo{0, 19, 0, true, fUint64Cutoff}
|
||||
|
||||
func noFrac64(fbits uint64) bool {
	if fbits == 0 {
		return true
	}

	exp := uint64(fbits>>52)&0x7FF - 1023 // uint(x>>shift)&mask - bias
	// clear top 12+e bits, the integer part; if the rest is 0, then no fraction.
	return exp < 52 && fbits<<(12+exp) == 0 // means there's no fractional part
}

func noFrac32(fbits uint32) bool {
	if fbits == 0 {
		return true
	}

	exp := uint32(fbits>>23)&0xFF - 127 // uint(x>>shift)&mask - bias
	// clear top 9+e bits, the integer part; if the rest is 0, then no fraction.
	return exp < 23 && fbits<<(9+exp) == 0 // means there's no fractional part
}
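
// Illustrative sketch (not part of the original file): noFrac64 above inspects the
// raw IEEE-754 bits directly; a float64 has no fractional part iff every mantissa
// bit below the binary point is zero once the exponent is accounted for:
func exampleNoFrac() (whole, frac bool) {
	whole = noFrac64(math.Float64bits(3.0)) // expected: true
	frac = noFrac64(math.Float64bits(3.5))  // expected: false
	return
}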
|
||||
|
||||
func strconvParseErr(b []byte, fn string) error {
|
||||
return &strconv.NumError{
|
||||
Func: fn,
|
||||
Err: strconv.ErrSyntax,
|
||||
Num: string(b),
|
||||
}
|
||||
}
|
||||
|
||||
func parseFloat32_reader(r readFloatResult) (f float32, fail bool) {
|
||||
f = float32(r.mantissa)
|
||||
if r.exp == 0 {
|
||||
} else if r.exp < 0 { // int / 10^k
|
||||
f /= float32pow10[uint8(-r.exp)]
|
||||
} else { // exp > 0
|
||||
if r.exp > fi32.exactPow10 {
|
||||
f *= float32pow10[r.exp-fi32.exactPow10]
|
||||
if f > fMaxMultiplierForExactPow10_32 { // exponent too large - outside range
|
||||
fail = true
|
||||
return // ok = false
|
||||
}
|
||||
f *= float32pow10[fi32.exactPow10]
|
||||
} else {
|
||||
f *= float32pow10[uint8(r.exp)]
|
||||
}
|
||||
}
|
||||
if r.neg {
|
||||
f = -f
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func parseFloat32_custom(b []byte) (f float32, err error) {
|
||||
r := readFloat(b, fi32)
|
||||
if r.bad {
|
||||
return 0, strconvParseErr(b, "ParseFloat")
|
||||
}
|
||||
if r.ok {
|
||||
f, r.bad = parseFloat32_reader(r)
|
||||
if !r.bad {
|
||||
return
|
||||
}
|
||||
}
|
||||
return parseFloat32_strconv(b)
|
||||
}
|
||||
|
||||
func parseFloat64_reader(r readFloatResult) (f float64, fail bool) {
|
||||
f = float64(r.mantissa)
|
||||
if r.exp == 0 {
|
||||
} else if r.exp < 0 { // int / 10^k
|
||||
f /= float64pow10[-uint8(r.exp)]
|
||||
} else { // exp > 0
|
||||
if r.exp > fi64.exactPow10 {
|
||||
f *= float64pow10[r.exp-fi64.exactPow10]
|
||||
if f > fMaxMultiplierForExactPow10_64 { // exponent too large - outside range
|
||||
fail = true
|
||||
return
|
||||
}
|
||||
f *= float64pow10[fi64.exactPow10]
|
||||
} else {
|
||||
f *= float64pow10[uint8(r.exp)]
|
||||
}
|
||||
}
|
||||
if r.neg {
|
||||
f = -f
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func parseFloat64_custom(b []byte) (f float64, err error) {
|
||||
r := readFloat(b, fi64)
|
||||
if r.bad {
|
||||
return 0, strconvParseErr(b, "ParseFloat")
|
||||
}
|
||||
if r.ok {
|
||||
f, r.bad = parseFloat64_reader(r)
|
||||
if !r.bad {
|
||||
return
|
||||
}
|
||||
}
|
||||
return parseFloat64_strconv(b)
|
||||
}
|
||||
|
||||
func parseUint64_simple(b []byte) (n uint64, ok bool) {
|
||||
if len(b) > 1 && b[0] == '0' { // punt on numbers with leading zeros
|
||||
return
|
||||
}
|
||||
|
||||
var i int
|
||||
var n1 uint64
|
||||
var c uint8
|
||||
LOOP:
|
||||
if i < len(b) {
|
||||
c = b[i]
|
||||
// unsigned integers don't overflow well on multiplication, so check cutoff here
|
||||
// e.g. (maxUint64-5)*10 doesn't overflow well ...
|
||||
// if n >= fUint64Cutoff || !isDigitChar(b[i]) { // if c < '0' || c > '9' {
|
||||
if n >= fUint64Cutoff || c < '0' || c > '9' {
|
||||
return
|
||||
} else if c == '0' {
|
||||
n *= fBase
|
||||
} else {
|
||||
n1 = n
|
||||
n = n*fBase + uint64(c-'0')
|
||||
if n < n1 {
|
||||
return
|
||||
}
|
||||
}
|
||||
i++
|
||||
goto LOOP
|
||||
}
|
||||
ok = true
|
||||
return
|
||||
}
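
// Illustrative sketch (not part of the original file): parseUint64_simple guards
// against overflow twice - a pre-multiplication cutoff check and a post-addition
// wrap check - so the maximum uint64 parses but one more is rejected:
func exampleParseUintSimple() (ok1, ok2 bool) {
	_, ok1 = parseUint64_simple([]byte("18446744073709551615")) // max uint64; expected ok
	_, ok2 = parseUint64_simple([]byte("18446744073709551616")) // 2^64; expected !ok
	return
}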
|
||||
|
||||
func parseUint64_reader(r readFloatResult) (f uint64, fail bool) {
|
||||
f = r.mantissa
|
||||
if r.exp == 0 {
|
||||
} else if r.exp < 0 { // int / 10^k
|
||||
if f%uint64pow10[uint8(-r.exp)] != 0 {
|
||||
fail = true
|
||||
} else {
|
||||
f /= uint64pow10[uint8(-r.exp)]
|
||||
}
|
||||
} else { // exp > 0
|
||||
f *= uint64pow10[uint8(r.exp)]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func parseInteger_bytes(b []byte) (u uint64, neg, ok bool) {
|
||||
if len(b) == 0 {
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
if b[0] == '-' {
|
||||
if len(b) == 1 {
|
||||
return
|
||||
}
|
||||
neg = true
|
||||
b = b[1:]
|
||||
}
|
||||
|
||||
u, ok = parseUint64_simple(b)
|
||||
if ok {
|
||||
return
|
||||
}
|
||||
|
||||
r := readFloat(b, fi64u)
|
||||
if r.ok {
|
||||
var fail bool
|
||||
u, fail = parseUint64_reader(r)
|
||||
if fail {
|
||||
f, err := parseFloat64(b)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !noFrac64(math.Float64bits(f)) {
|
||||
return
|
||||
}
|
||||
u = uint64(f)
|
||||
}
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// parseNumber will return an integer if only composed of [-]?[0-9]+
|
||||
// Else it will return a float.
|
||||
func parseNumber(b []byte, z *fauxUnion, preferSignedInt bool) (err error) {
|
||||
var ok, neg bool
|
||||
var f uint64
|
||||
|
||||
if len(b) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if b[0] == '-' {
|
||||
neg = true
|
||||
f, ok = parseUint64_simple(b[1:])
|
||||
} else {
|
||||
f, ok = parseUint64_simple(b)
|
||||
}
|
||||
|
||||
if ok {
|
||||
if neg {
|
||||
z.v = valueTypeInt
|
||||
if chkOvf.Uint2Int(f, neg) {
|
||||
return strconvParseErr(b, "ParseInt")
|
||||
}
|
||||
z.i = -int64(f)
|
||||
} else if preferSignedInt {
|
||||
z.v = valueTypeInt
|
||||
if chkOvf.Uint2Int(f, neg) {
|
||||
return strconvParseErr(b, "ParseInt")
|
||||
}
|
||||
z.i = int64(f)
|
||||
} else {
|
||||
z.v = valueTypeUint
|
||||
z.u = f
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
z.v = valueTypeFloat
|
||||
z.f, err = parseFloat64_custom(b)
|
||||
return
|
||||
}
|
||||
|
||||
func readFloat(s []byte, y floatinfo) (r readFloatResult) {
|
||||
var i uint // uint, so that we eliminate bounds checking
|
||||
var slen = uint(len(s))
|
||||
if slen == 0 {
|
||||
// read an empty string as the zero value
|
||||
// r.bad = true
|
||||
r.ok = true
|
||||
return
|
||||
}
|
||||
|
||||
if s[0] == '-' {
|
||||
r.neg = true
|
||||
i++
|
||||
}
|
||||
|
||||
// considered punting early if string has length > maxMantDigits, but doesn't account
|
||||
// for trailing 0's e.g. 700000000000000000000 can be encoded exactly as it is 7e20
|
||||
|
||||
var nd, ndMant, dp int8
|
||||
var sawdot, sawexp bool
|
||||
var xu uint64
|
||||
|
||||
if i+1 < slen && s[i] == '0' {
|
||||
switch s[i+1] {
|
||||
case '.', 'e', 'E':
|
||||
// ok
|
||||
default:
|
||||
r.bad = true
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
LOOP:
|
||||
for ; i < slen; i++ {
|
||||
switch s[i] {
|
||||
case '.':
|
||||
if sawdot {
|
||||
r.bad = true
|
||||
return
|
||||
}
|
||||
sawdot = true
|
||||
dp = nd
|
||||
case 'e', 'E':
|
||||
sawexp = true
|
||||
break LOOP
|
||||
case '0':
|
||||
if nd == 0 {
|
||||
dp--
|
||||
continue LOOP
|
||||
}
|
||||
nd++
|
||||
if r.mantissa < y.mantCutoff {
|
||||
r.mantissa *= fBase
|
||||
ndMant++
|
||||
}
|
||||
case '1', '2', '3', '4', '5', '6', '7', '8', '9':
|
||||
nd++
|
||||
if y.mantCutoffIsUint64Cutoff && r.mantissa < fUint64Cutoff {
|
||||
r.mantissa *= fBase
|
||||
xu = r.mantissa + uint64(s[i]-'0')
|
||||
if xu < r.mantissa {
|
||||
r.trunc = true
|
||||
return
|
||||
}
|
||||
r.mantissa = xu
|
||||
} else if r.mantissa < y.mantCutoff {
|
||||
// mantissa = (mantissa << 1) + (mantissa << 3) + uint64(c-'0')
|
||||
r.mantissa = r.mantissa*fBase + uint64(s[i]-'0')
|
||||
} else {
|
||||
r.trunc = true
|
||||
return
|
||||
}
|
||||
ndMant++
|
||||
default:
|
||||
r.bad = true
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if !sawdot {
|
||||
dp = nd
|
||||
}
|
||||
|
||||
if sawexp {
|
||||
i++
|
||||
if i < slen {
|
||||
var eneg bool
|
||||
if s[i] == '+' {
|
||||
i++
|
||||
} else if s[i] == '-' {
|
||||
i++
|
||||
eneg = true
|
||||
}
|
||||
if i < slen {
|
||||
// for exact match, exponent is 1 or 2 digits (float64: -22 to 37, float32: -1 to 17).
|
||||
// exit quick if exponent is more than 2 digits.
|
||||
if i+2 < slen {
|
||||
r.hardexp = true
|
||||
return
|
||||
}
|
||||
var e int8
|
||||
if s[i] < '0' || s[i] > '9' { // !isDigitChar(s[i]) { //
|
||||
r.bad = true
|
||||
return
|
||||
}
|
||||
e = int8(s[i] - '0')
|
||||
i++
|
||||
if i < slen {
|
||||
if s[i] < '0' || s[i] > '9' { // !isDigitChar(s[i]) { //
|
||||
r.bad = true
|
||||
return
|
||||
}
|
||||
e = e*fBase + int8(s[i]-'0') // (e << 1) + (e << 3) + int8(s[i]-'0')
|
||||
i++
|
||||
}
|
||||
if eneg {
|
||||
dp -= e
|
||||
} else {
|
||||
dp += e
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if r.mantissa != 0 {
|
||||
r.exp = dp - ndMant
|
||||
// do not set ok=true for cases we cannot handle
|
||||
if r.exp < -y.exactPow10 ||
|
||||
r.exp > y.exactInts+y.exactPow10 ||
|
||||
(y.mantbits != 0 && r.mantissa>>y.mantbits != 0) {
|
||||
r.hardexp = true
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
_ = i // no-op
|
||||
r.ok = true
|
||||
return
|
||||
}
|
||||
944
vendor/github.com/ugorji/go/codec/decode.base.go
generated
vendored
Normal file
944
vendor/github.com/ugorji/go/codec/decode.base.go
generated
vendored
Normal file
@@ -0,0 +1,944 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
func init() {
|
||||
for _, v := range []interface{}{
|
||||
(*string)(nil),
|
||||
(*bool)(nil),
|
||||
(*int)(nil),
|
||||
(*int8)(nil),
|
||||
(*int16)(nil),
|
||||
(*int32)(nil),
|
||||
(*int64)(nil),
|
||||
(*uint)(nil),
|
||||
(*uint8)(nil),
|
||||
(*uint16)(nil),
|
||||
(*uint32)(nil),
|
||||
(*uint64)(nil),
|
||||
(*uintptr)(nil),
|
||||
(*float32)(nil),
|
||||
(*float64)(nil),
|
||||
(*complex64)(nil),
|
||||
(*complex128)(nil),
|
||||
(*[]byte)(nil),
|
||||
([]byte)(nil),
|
||||
(*time.Time)(nil),
|
||||
(*Raw)(nil),
|
||||
(*interface{})(nil),
|
||||
} {
|
||||
decBuiltinRtids = append(decBuiltinRtids, i2rtid(v))
|
||||
}
|
||||
slices.Sort(decBuiltinRtids)
|
||||
}
|
||||
|
||||
const msgBadDesc = "unrecognized descriptor byte"
|
||||
|
||||
var decBuiltinRtids []uintptr
|
||||
|
||||
// decDriver calls (DecodeBytes and DecodeStringAsBytes) return a state
|
||||
// of the view they return, allowing consumers to handle appropriately.
|
||||
//
|
||||
// sequencing of this is intentional:
|
||||
// - mutable if <= dBytesAttachBuffer (buf | view | invalid)
|
||||
// - noCopy if >= dBytesAttachViewZerocopy
|
||||
type dBytesAttachState uint8
|
||||
|
||||
const (
|
||||
dBytesAttachInvalid dBytesAttachState = iota
|
||||
dBytesAttachView // (bytes && !zerocopy && !buf)
|
||||
dBytesAttachBuffer // (buf)
|
||||
dBytesAttachViewZerocopy // (bytes && zerocopy && !buf)
|
||||
dBytesDetach // (!bytes && !buf)
|
||||
)
|
||||
|
||||
type dBytesIntoState uint8
|
||||
|
||||
const (
|
||||
dBytesIntoNoChange dBytesIntoState = iota
|
||||
dBytesIntoParamOut
|
||||
dBytesIntoParamOutSlice
|
||||
dBytesIntoNew
|
||||
)
|
||||
|
||||
func (x dBytesAttachState) String() string {
|
||||
switch x {
|
||||
case dBytesAttachInvalid:
|
||||
return "invalid"
|
||||
case dBytesAttachView:
|
||||
return "view"
|
||||
case dBytesAttachBuffer:
|
||||
return "buffer"
|
||||
case dBytesAttachViewZerocopy:
|
||||
return "view-zerocopy"
|
||||
case dBytesDetach:
|
||||
return "detach"
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
const (
|
||||
decDefMaxDepth = 1024 // maximum depth
|
||||
decDefChanCap = 64 // should be large, as cap cannot be expanded
|
||||
decScratchByteArrayLen = (4 + 3) * 8 // around cacheLineSize ie ~64, depending on Decoder size
|
||||
|
||||
// MARKER: massage decScratchByteArrayLen to ensure xxxDecDriver structs fit within cacheLine*N
|
||||
|
||||
// decFailNonEmptyIntf configures whether we error
|
||||
// when decoding naked into a non-empty interface.
|
||||
//
|
||||
// Typically, we cannot decode non-nil stream value into
|
||||
// nil interface with methods (e.g. io.Reader).
|
||||
// However, in some scenarios, this should be allowed:
|
||||
// - MapType
|
||||
// - SliceType
|
||||
// - Extensions
|
||||
//
|
||||
// Consequently, we should relax this. Put it behind a const flag for now.
|
||||
decFailNonEmptyIntf = false
|
||||
|
||||
// decUseTransient says whether we should use the transient optimization.
|
||||
//
|
||||
// There's potential for GC corruption or memory overwrites if transient isn't
|
||||
// used carefully, so this flag helps turn it off quickly if needed.
|
||||
//
|
||||
// Use it everywhere needed so we can completely remove unused code blocks.
|
||||
decUseTransient = true
|
||||
)
|
||||
|
||||
var (
|
||||
errNeedMapOrArrayDecodeToStruct = errors.New("only encoded map or array can decode into struct")
|
||||
errCannotDecodeIntoNil = errors.New("cannot decode into nil")
|
||||
|
||||
errExpandSliceCannotChange = errors.New("expand slice: cannot change")
|
||||
|
||||
errDecoderNotInitialized = errors.New("Decoder not initialized")
|
||||
|
||||
errDecUnreadByteNothingToRead = errors.New("cannot unread - nothing has been read")
|
||||
errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read")
|
||||
errDecUnreadByteUnknown = errors.New("cannot unread - reason unknown")
|
||||
errMaxDepthExceeded = errors.New("maximum decoding depth exceeded")
|
||||
)
|
||||
|
||||
type decNotDecodeableReason uint8
|
||||
|
||||
const (
|
||||
decNotDecodeableReasonUnknown decNotDecodeableReason = iota
|
||||
decNotDecodeableReasonBadKind
|
||||
decNotDecodeableReasonNonAddrValue
|
||||
decNotDecodeableReasonNilReference
|
||||
)
|
||||
|
||||
type decDriverI interface {
|
||||
|
||||
// this will check if the next token is a break.
|
||||
CheckBreak() bool
|
||||
|
||||
// TryNil tries to decode as nil.
|
||||
// If a nil is in the stream, it consumes it and returns true.
|
||||
//
|
||||
// Note: if TryNil returns true, that must be handled.
|
||||
TryNil() bool
|
||||
|
||||
// ContainerType returns one of: Bytes, String, Nil, Slice or Map.
|
||||
//
|
||||
// Return unSet if not known.
|
||||
//
|
||||
// Note: Implementations MUST fully consume sentinel container types, specifically Nil.
|
||||
ContainerType() (vt valueType)
|
||||
|
||||
// DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt.
|
||||
// For maps and arrays, it will not do the decoding in-band, but will signal
|
||||
// the decoder, so that is done later, by setting the fauxUnion.valueType field.
|
||||
//
|
||||
// Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types).
|
||||
// for extensions, DecodeNaked must read the tag and the []byte if it exists.
|
||||
// if the []byte is not read, then kInterfaceNaked will treat it as a Handle
|
||||
// that stores the subsequent value in-band, and complete reading the RawExt.
|
||||
//
|
||||
// extensions should also use readx to decode them, for efficiency.
|
||||
// kInterface will extract the detached byte slice if it has to pass it outside its realm.
|
||||
DecodeNaked()
|
||||
|
||||
DecodeInt64() (i int64)
|
||||
DecodeUint64() (ui uint64)
|
||||
|
||||
DecodeFloat32() (f float32)
|
||||
DecodeFloat64() (f float64)
|
||||
|
||||
DecodeBool() (b bool)
|
||||
|
||||
// DecodeStringAsBytes returns the bytes representing a string.
|
||||
// It will return a view into scratch buffer or input []byte (if applicable).
|
||||
//
|
||||
// Note: This can also decode symbols, if supported.
|
||||
//
|
||||
// Users should consume it right away and not store it for later use.
|
||||
DecodeStringAsBytes() (v []byte, state dBytesAttachState)
|
||||
|
||||
// DecodeBytes returns the bytes representing a binary value.
|
||||
// It will return a view into scratch buffer or input []byte (if applicable).
|
||||
DecodeBytes() (out []byte, state dBytesAttachState)
|
||||
// DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte)
|
||||
|
||||
// DecodeExt will decode into an extension.
|
||||
// ext is never nil.
|
||||
DecodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext)
|
||||
// decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte)
|
||||
|
||||
// DecodeRawExt will decode into a *RawExt
|
||||
DecodeRawExt(re *RawExt)
|
||||
|
||||
DecodeTime() (t time.Time)
|
||||
|
||||
// ReadArrayStart will return the length of the array.
|
||||
// If the format doesn't prefix the length, it returns containerLenUnknown.
|
||||
// If the expected array was a nil in the stream, it returns containerLenNil.
|
||||
ReadArrayStart() int
|
||||
|
||||
// ReadMapStart will return the length of the array.
|
||||
// If the format doesn't prefix the length, it returns containerLenUnknown.
|
||||
// If the expected array was a nil in the stream, it returns containerLenNil.
|
||||
ReadMapStart() int
|
||||
|
||||
decDriverContainerTracker
|
||||
|
||||
reset()
|
||||
|
||||
// atEndOfDecode()
|
||||
|
||||
// nextValueBytes will return the bytes representing the next value in the stream.
|
||||
// It generally will include the last byte read, as that is a part of the next value
|
||||
// in the stream.
|
||||
nextValueBytes() []byte
|
||||
|
||||
// descBd will describe the token descriptor that signifies what type was decoded
|
||||
descBd() string
|
||||
|
||||
// isBytes() bool
|
||||
|
||||
resetInBytes(in []byte)
|
||||
resetInIO(r io.Reader)
|
||||
|
||||
NumBytesRead() int
|
||||
|
||||
init(h Handle, shared *decoderBase, dec decoderI) (fp interface{})
|
||||
|
||||
// driverStateManager
|
||||
decNegintPosintFloatNumber
|
||||
}
|
||||
|
||||
type decInit2er struct{}
|
||||
|
||||
func (decInit2er) init2(dec decoderI) {}
|
||||
|
||||
type decDriverContainerTracker interface {
|
||||
ReadArrayElem(firstTime bool)
|
||||
ReadMapElemKey(firstTime bool)
|
||||
ReadMapElemValue()
|
||||
ReadArrayEnd()
|
||||
ReadMapEnd()
|
||||
}
|
||||
|
||||
type decNegintPosintFloatNumber interface {
|
||||
decInteger() (ui uint64, neg, ok bool)
|
||||
decFloat() (f float64, ok bool)
|
||||
}
|
||||
|
||||
type decDriverNoopNumberHelper struct{}
|
||||
|
||||
func (x decDriverNoopNumberHelper) decInteger() (ui uint64, neg, ok bool) {
|
||||
panic("decInteger unsupported")
|
||||
}
|
||||
func (x decDriverNoopNumberHelper) decFloat() (f float64, ok bool) { panic("decFloat unsupported") }
|
||||
|
||||
type decDriverNoopContainerReader struct{}
|
||||
|
||||
func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { panic("ReadArrayStart unsupported") }
|
||||
func (x decDriverNoopContainerReader) ReadMapStart() (v int) { panic("ReadMapStart unsupported") }
|
||||
func (x decDriverNoopContainerReader) ReadArrayEnd() {}
|
||||
func (x decDriverNoopContainerReader) ReadMapEnd() {}
|
||||
func (x decDriverNoopContainerReader) ReadArrayElem(firstTime bool) {}
|
||||
func (x decDriverNoopContainerReader) ReadMapElemKey(firstTime bool) {}
|
||||
func (x decDriverNoopContainerReader) ReadMapElemValue() {}
|
||||
func (x decDriverNoopContainerReader) CheckBreak() (v bool) { return }
|
||||
|
||||
// ----
|
||||
|
||||
type decFnInfo struct {
|
||||
ti *typeInfo
|
||||
xfFn Ext
|
||||
xfTag uint64
|
||||
addrD bool // decoding into a pointer is preferred
|
||||
addrDf bool // force: if addrD, then decode function MUST take a ptr
|
||||
}
|
||||
|
||||
// DecodeOptions captures configuration options during decode.
|
||||
type DecodeOptions struct {
|
||||
// MapType specifies type to use during schema-less decoding of a map in the stream.
|
||||
// If nil (unset), we default to map[string]interface{} iff json handle and MapKeyAsString=true,
|
||||
// else map[interface{}]interface{}.
|
||||
MapType reflect.Type
|
||||
|
||||
// SliceType specifies type to use during schema-less decoding of an array in the stream.
|
||||
// If nil (unset), we default to []interface{} for all formats.
|
||||
SliceType reflect.Type
|
||||
|
||||
// MaxInitLen defines the maximum initial length that we "make" a collection
|
||||
// (string, slice, map, chan). If 0 or negative, we default to a sensible value
|
||||
// based on the size of an element in the collection.
|
||||
//
|
||||
// For example, when decoding, a stream may say that it has 2^64 elements.
|
||||
// We should not automatically provision a slice of that size, to prevent Out-Of-Memory crash.
|
||||
// Instead, we provision up to MaxInitLen, fill that up, and start appending after that.
|
||||
MaxInitLen int
|
||||
|
||||
// ReaderBufferSize is the size of the buffer used when reading.
|
||||
//
|
||||
// if > 0, we use a smart buffer internally for performance purposes.
|
||||
ReaderBufferSize int
|
||||
|
||||
// MaxDepth defines the maximum depth when decoding nested
|
||||
// maps and slices. If 0 or negative, we default to a suitably large number (currently 1024).
|
||||
MaxDepth int16
|
||||
|
||||
// If ErrorIfNoField, return an error when decoding a map
|
||||
// from a codec stream into a struct, and no matching struct field is found.
|
||||
ErrorIfNoField bool
|
||||
|
||||
// If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded.
|
||||
// For example, the stream contains an array of 8 items, but you are decoding into a [4]T array,
|
||||
// or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set).
|
||||
ErrorIfNoArrayExpand bool
|
||||
|
||||
// If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64).
|
||||
SignedInteger bool
|
||||
|
||||
// MapValueReset controls how we decode into a map value.
|
||||
//
|
||||
// By default, we MAY retrieve the mapping for a key, and then decode into that.
|
||||
// However, especially with big maps, that retrieval may be expensive and unnecessary
|
||||
// if the stream already contains all that is necessary to recreate the value.
|
||||
//
|
||||
// If true, we will never retrieve the previous mapping,
|
||||
// but rather decode into a new value and set that in the map.
|
||||
//
|
||||
// If false, we will retrieve the previous mapping if necessary e.g.
|
||||
// the previous mapping is a pointer, or is a struct or array with pre-set state,
|
||||
// or is an interface.
|
||||
MapValueReset bool
|
||||
|
||||
// SliceElementReset: on decoding a slice, reset the element to a zero value first.
|
||||
//
|
||||
// concern: if the slice already contained some garbage, we will decode into that garbage.
|
||||
SliceElementReset bool
|
||||
|
||||
// InterfaceReset controls how we decode into an interface.
|
||||
//
|
||||
// By default, when we see a field that is an interface{...},
|
||||
// or a map with interface{...} value, we will attempt decoding into the
|
||||
// "contained" value.
|
||||
//
|
||||
// However, this prevents us from reading a string into an interface{}
|
||||
// that formerly contained a number.
|
||||
//
|
||||
// If true, we will decode into a new "blank" value, and set that in the interface.
|
||||
// If false, we will decode into whatever is contained in the interface.
|
||||
InterfaceReset bool
|
||||
|
||||
// InternString controls interning of strings during decoding.
|
||||
//
|
||||
// Some handles, e.g. json, typically will read map keys as strings.
|
||||
// If the set of keys is finite, it may reduce allocation to
// look them up from a map (rather than allocate them afresh).
|
||||
//
|
||||
// Note: Handles will be smart when using the intern functionality.
|
||||
// Not every string should be interned.
|
||||
// An excellent use-case for interning is struct field names,
|
||||
// or map keys where key type is string.
|
||||
InternString bool
|
||||
|
||||
// PreferArrayOverSlice controls whether to decode to an array or a slice.
|
||||
//
|
||||
// This only impacts decoding into a nil interface{}.
|
||||
//
|
||||
// Consequently, it has no effect on codecgen.
|
||||
//
|
||||
// *Note*: This only applies if using go1.5 and above,
|
||||
// as it requires reflect.ArrayOf support which was absent before go1.5.
|
||||
PreferArrayOverSlice bool
|
||||
|
||||
// DeleteOnNilMapValue controls how to decode a nil value in the stream.
|
||||
//
|
||||
// If true, we will delete the mapping of the key.
|
||||
// Else, just set the mapping to the zero value of the type.
|
||||
//
|
||||
// Deprecated: This does NOTHING and is left behind only for compilation compatibility.
|
||||
// This change is necessitated because 'nil' in a stream now consistently
|
||||
// means the zero value (ie reset the value to its zero state).
|
||||
DeleteOnNilMapValue bool
|
||||
|
||||
// RawToString controls how raw bytes in a stream are decoded into a nil interface{}.
|
||||
// By default, they are decoded as []byte, but can be decoded as string (if configured).
|
||||
RawToString bool
|
||||
|
||||
// ZeroCopy controls whether decoded values of []byte or string type
|
||||
// point into the input []byte parameter passed to a NewDecoderBytes/ResetBytes(...) call.
|
||||
//
|
||||
// To illustrate, if ZeroCopy and decoding from a []byte (not an io.Reader),
|
||||
// then a []byte or string in the output result may just be a slice of (point into)
|
||||
// the input bytes.
|
||||
//
|
||||
// This optimization prevents unnecessary copying.
|
||||
//
|
||||
// However, it is made optional, as the caller MUST ensure that the input parameter []byte is
|
||||
// not modified after the Decode() happens, as any changes are mirrored in the decoded result.
|
||||
ZeroCopy bool
|
||||
|
||||
// PreferPointerForStructOrArray controls whether a struct or array
|
||||
// is stored in a nil interface{}, or a pointer to it.
|
||||
//
|
||||
// This mostly impacts when we decode registered extensions.
|
||||
PreferPointerForStructOrArray bool
|
||||
|
||||
// ValidateUnicode will cause decoding to fail if an expected unicode
// string is well-formed but includes invalid codepoints.
|
||||
//
|
||||
// This could have a performance impact.
|
||||
ValidateUnicode bool
|
||||
}
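// A minimal usage sketch (the input []byte "in" and target "v" are assumed):
// DecodeOptions is embedded in the concrete handles (via BasicHandle), so its
// fields are set directly on e.g. a JsonHandle before creating a Decoder.
//
//	var jh JsonHandle
//	jh.MapType = reflect.TypeOf(map[string]interface{}(nil)) // schema-less maps
//	jh.MaxInitLen = 1024 // cap initial allocations driven by stream-claimed lengths
//	jh.MaxDepth = 64     // fail fast on deeply nested input
//	var v interface{}
//	err := NewDecoderBytes(in, &jh).Decode(&v)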
|
||||
|
||||
// ----------------------------------------
|
||||
|
||||
type decoderBase struct {
|
||||
perType decPerType
|
||||
|
||||
h *BasicHandle
|
||||
|
||||
rtidFn, rtidFnNoExt *atomicRtidFnSlice
|
||||
|
||||
buf []byte
|
||||
|
||||
// used for interning strings
|
||||
is internerMap
|
||||
|
||||
err error
|
||||
|
||||
// sd decoderI
|
||||
|
||||
blist bytesFreeList
|
||||
|
||||
mtr bool // is maptype a known type?
|
||||
str bool // is slicetype a known type?
|
||||
jsms bool // is json handle, and MapKeyAsString
|
||||
|
||||
bytes bool // uses a bytes reader
|
||||
bufio bool // uses a ioDecReader with buffer size > 0
|
||||
|
||||
// ---- cpu cache line boundary?
|
||||
// ---- writable fields during execution --- *try* to keep in sep cache line
|
||||
maxdepth int16
|
||||
depth int16
|
||||
|
||||
// Extensions can call Decode() within a current Decode() call.
|
||||
// We need to know when the top level Decode() call returns,
|
||||
// so we can decide whether to Release() or not.
|
||||
calls uint16 // what depth in mustDecode are we in now.
|
||||
|
||||
c containerState
|
||||
|
||||
// decByteState
|
||||
|
||||
n fauxUnion
|
||||
|
||||
// b is an always-available scratch buffer used by Decoder and decDrivers.
|
||||
// By being always-available, it can be used for one-off things without
|
||||
// having to get from freelist, use, and return back to freelist.
|
||||
//
|
||||
// Use it for a narrow set of things e.g.
|
||||
// - binc uses it for parsing numbers, represented in 8 or fewer bytes
// - used as a potential buffer for struct field names
|
||||
b [decScratchByteArrayLen]byte
|
||||
|
||||
hh Handle
|
||||
// cache the mapTypeId and sliceTypeId for faster comparisons
|
||||
mtid uintptr
|
||||
stid uintptr
|
||||
}
|
||||
|
||||
func (d *decoderBase) maxInitLen() uint {
|
||||
return uint(max(1024, d.h.MaxInitLen))
|
||||
}
|
||||
|
||||
func (d *decoderBase) naked() *fauxUnion {
|
||||
return &d.n
|
||||
}
|
||||
|
||||
func (d *decoderBase) fauxUnionReadRawBytes(dr decDriverI, asString, rawToString bool) { //, handleZeroCopy bool) {
|
||||
// fauxUnion is only used within DecodeNaked calls; consequently, we should try to intern.
|
||||
d.n.l, d.n.a = dr.DecodeBytes()
|
||||
if asString || rawToString {
|
||||
d.n.v = valueTypeString
|
||||
d.n.s = d.detach2Str(d.n.l, d.n.a)
|
||||
} else {
|
||||
d.n.v = valueTypeBytes
|
||||
d.n.l = d.detach2Bytes(d.n.l, d.n.a)
|
||||
}
|
||||
}
|
||||
|
||||
// Return a fixed (detached) string representation of a []byte.
|
||||
//
|
||||
// Possibly get an interned version of a string,
|
||||
// iff InternString=true and decoding a map key.
|
||||
//
|
||||
// This should mostly be used for map keys, struct field names, etc
|
||||
// where the key type is string. This is because keys of a map/struct are
|
||||
// typically reused across many objects.
|
||||
func (d *decoderBase) detach2Str(v []byte, state dBytesAttachState) (s string) {
|
||||
// note: string([]byte) checks - and optimizes - for len 0 and len 1
|
||||
if len(v) <= 1 {
|
||||
s = string(v)
|
||||
} else if state >= dBytesAttachViewZerocopy { // !scratchBuf && d.bytes && d.h.ZeroCopy
|
||||
s = stringView(v)
|
||||
} else if d.is == nil || d.c != containerMapKey || len(v) > internMaxStrLen {
|
||||
s = string(v)
|
||||
} else {
|
||||
s = d.is.string(v)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *decoderBase) usableStructFieldNameBytes(buf, v []byte, state dBytesAttachState) (out []byte) {
|
||||
// In JSON, mapElemValue reads a colon and spaces.
|
||||
// In bufio mode of ioDecReader, fillbuf could overwrite the read buffer
|
||||
// which readXXX() calls return sub-slices from.
|
||||
//
|
||||
// Consequently, we detach the bytes in this special case.
|
||||
//
|
||||
// Note: ioDecReader (non-bufio) and bytesDecReader do not have
|
||||
// this issue (as no fillbuf exists where bytes might be returned from).
|
||||
if d.bufio && d.h.jsonHandle && state < dBytesAttachViewZerocopy {
|
||||
if cap(buf) > len(v) {
|
||||
out = buf[:len(v)]
|
||||
} else if len(d.b) > len(v) {
|
||||
out = d.b[:len(v)]
|
||||
} else {
|
||||
out = make([]byte, len(v), max(64, len(v)))
|
||||
}
|
||||
copy(out, v)
|
||||
return
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (d *decoderBase) detach2Bytes(in []byte, state dBytesAttachState) (out []byte) {
|
||||
if cap(in) == 0 || state >= dBytesAttachViewZerocopy {
|
||||
return in
|
||||
}
|
||||
if len(in) == 0 {
|
||||
return zeroByteSlice
|
||||
}
|
||||
out = make([]byte, len(in))
|
||||
copy(out, in)
|
||||
return out
|
||||
}
|
||||
|
||||
func (d *decoderBase) attachState(usingBufFromReader bool) (r dBytesAttachState) {
|
||||
if usingBufFromReader {
|
||||
r = dBytesAttachBuffer
|
||||
} else if !d.bytes {
|
||||
r = dBytesDetach
|
||||
} else if d.h.ZeroCopy {
|
||||
r = dBytesAttachViewZerocopy
|
||||
} else {
|
||||
r = dBytesAttachView
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *decoderBase) mapStart(v int) int {
|
||||
if v != containerLenNil {
|
||||
d.depthIncr()
|
||||
d.c = containerMapStart
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (d *decoderBase) HandleName() string {
|
||||
return d.hh.Name()
|
||||
}
|
||||
|
||||
func (d *decoderBase) isBytes() bool {
|
||||
return d.bytes
|
||||
}
|
||||
|
||||
type decoderI interface {
|
||||
Decode(v interface{}) (err error)
|
||||
HandleName() string
|
||||
MustDecode(v interface{})
|
||||
NumBytesRead() int
|
||||
Release() // deprecated
|
||||
Reset(r io.Reader)
|
||||
ResetBytes(in []byte)
|
||||
ResetString(s string)
|
||||
|
||||
isBytes() bool
|
||||
wrapErr(v error, err *error)
|
||||
swallow()
|
||||
|
||||
nextValueBytes() []byte // wrapper method, for use in tests
|
||||
// getDecDriver() decDriverI
|
||||
|
||||
decode(v interface{})
|
||||
decodeAs(v interface{}, t reflect.Type, ext bool)
|
||||
|
||||
interfaceExtConvertAndDecode(v interface{}, ext InterfaceExt)
|
||||
}
|
||||
|
||||
var errDecNoResetBytesWithReader = errors.New("cannot reset a Decoder reading from []byte with an io.Reader")
var errDecNoResetReaderWithBytes = errors.New("cannot reset a Decoder reading from an io.Reader with a []byte")
|
||||
|
||||
func setZero(iv interface{}) {
|
||||
rv, isnil := isNil(iv, false)
|
||||
if isnil {
|
||||
return
|
||||
}
|
||||
if !rv.IsValid() {
|
||||
rv = reflect.ValueOf(iv)
|
||||
}
|
||||
if isnilBitset.isset(byte(rv.Kind())) && rvIsNil(rv) {
|
||||
return
|
||||
}
|
||||
// var canDecode bool
|
||||
switch v := iv.(type) {
|
||||
case *string:
|
||||
*v = ""
|
||||
case *bool:
|
||||
*v = false
|
||||
case *int:
|
||||
*v = 0
|
||||
case *int8:
|
||||
*v = 0
|
||||
case *int16:
|
||||
*v = 0
|
||||
case *int32:
|
||||
*v = 0
|
||||
case *int64:
|
||||
*v = 0
|
||||
case *uint:
|
||||
*v = 0
|
||||
case *uint8:
|
||||
*v = 0
|
||||
case *uint16:
|
||||
*v = 0
|
||||
case *uint32:
|
||||
*v = 0
|
||||
case *uint64:
|
||||
*v = 0
|
||||
case *float32:
|
||||
*v = 0
|
||||
case *float64:
|
||||
*v = 0
|
||||
case *complex64:
|
||||
*v = 0
|
||||
case *complex128:
|
||||
*v = 0
|
||||
case *[]byte:
|
||||
*v = nil
|
||||
case *Raw:
|
||||
*v = nil
|
||||
case *time.Time:
|
||||
*v = time.Time{}
|
||||
case reflect.Value:
|
||||
decSetNonNilRV2Zero(v)
|
||||
default:
|
||||
if !fastpathDecodeSetZeroTypeSwitch(iv) {
|
||||
decSetNonNilRV2Zero(rv)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// decSetNonNilRV2Zero will set the non-nil value to its zero value.
|
||||
func decSetNonNilRV2Zero(v reflect.Value) {
|
||||
// If not decodeable (settable), we do not touch it.
|
||||
// We considered emptying it if not decodeable e.g.
|
||||
// - if chan, drain it
|
||||
// - if map, clear it
|
||||
// - if slice or array, zero all elements up to len
|
||||
//
|
||||
// However, we decided instead that we either will set the
|
||||
// whole value to the zero value, or leave AS IS.
|
||||
|
||||
k := v.Kind()
|
||||
if k == reflect.Interface {
|
||||
decSetNonNilRV2Zero4Intf(v)
|
||||
} else if k == reflect.Ptr {
|
||||
decSetNonNilRV2Zero4Ptr(v)
|
||||
} else if v.CanSet() {
|
||||
rvSetDirectZero(v)
|
||||
}
|
||||
}
|
||||
|
||||
func decSetNonNilRV2Zero4Ptr(v reflect.Value) {
|
||||
ve := v.Elem()
|
||||
if ve.CanSet() {
|
||||
rvSetZero(ve) // we can have a pointer to an interface
|
||||
} else if v.CanSet() {
|
||||
rvSetZero(v)
|
||||
}
|
||||
}
|
||||
|
||||
func decSetNonNilRV2Zero4Intf(v reflect.Value) {
|
||||
ve := v.Elem()
|
||||
if ve.CanSet() {
|
||||
rvSetDirectZero(ve) // interfaces always have element as a non-interface
|
||||
} else if v.CanSet() {
|
||||
rvSetZero(v)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoderBase) arrayCannotExpand(sliceLen, streamLen int) {
|
||||
if d.h.ErrorIfNoArrayExpand {
|
||||
halt.errorf("cannot expand array len during decode from %v to %v", any(sliceLen), any(streamLen))
|
||||
}
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func (d *decoderBase) haltAsNotDecodeable(rv reflect.Value) {
|
||||
if !rv.IsValid() {
|
||||
halt.onerror(errCannotDecodeIntoNil)
|
||||
}
|
||||
// check if an interface can be retrieved, before grabbing an interface
|
||||
if !rv.CanInterface() {
|
||||
halt.errorf("cannot decode into a value without an interface: %v", rv)
|
||||
}
|
||||
halt.errorf("cannot decode into value of kind: %v, %#v", rv.Kind(), rv2i(rv))
|
||||
}
|
||||
|
||||
func (d *decoderBase) depthIncr() {
|
||||
d.depth++
|
||||
if d.depth >= d.maxdepth {
|
||||
halt.onerror(errMaxDepthExceeded)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *decoderBase) depthDecr() {
|
||||
d.depth--
|
||||
}
|
||||
|
||||
func (d *decoderBase) arrayStart(v int) int {
|
||||
if v != containerLenNil {
|
||||
d.depthIncr()
|
||||
d.c = containerArrayStart
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (d *decoderBase) oneShotAddrRV(rvt reflect.Type, rvk reflect.Kind) reflect.Value {
|
||||
// MARKER 2025: is this slow for calling oneShot?
|
||||
if decUseTransient && d.h.getTypeInfo4RT(baseRT(rvt)).flagCanTransient {
|
||||
return d.perType.TransientAddrK(rvt, rvk)
|
||||
}
|
||||
return rvZeroAddrK(rvt, rvk)
|
||||
}
|
||||
|
||||
// decNegintPosintFloatNumberHelper is used for formats that are binary
|
||||
// and have distinct ways of storing positive integers vs negative integers
|
||||
// vs floats, which are uniquely identified by the byte descriptor.
|
||||
//
|
||||
// Currently, these formats are binc, cbor and simple.
|
||||
type decNegintPosintFloatNumberHelper struct {
|
||||
d decDriverI
|
||||
}
|
||||
|
||||
func (x decNegintPosintFloatNumberHelper) uint64(ui uint64, neg, ok bool) uint64 {
|
||||
if ok && !neg {
|
||||
return ui
|
||||
}
|
||||
return x.uint64TryFloat(ok)
|
||||
}
|
||||
|
||||
func (x decNegintPosintFloatNumberHelper) uint64TryFloat(neg bool) (ui uint64) {
|
||||
if neg { // neg = true
|
||||
halt.errorStr("assigning negative signed value to unsigned type")
|
||||
}
|
||||
f, ok := x.d.decFloat()
|
||||
if !(ok && f >= 0 && noFrac64(math.Float64bits(f))) {
|
||||
halt.errorStr2("invalid number loading uint64, with descriptor: ", x.d.descBd())
|
||||
}
|
||||
return uint64(f)
|
||||
}
|
||||
|
||||
func (x decNegintPosintFloatNumberHelper) int64(ui uint64, neg, ok, cbor bool) (i int64) {
|
||||
if ok {
|
||||
return decNegintPosintFloatNumberHelperInt64v(ui, neg, cbor)
|
||||
}
|
||||
// return x.int64TryFloat()
|
||||
// }
|
||||
// func (x decNegintPosintFloatNumberHelper) int64TryFloat() (i int64) {
|
||||
f, ok := x.d.decFloat()
|
||||
if !(ok && noFrac64(math.Float64bits(f))) {
|
||||
halt.errorf("invalid number loading uint64 (%v), with descriptor: %s", f, x.d.descBd())
|
||||
}
|
||||
return int64(f)
|
||||
}
|
||||
|
||||
func (x decNegintPosintFloatNumberHelper) float64(f float64, ok, cbor bool) float64 {
|
||||
if ok {
|
||||
return f
|
||||
}
|
||||
return x.float64TryInteger(cbor)
|
||||
}
|
||||
|
||||
func (x decNegintPosintFloatNumberHelper) float64TryInteger(cbor bool) float64 {
|
||||
ui, neg, ok := x.d.decInteger()
|
||||
if !ok {
|
||||
halt.errorStr2("invalid descriptor for float: ", x.d.descBd())
|
||||
}
|
||||
return float64(decNegintPosintFloatNumberHelperInt64v(ui, neg, cbor))
|
||||
}
|
||||
|
||||
func decNegintPosintFloatNumberHelperInt64v(ui uint64, neg, incrIfNeg bool) (i int64) {
|
||||
if neg && incrIfNeg {
|
||||
ui++
|
||||
}
|
||||
i = chkOvf.SignedIntV(ui)
|
||||
if neg {
|
||||
i = -i
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// isDecodeable checks if value can be decoded into
|
||||
//
|
||||
// decode can take any reflect.Value that is inherently addressable i.e.
|
||||
// - non-nil chan (we will SEND to it)
|
||||
// - non-nil slice (we will set its elements)
|
||||
// - non-nil map (we will put into it)
|
||||
// - non-nil pointer (we can "update" it)
|
||||
// - func: no
|
||||
// - interface: no
|
||||
// - array: if canAddr=true
|
||||
// - any other value pointer: if canAddr=true
|
||||
func isDecodeable(rv reflect.Value) (canDecode bool, reason decNotDecodeableReason) {
|
||||
switch rv.Kind() {
|
||||
case reflect.Ptr, reflect.Slice, reflect.Chan, reflect.Map:
|
||||
canDecode = !rvIsNil(rv)
|
||||
reason = decNotDecodeableReasonNilReference
|
||||
case reflect.Func, reflect.Interface, reflect.Invalid, reflect.UnsafePointer:
|
||||
reason = decNotDecodeableReasonBadKind
|
||||
default:
|
||||
canDecode = rv.CanAddr()
|
||||
reason = decNotDecodeableReasonNonAddrValue
|
||||
}
|
||||
return
|
||||
}
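// For illustration (each value below exercises one branch above):
//
//	isDecodeable(reflect.ValueOf(new(int)))            // non-nil pointer        => true
//	isDecodeable(reflect.ValueOf(map[string]int(nil))) // nil map                => false (nil reference)
//	isDecodeable(reflect.ValueOf(struct{ A int }{}))   // non-addressable struct => false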
|
||||
|
||||
// decInferLen will infer a sensible length, given the following:
|
||||
// - clen: length wanted.
|
||||
// - maxlen: max length to be returned.
|
||||
// if <= 0, it is unset, and we infer it based on the unit size
|
||||
// - unit: number of bytes for each element of the collection
|
||||
func decInferLen(clen int, maxlen, unit uint) (n uint) {
|
||||
// anecdotal testing showed an increase in allocation with a map length of 16.
// We saw the same typical alloc from 0-8, then a 20% increase at 16.
|
||||
// Thus, we set it to 8.
|
||||
|
||||
const (
|
||||
minLenIfUnset = 8
|
||||
maxMem = 1024 * 1024 // 1 MB Memory
|
||||
)
|
||||
|
||||
// handle when maxlen is not set i.e. <= 0
|
||||
|
||||
// clen==0: use 0
|
||||
// maxlen<=0, clen<0: use default
|
||||
// maxlen> 0, clen<0: use default
|
||||
// maxlen<=0, clen>0: infer maxlen, and cap on it
|
||||
// maxlen> 0, clen>0: cap at maxlen
|
||||
|
||||
if clen == 0 || clen == containerLenNil {
|
||||
return 0
|
||||
}
|
||||
if clen < 0 {
|
||||
// if clen is unspecified, return 64/unit with a floor of 8 (e.g. 64 for bytes, 8 for uint64)
|
||||
return max(64/unit, minLenIfUnset)
|
||||
}
|
||||
if unit == 0 {
|
||||
return uint(clen)
|
||||
}
|
||||
if maxlen == 0 {
|
||||
maxlen = maxMem / unit
|
||||
}
|
||||
return min(uint(clen), maxlen)
|
||||
}
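// Worked examples of the rules above:
//
//	decInferLen(-1, 0, 1)    // unknown length, 1-byte unit => 64
//	decInferLen(-1, 0, 8)    // unknown length, 8-byte unit => 8
//	decInferLen(10, 0, 8)    // small known length          => 10
//	decInferLen(1<<30, 0, 8) // huge claimed length         => 131072 (capped at maxMem/unit)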
|
||||
|
||||
type Decoder struct {
|
||||
decoderI
|
||||
}
|
||||
|
||||
// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
|
||||
//
|
||||
// For efficiency, Users are encouraged to configure ReaderBufferSize on the handle
|
||||
// OR pass in a memory buffered reader (eg bufio.Reader, bytes.Buffer).
|
||||
func NewDecoder(r io.Reader, h Handle) *Decoder {
|
||||
return &Decoder{h.newDecoder(r)}
|
||||
}
|
||||
|
||||
// NewDecoderBytes returns a Decoder which efficiently decodes directly
|
||||
// from a byte slice with zero copying.
|
||||
func NewDecoderBytes(in []byte, h Handle) *Decoder {
|
||||
return &Decoder{h.newDecoderBytes(in)}
|
||||
}
|
||||
|
||||
// NewDecoderString returns a Decoder which efficiently decodes directly
|
||||
// from a string with zero copying.
|
||||
//
|
||||
// It is a convenience function that calls NewDecoderBytes with a
|
||||
// []byte view into the string.
|
||||
//
|
||||
// This is efficient and zero-copy when using the default mode i.e. without the codec.safe tag.
|
||||
func NewDecoderString(s string, h Handle) *Decoder {
|
||||
return NewDecoderBytes(bytesView(s), h)
|
||||
}
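// Sketch (an io.Reader "r", a string "s" holding a document, and a target "v"
// are assumed):
//
//	var jh JsonHandle
//	jh.ReaderBufferSize = 4096     // buffer reads from r internally
//	d1 := NewDecoder(r, &jh)
//	d2 := NewDecoderString(s, &jh) // zero-copy view over s in default (unsafe) mode
//	err := d2.Decode(&v)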
|
||||
|
||||
// ----
|
||||
|
||||
func sideDecode(h Handle, p *sync.Pool, fn func(decoderI)) {
|
||||
var s decoderI
|
||||
if usePoolForSideDecode {
|
||||
s = p.Get().(decoderI)
|
||||
defer p.Put(s)
|
||||
} else {
|
||||
// initialization cycle error
|
||||
// s = NewDecoderBytes(nil, h).decoderI
|
||||
s = p.New().(decoderI)
|
||||
}
|
||||
fn(s)
|
||||
}
|
||||
|
||||
func oneOffDecode(sd decoderI, v interface{}, in []byte, basetype reflect.Type, ext bool) {
|
||||
sd.ResetBytes(in)
|
||||
sd.decodeAs(v, basetype, ext)
|
||||
// d.sideDecoder(xbs)
|
||||
// d.sideDecode(rv, basetype)
|
||||
}
|
||||
|
||||
func bytesOKdbi(v []byte, _ dBytesIntoState) []byte {
|
||||
return v
|
||||
}
|
||||
|
||||
func bytesOKs(bs []byte, _ dBytesAttachState) []byte {
|
||||
return bs
|
||||
}
|
||||
1961
vendor/github.com/ugorji/go/codec/decode.go
generated
vendored
Normal file
File diff suppressed because it is too large
330
vendor/github.com/ugorji/go/codec/doc.go
generated
vendored
Normal file
@@ -0,0 +1,330 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package codec provides a High Performance, Feature-Rich Idiomatic Go
|
||||
codec/encoding library for binc, msgpack, cbor, json.
|
||||
|
||||
Supported Serialization formats are:
|
||||
|
||||
- msgpack: https://github.com/msgpack/msgpack
|
||||
- binc: http://github.com/ugorji/binc
|
||||
- cbor: http://cbor.io http://tools.ietf.org/html/rfc7049
|
||||
- json: http://json.org http://tools.ietf.org/html/rfc7159
|
||||
- simple: (unpublished)
|
||||
|
||||
For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer .
|
||||
|
||||
The idiomatic Go support is as seen in other encoding packages in
|
||||
the standard library (ie json, xml, gob, etc).
|
||||
|
||||
Rich Feature Set includes:
|
||||
|
||||
- Simple but extremely powerful and feature-rich API
|
||||
- Support for go 1.21 and above, selectively using newer APIs for later releases
|
||||
- Excellent code coverage ( ~ 85-90% )
|
||||
- Very High Performance, significantly outperforming libraries for Gob, Json, Bson, etc
|
||||
- Careful selected use of 'unsafe' for targeted performance gains.
|
||||
- 100% safe mode supported, where 'unsafe' is not used at all.
|
||||
- Lock-free (sans mutex) concurrency for scaling to 100's of cores
|
||||
- In-place updates during decode, with option to zero value in maps and slices prior to decode
|
||||
- Coerce types where appropriate e.g. decode an int in the stream into a
|
||||
float, decode numbers from formatted strings, etc
|
||||
- Corner Cases: Overflows, nil maps/slices, nil values in streams are handled correctly
|
||||
- Standard field renaming via tags
|
||||
- Support for omitting empty fields during an encoding
|
||||
- Encoding from any value and decoding into pointer to any value (struct,
|
||||
slice, map, primitives, pointers, interface{}, etc)
|
||||
- Extensions to support efficient encoding/decoding of any named types
|
||||
- Support encoding.(Binary|Text)(M|Unm)arshaler interfaces
|
||||
- Support using existence of `IsZero() bool` to determine if a zero value
|
||||
- Decoding without a schema (into a interface{}). Includes Options to
|
||||
configure what specific map or slice type to use when decoding an encoded
|
||||
list or map into a nil interface{}
|
||||
- Mapping a non-interface type to an interface, so we can decode appropriately
|
||||
into any interface type with a correctly configured non-interface value.
|
||||
- Encode a struct as an array, and decode struct from an array in the data stream
|
||||
- Option to encode struct keys as numbers (instead of strings) (to support
|
||||
structured streams with fields encoded as numeric codes)
|
||||
- Comprehensive support for anonymous fields
|
||||
- Fast (no-reflection) encoding/decoding of common maps and slices
|
||||
- Code-generation for faster performance, supported in go 1.6+
|
||||
- Support binary (e.g. messagepack, cbor) and text (e.g. json) formats
|
||||
- Support indefinite-length formats to enable true streaming (for formats
|
||||
which support it e.g. json, cbor)
|
||||
- Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes.
|
||||
This mostly applies to maps, where iteration order is non-deterministic.
|
||||
- NIL in data stream decoded as zero value
|
||||
- Never silently skip data when decoding. User decides whether to return an
|
||||
error or silently skip data when keys or indexes in the data stream do not
|
||||
map to fields in the struct.
|
||||
- Detect and error when encoding a cyclic reference (instead of stack overflow shutdown)
|
||||
- Encode/Decode from/to chan types (for iterative streaming support)
|
||||
- Drop-in replacement for encoding/json. `json:` key in struct tag supported.
|
||||
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
|
||||
- Handle unique idiosyncrasies of codecs e.g. For messagepack,
|
||||
configure how ambiguities in handling raw bytes are resolved and provide
|
||||
rpc server/client codec to support msgpack-rpc protocol defined at:
|
||||
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
|
||||
|
||||
# Supported build tags
|
||||
|
||||
We gain performance by code-generating fast-paths for slices and maps of built-in types,
|
||||
and monomorphizing generic code explicitly so we gain inlining and de-virtualization benefits.
|
||||
|
||||
The results are 20-50% performance improvements over v1.2.
|
||||
|
||||
Building and running is configured using build tags as below.
|
||||
|
||||
At runtime:
|
||||
|
||||
- codec.safe: run in safe mode (not using unsafe optimizations)
|
||||
- codec.notmono: use generics code (bypassing performance-boosting monomorphized code)
|
||||
- codec.notfastpath: skip fast path code for slices and maps of built-in types (number, bool, string, bytes)
|
||||
|
||||
Each of these "runtime" tags have a convenience synonym i.e. safe, notmono, notfastpath.
|
||||
Pls use these mostly during development - use codec.XXX in your go files.
|
||||
|
||||
Build only:
|
||||
|
||||
- codec.build: used to generate fastpath and monomorphization code
|
||||
|
||||
Test only:
|
||||
|
||||
- codec.notmammoth: skip the mammoth generated tests
|
||||
|
||||
# Extension Support
|
||||
|
||||
Users can register a function to handle the encoding or decoding of
|
||||
their custom types.
|
||||
|
||||
There are no restrictions on what the custom type can be. Some examples:
|
||||
|
||||
type BisSet []int
|
||||
type BitSet64 uint64
|
||||
type UUID string
|
||||
type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
|
||||
type GifImage struct { ... }
|
||||
|
||||
As an illustration, MyStructWithUnexportedFields would normally be
|
||||
encoded as an empty map because it has no exported fields, while UUID
|
||||
would be encoded as a string. However, with extension support, you can
|
||||
encode any of these however you like.
|
||||
|
||||
There is also seamless support provided for registering an extension (with a tag)
|
||||
but letting the encoding mechanism default to the standard way.
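As a sketch (the tag number 78, the uuidExt type and its conversion to a string
are illustrative assumptions, not requirements of the package), registering an
InterfaceExt for the UUID type above could look like:

	type uuidExt struct{}

	func (uuidExt) ConvertExt(v interface{}) interface{}     { return string(v.(UUID)) }
	func (uuidExt) UpdateExt(dst interface{}, v interface{}) { *dst.(*UUID) = UUID(v.(string)) }

	var ch codec.CborHandle
	err := ch.SetInterfaceExt(reflect.TypeOf(UUID("")), 78, uuidExt{})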
|
||||
|
||||
# Custom Encoding and Decoding
|
||||
|
||||
This package maintains symmetry in the encoding and decoding halves.
|
||||
We determine how to encode or decode by walking this decision tree
|
||||
|
||||
- is there an extension registered for the type?
|
||||
- is type a codec.Selfer?
|
||||
- is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler?
|
||||
- is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler?
|
||||
- is format text-based, and type an encoding.TextMarshaler and TextUnmarshaler?
|
||||
- else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc
|
||||
|
||||
This symmetry is important to reduce chances of issues happening because the
|
||||
encoding and decoding sides are out of sync e.g. decoded via very specific
|
||||
encoding.TextUnmarshaler but encoded via kind-specific generalized mode.
|
||||
|
||||
Consequently, if a type only defines one-half of the symmetry
|
||||
(e.g. it implements UnmarshalJSON() but not MarshalJSON() ),
|
||||
then that type doesn't satisfy the check and we will continue walking down the
|
||||
decision tree.
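For instance (MyType and its Name field are assumed for illustration), a type takes
full control of both halves by implementing Selfer:

	func (x *MyType) CodecEncodeSelf(e *codec.Encoder) { e.MustEncode(x.Name) }
	func (x *MyType) CodecDecodeSelf(d *codec.Decoder) { d.MustDecode(&x.Name) }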
|
||||
|
||||
# RPC
|
||||
|
||||
RPC Client and Server Codecs are implemented, so the codecs can be used
|
||||
with the standard net/rpc package.
|
||||
|
||||
# Usage
|
||||
|
||||
The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification.
|
||||
|
||||
The Encoder and Decoder are NOT safe for concurrent use.
|
||||
|
||||
Consequently, the usage model is basically:
|
||||
|
||||
- Create and initialize the Handle before any use.
|
||||
Once created, DO NOT modify it.
|
||||
- Multiple Encoders or Decoders can now use the Handle concurrently.
|
||||
They only read information off the Handle (never write).
|
||||
- However, each Encoder or Decoder MUST not be used concurrently
|
||||
- To re-use an Encoder/Decoder, call Reset(...) on it first.
|
||||
  This allows you to reuse state maintained on the Encoder/Decoder.
|
||||
|
||||
Sample usage model:
|
||||
|
||||
// create and configure Handle
|
||||
var (
|
||||
bh codec.BincHandle
|
||||
mh codec.MsgpackHandle
|
||||
ch codec.CborHandle
|
||||
)
|
||||
|
||||
mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
|
||||
|
||||
// configure extensions
|
||||
// e.g. for msgpack, define functions and enable Time support for tag 1
|
||||
// mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt)
|
||||
|
||||
// create and use decoder/encoder
|
||||
var (
|
||||
r io.Reader
|
||||
w io.Writer
|
||||
b []byte
|
||||
h = &bh // or mh to use msgpack
|
||||
)
|
||||
|
||||
dec = codec.NewDecoder(r, h)
|
||||
dec = codec.NewDecoderBytes(b, h)
|
||||
err = dec.Decode(&v)
|
||||
|
||||
enc = codec.NewEncoder(w, h)
|
||||
enc = codec.NewEncoderBytes(&b, h)
|
||||
err = enc.Encode(v)
|
||||
|
||||
//RPC Server
|
||||
go func() {
|
||||
for {
|
||||
conn, err := listener.Accept()
|
||||
rpcCodec := codec.GoRpc.ServerCodec(conn, h)
|
||||
//OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
|
||||
rpc.ServeCodec(rpcCodec)
|
||||
}
|
||||
}()
|
||||
|
||||
//RPC Communication (client side)
|
||||
conn, err = net.Dial("tcp", "localhost:5555")
|
||||
rpcCodec := codec.GoRpc.ClientCodec(conn, h)
|
||||
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
|
||||
client := rpc.NewClientWithCodec(rpcCodec)
|
||||
|
||||
# Running Tests
|
||||
|
||||
To run tests, use the following:
|
||||
|
||||
go test
|
||||
|
||||
To run the full suite of tests, use the following:
|
||||
|
||||
go test -tags codec.alltests -run Suite
|
||||
|
||||
You can run the tag 'codec.safe' to run tests or build in safe mode. e.g.
|
||||
|
||||
go test -tags codec.safe -run Json
|
||||
go test -tags "codec.alltests codec.safe" -run Suite
|
||||
|
||||
You can run the tag 'codec.notmono' to build bypassing the monomorphized code e.g.
|
||||
|
||||
go test -tags codec.notmono -run Json
|
||||
|
||||
# Running Benchmarks
|
||||
|
||||
cd bench
|
||||
go test -bench . -benchmem -benchtime 1s
|
||||
|
||||
Please see http://github.com/ugorji/go-codec-bench .
|
||||
|
||||
# Caveats
|
||||
|
||||
Struct fields matching the following are ignored during encoding and decoding
|
||||
- struct tag value set to -
|
||||
- func, complex numbers, unsafe pointers
|
||||
- unexported and not embedded
|
||||
- unexported and embedded and not struct kind
|
||||
- unexported and embedded pointers (from go1.10)
|
||||
|
||||
Every other field in a struct will be encoded/decoded.
|
||||
|
||||
Embedded fields are encoded as if they exist in the top-level struct,
|
||||
with some caveats. See Encode documentation.
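A sketch of the tag forms referenced above (the field names are illustrative):

	type T struct {
		Name    string `codec:"name"`            // renamed in the stream
		Count   int    `codec:"count,omitempty"` // omitted when empty
		Ignored string `codec:"-"`               // never encoded/decoded
		note    string                           // unexported, not embedded: ignored
	}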
|
||||
*/
|
||||
package codec
|
||||
|
||||
/*
|
||||
Generics
|
||||
|
||||
Generics are used across the board to reduce boilerplate, and hopefully
|
||||
improve performance by
|
||||
- reducing need for interface calls (de-virtualization)
|
||||
- resultant inlining of those calls
|
||||
|
||||
encoder/decoder --> Driver (json/cbor/...) --> input/output (bytes or io abstraction)
|
||||
|
||||
There are 2 * 5 * 2 (20) combinations of monomorphized values.
|
||||
|
||||
Key rules
|
||||
- do not use top-level generic functions.
|
||||
Due to type inference, monomorphizing them proves challenging
|
||||
- only use generic methods.
|
||||
Monomorphizing is done at the type once, and method names need not change
|
||||
- do not have methods take a parameter of an encWriter or decReader.
|
||||
All those calls are handled directly by the driver.
|
||||
- Include a helper type for each parameterized thing, and add all generic functions to them e.g.
|
||||
helperEncWriter[T encWriter]
|
||||
helperEncReader[T decReader]
|
||||
helperEncDriver[T encDriver]
|
||||
helperDecDriver[T decDriver]
|
||||
- Always use T as the generic type name (when needed)
|
||||
- No inline types
|
||||
- No closures taking parameters of generic types
|
||||
|
||||
*/
|
||||
/*
|
||||
Naming convention:
|
||||
|
||||
Currently, as generic and non-generic types/functions/vars are put in the same files,
|
||||
we suffer because:
|
||||
- build takes longer as non-generic code is built when a build tag wants only monomorphised code
|
||||
- files have many lines which are not used at runtime (due to type parameters)
|
||||
- code coverage is inaccurate on a single run
|
||||
|
||||
To resolve this, we are streamlining our file naming strategy.
|
||||
|
||||
Basically, we will have the following nomenclature for filenames:
|
||||
- fastpath (tag:notfastpath): *.notfastpath.*.go vs *.fastpath.*.go
|
||||
- typed parameters (tag:notmono): *.notmono.*.go vs *.mono.*.go
|
||||
- safe (tag:safe): *.safe.*.go vs *.unsafe.go
|
||||
- generated files: *.generated.go
|
||||
- all others (tags:N/A): *.go without safe/mono/fastpath/generated in the name
|
||||
|
||||
The following files will be affected and split/renamed accordingly
|
||||
|
||||
Base files:
|
||||
- binc.go
|
||||
- cbor.go
|
||||
- json.go
|
||||
- msgpack.go
|
||||
- simple.go
|
||||
- decode.go
|
||||
- encode.go
|
||||
|
||||
For each base file, split into __file__.go (containing type parameters) and __file__.base.go.
|
||||
__file__.go will only build with notmono.
|
||||
|
||||
Other files:
|
||||
- fastpath.generated.go -> base.fastpath.generated.go and base.fastpath.notmono.generated.go
|
||||
- fastpath.not.go -> base.notfastpath.go
|
||||
- init.go -> init.notmono.go
|
||||
|
||||
Appropriate build tags will be included in the files, and the right ones only used for
|
||||
monomorphization.
|
||||
*/
|
||||
/*
|
||||
Caching Handle options for fast runtime use
|
||||
|
||||
If using cached values from Handle options, then
|
||||
- re-cache them at each reset() call
|
||||
- reset is always called at the start of each (Must)(En|De)code
|
||||
- which calls (en|de)coder.reset([]byte|io.Reader|String)
|
||||
- which calls (en|de)cDriver.reset()
|
||||
- at reset, (en|de)c(oder|Driver) can re-cache Handle options before each run
|
||||
|
||||
Some examples:
|
||||
- json: e.rawext,di,d,ks,is / d.rawext
|
||||
- decode: (decoderBase) d.jsms,mtr,str,
|
||||
*/
|
||||
461
vendor/github.com/ugorji/go/codec/encode.base.go
generated
vendored
Normal file
@@ -0,0 +1,461 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var errEncoderNotInitialized = errors.New("encoder not initialized")
|
||||
|
||||
var encBuiltinRtids []uintptr
|
||||
|
||||
func init() {
|
||||
for _, v := range []interface{}{
|
||||
(string)(""),
|
||||
(bool)(false),
|
||||
(int)(0),
|
||||
(int8)(0),
|
||||
(int16)(0),
|
||||
(int32)(0),
|
||||
(int64)(0),
|
||||
(uint)(0),
|
||||
(uint8)(0),
|
||||
(uint16)(0),
|
||||
(uint32)(0),
|
||||
(uint64)(0),
|
||||
(uintptr)(0),
|
||||
(float32)(0),
|
||||
(float64)(0),
|
||||
(complex64)(0),
|
||||
(complex128)(0),
|
||||
(time.Time{}),
|
||||
([]byte)(nil),
|
||||
(Raw{}),
|
||||
// (interface{})(nil),
|
||||
} {
|
||||
t := reflect.TypeOf(v)
|
||||
encBuiltinRtids = append(encBuiltinRtids, rt2id(t), rt2id(reflect.PointerTo(t)))
|
||||
}
|
||||
slices.Sort(encBuiltinRtids)
|
||||
}
|
||||
|
||||
// encDriver abstracts the actual codec (binc vs msgpack, etc)
|
||||
type encDriverI interface {
|
||||
EncodeNil()
|
||||
EncodeInt(i int64)
|
||||
EncodeUint(i uint64)
|
||||
EncodeBool(b bool)
|
||||
EncodeFloat32(f float32)
|
||||
EncodeFloat64(f float64)
|
||||
// re is never nil
|
||||
EncodeRawExt(re *RawExt)
|
||||
// ext is never nil
|
||||
EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext)
|
||||
// EncodeString using cUTF8, honor'ing StringToRaw flag
|
||||
EncodeString(v string)
|
||||
EncodeStringNoEscape4Json(v string)
|
||||
// encode a non-nil []byte
|
||||
EncodeStringBytesRaw(v []byte)
|
||||
// encode a []byte as nil, empty or encoded sequence of bytes depending on context
|
||||
EncodeBytes(v []byte)
|
||||
EncodeTime(time.Time)
|
||||
WriteArrayStart(length int)
|
||||
WriteArrayEnd()
|
||||
WriteMapStart(length int)
|
||||
WriteMapEnd()
|
||||
|
||||
// these write a zero-len map or array into the stream
|
||||
WriteMapEmpty()
|
||||
WriteArrayEmpty()
|
||||
|
||||
writeNilMap()
|
||||
writeNilArray()
|
||||
writeNilBytes()
|
||||
|
||||
// these are no-op except for json
|
||||
encDriverContainerTracker
|
||||
|
||||
// reset will reset current encoding runtime state, and cached information from the handle
|
||||
reset()
|
||||
|
||||
atEndOfEncode()
|
||||
writerEnd()
|
||||
|
||||
writeBytesAsis(b []byte)
|
||||
// writeStringAsisDblQuoted(v string)
|
||||
|
||||
resetOutBytes(out *[]byte)
|
||||
resetOutIO(out io.Writer)
|
||||
|
||||
init(h Handle, shared *encoderBase, enc encoderI) (fp interface{})
|
||||
|
||||
// driverStateManager
|
||||
}
|
||||
|
||||
type encInit2er struct{}
|
||||
|
||||
func (encInit2er) init2(enc encoderI) {}
|
||||
|
||||
type encDriverContainerTracker interface {
|
||||
WriteArrayElem(firstTime bool)
|
||||
WriteMapElemKey(firstTime bool)
|
||||
WriteMapElemValue()
|
||||
}
|
||||
|
||||
type encDriverNoState struct{}
|
||||
|
||||
// func (encDriverNoState) captureState() interface{} { return nil }
|
||||
// func (encDriverNoState) resetState() {}
|
||||
// func (encDriverNoState) restoreState(v interface{}) {}
|
||||
func (encDriverNoState) reset() {}
|
||||
|
||||
type encDriverNoopContainerWriter struct{}
|
||||
|
||||
func (encDriverNoopContainerWriter) WriteArrayStart(length int) {}
|
||||
func (encDriverNoopContainerWriter) WriteArrayEnd() {}
|
||||
func (encDriverNoopContainerWriter) WriteMapStart(length int) {}
|
||||
func (encDriverNoopContainerWriter) WriteMapEnd() {}
|
||||
func (encDriverNoopContainerWriter) atEndOfEncode() {}
|
||||
|
||||
// encStructFieldObj[Slice] is used for sorting when there are missing fields and canonical flag is set
|
||||
type encStructFieldObj struct {
|
||||
key string
|
||||
rv reflect.Value
|
||||
intf interface{}
|
||||
isRv bool
|
||||
noEsc4json bool
|
||||
builtin bool
|
||||
}
|
||||
|
||||
type encStructFieldObjSlice []encStructFieldObj
|
||||
|
||||
func (p encStructFieldObjSlice) Len() int { return len(p) }
|
||||
func (p encStructFieldObjSlice) Swap(i, j int) { p[uint(i)], p[uint(j)] = p[uint(j)], p[uint(i)] }
|
||||
func (p encStructFieldObjSlice) Less(i, j int) bool {
|
||||
return p[uint(i)].key < p[uint(j)].key
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
type orderedRv[T cmp.Ordered] struct {
|
||||
v T
|
||||
r reflect.Value
|
||||
}
|
||||
|
||||
func cmpOrderedRv[T cmp.Ordered](v1, v2 orderedRv[T]) int {
|
||||
return cmp.Compare(v1.v, v2.v)
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
type encFnInfo struct {
|
||||
ti *typeInfo
|
||||
xfFn Ext
|
||||
xfTag uint64
|
||||
addrE bool
|
||||
// addrEf bool // force: if addrE, then encode function MUST take a ptr
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
// EncodeOptions captures configuration options during encode.
|
||||
type EncodeOptions struct {
|
||||
// WriterBufferSize is the size of the buffer used when writing.
|
||||
//
|
||||
// if > 0, we use a smart buffer internally for performance purposes.
|
||||
WriterBufferSize int
|
||||
|
||||
// ChanRecvTimeout is the timeout used when selecting from a chan.
|
||||
//
|
||||
// Configuring this controls how we receive from a chan during the encoding process.
|
||||
// - If ==0, we only consume the elements currently available in the chan.
|
||||
// - if <0, we consume until the chan is closed.
|
||||
// - If >0, we consume until this timeout.
|
||||
ChanRecvTimeout time.Duration
|
||||
|
||||
// StructToArray specifies to encode a struct as an array, and not as a map
|
||||
StructToArray bool
|
||||
|
||||
// Canonical representation means that encoding a value will always result in the same
|
||||
// sequence of bytes.
|
||||
//
|
||||
// This only affects maps, as the iteration order for maps is random.
|
||||
//
|
||||
// The implementation MAY use the natural sort order for the map keys if possible:
|
||||
//
|
||||
// - If there is a natural sort order (ie for number, bool, string or []byte keys),
|
||||
// then the map keys are first sorted in natural order and then written
|
||||
// with corresponding map values to the stream.
|
||||
// - If there is no natural sort order, then the map keys will first be
|
||||
// encoded into []byte, and then sorted,
|
||||
// before writing the sorted keys and the corresponding map values to the stream.
|
||||
//
|
||||
Canonical bool
|
||||
|
||||
// CheckCircularRef controls whether we check for circular references
|
||||
// and error fast during an encode.
|
||||
//
|
||||
// If enabled, an error is received if a pointer to a struct
|
||||
// references itself either directly or through one of its fields (iteratively).
|
||||
//
|
||||
// This is opt-in, as there may be a performance hit to checking circular references.
|
||||
CheckCircularRef bool
|
||||
|
||||
// RecursiveEmptyCheck controls how we determine whether a value is empty.
|
||||
//
|
||||
// If true, we descend into interfaces and pointers to recursively check if the value is empty.
|
||||
//
|
||||
// We *might* check struct fields one by one to see if empty
|
||||
// (if we cannot directly check if a struct value is equal to its zero value).
|
||||
// If so, we honor IsZero, Comparable, IsCodecEmpty(), etc.
|
||||
// Note: This *may* make OmitEmpty more expensive due to the large number of reflect calls.
|
||||
//
|
||||
// If false, we check if the value is equal to its zero value (newly allocated state).
|
||||
RecursiveEmptyCheck bool
|
||||
|
||||
// Raw controls whether we encode Raw values.
|
||||
// This is a "dangerous" option and must be explicitly set.
|
||||
// If set, we blindly encode Raw values as-is, without checking
|
||||
// if they are a correct representation of a value in that format.
|
||||
// If unset, we error out.
|
||||
Raw bool
|
||||
|
||||
// StringToRaw controls how strings are encoded.
|
||||
//
|
||||
// As a go string is just an (immutable) sequence of bytes,
|
||||
// it can be encoded either as raw bytes or as a UTF string.
|
||||
//
|
||||
// By default, strings are encoded as UTF-8,
// but can be treated as []byte during an encode.
|
||||
//
|
||||
// Note that things which we know (by definition) to be UTF-8
|
||||
// are ALWAYS encoded as UTF-8 strings.
|
||||
// These include encoding.TextMarshaler, time.Format calls, struct field names, etc.
|
||||
StringToRaw bool
|
||||
|
||||
// OptimumSize controls whether we optimize for the smallest size.
|
||||
//
|
||||
// Some formats will use this flag to determine whether to encode
|
||||
// in the smallest size possible, even if it takes slightly longer.
|
||||
//
|
||||
// For example, some formats that support half-floats might check if it is possible
|
||||
// to store a float64 as a half float. Doing this check has a small performance cost,
|
||||
// but the benefit is that the encoded message will be smaller.
|
||||
OptimumSize bool
|
||||
|
||||
// NoAddressableReadonly controls whether we try to force a non-addressable value
|
||||
// to be addressable so we can call a pointer method on it e.g. for types
|
||||
// that support Selfer, json.Marshaler, etc.
|
||||
//
|
||||
// Use it in the very rare occurrence that your types modify a pointer value when calling
|
||||
// an encode callback function e.g. JsonMarshal, TextMarshal, BinaryMarshal or CodecEncodeSelf.
|
||||
NoAddressableReadonly bool
|
||||
|
||||
// NilCollectionToZeroLength controls whether we encode nil collections (map, slice, chan)
|
||||
// as nil (e.g. null if using JSON) or as zero length collections (e.g. [] or {} if using JSON).
|
||||
//
|
||||
// This is useful in many scenarios e.g.
|
||||
// - encoding in go, but decoding the encoded stream in python
|
||||
// where context of the type is missing but needed
|
||||
//
|
||||
// Note: this flag ignores the MapBySlice tag, and will encode nil slices, maps and chan
|
||||
// in their natural zero-length formats e.g. a slice in json encoded as []
|
||||
// (and not nil or {} if MapBySlice tag).
|
||||
NilCollectionToZeroLength bool
|
||||
}
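// A minimal usage sketch (the value "v" is assumed): EncodeOptions is embedded
// in the concrete handles (via BasicHandle), so its fields are set directly on
// e.g. a MsgpackHandle before creating an Encoder.
//
//	var mh MsgpackHandle
//	mh.Canonical = true     // deterministic map-key ordering
//	mh.StructToArray = true // encode structs as arrays
//	mh.OptimumSize = true   // prefer the smallest encoded form
//	var out []byte
//	err := NewEncoderBytes(&out, &mh).Encode(v)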
|
||||
|
||||
// ---------------------------------------------
|
||||
|
||||
// encoderBase is shared as a field between Encoder and its encDrivers.
|
||||
// This way, encDrivers need not hold a reference to the Encoder itself.
|
||||
type encoderBase struct {
|
||||
perType encPerType
|
||||
|
||||
h *BasicHandle
|
||||
|
||||
// MARKER: these fields below should belong directly in Encoder.
|
||||
// There should not be any pointers here - just values.
|
||||
// we pack them here for space efficiency and cache-line optimization.
|
||||
|
||||
rtidFn, rtidFnNoExt *atomicRtidFnSlice
|
||||
|
||||
// se encoderI
|
||||
err error
|
||||
|
||||
blist bytesFreeList
|
||||
|
||||
// js bool // is json encoder?
|
||||
// be bool // is binary encoder?
|
||||
|
||||
bytes bool
|
||||
|
||||
c containerState
|
||||
|
||||
calls uint16
|
||||
seq uint16 // sequencer (e.g. used by binc for symbols, etc)
|
||||
|
||||
// ---- cpu cache line boundary
|
||||
hh Handle
|
||||
|
||||
// ---- cpu cache line boundary
|
||||
|
||||
// ---- writable fields during execution --- *try* to keep in sep cache line
|
||||
|
||||
ci circularRefChecker
|
||||
|
||||
slist sfiRvFreeList
|
||||
}
|
||||
|
||||
func (e *encoderBase) HandleName() string {
|
||||
return e.hh.Name()
|
||||
}
|
||||
|
||||
// Release is a no-op.
|
||||
//
|
||||
// Deprecated: Pooled resources are not used with an Encoder.
|
||||
// This method is kept for compatibility reasons only.
|
||||
func (e *encoderBase) Release() {
|
||||
}
|
||||
|
||||
func (e *encoderBase) setContainerState(cs containerState) {
|
||||
if cs != 0 {
|
||||
e.c = cs
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoderBase) haltOnMbsOddLen(length int) {
|
||||
if length&1 != 0 { // similar to &1==1 or %2 == 1
|
||||
halt.errorInt("mapBySlice requires even slice length, but got ", int64(length))
|
||||
}
|
||||
}
|
||||
|
||||
// addrRV returns an addressable value, given that rv is not addressable
|
||||
func (e *encoderBase) addrRV(rv reflect.Value, typ, ptrType reflect.Type) (rva reflect.Value) {
|
||||
// if rv.CanAddr() {
|
||||
// return rvAddr(rv, ptrType)
|
||||
// }
|
||||
if e.h.NoAddressableReadonly {
|
||||
rva = reflect.New(typ)
|
||||
rvSetDirect(rva.Elem(), rv)
|
||||
return
|
||||
}
|
||||
return rvAddr(e.perType.AddressableRO(rv), ptrType)
|
||||
}
|
||||
|
||||
func (e *encoderBase) wrapErr(v error, err *error) {
|
||||
*err = wrapCodecErr(v, e.hh.Name(), 0, true)
|
||||
}
|
||||
|
||||
func (e *encoderBase) kErr(_ *encFnInfo, rv reflect.Value) {
|
||||
halt.errorf("unsupported encoding kind: %s, for %#v", rv.Kind(), any(rv))
|
||||
}
|
||||
|
||||
func chanToSlice(rv reflect.Value, rtslice reflect.Type, timeout time.Duration) (rvcs reflect.Value) {
|
||||
rvcs = rvZeroK(rtslice, reflect.Slice)
|
||||
if timeout < 0 { // consume until close
|
||||
for {
|
||||
recv, recvOk := rv.Recv()
|
||||
if !recvOk {
|
||||
break
|
||||
}
|
||||
rvcs = reflect.Append(rvcs, recv)
|
||||
}
|
||||
} else {
|
||||
cases := make([]reflect.SelectCase, 2)
|
||||
cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv}
|
||||
if timeout == 0 {
|
||||
cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault}
|
||||
} else {
|
||||
tt := time.NewTimer(timeout)
|
||||
cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)}
|
||||
}
|
||||
for {
|
||||
chosen, recv, recvOk := reflect.Select(cases)
|
||||
if chosen == 1 || !recvOk {
|
||||
break
|
||||
}
|
||||
rvcs = reflect.Append(rvcs, recv)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
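// Sketch (a chan int "ch", an io.Writer "w" and a configured JsonHandle "jh"
// are assumed): encoding directly from a channel goes through chanToSlice,
// honoring ChanRecvTimeout from the handle's EncodeOptions.
//
//	jh.ChanRecvTimeout = -1 // drain until the channel is closed
//	err := NewEncoder(w, &jh).Encode(ch)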
|
||||
|
||||
type encoderI interface {
|
||||
Encode(v interface{}) error
|
||||
MustEncode(v interface{})
|
||||
Release()
|
||||
Reset(w io.Writer)
|
||||
ResetBytes(out *[]byte)
|
||||
|
||||
wrapErr(v error, err *error)
|
||||
atEndOfEncode()
|
||||
writerEnd()
|
||||
|
||||
encodeI(v interface{})
|
||||
encodeR(v reflect.Value)
|
||||
encodeAs(v interface{}, t reflect.Type, ext bool)
|
||||
|
||||
setContainerState(cs containerState) // needed for canonical encoding via side encoder
|
||||
}
|
||||
|
||||
var errEncNoResetBytesWithWriter = errors.New("cannot reset an Encoder which outputs to []byte with an io.Writer")
|
||||
var errEncNoResetWriterWithBytes = errors.New("cannot reset an Encoder which outputs to io.Writer with a []byte")
|
||||
|
||||
type encDriverContainerNoTrackerT struct{}
|
||||
|
||||
func (encDriverContainerNoTrackerT) WriteArrayElem(firstTime bool) {}
|
||||
func (encDriverContainerNoTrackerT) WriteMapElemKey(firstTime bool) {}
|
||||
func (encDriverContainerNoTrackerT) WriteMapElemValue() {}
|
||||
|
||||
type Encoder struct {
|
||||
encoderI
|
||||
}
|
||||
|
||||
// NewEncoder returns an Encoder for encoding into an io.Writer.
|
||||
//
|
||||
// For efficiency, Users are encouraged to configure WriterBufferSize on the handle
|
||||
// OR pass in a memory buffered writer (eg bufio.Writer, bytes.Buffer).
|
||||
func NewEncoder(w io.Writer, h Handle) *Encoder {
|
||||
return &Encoder{h.newEncoder(w)}
|
||||
}
|
||||
|
||||
// NewEncoderBytes returns an encoder for encoding directly and efficiently
|
||||
// into a byte slice, using zero-copying to temporary slices.
|
||||
//
|
||||
// It will potentially replace the output byte slice pointed to.
|
||||
// After encoding, the out parameter contains the encoded contents.
|
||||
func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
|
||||
return &Encoder{h.newEncoderBytes(out)}
|
||||
}
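// Sketch (a configured handle "h" and values v1, v2 are assumed): an Encoder is
// re-used against a fresh buffer via ResetBytes, avoiding re-allocating the
// Encoder itself.
//
//	var out []byte
//	enc := NewEncoderBytes(&out, h)
//	err1 := enc.Encode(v1)
//	out = out[:0]
//	enc.ResetBytes(&out)
//	err2 := enc.Encode(v2)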
|
||||
|
||||
// ----
|
||||
|
||||
func sideEncode(h Handle, p *sync.Pool, fn func(encoderI)) {
|
||||
var s encoderI
|
||||
if usePoolForSideEncode {
|
||||
s = p.Get().(encoderI)
|
||||
defer p.Put(s)
|
||||
} else {
|
||||
// initialization cycle error
|
||||
// s = NewEncoderBytes(nil, h).encoderI
|
||||
s = p.New().(encoderI)
|
||||
}
|
||||
fn(s)
|
||||
}
|
||||
|
||||
func oneOffEncode(se encoderI, v interface{}, out *[]byte, basetype reflect.Type, ext bool) {
|
||||
se.ResetBytes(out)
|
||||
se.encodeAs(v, basetype, ext)
|
||||
se.atEndOfEncode()
|
||||
se.writerEnd()
|
||||
// e.sideEncoder(&bs)
|
||||
// e.sideEncode(v, basetype, 0)
|
||||
}
|
||||
1652
vendor/github.com/ugorji/go/codec/encode.go
generated
vendored
Normal file
File diff suppressed because it is too large
134
vendor/github.com/ugorji/go/codec/fastpath.go.tmpl
generated
vendored
Normal file
@@ -0,0 +1,134 @@
|
||||
//go:build !notfastpath && !codec.notfastpath
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
// Code generated from fastpath.go.tmpl - DO NOT EDIT.
|
||||
|
||||
package codec
|
||||
|
||||
// Fast path functions try to create a fast path encode or decode implementation
|
||||
// for common maps and slices.
|
||||
//
|
||||
// We define the functions and register them in this single file
|
||||
// so as not to pollute the encode.go and decode.go, and create a dependency in there.
|
||||
// This file can be omitted without causing a build failure.
|
||||
//
|
||||
// The advantage of fast paths is:
|
||||
// - Many calls bypass reflection altogether
|
||||
//
|
||||
// Currently support
|
||||
// - slice of all builtin types (numeric, bool, string, []byte)
|
||||
// - maps of builtin types to builtin or interface{} type, EXCEPT FOR
|
||||
// keys of type uintptr, int8/16/32, uint16/32, float32/64, bool, interface{}
|
||||
// AND values of type int8/16/32, uint16/32
|
||||
// This should provide adequate "typical" implementations.
|
||||
//
|
||||
// Note that fast track decode functions must handle values for which an address cannot be obtained.
|
||||
// For example:
|
||||
// m2 := map[string]int{}
|
||||
// p2 := []interface{}{m2}
|
||||
// // decoding into p2 will bomb if fast track functions do not treat like unaddressable.
|
||||
//
|
||||
|
||||
{{/*
|
||||
// ----------------
|
||||
fastpathEncMap<KV>R func (mapped to type id), routes to:
|
||||
- ft.EncMap<KV>V
|
||||
|
||||
fastpathEncSlice<E>R func (mapped to type id), delegates to one of:
|
||||
- ft.EncSlice<E>V
|
||||
- ft.EncAsMapSlice<E>V (when mapbyslice ie f.ti.mbs=true)
|
||||
|
||||
// ----------------
|
||||
fastpathDecSlice<E>R func (mapped to type id), delegates to:
|
||||
- ft.DecSliceIntfY (when slice CAN be updated)
|
||||
- ft.DecSliceIntfN (when slice CANNOT be updated e.g. from array or non-addressable slice)
|
||||
|
||||
fastpathDecMap<KV>R func (mapped to type id), routes to
|
||||
- ft.DecMap<KV>L (handles ptr which is changeable, and non-pointer which cannot be made if nil)
|
||||
|
||||
// ----------------
|
||||
NOTE:
|
||||
- fastpath typeswitch directly calls the secondary methods for builtin maps/slices with appropriate nil handling:
|
||||
- except EncAsMapSlice<E>V which only applies to wrapper types not those in the switch
|
||||
- fastpathEncXXX functions mapped to type ID MUST do nil-checks during encode
|
||||
- they are only called by decodeValue/encodeValue or other code (same way kMap et al are called)
|
||||
*/ -}}
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"slices"
|
||||
)
|
||||
|
||||
const fastpathEnabled = true
|
||||
|
||||
{{/*
|
||||
const fastpathMapBySliceErrMsg = "mapBySlice requires even slice length, but got %v"
|
||||
*/ -}}
|
||||
|
||||
type fastpathARtid [{{ .FastpathLen }}]uintptr
|
||||
|
||||
type fastpathRtRtid struct {
|
||||
rtid uintptr
|
||||
rt reflect.Type
|
||||
}
|
||||
type fastpathARtRtid [{{ .FastpathLen }}]fastpathRtRtid
|
||||
|
||||
var (
|
||||
fastpathAvRtidArr fastpathARtid
|
||||
fastpathAvRtRtidArr fastpathARtRtid
|
||||
fastpathAvRtid = fastpathAvRtidArr[:]
|
||||
fastpathAvRtRtid = fastpathAvRtRtidArr[:]
|
||||
)
|
||||
|
||||
func fastpathAvIndex(rtid uintptr) (i uint, ok bool) {
|
||||
return searchRtids(fastpathAvRtid, rtid)
|
||||
}
|
||||
|
||||
func init() {
|
||||
var i uint = 0
|
||||
fn := func(v interface{}) {
|
||||
xrt := reflect.TypeOf(v)
|
||||
xrtid := rt2id(xrt)
|
||||
xptrtid := rt2id(reflect.PointerTo(xrt))
|
||||
{{- /* only the base slice/map rtid is put in fastpathAvIndex, since we only handle slices/map/array */}}
|
||||
fastpathAvRtid[i] = xrtid
|
||||
fastpathAvRtRtid[i] = fastpathRtRtid{ rtid: xrtid, rt: xrt }
|
||||
{{- /* fastpath type switches however handle slices/map/array, and pointers to them */}}
|
||||
encBuiltinRtids = append(encBuiltinRtids, xrtid, xptrtid)
|
||||
decBuiltinRtids = append(decBuiltinRtids, xrtid, xptrtid)
|
||||
i++
|
||||
}
|
||||
{{/* do not register []byte in fastpath */}}
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
|
||||
fn([]{{ .Elem }}(nil))
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
|
||||
fn(map[{{ .MapKey }}]{{ .Elem }}(nil))
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
sort.Slice(fastpathAvRtid, func(i, j int) bool { return fastpathAvRtid[i] < fastpathAvRtid[j] })
|
||||
sort.Slice(fastpathAvRtRtid, func(i, j int) bool { return fastpathAvRtRtid[i].rtid < fastpathAvRtRtid[j].rtid })
|
||||
slices.Sort(encBuiltinRtids)
|
||||
slices.Sort(decBuiltinRtids)
|
||||
}
|
||||
|
||||
func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool {
|
||||
switch v := iv.(type) {
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
|
||||
case *[]{{ .Elem }}:
|
||||
*v = nil
|
||||
{{end}}{{end}}{{end}}
|
||||
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
|
||||
case *map[{{ .MapKey }}]{{ .Elem }}:
|
||||
*v = nil
|
||||
{{end}}{{end}}{{end}}
|
||||
default:
|
||||
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
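
// Illustrative sketch, not part of the generated template: what the set-zero type
// switch above enables at the API level. Decoding a nil stream value (e.g. JSON
// null) into a pointer to a builtin slice or map is expected to reset it to nil.
// Assumes the package's public NewDecoderBytes and JsonHandle.
func exampleDecodeNilResetsSlice() bool {
	v := []uint64{1, 2, 3}
	d := NewDecoderBytes([]byte(`null`), &JsonHandle{})
	err := d.Decode(&v)
	return err == nil && v == nil // expected: the slice was reset to nil
}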
|
||||
525
vendor/github.com/ugorji/go/codec/fastpath.notmono.go.tmpl
generated
vendored
Normal file
@@ -0,0 +1,525 @@
|
||||
//go:build !notfastpath && !codec.notfastpath && (notmono || codec.notmono)
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
// Code generated from fastpath.notmono.go.tmpl - DO NOT EDIT.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"slices"
|
||||
)
|
||||
|
||||
type fastpathE[T encDriver] struct {
|
||||
rtid uintptr
|
||||
rt reflect.Type
|
||||
encfn func(*encoder[T], *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathD[T decDriver] struct {
|
||||
rtid uintptr
|
||||
rt reflect.Type
|
||||
decfn func(*decoder[T], *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEs[T encDriver] [{{ .FastpathLen }}]fastpathE[T]
|
||||
type fastpathDs[T decDriver] [{{ .FastpathLen }}]fastpathD[T]
|
||||
|
||||
type fastpathET[T encDriver] struct{}
|
||||
type fastpathDT[T decDriver] struct{}
|
||||
|
||||
func (helperEncDriver[T]) fastpathEList() *fastpathEs[T] {
|
||||
var i uint = 0
|
||||
var s fastpathEs[T]
|
||||
fn := func(v interface{}, fe func(*encoder[T], *encFnInfo, reflect.Value)) {
|
||||
xrt := reflect.TypeOf(v)
|
||||
s[i] = fastpathE[T]{rt2id(xrt), xrt, fe}
|
||||
i++
|
||||
}
|
||||
{{/* do not register []byte in fastpath */}}
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
|
||||
fn([]{{ .Elem }}(nil), (*encoder[T]).{{ .MethodNamePfx "fastpathEnc" false }}R)
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
|
||||
fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*encoder[T]).{{ .MethodNamePfx "fastpathEnc" false }}R)
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid })
|
||||
return &s
|
||||
}
|
||||
|
||||
func (helperDecDriver[T]) fastpathDList() *fastpathDs[T] {
|
||||
var i uint = 0
|
||||
var s fastpathDs[T]
|
||||
fn := func(v interface{}, fd func(*decoder[T], *decFnInfo, reflect.Value)) {
|
||||
xrt := reflect.TypeOf(v)
|
||||
s[i] = fastpathD[T]{rt2id(xrt), xrt, fd}
|
||||
i++
|
||||
}
|
||||
{{/* do not register []byte in fastpath */}}
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
|
||||
fn([]{{ .Elem }}(nil), (*decoder[T]).{{ .MethodNamePfx "fastpathDec" false }}R)
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
|
||||
fn(map[{{ .MapKey }}]{{ .Elem }}(nil), (*decoder[T]).{{ .MethodNamePfx "fastpathDec" false }}R)
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
sort.Slice(s[:], func(i, j int) bool { return s[i].rtid < s[j].rtid })
|
||||
return &s
|
||||
}
|
||||
|
||||
// -- encode
|
||||
|
||||
// -- -- fast path type switch
|
||||
func (helperEncDriver[T]) fastpathEncodeTypeSwitch(iv interface{}, e *encoder[T]) bool {
|
||||
var ft fastpathET[T]
|
||||
switch v := iv.(type) {
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
|
||||
case []{{ .Elem }}:
|
||||
if v == nil { e.e.writeNilArray() } else { ft.{{ .MethodNamePfx "Enc" false }}V(v, e) }
|
||||
{{end}}{{end}}{{end -}}
|
||||
|
||||
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
|
||||
case map[{{ .MapKey }}]{{ .Elem }}:
|
||||
if v == nil { e.e.writeNilMap() } else { ft.{{ .MethodNamePfx "Enc" false }}V(v, e) }
|
||||
{{end}}{{end}}{{end -}}
|
||||
|
||||
default:
|
||||
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// -- -- fast path functions
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
|
||||
func (e *encoder[T]) {{ .MethodNamePfx "fastpathEnc" false }}R(f *encFnInfo, rv reflect.Value) {
|
||||
var ft fastpathET[T]
|
||||
var v []{{ .Elem }}
|
||||
if rv.Kind() == reflect.Array {
|
||||
rvGetSlice4Array(rv, &v)
|
||||
} else {
|
||||
v = rv2i(rv).([]{{ .Elem }})
|
||||
}
|
||||
if f.ti.mbs {
|
||||
ft.{{ .MethodNamePfx "EncAsMap" false }}V(v, e)
|
||||
return
|
||||
}
|
||||
ft.{{ .MethodNamePfx "Enc" false }}V(v, e)
|
||||
}
|
||||
func (fastpathET[T]) {{ .MethodNamePfx "Enc" false }}V(v []{{ .Elem }}, e *encoder[T]) {
|
||||
{{ if eq .Elem "uint8" "byte" -}}
|
||||
e.e.EncodeStringBytesRaw(v)
|
||||
{{ else -}}
|
||||
if len(v) == 0 {
|
||||
e.c = 0; e.e.WriteArrayEmpty()
|
||||
return
|
||||
}
|
||||
e.arrayStart(len(v))
|
||||
for j := range v {
|
||||
e.c = containerArrayElem; e.e.WriteArrayElem(j == 0)
|
||||
{{ encmd .Elem "v[j]"}}
|
||||
}
|
||||
e.c = 0; e.e.WriteArrayEnd()
|
||||
{{ end -}}
|
||||
}
|
||||
func (fastpathET[T]) {{ .MethodNamePfx "EncAsMap" false }}V(v []{{ .Elem }}, e *encoder[T]) {
|
||||
if len(v) == 0 {
|
||||
e.c = 0; e.e.WriteMapEmpty()
|
||||
return
|
||||
}
|
||||
e.haltOnMbsOddLen(len(v))
|
||||
e.mapStart(len(v) >> 1) // e.mapStart(len(v) / 2)
|
||||
for j := range v {
|
||||
if j&1 == 0 { // if j%2 == 0 {
|
||||
e.c = containerMapKey; e.e.WriteMapElemKey(j == 0)
|
||||
} else {
|
||||
e.mapElemValue()
|
||||
}
|
||||
{{ encmd .Elem "v[j]"}}
|
||||
}
|
||||
e.c = 0; e.e.WriteMapEnd()
|
||||
}
|
||||
|
||||
{{end}}{{end}}{{end -}}
|
||||
|
||||
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
|
||||
func (e *encoder[T]) {{ .MethodNamePfx "fastpathEnc" false }}R(f *encFnInfo, rv reflect.Value) {
|
||||
{{/* var ft fastpathET[T]
|
||||
ft.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e) */ -}}
|
||||
fastpathET[T]{}.{{ .MethodNamePfx "Enc" false }}V(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), e)
|
||||
}
|
||||
func (fastpathET[T]) {{ .MethodNamePfx "Enc" false }}V(v map[{{ .MapKey }}]{{ .Elem }}, e *encoder[T]) {
|
||||
{{/* if v == nil { e.e.EncodeNil(); return } */ -}}
|
||||
if len(v) == 0 {
|
||||
e.e.WriteMapEmpty()
|
||||
return
|
||||
}
|
||||
var i uint
|
||||
e.mapStart(len(v))
|
||||
if e.h.Canonical { {{/* need to figure out .NoCanonical */}}
|
||||
{{if eq .MapKey "interface{}"}}{{/* out of band */ -}}
|
||||
var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding
|
||||
e2 := NewEncoderBytes(&mksv, e.hh)
|
||||
v2 := make([]bytesIntf, len(v))
|
||||
var l uint {{/* put loop variables outside. seems currently needed for better perf */}}
|
||||
var vp *bytesIntf
|
||||
for k2 := range v {
|
||||
l = uint(len(mksv))
|
||||
e2.MustEncode(k2)
|
||||
vp = &v2[i]
|
||||
vp.v = mksv[l:]
|
||||
vp.i = k2
|
||||
i++
|
||||
}
|
||||
slices.SortFunc(v2, cmpBytesIntf)
|
||||
for j := range v2 {
|
||||
e.c = containerMapKey; e.e.WriteMapElemKey(j == 0)
|
||||
e.asis(v2[j].v)
|
||||
e.mapElemValue()
|
||||
e.encode(v[v2[j].i])
|
||||
} {{else}}{{ $x := sorttype .MapKey true}}v2 := make([]{{ $x }}, len(v))
|
||||
for k := range v {
|
||||
v2[i] = {{if eq $x .MapKey}}k{{else}}{{ $x }}(k){{end}}
|
||||
i++
|
||||
}
|
||||
slices.Sort(v2)
|
||||
{{/* // sort.Sort({{ sorttype .MapKey false}}(v2)) */ -}}
|
||||
for i, k2 := range v2 {
|
||||
e.c = containerMapKey; e.e.WriteMapElemKey(i == 0)
|
||||
{{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ $y := printf "%s(k2)" .MapKey }}{{if eq $x .MapKey }}{{ $y = "k2" }}{{end}}{{ encmd .MapKey $y }}{{end}}
|
||||
e.mapElemValue()
|
||||
{{ $y := printf "v[%s(k2)]" .MapKey }}{{if eq $x .MapKey }}{{ $y = "v[k2]" }}{{end}}{{ encmd .Elem $y }}
|
||||
} {{end}}
|
||||
} else {
|
||||
i = 0
|
||||
for k2, v2 := range v {
|
||||
e.c = containerMapKey; e.e.WriteMapElemKey(i == 0)
|
||||
{{if eq .MapKey "string"}} e.e.EncodeString(k2) {{else}}{{ encmd .MapKey "k2"}}{{end}}
|
||||
e.mapElemValue()
|
||||
{{ encmd .Elem "v2"}}
|
||||
i++
|
||||
}
|
||||
}
|
||||
e.c = 0; e.e.WriteMapEnd()
|
||||
}
|
||||
{{end}}{{end}}{{end -}}
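
// Illustrative sketch, not part of the generated template: the e.h.Canonical branch
// above is what the handle's Canonical option switches on. With it set, map keys are
// sorted before being written, so repeated encodings of the same map are
// byte-identical. Assumes the package's public NewEncoderBytes and JsonHandle.
func exampleCanonicalMapEncode() []byte {
	var out []byte
	h := new(JsonHandle)
	h.Canonical = true // sort map keys deterministically
	_ = NewEncoderBytes(&out, h).Encode(map[string]uint64{"b": 2, "a": 1})
	return out // expected: {"a":1,"b":2}
}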
|
||||
|
||||
// -- decode
|
||||
|
||||
// -- -- fast path type switch
|
||||
func (helperDecDriver[T]) fastpathDecodeTypeSwitch(iv interface{}, d *decoder[T]) bool {
|
||||
var ft fastpathDT[T]
|
||||
var changed bool
|
||||
var containerLen int
|
||||
switch v := iv.(type) {
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
|
||||
case []{{ .Elem }}:
|
||||
ft.{{ .MethodNamePfx "Dec" false }}N(v, d)
|
||||
case *[]{{ .Elem }}:
|
||||
var v2 []{{ .Elem }}
|
||||
if v2, changed = ft.{{ .MethodNamePfx "Dec" false }}Y(*v, d); changed {
|
||||
*v = v2
|
||||
}
|
||||
{{end}}{{end}}{{end -}}
|
||||
{{range .Values}}{{if not .Primitive}}{{if .MapKey }}{{/*
|
||||
// maps only change if nil, and in that case, there's no point copying
|
||||
*/ -}}
|
||||
case map[{{ .MapKey }}]{{ .Elem }}:
|
||||
if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen != containerLenNil {
|
||||
if containerLen != 0 {
|
||||
ft.{{ .MethodNamePfx "Dec" false }}L(v, containerLen, d)
|
||||
}
|
||||
d.mapEnd()
|
||||
}
|
||||
case *map[{{ .MapKey }}]{{ .Elem }}:
|
||||
if containerLen = d.mapStart(d.d.ReadMapStart()); containerLen == containerLenNil {
|
||||
*v = nil
|
||||
} else {
|
||||
if *v == nil {
|
||||
*v = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.maxInitLen(), {{ .Size }}))
|
||||
}
|
||||
if containerLen != 0 {
|
||||
ft.{{ .MethodNamePfx "Dec" false }}L(*v, containerLen, d)
|
||||
}
|
||||
d.mapEnd()
|
||||
}
|
||||
{{end}}{{end}}{{end -}}
|
||||
default:
|
||||
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// -- -- fast path functions
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
|
||||
{{/*
|
||||
Slices can change if they
|
||||
- did not come from an array
|
||||
- are addressable (from a ptr)
|
||||
- are settable (e.g. contained in an interface{})
|
||||
*/}}
|
||||
func (d *decoder[T]) {{ .MethodNamePfx "fastpathDec" false }}R(f *decFnInfo, rv reflect.Value) {
|
||||
var ft fastpathDT[T]
|
||||
{{/*
|
||||
// seqTypeArray=true means that we are not getting a pointer, so no need to check that.
|
||||
if f.seq != seqTypeArray && rv.Kind() == reflect.Ptr {
|
||||
*/ -}}
|
||||
switch rv.Kind() {
|
||||
case reflect.Ptr: {{- /* this block is called for types that wrap a fastpath type e.g. wrapSliceUint64 */}}
|
||||
v := rv2i(rv).(*[]{{ .Elem }})
|
||||
if vv, changed := ft.{{ .MethodNamePfx "Dec" false }}Y(*v, d); changed {
|
||||
*v = vv
|
||||
}
|
||||
case reflect.Array:
|
||||
var v []{{ .Elem }}
|
||||
rvGetSlice4Array(rv, &v)
|
||||
ft.{{ .MethodNamePfx "Dec" false }}N(v, d)
|
||||
default:
|
||||
ft.{{ .MethodNamePfx "Dec" false }}N(rv2i(rv).([]{{ .Elem }}), d)
|
||||
}
|
||||
}
|
||||
func (fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}Y(v []{{ .Elem }}, d *decoder[T]) (v2 []{{ .Elem }}, changed bool) {
|
||||
ctyp := d.d.ContainerType()
|
||||
if ctyp == valueTypeNil {
|
||||
return nil, v != nil
|
||||
}
|
||||
{{ if eq .Elem "uint8" "byte" -}}
|
||||
if ctyp != valueTypeMap {
|
||||
var dbi dBytesIntoState
|
||||
v2, dbi = d.decodeBytesInto(v[:len(v):len(v)], false)
|
||||
return v2, dbi != dBytesIntoParamOut
|
||||
}
|
||||
containerLenS := d.mapStart(d.d.ReadMapStart()) * 2
|
||||
{{ else -}}
|
||||
var containerLenS int
|
||||
isArray := ctyp == valueTypeArray
|
||||
if isArray {
|
||||
containerLenS = d.arrayStart(d.d.ReadArrayStart())
|
||||
} else if ctyp == valueTypeMap {
|
||||
containerLenS = d.mapStart(d.d.ReadMapStart()) * 2
|
||||
} else {
|
||||
halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String())
|
||||
}
|
||||
{{ end -}}
|
||||
hasLen := containerLenS >= 0
|
||||
var j int
|
||||
fnv := func(dst []{{ .Elem }}) { v, changed = dst, true }
|
||||
for ; d.containerNext(j, containerLenS, hasLen); j++ {
|
||||
if j == 0 {
|
||||
if containerLenS == len(v) {
|
||||
} else if containerLenS < 0 || containerLenS > cap(v) {
|
||||
if xlen := int(decInferLen(containerLenS, d.maxInitLen(), {{ .Size }})); xlen <= cap(v) {
|
||||
fnv(v[:uint(xlen)])
|
||||
} else {
|
||||
v2 = make([]{{ .Elem }}, uint(xlen))
|
||||
copy(v2, v)
|
||||
fnv(v2)
|
||||
}
|
||||
} else {
|
||||
fnv(v[:containerLenS])
|
||||
}
|
||||
}
|
||||
{{ if eq .Elem "uint8" "byte" }}{{ else -}}
|
||||
if isArray { d.arrayElem(j == 0) } else
|
||||
{{ end -}}
|
||||
if j&1 == 0 { d.mapElemKey(j == 0) } else { d.mapElemValue() }
|
||||
if j >= len(v) { {{- /* // if indefinite, json, etc, then expand the slice (if necessary) */}}
|
||||
fnv(append(v, {{ zerocmd .Elem }}))
|
||||
}
|
||||
{{ if eq .Elem "interface{}" }}d.decode(&v[uint(j)]){{ else }}v[uint(j)] = {{ decmd .Elem false }}{{ end }}
|
||||
}
|
||||
if j < len(v) {
|
||||
fnv(v[:uint(j)])
|
||||
} else if j == 0 && v == nil {
|
||||
fnv([]{{ .Elem }}{})
|
||||
}
|
||||
{{ if eq .Elem "uint8" "byte" -}}
|
||||
d.mapEnd()
|
||||
{{ else -}}
|
||||
if isArray { d.arrayEnd() } else { d.mapEnd() }
|
||||
{{ end -}}
|
||||
return v, changed
|
||||
}
|
||||
func (fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}N(v []{{ .Elem }}, d *decoder[T]) {
|
||||
ctyp := d.d.ContainerType()
|
||||
if ctyp == valueTypeNil {
|
||||
return
|
||||
}
|
||||
{{ if eq .Elem "uint8" "byte" -}}
|
||||
if ctyp != valueTypeMap {
|
||||
d.decodeBytesInto(v[:len(v):len(v)], true)
|
||||
return
|
||||
}
|
||||
containerLenS := d.mapStart(d.d.ReadMapStart()) * 2
|
||||
{{ else -}}
|
||||
var containerLenS int
|
||||
isArray := ctyp == valueTypeArray
|
||||
if isArray {
|
||||
containerLenS = d.arrayStart(d.d.ReadArrayStart())
|
||||
} else if ctyp == valueTypeMap {
|
||||
containerLenS = d.mapStart(d.d.ReadMapStart()) * 2
|
||||
} else {
|
||||
halt.errorStr2("decoding into a slice, expect map/array - got ", ctyp.String())
|
||||
}
|
||||
{{ end -}}
|
||||
hasLen := containerLenS >= 0
|
||||
for j := 0; d.containerNext(j, containerLenS, hasLen); j++ {
|
||||
{{/* // if indefinite, etc, then expand the slice if necessary */ -}}
|
||||
{{ if not (eq .Elem "uint8" "byte") -}}
|
||||
if isArray { d.arrayElem(j == 0) } else
|
||||
{{ end -}}
|
||||
if j&1 == 0 { d.mapElemKey(j == 0) } else { d.mapElemValue() }
|
||||
if j < len(v) {
|
||||
{{ if eq .Elem "interface{}" }}d.decode(&v[uint(j)]){{ else }}v[uint(j)] = {{ decmd .Elem false }}{{ end }}
|
||||
} else {
|
||||
d.arrayCannotExpand(len(v), j+1)
|
||||
d.swallow()
|
||||
}
|
||||
}
|
||||
{{ if eq .Elem "uint8" "byte" -}}
|
||||
d.mapEnd()
|
||||
{{ else -}}
|
||||
if isArray { d.arrayEnd() } else { d.mapEnd() }
|
||||
{{ end -}}
|
||||
}
|
||||
{{end}}{{end}}{{end -}}
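
// Illustrative sketch, not part of the generated template: the "...Y"/"...N" split
// above at the user-facing level. A slice reached through a pointer is addressable
// and may be grown or replaced while decoding (the Y path); a slice inside an array
// or other non-addressable value is only filled in place (the N path).
// Assumes the package's public NewDecoderBytes and JsonHandle.
func exampleDecodeIntoAddressableSlice() int {
	var v []uint64 // nil, but reachable via &v, so it can be allocated and expanded
	_ = NewDecoderBytes([]byte(`[1,2,3]`), &JsonHandle{}).Decode(&v)
	return len(v) // expected: 3
}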
|
||||
|
||||
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
|
||||
{{/*
|
||||
Maps can change if they are
|
||||
- addressable (from a ptr)
|
||||
- settable (e.g. contained in an interface{})
|
||||
*/ -}}
|
||||
func (d *decoder[T]) {{ .MethodNamePfx "fastpathDec" false }}R(f *decFnInfo, rv reflect.Value) {
|
||||
var ft fastpathDT[T]
|
||||
containerLen := d.mapStart(d.d.ReadMapStart())
|
||||
if rv.Kind() == reflect.Ptr { {{- /* this block is called for types that wrap a fastpath type e.g. wrapMapStringUint64 */}}
|
||||
vp, _ := rv2i(rv).(*map[{{ .MapKey }}]{{ .Elem }})
|
||||
if *vp == nil {
|
||||
*vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.maxInitLen(), {{ .Size }}))
|
||||
}
|
||||
if containerLen != 0 {
|
||||
ft.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d)
|
||||
}
|
||||
} else if containerLen != 0 {
|
||||
ft.{{ .MethodNamePfx "Dec" false }}L(rv2i(rv).(map[{{ .MapKey }}]{{ .Elem }}), containerLen, d)
|
||||
}
|
||||
d.mapEnd()
|
||||
}
|
||||
func (fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}L(v map[{{ .MapKey }}]{{ .Elem }}, containerLen int, d *decoder[T]) {
|
||||
if v == nil {
|
||||
halt.errorInt("cannot decode into nil map[{{ .MapKey }}]{{ .Elem }} given stream length: ", int64(containerLen))
|
||||
{{/* d.swallowMapContents(containerLen); return */ -}}
|
||||
}
|
||||
{{if eq .MapKey "interface{}" -}}
|
||||
var mk {{ .MapKey }}
|
||||
{{end -}}
|
||||
{{ if eq .Elem "interface{}" "[]byte" "bytes" -}}
|
||||
var mv {{ .Elem }}
|
||||
mapGet := !d.h.MapValueReset
|
||||
{{- if eq .Elem "interface{}" -}}
|
||||
&& !d.h.InterfaceReset
|
||||
{{- end}}
|
||||
{{end -}}
|
||||
hasLen := containerLen >= 0
|
||||
for j := 0; d.containerNext(j, containerLen, hasLen); j++ {
|
||||
d.mapElemKey(j == 0)
|
||||
{{ if eq .MapKey "interface{}" -}}
|
||||
mk = nil
|
||||
d.decode(&mk)
|
||||
if bv, bok := mk.([]byte); bok {
|
||||
mk = d.detach2Str(bv) {{/* // maps cannot have []byte as key. switch to string. */}}
|
||||
}{{ else }}mk := {{ decmd .MapKey true }}{{ end }}
|
||||
d.mapElemValue()
|
||||
{{ if eq .Elem "interface{}" "[]byte" "bytes" -}}
|
||||
if mapGet { mv = v[mk] } else { mv = nil }
|
||||
{{ end -}}
|
||||
{{ if eq .Elem "interface{}" -}}
|
||||
d.decode(&mv)
|
||||
v[mk] = mv
|
||||
{{ else if eq .Elem "[]byte" "bytes" -}}
|
||||
v[mk], _ = d.decodeBytesInto(mv, false)
|
||||
{{ else -}}
|
||||
v[mk] = {{ decmd .Elem false }}
|
||||
{{ end -}}
|
||||
}
|
||||
}
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
{{- /*
|
||||
|
||||
// -- -- fast path type switch
|
||||
func (helperEncDriver[T]) fastpathEncodeTypeSwitch(iv interface{}, e *encoder[T]) bool {
|
||||
var ft fastpathET[T]
|
||||
switch v := iv.(type) {
|
||||
{{range .Values}}{{if not .Primitive}}{{if not .MapKey -}}
|
||||
case []{{ .Elem }}:
|
||||
if v != nil {
|
||||
ft.{{ .MethodNamePfx "Enc" false }}V(v, e)
|
||||
} else if e.h.NilCollectionToZeroLength {
|
||||
e.e.WriteArrayEmpty()
|
||||
} else {
|
||||
e.e.EncodeNil()
|
||||
}
|
||||
case *[]{{ .Elem }}:
|
||||
if *v != nil {
|
||||
ft.{{ .MethodNamePfx "Enc" false }}V(*v, e)
|
||||
} else if e.h.NilCollectionToZeroLength {
|
||||
e.e.WriteArrayEmpty()
|
||||
} else {
|
||||
e.e.EncodeNil()
|
||||
}
|
||||
{{end}}{{end}}{{end -}}
|
||||
|
||||
{{range .Values}}{{if not .Primitive}}{{if .MapKey -}}
|
||||
case map[{{ .MapKey }}]{{ .Elem }}:
|
||||
if v != nil {
|
||||
ft.{{ .MethodNamePfx "Enc" false }}V(v, e)
|
||||
} else if e.h.NilCollectionToZeroLength {
|
||||
e.e.WriteMapEmpty()
|
||||
} else {
|
||||
e.e.EncodeNil()
|
||||
}
|
||||
case *map[{{ .MapKey }}]{{ .Elem }}:
|
||||
if *v != nil {
|
||||
ft.{{ .MethodNamePfx "Enc" false }}V(*v, e)
|
||||
} else if e.h.NilCollectionToZeroLength {
|
||||
e.e.WriteMapEmpty()
|
||||
} else {
|
||||
e.e.EncodeNil()
|
||||
}
|
||||
{{end}}{{end}}{{end -}}
|
||||
|
||||
default:
|
||||
_ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// used within codecgen, which is no longer supported
|
||||
func (f fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}X(vp *[]{{ .Elem }}, d *decoder[T]) {
|
||||
if v, changed := f.{{ .MethodNamePfx "Dec" false }}Y(*vp, d); changed { *vp = v }
|
||||
}
|
||||
|
||||
func (f fastpathDT[T]) {{ .MethodNamePfx "Dec" false }}X(vp *map[{{ .MapKey }}]{{ .Elem }}, d *decoder[T]) {
|
||||
containerLen := d.mapStart(d.d.ReadMapStart())
|
||||
if containerLen == containerLenNil {
|
||||
*vp = nil
|
||||
return
|
||||
}
|
||||
if *vp == nil {
|
||||
*vp = make(map[{{ .MapKey }}]{{ .Elem }}, decInferLen(containerLen, d.maxInitLen(), {{ .Size }}))
|
||||
}
|
||||
if containerLen != 0 {
|
||||
f.{{ .MethodNamePfx "Dec" false }}L(*vp, containerLen, d)
|
||||
}
|
||||
d.mapEnd()
|
||||
}
|
||||
|
||||
*/ -}}
|
||||
426
vendor/github.com/ugorji/go/codec/gen.go
generated
vendored
Normal file
@@ -0,0 +1,426 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
//go:build codec.build
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base32"
|
||||
"errors"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"io"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/template"
|
||||
// "ugorji.net/zz"
|
||||
)
|
||||
|
||||
// ---------------------------------------------------
|
||||
|
||||
const (
|
||||
genTopLevelVarName = "x"
|
||||
|
||||
// genFastpathCanonical configures whether we support Canonical in fast path. Low savings.
|
||||
//
|
||||
// MARKER: This MUST ALWAYS BE TRUE. fastpath.go.tmpl doesn't handle it being false.
|
||||
genFastpathCanonical = true
|
||||
|
||||
// genFastpathTrimTypes configures whether we trim uncommon fastpath types.
|
||||
genFastpathTrimTypes = true
|
||||
)
|
||||
|
||||
var genFormats = []string{"Json", "Cbor", "Msgpack", "Binc", "Simple"}
|
||||
|
||||
var (
|
||||
errGenAllTypesSamePkg = errors.New("All types must be in the same package")
|
||||
errGenExpectArrayOrMap = errors.New("unexpected type - expecting array/map/slice")
|
||||
errGenUnexpectedTypeFastpath = errors.New("fastpath: unexpected type - requires map or slice")
|
||||
|
||||
// don't use base64, only 63 characters allowed in valid go identifiers
|
||||
// ie ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_
|
||||
//
|
||||
// don't use numbers, as a valid go identifier must start with a letter.
|
||||
genTypenameEnc = base32.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef")
|
||||
genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`)
|
||||
)
|
||||
|
||||
// --------
|
||||
|
||||
func genCheckErr(err error) {
|
||||
halt.onerror(err)
|
||||
}
|
||||
|
||||
func genTitleCaseName(s string) string {
|
||||
switch s {
|
||||
case "interface{}", "interface {}":
|
||||
return "Intf"
|
||||
case "[]byte", "[]uint8", "bytes":
|
||||
return "Bytes"
|
||||
default:
|
||||
return strings.ToUpper(s[0:1]) + s[1:]
|
||||
}
|
||||
}
|
||||
|
||||
// --------
|
||||
|
||||
type genFastpathV struct {
|
||||
// genFastpathV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice
|
||||
MapKey string
|
||||
Elem string
|
||||
Primitive string
|
||||
Size int
|
||||
NoCanonical bool
|
||||
}
|
||||
|
||||
func (x *genFastpathV) MethodNamePfx(prefix string, prim bool) string {
|
||||
var name []byte
|
||||
if prefix != "" {
|
||||
name = append(name, prefix...)
|
||||
}
|
||||
if prim {
|
||||
name = append(name, genTitleCaseName(x.Primitive)...)
|
||||
} else {
|
||||
if x.MapKey == "" {
|
||||
name = append(name, "Slice"...)
|
||||
} else {
|
||||
name = append(name, "Map"...)
|
||||
name = append(name, genTitleCaseName(x.MapKey)...)
|
||||
}
|
||||
name = append(name, genTitleCaseName(x.Elem)...)
|
||||
}
|
||||
return string(name)
|
||||
}
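
// Illustrative sketch, not part of the vendored file: how MethodNamePfx above
// composes the fastpath method names that the templates reference.
func exampleMethodNamePfx() string {
	v := genFastpathV{MapKey: "string", Elem: "uint64"}
	return v.MethodNamePfx("fastpathEnc", false) // "fastpathEncMapStringUint64"
}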
|
||||
|
||||
// --------
|
||||
|
||||
type genTmpl struct {
|
||||
Values []genFastpathV
|
||||
Formats []string
|
||||
}
|
||||
|
||||
func (x genTmpl) FastpathLen() (l int) {
|
||||
for _, v := range x.Values {
|
||||
// if v.Primitive == "" && !(v.MapKey == "" && v.Elem == "uint8") {
|
||||
if v.Primitive == "" {
|
||||
l++
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func genTmplZeroValue(s string) string {
|
||||
switch s {
|
||||
case "interface{}", "interface {}":
|
||||
return "nil"
|
||||
case "[]byte", "[]uint8", "bytes":
|
||||
return "nil"
|
||||
case "bool":
|
||||
return "false"
|
||||
case "string":
|
||||
return `""`
|
||||
default:
|
||||
return "0"
|
||||
}
|
||||
}
|
||||
|
||||
var genTmplNonZeroValueIdx [6]uint64
|
||||
var genTmplNonZeroValueStrs = [...][6]string{
|
||||
{`"string-is-an-interface-1"`, "true", `"some-string-1"`, `[]byte("some-string-1")`, "11.1", "111"},
|
||||
{`"string-is-an-interface-2"`, "false", `"some-string-2"`, `[]byte("some-string-2")`, "22.2", "77"},
|
||||
{`"string-is-an-interface-3"`, "true", `"some-string-3"`, `[]byte("some-string-3")`, "33.3e3", "127"},
|
||||
}
|
||||
|
||||
// Note: last numbers must be in range: 0-127 (as they may be put into a int8, uint8, etc)
|
||||
|
||||
func genTmplNonZeroValue(s string) string {
|
||||
var i int
|
||||
switch s {
|
||||
case "interface{}", "interface {}":
|
||||
i = 0
|
||||
case "bool":
|
||||
i = 1
|
||||
case "string":
|
||||
i = 2
|
||||
case "bytes", "[]byte", "[]uint8":
|
||||
i = 3
|
||||
case "float32", "float64", "float", "double", "complex", "complex64", "complex128":
|
||||
i = 4
|
||||
default:
|
||||
i = 5
|
||||
}
|
||||
genTmplNonZeroValueIdx[i]++
|
||||
idx := genTmplNonZeroValueIdx[i]
|
||||
slen := uint64(len(genTmplNonZeroValueStrs))
|
||||
return genTmplNonZeroValueStrs[idx%slen][i] // return string, to remove ambiguity
|
||||
}
|
||||
|
||||
// Note: used for fastpath only
|
||||
func genTmplEncCommandAsString(s string, vname string) string {
|
||||
switch s {
|
||||
case "uint64":
|
||||
return "e.e.EncodeUint(" + vname + ")"
|
||||
case "uint", "uint8", "uint16", "uint32":
|
||||
return "e.e.EncodeUint(uint64(" + vname + "))"
|
||||
case "int64":
|
||||
return "e.e.EncodeInt(" + vname + ")"
|
||||
case "int", "int8", "int16", "int32":
|
||||
return "e.e.EncodeInt(int64(" + vname + "))"
|
||||
case "[]byte", "[]uint8", "bytes":
|
||||
// return fmt.Sprintf(
|
||||
// "if %s != nil { e.e.EncodeStringBytesRaw(%s) } "+
|
||||
// "else if e.h.NilCollectionToZeroLength { e.e.WriteArrayEmpty() } "+
|
||||
// "else { e.e.EncodeNil() }", vname, vname)
|
||||
// return "e.e.EncodeStringBytesRaw(" + vname + ")"
|
||||
return "e.e.EncodeBytes(" + vname + ")"
|
||||
case "string":
|
||||
return "e.e.EncodeString(" + vname + ")"
|
||||
case "float32":
|
||||
return "e.e.EncodeFloat32(" + vname + ")"
|
||||
case "float64":
|
||||
return "e.e.EncodeFloat64(" + vname + ")"
|
||||
case "bool":
|
||||
return "e.e.EncodeBool(" + vname + ")"
|
||||
// case "symbol":
|
||||
// return "e.e.EncodeSymbol(" + vname + ")"
|
||||
default:
|
||||
return fmt.Sprintf("if !e.encodeBuiltin(%s) { e.encodeR(reflect.ValueOf(%s)) }", vname, vname)
|
||||
// return "e.encodeI(" + vname + ")"
|
||||
}
|
||||
}
|
||||
|
||||
// Note: used for fastpath only
|
||||
func genTmplDecCommandAsString(s string, mapkey bool) string {
|
||||
switch s {
|
||||
case "uint":
|
||||
return "uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))"
|
||||
case "uint8":
|
||||
return "uint8(chkOvf.UintV(d.d.DecodeUint64(), 8))"
|
||||
case "uint16":
|
||||
return "uint16(chkOvf.UintV(d.d.DecodeUint64(), 16))"
|
||||
case "uint32":
|
||||
return "uint32(chkOvf.UintV(d.d.DecodeUint64(), 32))"
|
||||
case "uint64":
|
||||
return "d.d.DecodeUint64()"
|
||||
case "uintptr":
|
||||
return "uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize))"
|
||||
case "int":
|
||||
return "int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize))"
|
||||
case "int8":
|
||||
return "int8(chkOvf.IntV(d.d.DecodeInt64(), 8))"
|
||||
case "int16":
|
||||
return "int16(chkOvf.IntV(d.d.DecodeInt64(), 16))"
|
||||
case "int32":
|
||||
return "int32(chkOvf.IntV(d.d.DecodeInt64(), 32))"
|
||||
case "int64":
|
||||
return "d.d.DecodeInt64()"
|
||||
|
||||
case "string":
|
||||
// if mapkey {
|
||||
// return "d.stringZC(d.d.DecodeStringAsBytes())"
|
||||
// }
|
||||
// return "string(d.d.DecodeStringAsBytes())"
|
||||
return "d.detach2Str(d.d.DecodeStringAsBytes())"
|
||||
case "[]byte", "[]uint8", "bytes":
|
||||
// return "bytesOk(d.d.DecodeBytes())"
|
||||
return "bytesOKdbi(d.decodeBytesInto(v[uint(j)], false))"
|
||||
case "float32":
|
||||
return "float32(d.d.DecodeFloat32())"
|
||||
case "float64":
|
||||
return "d.d.DecodeFloat64()"
|
||||
case "complex64":
|
||||
return "complex(d.d.DecodeFloat32(), 0)"
|
||||
case "complex128":
|
||||
return "complex(d.d.DecodeFloat64(), 0)"
|
||||
case "bool":
|
||||
return "d.d.DecodeBool()"
|
||||
default:
|
||||
halt.error(errors.New("gen internal: unknown type for decode: " + s))
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func genTmplSortType(s string, elem bool) string {
|
||||
if elem {
|
||||
return s
|
||||
}
|
||||
return s + "Slice"
|
||||
}
|
||||
|
||||
// var genTmplMu sync.Mutex
|
||||
var genTmplV = genTmpl{}
|
||||
var genTmplFuncs template.FuncMap
|
||||
var genTmplOnce sync.Once
|
||||
|
||||
func genTmplInit() {
|
||||
wordSizeBytes := int(intBitsize) / 8
|
||||
|
||||
typesizes := map[string]int{
|
||||
"interface{}": 2 * wordSizeBytes,
|
||||
"string": 2 * wordSizeBytes,
|
||||
"[]byte": 3 * wordSizeBytes,
|
||||
"uint": 1 * wordSizeBytes,
|
||||
"uint8": 1,
|
||||
"uint16": 2,
|
||||
"uint32": 4,
|
||||
"uint64": 8,
|
||||
"uintptr": 1 * wordSizeBytes,
|
||||
"int": 1 * wordSizeBytes,
|
||||
"int8": 1,
|
||||
"int16": 2,
|
||||
"int32": 4,
|
||||
"int64": 8,
|
||||
"float32": 4,
|
||||
"float64": 8,
|
||||
"complex64": 8,
|
||||
"complex128": 16,
|
||||
"bool": 1,
|
||||
}
|
||||
|
||||
// keep as slice, so it is in specific iteration order.
|
||||
// Initial order was uint64, string, interface{}, int, int64, ...
|
||||
|
||||
var types = [...]string{
|
||||
"interface{}",
|
||||
"string",
|
||||
"[]byte",
|
||||
"float32",
|
||||
"float64",
|
||||
"uint",
|
||||
"uint8",
|
||||
"uint16",
|
||||
"uint32",
|
||||
"uint64",
|
||||
"uintptr",
|
||||
"int",
|
||||
"int8",
|
||||
"int16",
|
||||
"int32",
|
||||
"int64",
|
||||
"bool",
|
||||
}
|
||||
|
||||
var primitivetypes, slicetypes, mapkeytypes, mapvaltypes []string
|
||||
|
||||
primitivetypes = types[:]
|
||||
|
||||
slicetypes = types[:]
|
||||
mapkeytypes = types[:]
|
||||
mapvaltypes = types[:]
|
||||
|
||||
if genFastpathTrimTypes {
|
||||
// Note: we only create fastpaths for commonly used types.
|
||||
// Consequently, things like int8, uint16, uint, etc are commented out.
|
||||
slicetypes = []string{
|
||||
"interface{}",
|
||||
"string",
|
||||
"[]byte",
|
||||
"float32",
|
||||
"float64",
|
||||
"uint8", // keep fastpath, so it doesn't have to go through reflection
|
||||
"uint64",
|
||||
"int",
|
||||
"int32", // rune
|
||||
"int64",
|
||||
"bool",
|
||||
}
|
||||
mapkeytypes = []string{
|
||||
"string",
|
||||
"uint8", // byte
|
||||
"uint64", // used for keys
|
||||
"int", // default number key
|
||||
"int32", // rune
|
||||
}
|
||||
mapvaltypes = []string{
|
||||
"interface{}",
|
||||
"string",
|
||||
"[]byte",
|
||||
"uint8", // byte
|
||||
"uint64", // used for keys, etc
|
||||
"int", // default number
|
||||
"int32", // rune (mostly used for unicode)
|
||||
"float64",
|
||||
"bool",
|
||||
}
|
||||
}
|
||||
|
||||
var gt = genTmpl{Formats: genFormats}
|
||||
|
||||
// For each slice or map type, there must be a (symmetrical) Encode and Decode fastpath function
|
||||
|
||||
for _, s := range primitivetypes {
|
||||
gt.Values = append(gt.Values,
|
||||
genFastpathV{Primitive: s, Size: typesizes[s], NoCanonical: !genFastpathCanonical})
|
||||
}
|
||||
for _, s := range slicetypes {
|
||||
gt.Values = append(gt.Values,
|
||||
genFastpathV{Elem: s, Size: typesizes[s], NoCanonical: !genFastpathCanonical})
|
||||
}
|
||||
for _, s := range mapkeytypes {
|
||||
for _, ms := range mapvaltypes {
|
||||
gt.Values = append(gt.Values,
|
||||
genFastpathV{MapKey: s, Elem: ms, Size: typesizes[s] + typesizes[ms], NoCanonical: !genFastpathCanonical})
|
||||
}
|
||||
}
|
||||
|
||||
funcs := make(template.FuncMap)
|
||||
// funcs["haspfx"] = strings.HasPrefix
|
||||
funcs["encmd"] = genTmplEncCommandAsString
|
||||
funcs["decmd"] = genTmplDecCommandAsString
|
||||
funcs["zerocmd"] = genTmplZeroValue
|
||||
funcs["nonzerocmd"] = genTmplNonZeroValue
|
||||
funcs["hasprefix"] = strings.HasPrefix
|
||||
funcs["sorttype"] = genTmplSortType
|
||||
|
||||
genTmplV = gt
|
||||
genTmplFuncs = funcs
|
||||
}
|
||||
|
||||
// genTmplGoFile is used to generate source files from templates.
|
||||
func genTmplGoFile(r io.Reader, w io.Writer) (err error) {
|
||||
genTmplOnce.Do(genTmplInit)
|
||||
|
||||
gt := genTmplV
|
||||
|
||||
t := template.New("").Funcs(genTmplFuncs)
|
||||
|
||||
tmplstr, err := io.ReadAll(r)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if t, err = t.Parse(string(tmplstr)); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var out bytes.Buffer
|
||||
err = t.Execute(&out, gt)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
bout, err := format.Source(out.Bytes())
|
||||
if err != nil {
|
||||
w.Write(out.Bytes()) // write out if error, so we can still see.
|
||||
// w.Write(bout) // write out if error, as much as possible, so we can still see.
|
||||
return
|
||||
}
|
||||
w.Write(bout)
|
||||
return
|
||||
}
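
// Illustrative sketch, not part of the vendored file: a minimal standalone use of
// text/template with a FuncMap, mirroring how genTmplGoFile above wires up encmd,
// decmd and friends. The one-line template text here is a hypothetical stand-in.
func exampleTmplFuncMap() (string, error) {
	t, err := template.New("sketch").
		Funcs(template.FuncMap{"encmd": genTmplEncCommandAsString}).
		Parse(`{{ encmd "uint64" "v" }}`)
	if err != nil {
		return "", err
	}
	var b bytes.Buffer
	err = t.Execute(&b, nil)
	return b.String(), err // expected: e.e.EncodeUint(v)
}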
|
||||
|
||||
func genTmplRun2Go(fnameIn, fnameOut string) {
|
||||
// println("____ " + fnameIn + " --> " + fnameOut + " ______")
|
||||
fin, err := os.Open(fnameIn)
|
||||
genCheckErr(err)
|
||||
defer fin.Close()
|
||||
fout, err := os.Create(fnameOut)
|
||||
genCheckErr(err)
|
||||
defer fout.Close()
|
||||
err = genTmplGoFile(fin, fout)
|
||||
genCheckErr(err)
|
||||
}
|
||||
586
vendor/github.com/ugorji/go/codec/gen_mono.go
generated
vendored
Normal file
@@ -0,0 +1,586 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
//go:build codec.build
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// This tool will monomorphize types scoped to a specific format.
|
||||
//
|
||||
// This tool only monomorphizes the type Name, and not a function Name.
|
||||
// Explicitly, generic functions are not supported, as they cannot be monomorphized
|
||||
// to a specific format without a corresponding name change.
|
||||
//
|
||||
// However, for types constrained to encWriter or decReader,
|
||||
// which are shared across formats, there's no place to put them without duplication.
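
// Illustrative sketch, not part of the vendored file: the name mangling applied by
// this tool, built from the genMonoIsBytesVals and genMonoTitleCase helpers defined
// below. For the "json" format in bytes mode, a generic receiver such as encoder[T]
// becomes encoderJsonBytes, and a T constrained to encDriver becomes
// jsonEncDriverBytes.
func exampleMonoNaming() (recvName, typeParamName string) {
	sfx, _, _, hnameUp := genMonoIsBytesVals("json", true) // "Bytes", ..., "Json"
	recvName = "encoder" + hnameUp + sfx                   // "encoderJsonBytes"
	typeParamName = "json" + genMonoTitleCase("encDriver") + sfx // "jsonEncDriverBytes"
	return
}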
|
||||
|
||||
const genMonoParserMode = parser.AllErrors | parser.SkipObjectResolution
|
||||
|
||||
var genMonoSpecialFieldTypes = []string{"helperDecReader"}
|
||||
|
||||
// These functions should take the address of first param when monomorphized
|
||||
var genMonoSpecialFunc4Addr = []string{} // {"decByteSlice"}
|
||||
|
||||
var genMonoImportsToSkip = []string{`"errors"`, `"fmt"`, `"net/rpc"`}
|
||||
|
||||
var genMonoRefImportsVia_ = [][2]string{
|
||||
// {"errors", "New"},
|
||||
}
|
||||
|
||||
var genMonoCallsToSkip = []string{"callMake"}
|
||||
|
||||
type genMonoFieldState uint
|
||||
|
||||
const (
|
||||
genMonoFieldRecv genMonoFieldState = iota << 1
|
||||
genMonoFieldParamsResult
|
||||
genMonoFieldStruct
|
||||
)
|
||||
|
||||
type genMonoImports struct {
|
||||
set map[string]struct{}
|
||||
specs []*ast.ImportSpec
|
||||
}
|
||||
|
||||
type genMono struct {
|
||||
files map[string][]byte
|
||||
typParam map[string]*ast.Field
|
||||
typParamTransient map[string]*ast.Field
|
||||
}
|
||||
|
||||
func (x *genMono) init() {
|
||||
x.files = make(map[string][]byte)
|
||||
x.typParam = make(map[string]*ast.Field)
|
||||
x.typParamTransient = make(map[string]*ast.Field)
|
||||
}
|
||||
|
||||
func (x *genMono) reset() {
|
||||
clear(x.typParam)
|
||||
clear(x.typParamTransient)
|
||||
}
|
||||
|
||||
func (m *genMono) hdl(hname string) {
|
||||
m.reset()
|
||||
m.do(hname, []string{"encode.go", "decode.go", hname + ".go"}, []string{"base.notfastpath.go", "base.notfastpath.notmono.go"}, "", "")
|
||||
m.do(hname, []string{"base.notfastpath.notmono.go"}, nil, ".notfastpath", ` && (notfastpath || codec.notfastpath)`)
|
||||
m.do(hname, []string{"base.fastpath.notmono.generated.go"}, []string{"base.fastpath.generated.go"}, ".fastpath", ` && !notfastpath && !codec.notfastpath`)
|
||||
}
|
||||
|
||||
func (m *genMono) do(hname string, fnames, tnames []string, fnameInfx string, buildTagsSfx string) {
|
||||
// keep m.typParams across whole call, as all others use it
|
||||
const fnameSfx = ".mono.generated.go"
|
||||
fname := hname + fnameInfx + fnameSfx
|
||||
|
||||
var imports = genMonoImports{set: make(map[string]struct{})}
|
||||
|
||||
r1, fset := m.merge(fnames, tnames, &imports)
|
||||
m.trFile(r1, hname, true)
|
||||
|
||||
r2, fset := m.merge(fnames, tnames, &imports)
|
||||
m.trFile(r2, hname, false)
|
||||
|
||||
r0 := genMonoOutInit(imports.specs, fname)
|
||||
r0.Decls = append(r0.Decls, r1.Decls...)
|
||||
r0.Decls = append(r0.Decls, r2.Decls...)
|
||||
|
||||
// output r1 to a file
|
||||
f, err := os.Create(fname)
|
||||
halt.onerror(err)
|
||||
defer f.Close()
|
||||
|
||||
var s genMonoStrBuilder
|
||||
s.s(`//go:build !notmono && !codec.notmono `).s(buildTagsSfx).s(`
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
`)
|
||||
_, err = f.Write(s.v)
|
||||
halt.onerror(err)
|
||||
err = format.Node(f, fset, r0)
|
||||
halt.onerror(err)
|
||||
}
|
||||
|
||||
func (x *genMono) file(fname string) (b []byte) {
|
||||
b = x.files[fname]
|
||||
if b == nil {
|
||||
var err error
|
||||
b, err = os.ReadFile(fname)
|
||||
halt.onerror(err)
|
||||
x.files[fname] = b
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (x *genMono) merge(fNames, tNames []string, imports *genMonoImports) (dst *ast.File, fset *token.FileSet) {
|
||||
// typParams used in fnLoadTyps
|
||||
var typParams map[string]*ast.Field
|
||||
var loadTyps bool
|
||||
fnLoadTyps := func(node ast.Node) bool {
|
||||
var ok bool
|
||||
switch n := node.(type) {
|
||||
case *ast.GenDecl:
|
||||
if n.Tok == token.TYPE {
|
||||
for _, v := range n.Specs {
|
||||
nn := v.(*ast.TypeSpec)
|
||||
ok = genMonoTypeParamsOk(nn.TypeParams)
|
||||
if ok {
|
||||
// each decl will have only 1 var/type
|
||||
typParams[nn.Name.Name] = nn.TypeParams.List[0]
|
||||
if loadTyps {
|
||||
dst.Decls = append(dst.Decls, &ast.GenDecl{Tok: n.Tok, Specs: []ast.Spec{v}})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// we only merge top-level methods and types
|
||||
fnIdX := func(n *ast.FuncDecl, n2 *ast.IndexExpr) (ok bool) {
|
||||
n9, ok9 := n2.Index.(*ast.Ident)
|
||||
n3, ok := n2.X.(*ast.Ident) // n3 = type name
|
||||
ok = ok && ok9 && n9.Name == "T"
|
||||
if ok {
|
||||
_, ok = x.typParam[n3.Name]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
fnLoadMethodsAndImports := func(node ast.Node) bool {
|
||||
var ok bool
|
||||
switch n := node.(type) {
|
||||
case *ast.FuncDecl:
|
||||
// TypeParams is nil for methods, as it is defined at the type node
|
||||
// instead, look at the name, and
|
||||
// if IndexExpr.Index=T, and IndexExpr.X matches a type name seen already
|
||||
// then ok = true
|
||||
if n.Recv == nil || len(n.Recv.List) != 1 {
|
||||
return false
|
||||
}
|
||||
ok = false
|
||||
switch nn := n.Recv.List[0].Type.(type) {
|
||||
case *ast.IndexExpr:
|
||||
ok = fnIdX(n, nn)
|
||||
case *ast.StarExpr:
|
||||
switch nn2 := nn.X.(type) {
|
||||
case *ast.IndexExpr:
|
||||
ok = fnIdX(n, nn2)
|
||||
}
|
||||
}
|
||||
if ok {
|
||||
dst.Decls = append(dst.Decls, n)
|
||||
}
|
||||
return false
|
||||
case *ast.GenDecl:
|
||||
if n.Tok == token.IMPORT {
|
||||
for _, v := range n.Specs {
|
||||
nn := v.(*ast.ImportSpec)
|
||||
if slices.Contains(genMonoImportsToSkip, nn.Path.Value) {
|
||||
continue
|
||||
}
|
||||
if _, ok = imports.set[nn.Path.Value]; !ok {
|
||||
imports.specs = append(imports.specs, nn)
|
||||
imports.set[nn.Path.Value] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
fset = token.NewFileSet()
|
||||
fnLoadAsts := func(names []string) (asts []*ast.File) {
|
||||
for _, fname := range names {
|
||||
fsrc := x.file(fname)
|
||||
f, err := parser.ParseFile(fset, fname, fsrc, genMonoParserMode)
|
||||
halt.onerror(err)
|
||||
asts = append(asts, f)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
clear(x.typParamTransient)
|
||||
|
||||
dst = &ast.File{
|
||||
Name: &ast.Ident{Name: "codec"},
|
||||
}
|
||||
|
||||
fs := fnLoadAsts(fNames)
|
||||
ts := fnLoadAsts(tNames)
|
||||
|
||||
loadTyps = true
|
||||
typParams = x.typParam
|
||||
for _, v := range fs {
|
||||
ast.Inspect(v, fnLoadTyps)
|
||||
}
|
||||
loadTyps = false
|
||||
typParams = x.typParamTransient
|
||||
for _, v := range ts {
|
||||
ast.Inspect(v, fnLoadTyps)
|
||||
}
|
||||
typParams = nil
|
||||
for _, v := range fs {
|
||||
ast.Inspect(v, fnLoadMethodsAndImports)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (x *genMono) trFile(r *ast.File, hname string, isbytes bool) {
|
||||
fn := func(node ast.Node) bool {
|
||||
switch n := node.(type) {
|
||||
case *ast.TypeSpec:
|
||||
// type x[T encDriver] struct { ... }
|
||||
if !genMonoTypeParamsOk(n.TypeParams) {
|
||||
return false
|
||||
}
|
||||
x.trType(n, hname, isbytes)
|
||||
return false
|
||||
case *ast.FuncDecl:
|
||||
if n.Recv == nil || len(n.Recv.List) != 1 {
|
||||
return false
|
||||
}
|
||||
if _, ok := n.Recv.List[0].Type.(*ast.Ident); ok {
|
||||
return false
|
||||
}
|
||||
tp := x.trMethodSign(n, hname, isbytes) // receiver, params, results
|
||||
// handle the body
|
||||
x.trMethodBody(n.Body, tp, hname, isbytes)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
ast.Inspect(r, fn)
|
||||
|
||||
// set type params to nil, and Pos to NoPos
|
||||
fn = func(node ast.Node) bool {
|
||||
switch n := node.(type) {
|
||||
case *ast.FuncType:
|
||||
if genMonoTypeParamsOk(n.TypeParams) {
|
||||
n.TypeParams = nil
|
||||
}
|
||||
case *ast.TypeSpec: // for type ...
|
||||
if genMonoTypeParamsOk(n.TypeParams) {
|
||||
n.TypeParams = nil
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
ast.Inspect(r, fn)
|
||||
}
|
||||
|
||||
func (x *genMono) trType(n *ast.TypeSpec, hname string, isbytes bool) {
|
||||
sfx, _, _, hnameUp := genMonoIsBytesVals(hname, isbytes)
|
||||
tp := n.TypeParams.List[0]
|
||||
switch tp.Type.(*ast.Ident).Name {
|
||||
case "encDriver", "decDriver":
|
||||
n.Name.Name += hnameUp + sfx
|
||||
case "encWriter", "decReader":
|
||||
n.Name.Name += sfx
|
||||
}
|
||||
|
||||
// handle the Struct and Array types
|
||||
switch nn := n.Type.(type) {
|
||||
case *ast.StructType:
|
||||
x.trStruct(nn, tp, hname, isbytes)
|
||||
case *ast.ArrayType:
|
||||
x.trArray(nn, tp, hname, isbytes)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *genMono) trMethodSign(n *ast.FuncDecl, hname string, isbytes bool) (tp *ast.Field) {
|
||||
// check if recv type is not parameterized
|
||||
tp = x.trField(n.Recv.List[0], nil, hname, isbytes, genMonoFieldRecv)
|
||||
// handle params and results
|
||||
x.trMethodSignNonRecv(n.Type.Params, tp, hname, isbytes)
|
||||
x.trMethodSignNonRecv(n.Type.Results, tp, hname, isbytes)
|
||||
return
|
||||
}
|
||||
|
||||
func (x *genMono) trMethodSignNonRecv(r *ast.FieldList, tp *ast.Field, hname string, isbytes bool) {
|
||||
if r == nil || len(r.List) == 0 {
|
||||
return
|
||||
}
|
||||
for _, v := range r.List {
|
||||
x.trField(v, tp, hname, isbytes, genMonoFieldParamsResult)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *genMono) trStruct(r *ast.StructType, tp *ast.Field, hname string, isbytes bool) {
|
||||
// search for fields, and update accordingly
|
||||
// type x[T encDriver] struct { w T }
|
||||
// var x *A[T]
|
||||
// A[T]
|
||||
if r == nil || r.Fields == nil || len(r.Fields.List) == 0 {
|
||||
return
|
||||
}
|
||||
for _, v := range r.Fields.List {
|
||||
x.trField(v, tp, hname, isbytes, genMonoFieldStruct)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *genMono) trArray(n *ast.ArrayType, tp *ast.Field, hname string, isbytes bool) {
|
||||
sfx, _, _, hnameUp := genMonoIsBytesVals(hname, isbytes)
|
||||
// type fastpathEs[T encDriver] [56]fastpathE[T]
|
||||
// p := tp.Names[0].Name
|
||||
switch elt := n.Elt.(type) {
|
||||
// case *ast.InterfaceType:
|
||||
case *ast.IndexExpr:
|
||||
if elt.Index.(*ast.Ident).Name == "T" { // generic
|
||||
n.Elt = ast.NewIdent(elt.X.(*ast.Ident).Name + hnameUp + sfx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (x *genMono) trMethodBody(r *ast.BlockStmt, tp *ast.Field, hname string, isbytes bool) {
|
||||
// find the parent node for an indexExpr, or a T/*T, and set the value back in there
|
||||
|
||||
fn := func(pnode ast.Node) bool {
|
||||
var pn *ast.Ident
|
||||
fnUp := func() {
|
||||
x.updateIdentForT(pn, hname, tp, isbytes, false)
|
||||
}
|
||||
switch n := pnode.(type) {
|
||||
// case *ast.SelectorExpr:
|
||||
// case *ast.TypeAssertExpr:
|
||||
// case *ast.IndexExpr:
|
||||
case *ast.StarExpr:
|
||||
if genMonoUpdateIndexExprT(&pn, n.X) {
|
||||
n.X = pn
|
||||
fnUp()
|
||||
}
|
||||
case *ast.CallExpr:
|
||||
for i4, n4 := range n.Args {
|
||||
if genMonoUpdateIndexExprT(&pn, n4) {
|
||||
n.Args[i4] = pn
|
||||
fnUp()
|
||||
}
|
||||
}
|
||||
if n4, ok4 := n.Fun.(*ast.Ident); ok4 && slices.Contains(genMonoSpecialFunc4Addr, n4.Name) {
|
||||
n.Args[0] = &ast.UnaryExpr{Op: token.AND, X: n.Args[0].(*ast.SelectorExpr)}
|
||||
}
|
||||
case *ast.CompositeLit:
|
||||
if genMonoUpdateIndexExprT(&pn, n.Type) {
|
||||
n.Type = pn
|
||||
fnUp()
|
||||
}
|
||||
case *ast.ArrayType:
|
||||
if genMonoUpdateIndexExprT(&pn, n.Elt) {
|
||||
n.Elt = pn
|
||||
fnUp()
|
||||
}
|
||||
case *ast.ValueSpec:
|
||||
for i2, n2 := range n.Values {
|
||||
if genMonoUpdateIndexExprT(&pn, n2) {
|
||||
n.Values[i2] = pn
|
||||
fnUp()
|
||||
}
|
||||
}
|
||||
if genMonoUpdateIndexExprT(&pn, n.Type) {
|
||||
n.Type = pn
|
||||
fnUp()
|
||||
}
|
||||
case *ast.BinaryExpr:
|
||||
// early return here, since the 2 things can apply
|
||||
if genMonoUpdateIndexExprT(&pn, n.X) {
|
||||
n.X = pn
|
||||
fnUp()
|
||||
}
|
||||
if genMonoUpdateIndexExprT(&pn, n.Y) {
|
||||
n.Y = pn
|
||||
fnUp()
|
||||
}
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
ast.Inspect(r, fn)
|
||||
}
|
||||
|
||||
func (x *genMono) trField(f *ast.Field, tpt *ast.Field, hname string, isbytes bool, state genMonoFieldState) (tp *ast.Field) {
|
||||
var pn *ast.Ident
|
||||
switch nn := f.Type.(type) {
|
||||
case *ast.IndexExpr:
|
||||
if genMonoUpdateIndexExprT(&pn, nn) {
|
||||
f.Type = pn
|
||||
}
|
||||
case *ast.StarExpr:
|
||||
if genMonoUpdateIndexExprT(&pn, nn.X) {
|
||||
nn.X = pn
|
||||
}
|
||||
case *ast.FuncType:
|
||||
x.trMethodSignNonRecv(nn.Params, tpt, hname, isbytes)
|
||||
x.trMethodSignNonRecv(nn.Results, tpt, hname, isbytes)
|
||||
return
|
||||
case *ast.ArrayType:
|
||||
x.trArray(nn, tpt, hname, isbytes)
|
||||
return
|
||||
case *ast.Ident:
|
||||
if state == genMonoFieldRecv || nn.Name != "T" {
|
||||
return
|
||||
}
|
||||
pn = nn // "T"
|
||||
if state == genMonoFieldParamsResult {
|
||||
f.Type = &ast.StarExpr{X: pn}
|
||||
}
|
||||
}
|
||||
if pn == nil {
|
||||
return
|
||||
}
|
||||
|
||||
tp = x.updateIdentForT(pn, hname, tpt, isbytes, true)
|
||||
return
|
||||
}
|
||||
|
||||
func (x *genMono) updateIdentForT(pn *ast.Ident, hname string, tp *ast.Field,
|
||||
isbytes bool, lookupTP bool) (tp2 *ast.Field) {
|
||||
sfx, writer, reader, hnameUp := genMonoIsBytesVals(hname, isbytes)
|
||||
// handle special ones e.g. helperDecReader et al
|
||||
if slices.Contains(genMonoSpecialFieldTypes, pn.Name) {
|
||||
pn.Name += sfx
|
||||
return
|
||||
}
|
||||
|
||||
if pn.Name != "T" && lookupTP {
|
||||
tp = x.typParam[pn.Name]
|
||||
if tp == nil {
|
||||
tp = x.typParamTransient[pn.Name]
|
||||
}
|
||||
}
|
||||
|
||||
paramtyp := tp.Type.(*ast.Ident).Name
|
||||
if pn.Name == "T" {
|
||||
switch paramtyp {
|
||||
case "encDriver", "decDriver":
|
||||
pn.Name = hname + genMonoTitleCase(paramtyp) + sfx
|
||||
case "encWriter":
|
||||
pn.Name = writer
|
||||
case "decReader":
|
||||
pn.Name = reader
|
||||
}
|
||||
} else {
|
||||
switch paramtyp {
|
||||
case "encDriver", "decDriver":
|
||||
pn.Name += hnameUp + sfx
|
||||
case "encWriter", "decReader":
|
||||
pn.Name += sfx
|
||||
}
|
||||
}
|
||||
return tp
|
||||
}
|
||||
|
||||
func genMonoUpdateIndexExprT(pn **ast.Ident, node ast.Node) (pnok bool) {
|
||||
*pn = nil
|
||||
if n2, ok := node.(*ast.IndexExpr); ok {
|
||||
n9, ok9 := n2.Index.(*ast.Ident)
|
||||
n3, ok := n2.X.(*ast.Ident)
|
||||
if ok && ok9 && n9.Name == "T" {
|
||||
*pn, pnok = ast.NewIdent(n3.Name), true
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func genMonoTitleCase(s string) string {
|
||||
return strings.ToUpper(s[:1]) + s[1:]
|
||||
}
|
||||
|
||||
func genMonoIsBytesVals(hName string, isbytes bool) (suffix, writer, reader, hNameUp string) {
|
||||
hNameUp = genMonoTitleCase(hName)
|
||||
if isbytes {
|
||||
return "Bytes", "bytesEncAppender", "bytesDecReader", hNameUp
|
||||
}
|
||||
return "IO", "bufioEncWriter", "ioDecReader", hNameUp
|
||||
}
|
||||
|
||||
func genMonoTypeParamsOk(v *ast.FieldList) (ok bool) {
|
||||
if v == nil || v.List == nil || len(v.List) != 1 {
|
||||
return false
|
||||
}
|
||||
pn := v.List[0]
|
||||
if len(pn.Names) != 1 {
|
||||
return false
|
||||
}
|
||||
pnName := pn.Names[0].Name
|
||||
if pnName != "T" {
|
||||
return false
|
||||
}
|
||||
// ignore any nodes which are not idents e.g. cmp.orderedRv
|
||||
vv, ok := pn.Type.(*ast.Ident)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
switch vv.Name {
|
||||
case "encDriver", "decDriver", "encWriter", "decReader":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func genMonoCopy(src *ast.File) (dst *ast.File) {
|
||||
dst = &ast.File{
|
||||
Name: &ast.Ident{Name: "codec"},
|
||||
}
|
||||
dst.Decls = append(dst.Decls, src.Decls...)
|
||||
return
|
||||
}
|
||||
|
||||
type genMonoStrBuilder struct {
|
||||
v []byte
|
||||
}
|
||||
|
||||
func (x *genMonoStrBuilder) s(v string) *genMonoStrBuilder {
|
||||
x.v = append(x.v, v...)
|
||||
return x
|
||||
}
|
||||
|
||||
func genMonoOutInit(importSpecs []*ast.ImportSpec, fname string) (f *ast.File) {
|
||||
// ParseFile seems to skip the //go:build stanza
|
||||
// it should be written directly into the file
|
||||
var s genMonoStrBuilder
|
||||
s.s(`
|
||||
package codec
|
||||
|
||||
import (
|
||||
`)
|
||||
for _, v := range importSpecs {
|
||||
s.s("\t").s(v.Path.Value).s("\n")
|
||||
}
|
||||
s.s(")\n")
|
||||
for _, v := range genMonoRefImportsVia_ {
|
||||
s.s("var _ = ").s(v[0]).s(".").s(v[1]).s("\n")
|
||||
}
|
||||
f, err := parser.ParseFile(token.NewFileSet(), fname, s.v, genMonoParserMode)
|
||||
halt.onerror(err)
|
||||
return
|
||||
}
|
||||
|
||||
func genMonoAll() {
|
||||
// hdls := []Handle{
|
||||
// (*SimpleHandle)(nil),
|
||||
// (*JsonHandle)(nil),
|
||||
// (*CborHandle)(nil),
|
||||
// (*BincHandle)(nil),
|
||||
// (*MsgpackHandle)(nil),
|
||||
// }
|
||||
hdls := []string{"simple", "json", "cbor", "binc", "msgpack"}
|
||||
var m genMono
|
||||
m.init()
|
||||
for _, v := range hdls {
|
||||
m.hdl(v)
|
||||
}
|
||||
}
|
||||
20
vendor/github.com/ugorji/go/codec/goversion_check_supported.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
//go:build !go1.21
|
||||
|
||||
package codec
|
||||
|
||||
import "errors"
|
||||
|
||||
// Moving forward, this codec package will support at least the last 4 major Go releases.
|
||||
//
|
||||
// As of early summer 2025, codec will support go 1.21, 1.22, 1.23, 1.24 releases of go.
|
||||
// This allows use of the following:
|
||||
// - stabilized generics
|
||||
// - min/max/clear
|
||||
// - slice->array conversion
|
||||
|
||||
func init() {
|
||||
panic(errors.New("codec: supports go 1.21 and above only"))
|
||||
}
|
||||
16
vendor/github.com/ugorji/go/codec/goversion_noswissmap_unsafe.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
//go:build !safe && !codec.safe && !appengine && !go1.24
|
||||
|
||||
package codec
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// retrofitted from hIter struct
|
||||
|
||||
type unsafeMapIterPadding struct {
|
||||
_ [6]unsafe.Pointer // padding: *maptype, *hmap, buckets, *bmap, overflow, oldoverflow,
|
||||
_ [4]uintptr // padding: uintptr, uint8, bool fields
|
||||
_ uintptr // padding: wasted (try to fill cache-line at multiple of 4)
|
||||
}
|
||||
15
vendor/github.com/ugorji/go/codec/goversion_swissmap_unsafe.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
//go:build !safe && !codec.safe && !appengine && go1.24
|
||||
|
||||
package codec
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// retrofitted from linknameIter struct (compatibility layer for swissmaps)
|
||||
|
||||
type unsafeMapIterPadding struct {
|
||||
_ [2]unsafe.Pointer // padding: *abi.SwissMapType, *maps.Iter
|
||||
_ uintptr // padding: wasted (try to fill cache-line at multiple of 4)
|
||||
}
|
||||
3278
vendor/github.com/ugorji/go/codec/helper.go
generated
vendored
Normal file
File diff suppressed because it is too large
0
vendor/github.com/ugorji/go/codec/helper.s
generated
vendored
Normal file
706
vendor/github.com/ugorji/go/codec/helper_notunsafe.go
generated
vendored
Normal file
@@ -0,0 +1,706 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
//go:build !go1.9 || safe || codec.safe || appengine
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
// "hash/adler32"
|
||||
"math"
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// This file has safe variants of some helper functions.
|
||||
// MARKER: See helper_unsafe.go for the usage documentation.
|
||||
|
||||
const safeMode = true
|
||||
|
||||
func isTransientType4Size(size uint32) bool { return true }
|
||||
|
||||
type mapReqParams struct{}
|
||||
|
||||
func getMapReqParams(ti *typeInfo) (r mapReqParams) { return }
|
||||
|
||||
func byteAt(b []byte, index uint) byte {
|
||||
return b[index]
|
||||
}
|
||||
|
||||
func setByteAt(b []byte, index uint, val byte) {
|
||||
b[index] = val
|
||||
}
|
||||
|
||||
func stringView(v []byte) string {
|
||||
return string(v)
|
||||
}
|
||||
|
||||
func bytesView(v string) []byte {
|
||||
return []byte(v)
|
||||
}
|
||||
|
||||
func byteSliceSameData(v1 []byte, v2 []byte) bool {
|
||||
return cap(v1) != 0 && cap(v2) != 0 && &(v1[:1][0]) == &(v2[:1][0])
|
||||
}
|
||||
|
||||
func isNil(v interface{}, checkPtr bool) (rv reflect.Value, b bool) {
|
||||
b = v == nil
|
||||
if b || !checkPtr {
|
||||
return
|
||||
}
|
||||
rv = reflect.ValueOf(v)
|
||||
if rv.Kind() == reflect.Ptr {
|
||||
b = rv.IsNil()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ptrToLowLevel(v interface{}) interface{} {
|
||||
return v
|
||||
}
|
||||
|
||||
func lowLevelToPtr[T any](v interface{}) *T {
|
||||
return v.(*T)
|
||||
}
|
||||
|
||||
func eq4i(i0, i1 interface{}) bool {
|
||||
return i0 == i1
|
||||
}
|
||||
|
||||
func rv4iptr(i interface{}) reflect.Value { return reflect.ValueOf(i) }
|
||||
func rv4istr(i interface{}) reflect.Value { return reflect.ValueOf(i) }
|
||||
|
||||
func rv2i(rv reflect.Value) interface{} {
|
||||
if rv.IsValid() {
|
||||
return rv.Interface()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func rvAddr(rv reflect.Value, ptrType reflect.Type) reflect.Value {
|
||||
return rv.Addr()
|
||||
}
|
||||
|
||||
func rvPtrIsNil(rv reflect.Value) bool {
|
||||
return rv.IsNil()
|
||||
}
|
||||
|
||||
func rvIsNil(rv reflect.Value) bool {
|
||||
return rv.IsNil()
|
||||
}
|
||||
|
||||
func rvSetSliceLen(rv reflect.Value, length int) {
|
||||
rv.SetLen(length)
|
||||
}
|
||||
|
||||
func rvZeroAddrK(t reflect.Type, k reflect.Kind) reflect.Value {
|
||||
return reflect.New(t).Elem()
|
||||
}
|
||||
|
||||
func rvZeroK(t reflect.Type, k reflect.Kind) reflect.Value {
|
||||
return reflect.Zero(t)
|
||||
}
|
||||
|
||||
func rvConvert(v reflect.Value, t reflect.Type) (rv reflect.Value) {
|
||||
// Note that reflect.Value.Convert(...) will make a copy if it is addressable.
|
||||
// Since we decode into the passed value, we must try to convert the addressable value.
|
||||
if v.CanAddr() {
|
||||
return v.Addr().Convert(reflect.PtrTo(t)).Elem()
|
||||
}
|
||||
return v.Convert(t)
|
||||
}
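
// Illustrative sketch, not part of the vendored file: why rvConvert above converts
// through Addr() when it can. Converting the address keeps the result aliased to
// the original storage, so writes through the converted value land in the caller's
// variable (which is what decoding into a converted value relies on).
type exampleMyInt int

func exampleRvConvertAliases() int {
	var v exampleMyInt
	rv := reflect.ValueOf(&v).Elem()            // addressable reflect.Value of v
	cv := rvConvert(rv, reflect.TypeOf(int(0))) // *exampleMyInt -> *int -> Elem()
	cv.SetInt(7)                                // writes through to v
	return int(v)                               // 7
}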
|
||||
|
||||
func rt2id(rt reflect.Type) uintptr {
|
||||
return reflect.ValueOf(rt).Pointer()
|
||||
}
|
||||
|
||||
func i2rtid(i interface{}) uintptr {
|
||||
return reflect.ValueOf(reflect.TypeOf(i)).Pointer()
|
||||
}
|
||||
|
||||
// --------------------------
|
||||
|
||||
// is this an empty interface/ptr/struct/map/slice/chan/array
|
||||
func isEmptyContainerValue(v reflect.Value, tinfos *TypeInfos, recursive bool) (empty bool) {
|
||||
switch v.Kind() {
|
||||
case reflect.Array:
|
||||
for i, vlen := 0, v.Len(); i < vlen; i++ {
|
||||
if !isEmptyValue(v.Index(i), tinfos, false) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Map, reflect.Slice, reflect.Chan:
|
||||
return v.IsNil() || v.Len() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
empty = v.IsNil()
|
||||
if recursive && !empty {
|
||||
return isEmptyValue(v.Elem(), tinfos, recursive)
|
||||
}
|
||||
return empty
|
||||
case reflect.Struct:
|
||||
return isEmptyStruct(v, tinfos, recursive)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isEmptyValue(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Invalid:
|
||||
return true
|
||||
case reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Array:
|
||||
// zero := reflect.Zero(v.Type().Elem())
|
||||
// can I just check if the whole value is equal to zeros? seems not.
|
||||
// can I just check if the whole value is equal to its zero value? no.
|
||||
// Well, then we check if each element is empty, without recursing.
|
||||
for i, vlen := 0, v.Len(); i < vlen; i++ {
|
||||
if !isEmptyValue(v.Index(i), tinfos, false) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Map, reflect.Slice, reflect.Chan:
|
||||
return v.IsNil() || v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
c := v.Complex()
|
||||
return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Func, reflect.UnsafePointer:
|
||||
return v.IsNil()
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
isnil := v.IsNil()
|
||||
if recursive && !isnil {
|
||||
return isEmptyValue(v.Elem(), tinfos, recursive)
|
||||
}
|
||||
return isnil
|
||||
case reflect.Struct:
|
||||
return isEmptyStruct(v, tinfos, recursive)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty:
|
||||
// - does it implement IsZero() bool
|
||||
// - is it comparable, and can I compare directly using ==
// - if recursive, then walk through the encodable fields
//   and check if they are empty or not.
|
||||
func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, recursive bool) bool {
|
||||
// v is a struct kind - no need to check again.
|
||||
// We only check isZero on a struct kind, to reduce the amount of times
|
||||
// that we lookup the rtid and typeInfo for each type as we walk the tree.
|
||||
|
||||
vt := v.Type()
|
||||
rtid := rt2id(vt)
|
||||
if tinfos == nil {
|
||||
tinfos = defTypeInfos
|
||||
}
|
||||
ti := tinfos.get(rtid, vt)
|
||||
if ti.rtid == timeTypId {
|
||||
return rv2i(v).(time.Time).IsZero()
|
||||
}
|
||||
if ti.flagIsZeroer {
|
||||
return rv2i(v).(isZeroer).IsZero()
|
||||
}
|
||||
if ti.flagIsZeroerPtr && v.CanAddr() {
|
||||
return rv2i(v.Addr()).(isZeroer).IsZero()
|
||||
}
|
||||
if ti.flagIsCodecEmptyer {
|
||||
return rv2i(v).(isCodecEmptyer).IsCodecEmpty()
|
||||
}
|
||||
if ti.flagIsCodecEmptyerPtr && v.CanAddr() {
|
||||
return rv2i(v.Addr()).(isCodecEmptyer).IsCodecEmpty()
|
||||
}
|
||||
if ti.flagComparable {
|
||||
return rv2i(v) == rv2i(rvZeroK(vt, reflect.Struct))
|
||||
}
|
||||
if !recursive {
|
||||
return false
|
||||
}
|
||||
// We only care about what we can encode/decode,
|
||||
// so that is what we use to check omitEmpty.
|
||||
for _, si := range ti.sfi.source() {
|
||||
sfv := si.fieldNoAlloc(v, true)
|
||||
if sfv.IsValid() && !isEmptyValue(sfv, tinfos, recursive) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
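// The checks above are what back omitempty handling in this package: a struct
// field is skipped during encoding when isEmptyValue reports true for it.
// A minimal usage sketch (struct tags follow the package's `codec` tag convention):
package main

import (
	"fmt"

	"github.com/ugorji/go/codec"
)

type Item struct {
	Name  string `codec:"name,omitempty"`
	Count int    `codec:"count,omitempty"`
}

func main() {
	var out []byte
	enc := codec.NewEncoderBytes(&out, new(codec.JsonHandle))
	if err := enc.Encode(Item{Name: "x"}); err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // Count is zero, so it is omitted: {"name":"x"}
}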
|
||||
func makeMapReflect(t reflect.Type, size int) reflect.Value {
|
||||
return reflect.MakeMapWithSize(t, size)
|
||||
}
|
||||
|
||||
// --------------------------
|
||||
|
||||
type perTypeElem struct {
|
||||
t reflect.Type
|
||||
rtid uintptr
|
||||
zero reflect.Value
|
||||
addr [2]reflect.Value
|
||||
}
|
||||
|
||||
func (x *perTypeElem) get(index uint8) (v reflect.Value) {
|
||||
v = x.addr[index%2]
|
||||
if v.IsValid() {
|
||||
v.Set(x.zero)
|
||||
} else {
|
||||
v = reflect.New(x.t).Elem()
|
||||
x.addr[index%2] = v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type perType struct {
|
||||
v []perTypeElem
|
||||
}
|
||||
|
||||
type decPerType = perType
|
||||
|
||||
type encPerType = perType
|
||||
|
||||
func (x *perType) elem(t reflect.Type) *perTypeElem {
|
||||
rtid := rt2id(t)
|
||||
var h, i uint
|
||||
var j = uint(len(x.v))
|
||||
LOOP:
|
||||
if i < j {
|
||||
h = (i + j) >> 1 // avoid overflow when computing h // h = i + (j-i)/2
|
||||
if x.v[h].rtid < rtid {
|
||||
i = h + 1
|
||||
} else {
|
||||
j = h
|
||||
}
|
||||
goto LOOP
|
||||
}
|
||||
if i < uint(len(x.v)) {
|
||||
if x.v[i].rtid != rtid {
|
||||
x.v = append(x.v, perTypeElem{})
|
||||
copy(x.v[i+1:], x.v[i:])
|
||||
x.v[i] = perTypeElem{t: t, rtid: rtid, zero: reflect.Zero(t)}
|
||||
}
|
||||
} else {
|
||||
x.v = append(x.v, perTypeElem{t: t, rtid: rtid, zero: reflect.Zero(t)})
|
||||
}
|
||||
return &x.v[i]
|
||||
}
|
||||
|
||||
func (x *perType) TransientAddrK(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
|
||||
return x.elem(t).get(0)
|
||||
}
|
||||
|
||||
func (x *perType) TransientAddr2K(t reflect.Type, k reflect.Kind) (rv reflect.Value) {
|
||||
return x.elem(t).get(1)
|
||||
}
|
||||
|
||||
func (x *perType) AddressableRO(v reflect.Value) (rv reflect.Value) {
|
||||
rv = x.elem(v.Type()).get(0)
|
||||
rvSetDirect(rv, v)
|
||||
return
|
||||
}
|
||||
|
||||
// --------------------------
|
||||
type mapIter struct {
|
||||
t *reflect.MapIter
|
||||
m reflect.Value
|
||||
values bool
|
||||
}
|
||||
|
||||
func (t *mapIter) Next() (r bool) {
|
||||
return t.t.Next()
|
||||
}
|
||||
|
||||
func (t *mapIter) Key() reflect.Value {
|
||||
return t.t.Key()
|
||||
}
|
||||
|
||||
func (t *mapIter) Value() (r reflect.Value) {
|
||||
if t.values {
|
||||
return t.t.Value()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *mapIter) Done() {}
|
||||
|
||||
func mapRange(t *mapIter, m, k, v reflect.Value, values bool) {
|
||||
*t = mapIter{
|
||||
m: m,
|
||||
t: m.MapRange(),
|
||||
values: values,
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------
|
||||
type structFieldInfos struct {
|
||||
c []*structFieldInfo
|
||||
s []*structFieldInfo
|
||||
t uint8To32TrieNode
|
||||
// byName map[string]*structFieldInfo // find sfi given a name
|
||||
}
|
||||
|
||||
func (x *structFieldInfos) load(source, sorted []*structFieldInfo) {
|
||||
x.c = source
|
||||
x.s = sorted
|
||||
}
|
||||
|
||||
// func (x *structFieldInfos) count() int { return len(x.c) }
|
||||
func (x *structFieldInfos) source() (v []*structFieldInfo) { return x.c }
|
||||
func (x *structFieldInfos) sorted() (v []*structFieldInfo) { return x.s }
|
||||
|
||||
// --------------------------
|
||||
|
||||
type uint8To32TrieNodeNoKids struct {
|
||||
key uint8
|
||||
valid bool // the value marks the end of a full stored string
|
||||
_ [2]byte // padding
|
||||
value uint32
|
||||
}
|
||||
|
||||
type uint8To32TrieNodeKids = []uint8To32TrieNode
|
||||
|
||||
func (x *uint8To32TrieNode) setKids(kids []uint8To32TrieNode) { x.kids = kids }
|
||||
func (x *uint8To32TrieNode) getKids() []uint8To32TrieNode { return x.kids }
|
||||
func (x *uint8To32TrieNode) truncKids() { x.kids = x.kids[:0] } // set len to 0
|
||||
|
||||
// --------------------------
|
||||
func (n *fauxUnion) ru() reflect.Value {
|
||||
return reflect.ValueOf(&n.u).Elem()
|
||||
}
|
||||
func (n *fauxUnion) ri() reflect.Value {
|
||||
return reflect.ValueOf(&n.i).Elem()
|
||||
}
|
||||
func (n *fauxUnion) rf() reflect.Value {
|
||||
return reflect.ValueOf(&n.f).Elem()
|
||||
}
|
||||
func (n *fauxUnion) rl() reflect.Value {
|
||||
return reflect.ValueOf(&n.l).Elem()
|
||||
}
|
||||
func (n *fauxUnion) rs() reflect.Value {
|
||||
return reflect.ValueOf(&n.s).Elem()
|
||||
}
|
||||
func (n *fauxUnion) rt() reflect.Value {
|
||||
return reflect.ValueOf(&n.t).Elem()
|
||||
}
|
||||
func (n *fauxUnion) rb() reflect.Value {
|
||||
return reflect.ValueOf(&n.b).Elem()
|
||||
}
|
||||
|
||||
// --------------------------
|
||||
func rvSetBytes(rv reflect.Value, v []byte) {
|
||||
rv.SetBytes(v)
|
||||
}
|
||||
|
||||
func rvSetString(rv reflect.Value, v string) {
|
||||
rv.SetString(v)
|
||||
}
|
||||
|
||||
func rvSetBool(rv reflect.Value, v bool) {
|
||||
rv.SetBool(v)
|
||||
}
|
||||
|
||||
func rvSetTime(rv reflect.Value, v time.Time) {
|
||||
rv.Set(reflect.ValueOf(v))
|
||||
}
|
||||
|
||||
func rvSetFloat32(rv reflect.Value, v float32) {
|
||||
rv.SetFloat(float64(v))
|
||||
}
|
||||
|
||||
func rvSetFloat64(rv reflect.Value, v float64) {
|
||||
rv.SetFloat(v)
|
||||
}
|
||||
|
||||
func rvSetComplex64(rv reflect.Value, v complex64) {
|
||||
rv.SetComplex(complex128(v))
|
||||
}
|
||||
|
||||
func rvSetComplex128(rv reflect.Value, v complex128) {
|
||||
rv.SetComplex(v)
|
||||
}
|
||||
|
||||
func rvSetInt(rv reflect.Value, v int) {
|
||||
rv.SetInt(int64(v))
|
||||
}
|
||||
|
||||
func rvSetInt8(rv reflect.Value, v int8) {
|
||||
rv.SetInt(int64(v))
|
||||
}
|
||||
|
||||
func rvSetInt16(rv reflect.Value, v int16) {
|
||||
rv.SetInt(int64(v))
|
||||
}
|
||||
|
||||
func rvSetInt32(rv reflect.Value, v int32) {
|
||||
rv.SetInt(int64(v))
|
||||
}
|
||||
|
||||
func rvSetInt64(rv reflect.Value, v int64) {
|
||||
rv.SetInt(v)
|
||||
}
|
||||
|
||||
func rvSetUint(rv reflect.Value, v uint) {
|
||||
rv.SetUint(uint64(v))
|
||||
}
|
||||
|
||||
func rvSetUintptr(rv reflect.Value, v uintptr) {
|
||||
rv.SetUint(uint64(v))
|
||||
}
|
||||
|
||||
func rvSetUint8(rv reflect.Value, v uint8) {
|
||||
rv.SetUint(uint64(v))
|
||||
}
|
||||
|
||||
func rvSetUint16(rv reflect.Value, v uint16) {
|
||||
rv.SetUint(uint64(v))
|
||||
}
|
||||
|
||||
func rvSetUint32(rv reflect.Value, v uint32) {
|
||||
rv.SetUint(uint64(v))
|
||||
}
|
||||
|
||||
func rvSetUint64(rv reflect.Value, v uint64) {
|
||||
rv.SetUint(v)
|
||||
}
|
||||
|
||||
// ----------------
|
||||
|
||||
func rvSetDirect(rv reflect.Value, v reflect.Value) {
|
||||
rv.Set(v)
|
||||
}
|
||||
|
||||
func rvSetDirectZero(rv reflect.Value) {
|
||||
rv.Set(reflect.Zero(rv.Type()))
|
||||
}
|
||||
|
||||
// func rvSet(rv reflect.Value, v reflect.Value) {
|
||||
// rv.Set(v)
|
||||
// }
|
||||
|
||||
func rvSetIntf(rv reflect.Value, v reflect.Value) {
|
||||
rv.Set(v)
|
||||
}
|
||||
|
||||
func rvSetZero(rv reflect.Value) {
|
||||
rv.Set(reflect.Zero(rv.Type()))
|
||||
}
|
||||
|
||||
func rvSlice(rv reflect.Value, length int) reflect.Value {
|
||||
return rv.Slice(0, length)
|
||||
}
|
||||
|
||||
func rvMakeSlice(rv reflect.Value, ti *typeInfo, xlen, xcap int) (v reflect.Value, set bool) {
|
||||
v = reflect.MakeSlice(ti.rt, xlen, xcap)
|
||||
if rv.Len() > 0 {
|
||||
reflect.Copy(v, rv)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func rvGrowSlice(rv reflect.Value, ti *typeInfo, cap, incr int) (v reflect.Value, newcap int, set bool) {
|
||||
newcap = int(growCap(uint(cap), uint(ti.elemsize), uint(incr)))
|
||||
v = reflect.MakeSlice(ti.rt, newcap, newcap)
|
||||
if rv.Len() > 0 {
|
||||
reflect.Copy(v, rv)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ----------------
|
||||
|
||||
func rvArrayIndex(rv reflect.Value, i int, _ *typeInfo, _ bool) reflect.Value {
|
||||
return rv.Index(i)
|
||||
}
|
||||
|
||||
// func rvArrayIndex(rv reflect.Value, i int, ti *typeInfo) reflect.Value {
|
||||
// return rv.Index(i)
|
||||
// }
|
||||
|
||||
func rvSliceZeroCap(t reflect.Type) (v reflect.Value) {
|
||||
return reflect.MakeSlice(t, 0, 0)
|
||||
}
|
||||
|
||||
func rvLenSlice(rv reflect.Value) int {
|
||||
return rv.Len()
|
||||
}
|
||||
|
||||
func rvCapSlice(rv reflect.Value) int {
|
||||
return rv.Cap()
|
||||
}
|
||||
|
||||
func rvGetArrayBytes(rv reflect.Value, scratch []byte) (bs []byte) {
|
||||
l := rv.Len()
|
||||
if scratch == nil && rv.CanAddr() {
|
||||
return rv.Slice(0, l).Bytes()
|
||||
}
|
||||
|
||||
if l <= cap(scratch) {
|
||||
bs = scratch[:l]
|
||||
} else {
|
||||
bs = make([]byte, l)
|
||||
}
|
||||
reflect.Copy(reflect.ValueOf(bs), rv)
|
||||
return
|
||||
}
|
||||
|
||||
func rvGetArray4Slice(rv reflect.Value) (v reflect.Value) {
|
||||
v = rvZeroAddrK(reflect.ArrayOf(rvLenSlice(rv), rv.Type().Elem()), reflect.Array)
|
||||
reflect.Copy(v, rv)
|
||||
return
|
||||
}
|
||||
|
||||
func rvGetSlice4Array(rv reflect.Value, v interface{}) {
|
||||
// v is a pointer to a slice to be populated
|
||||
|
||||
// rv.Slice fails if address is not addressable, which can occur during encoding.
|
||||
// Consequently, check if non-addressable, and if so, make new slice and copy into it first.
|
||||
// MARKER: this *may* cause allocation if non-addressable, unfortunately.
|
||||
|
||||
rve := reflect.ValueOf(v).Elem()
|
||||
l := rv.Len()
|
||||
if rv.CanAddr() {
|
||||
rve.Set(rv.Slice(0, l))
|
||||
} else {
|
||||
rvs := reflect.MakeSlice(rve.Type(), l, l)
|
||||
reflect.Copy(rvs, rv)
|
||||
rve.Set(rvs)
|
||||
}
|
||||
// reflect.ValueOf(v).Elem().Set(rv.Slice(0, rv.Len()))
|
||||
}
|
||||
|
||||
func rvCopySlice(dest, src reflect.Value, _ reflect.Type) {
|
||||
reflect.Copy(dest, src)
|
||||
}
|
||||
|
||||
// ------------
|
||||
|
||||
func rvGetBool(rv reflect.Value) bool {
|
||||
return rv.Bool()
|
||||
}
|
||||
|
||||
func rvGetBytes(rv reflect.Value) []byte {
|
||||
return rv.Bytes()
|
||||
}
|
||||
|
||||
func rvGetTime(rv reflect.Value) time.Time {
|
||||
return rv2i(rv).(time.Time)
|
||||
}
|
||||
|
||||
func rvGetString(rv reflect.Value) string {
|
||||
return rv.String()
|
||||
}
|
||||
|
||||
func rvGetFloat64(rv reflect.Value) float64 {
|
||||
return rv.Float()
|
||||
}
|
||||
|
||||
func rvGetFloat32(rv reflect.Value) float32 {
|
||||
return float32(rv.Float())
|
||||
}
|
||||
|
||||
func rvGetComplex64(rv reflect.Value) complex64 {
|
||||
return complex64(rv.Complex())
|
||||
}
|
||||
|
||||
func rvGetComplex128(rv reflect.Value) complex128 {
|
||||
return rv.Complex()
|
||||
}
|
||||
|
||||
func rvGetInt(rv reflect.Value) int {
|
||||
return int(rv.Int())
|
||||
}
|
||||
|
||||
func rvGetInt8(rv reflect.Value) int8 {
|
||||
return int8(rv.Int())
|
||||
}
|
||||
|
||||
func rvGetInt16(rv reflect.Value) int16 {
|
||||
return int16(rv.Int())
|
||||
}
|
||||
|
||||
func rvGetInt32(rv reflect.Value) int32 {
|
||||
return int32(rv.Int())
|
||||
}
|
||||
|
||||
func rvGetInt64(rv reflect.Value) int64 {
|
||||
return rv.Int()
|
||||
}
|
||||
|
||||
func rvGetUint(rv reflect.Value) uint {
|
||||
return uint(rv.Uint())
|
||||
}
|
||||
|
||||
func rvGetUint8(rv reflect.Value) uint8 {
|
||||
return uint8(rv.Uint())
|
||||
}
|
||||
|
||||
func rvGetUint16(rv reflect.Value) uint16 {
|
||||
return uint16(rv.Uint())
|
||||
}
|
||||
|
||||
func rvGetUint32(rv reflect.Value) uint32 {
|
||||
return uint32(rv.Uint())
|
||||
}
|
||||
|
||||
func rvGetUint64(rv reflect.Value) uint64 {
|
||||
return rv.Uint()
|
||||
}
|
||||
|
||||
func rvGetUintptr(rv reflect.Value) uintptr {
|
||||
return uintptr(rv.Uint())
|
||||
}
|
||||
|
||||
func rvLenMap(rv reflect.Value) int {
|
||||
return rv.Len()
|
||||
}
|
||||
|
||||
// ------------ map range and map indexing ----------
|
||||
|
||||
func mapSet(m, k, v reflect.Value, _ mapReqParams) {
|
||||
m.SetMapIndex(k, v)
|
||||
}
|
||||
|
||||
func mapGet(m, k, v reflect.Value, _ mapReqParams) (vv reflect.Value) {
|
||||
return m.MapIndex(k)
|
||||
}
|
||||
|
||||
func mapAddrLoopvarRV(t reflect.Type, k reflect.Kind) (r reflect.Value) {
|
||||
return // reflect.New(t).Elem()
|
||||
}
|
||||
|
||||
// ---------- ENCODER optimized ---------------
|
||||
|
||||
func (d *decoderBase) bytes2Str(in []byte, att dBytesAttachState) (s string, mutable bool) {
|
||||
return d.detach2Str(in, att), false
|
||||
}
|
||||
|
||||
// ---------- structFieldInfo optimized ---------------
|
||||
|
||||
func (n *structFieldInfoNode) rvField(v reflect.Value) reflect.Value {
|
||||
return v.Field(int(n.index))
|
||||
}
|
||||
|
||||
// ---------- others ---------------
|
||||
|
||||
// --------------------------
|
||||
type atomicRtidFnSlice struct {
|
||||
v atomic.Value
|
||||
}
|
||||
|
||||
func (x *atomicRtidFnSlice) load() interface{} {
|
||||
return x.v.Load()
|
||||
}
|
||||
|
||||
func (x *atomicRtidFnSlice) store(p interface{}) {
|
||||
x.v.Store(p)
|
||||
}
|
||||
59
vendor/github.com/ugorji/go/codec/helper_notunsafe_or_notgc.go
generated
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
//go:build safe || codec.safe || !gc
|
||||
|
||||
package codec
|
||||
|
||||
// growCap will return a new capacity for a slice, given the following:
|
||||
// - oldCap: current capacity
|
||||
// - unit: in-memory size of an element
|
||||
// - num: number of elements to add
|
||||
func growCap(oldCap, unit, num uint) (newCap uint) {
|
||||
// appendslice logic (if cap < 1024, *2, else *1.25):
|
||||
// leads to many copy calls, especially when copying bytes.
|
||||
// bytes.Buffer model (2*cap + n): much better for bytes.
|
||||
// smarter way is to take the byte-size of the appended element(type) into account
|
||||
|
||||
// maintain one threshold:
// t1: if cap <= t1, newcap = 2x
//     else newcap = 1.5x
//
// t1 is always >= 1024.
// This means that, if unit size >= 16, we always do 2x or 1.5x (the threshold is the same: 1024).
//
// With this, appending for bytes increases capacity by:
// 100% up to 4K
// 50% beyond that
|
||||
|
||||
// unit can be 0 e.g. for struct{}{}; handle that appropriately
|
||||
maxCap := num + (oldCap * 3 / 2)
|
||||
if unit == 0 || maxCap > maxArrayLen || maxCap < oldCap { // handle wraparound, etc
|
||||
return maxArrayLen
|
||||
}
|
||||
|
||||
var t1 uint = 1024 // default thresholds for large values
|
||||
if unit <= 4 {
|
||||
t1 = 8 * 1024
|
||||
} else if unit <= 16 {
|
||||
t1 = 2 * 1024
|
||||
}
|
||||
|
||||
newCap = 2 + num
|
||||
if oldCap > 0 {
|
||||
if oldCap <= t1 { // [0,t1]
|
||||
newCap = num + (oldCap * 2)
|
||||
} else { // (t1,infinity]
|
||||
newCap = maxCap
|
||||
}
|
||||
}
|
||||
|
||||
// ensure newCap takes multiples of a cache line (size is a multiple of 64)
|
||||
t1 = newCap * unit
|
||||
if t2 := t1 % 64; t2 != 0 {
|
||||
t1 += 64 - t2
|
||||
newCap = t1 / unit
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
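// The cache-line rounding step at the end of growCap can be illustrated in
// isolation. This is a minimal standalone sketch (not part of the package)
// showing how a tentative capacity is rounded up so that newCap*unit is a
// multiple of 64 bytes; the concrete numbers are illustrative assumptions only.
package main

import "fmt"

func roundToCacheLine(newCap, unit uint) uint {
	bytes := newCap * unit
	if rem := bytes % 64; rem != 0 {
		bytes += 64 - rem
		newCap = bytes / unit
	}
	return newCap
}

func main() {
	// e.g. growing a []int32 (unit=4) to a tentative capacity of 37 elements:
	// 37*4 = 148 bytes -> rounded up to 192 bytes -> 48 elements.
	fmt.Println(roundToCacheLine(37, 4)) // 48
}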
1258
vendor/github.com/ugorji/go/codec/helper_unsafe.go
generated
vendored
Normal file
File diff suppressed because it is too large
242
vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_gc.go
generated
vendored
Normal file
@@ -0,0 +1,242 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
//go:build !safe && !codec.safe && !appengine && go1.9 && gc
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
_ "runtime" // needed for go linkname(s)
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// keep in sync with
|
||||
//
|
||||
// $GOROOT/src/cmd/compile/internal/gc/reflect.go: MAXKEYSIZE, MAXELEMSIZE
|
||||
// $GOROOT/src/runtime/map.go: maxKeySize, maxElemSize
|
||||
// $GOROOT/src/reflect/type.go: maxKeySize, maxElemSize
|
||||
//
|
||||
// We use these to determine whether the type is stored indirectly in the map or not.
|
||||
const (
|
||||
// mapMaxKeySize = 128
|
||||
mapMaxElemSize = 128
|
||||
)
|
||||
|
||||
type mapKeyFastKind uint8
|
||||
|
||||
const (
|
||||
mapKeyFastKindAny = iota + 1
|
||||
mapKeyFastKind32
|
||||
mapKeyFastKind32ptr
|
||||
mapKeyFastKind64
|
||||
mapKeyFastKind64ptr
|
||||
mapKeyFastKindStr
|
||||
)
|
||||
|
||||
var mapKeyFastKindVals [32]mapKeyFastKind
|
||||
|
||||
type mapReqParams struct {
|
||||
kfast mapKeyFastKind
|
||||
ref bool
|
||||
indirect bool
|
||||
}
|
||||
|
||||
func getMapReqParams(ti *typeInfo) (r mapReqParams) {
|
||||
r.indirect = mapStoresElemIndirect(uintptr(ti.elemsize))
|
||||
r.ref = refBitset.isset(ti.elemkind)
|
||||
r.kfast = mapKeyFastKindFor(reflect.Kind(ti.keykind))
|
||||
return
|
||||
}
|
||||
|
||||
func init() {
|
||||
xx := func(f mapKeyFastKind, k ...reflect.Kind) {
|
||||
for _, v := range k {
|
||||
mapKeyFastKindVals[byte(v)&31] = f // 'v % 32' equal to 'v & 31'
|
||||
}
|
||||
}
|
||||
|
||||
var f mapKeyFastKind
|
||||
|
||||
f = mapKeyFastKind64
|
||||
if wordSizeBits == 32 {
|
||||
f = mapKeyFastKind32
|
||||
}
|
||||
xx(f, reflect.Int, reflect.Uint, reflect.Uintptr)
|
||||
|
||||
f = mapKeyFastKind64ptr
|
||||
if wordSizeBits == 32 {
|
||||
f = mapKeyFastKind32ptr
|
||||
}
|
||||
xx(f, reflect.Ptr)
|
||||
|
||||
xx(mapKeyFastKindStr, reflect.String)
|
||||
xx(mapKeyFastKind32, reflect.Uint32, reflect.Int32, reflect.Float32)
|
||||
xx(mapKeyFastKind64, reflect.Uint64, reflect.Int64, reflect.Float64)
|
||||
}
|
||||
|
||||
func mapKeyFastKindFor(k reflect.Kind) mapKeyFastKind {
|
||||
return mapKeyFastKindVals[k&31]
|
||||
}
|
||||
|
||||
func unsafeGrowslice(typ unsafe.Pointer, old unsafeSlice, cap, incr int) (s unsafeSlice) {
|
||||
// culled from GOROOT/runtime/slice.go
|
||||
s = rtgrowslice(old.Data, old.Cap+incr, old.Cap, incr, typ)
|
||||
s.Len = old.Len
|
||||
return
|
||||
}
|
||||
|
||||
// func rvType(rv reflect.Value) reflect.Type {
|
||||
// return rvPtrToType(((*unsafeReflectValue)(unsafe.Pointer(&rv))).typ)
|
||||
// // return rv.Type()
|
||||
// }
|
||||
|
||||
// mapStoresElemIndirect tells if the element type is stored indirectly in the map.
|
||||
//
|
||||
// This is used to determine valIsIndirect which is passed into mapSet/mapGet calls.
|
||||
//
|
||||
// If valIsIndirect doesn't matter, then just return false and ignore the value
|
||||
// passed in mapGet/mapSet calls
|
||||
func mapStoresElemIndirect(elemsize uintptr) bool {
|
||||
return elemsize > mapMaxElemSize
|
||||
}
|
||||
|
||||
func mapSet(m, k, v reflect.Value, p mapReqParams) { // valIsRef
|
||||
var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
|
||||
var kptr = unsafeMapKVPtr(urv)
|
||||
urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
|
||||
var vtyp = urv.typ
|
||||
var vptr = unsafeMapKVPtr(urv)
|
||||
|
||||
urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
|
||||
mptr := rvRefPtr(urv)
|
||||
|
||||
var vvptr unsafe.Pointer
|
||||
|
||||
// mapassign_fastXXX don't take indirect into account.
|
||||
// It was hard to infer what makes it work all the time.
|
||||
// Sometimes, we got vvptr == nil when we dereferenced vvptr (if valIsIndirect).
|
||||
// Consequently, only use fastXXX functions if !valIsIndirect
|
||||
|
||||
if p.indirect {
|
||||
vvptr = mapassign(urv.typ, mptr, kptr)
|
||||
// typedmemmove(vtyp, vvptr, vptr)
|
||||
// // reflect_mapassign(urv.typ, mptr, kptr, vptr)
|
||||
// return
|
||||
goto END
|
||||
}
|
||||
|
||||
switch p.kfast {
|
||||
case mapKeyFastKind32:
|
||||
vvptr = mapassign_fast32(urv.typ, mptr, *(*uint32)(kptr))
|
||||
case mapKeyFastKind32ptr:
|
||||
vvptr = mapassign_fast32ptr(urv.typ, mptr, *(*unsafe.Pointer)(kptr))
|
||||
case mapKeyFastKind64:
|
||||
vvptr = mapassign_fast64(urv.typ, mptr, *(*uint64)(kptr))
|
||||
case mapKeyFastKind64ptr:
|
||||
vvptr = mapassign_fast64ptr(urv.typ, mptr, *(*unsafe.Pointer)(kptr))
|
||||
case mapKeyFastKindStr:
|
||||
vvptr = mapassign_faststr(urv.typ, mptr, *(*string)(kptr))
|
||||
default:
|
||||
vvptr = mapassign(urv.typ, mptr, kptr)
|
||||
}
|
||||
|
||||
// if p.kfast != 0 && valIsIndirect {
|
||||
// vvptr = *(*unsafe.Pointer)(vvptr)
|
||||
// }
|
||||
END:
|
||||
typedmemmove(vtyp, vvptr, vptr)
|
||||
}
|
||||
|
||||
func mapGet(m, k, v reflect.Value, p mapReqParams) (_ reflect.Value) {
|
||||
var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
|
||||
var kptr = unsafeMapKVPtr(urv)
|
||||
urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
|
||||
mptr := rvRefPtr(urv)
|
||||
|
||||
var vvptr unsafe.Pointer
|
||||
var ok bool
|
||||
|
||||
// Note that mapaccess2_fastXXX functions do not check if the value needs to be copied.
|
||||
// if they do, we should dereference the pointer and return that
|
||||
|
||||
switch p.kfast {
|
||||
case mapKeyFastKind32, mapKeyFastKind32ptr:
|
||||
vvptr, ok = mapaccess2_fast32(urv.typ, mptr, *(*uint32)(kptr))
|
||||
case mapKeyFastKind64, mapKeyFastKind64ptr:
|
||||
vvptr, ok = mapaccess2_fast64(urv.typ, mptr, *(*uint64)(kptr))
|
||||
case mapKeyFastKindStr:
|
||||
vvptr, ok = mapaccess2_faststr(urv.typ, mptr, *(*string)(kptr))
|
||||
default:
|
||||
vvptr, ok = mapaccess2(urv.typ, mptr, kptr)
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
|
||||
|
||||
if p.kfast != 0 && p.indirect {
|
||||
urv.ptr = *(*unsafe.Pointer)(vvptr)
|
||||
} else if helperUnsafeDirectAssignMapEntry || p.ref {
|
||||
urv.ptr = vvptr
|
||||
} else {
|
||||
typedmemmove(urv.typ, urv.ptr, vvptr)
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
//go:linkname unsafeZeroArr runtime.zeroVal
|
||||
var unsafeZeroArr [1024]byte
|
||||
|
||||
//go:linkname mapassign_fast32 runtime.mapassign_fast32
|
||||
//go:noescape
|
||||
func mapassign_fast32(typ unsafe.Pointer, m unsafe.Pointer, key uint32) unsafe.Pointer
|
||||
|
||||
//go:linkname mapassign_fast32ptr runtime.mapassign_fast32ptr
|
||||
//go:noescape
|
||||
func mapassign_fast32ptr(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
|
||||
|
||||
//go:linkname mapassign_fast64 runtime.mapassign_fast64
|
||||
//go:noescape
|
||||
func mapassign_fast64(typ unsafe.Pointer, m unsafe.Pointer, key uint64) unsafe.Pointer
|
||||
|
||||
//go:linkname mapassign_fast64ptr runtime.mapassign_fast64ptr
|
||||
//go:noescape
|
||||
func mapassign_fast64ptr(typ unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
|
||||
|
||||
//go:linkname mapassign_faststr runtime.mapassign_faststr
|
||||
//go:noescape
|
||||
func mapassign_faststr(typ unsafe.Pointer, m unsafe.Pointer, s string) unsafe.Pointer
|
||||
|
||||
//go:linkname mapaccess2_fast32 runtime.mapaccess2_fast32
|
||||
//go:noescape
|
||||
func mapaccess2_fast32(typ unsafe.Pointer, m unsafe.Pointer, key uint32) (val unsafe.Pointer, ok bool)
|
||||
|
||||
//go:linkname mapaccess2_fast64 runtime.mapaccess2_fast64
|
||||
//go:noescape
|
||||
func mapaccess2_fast64(typ unsafe.Pointer, m unsafe.Pointer, key uint64) (val unsafe.Pointer, ok bool)
|
||||
|
||||
//go:linkname mapaccess2_faststr runtime.mapaccess2_faststr
|
||||
//go:noescape
|
||||
func mapaccess2_faststr(typ unsafe.Pointer, m unsafe.Pointer, key string) (val unsafe.Pointer, ok bool)
|
||||
|
||||
//go:linkname rtgrowslice runtime.growslice
|
||||
//go:noescape
|
||||
func rtgrowslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, typ unsafe.Pointer) unsafeSlice
|
||||
|
||||
// ----
|
||||
|
||||
// //go:linkname rvPtrToType reflect.toType
|
||||
// //go:noescape
|
||||
// func rvPtrToType(typ unsafe.Pointer) reflect.Type
|
||||
|
||||
// //go:linkname growslice reflect.growslice
|
||||
// //go:noescape
|
||||
// func growslice(typ unsafe.Pointer, old unsafeSlice, cap int) unsafeSlice
|
||||
|
||||
// ----
|
||||
81
vendor/github.com/ugorji/go/codec/helper_unsafe_compiler_not_gc.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
//go:build !safe && !codec.safe && !appengine && go1.9 && !gc
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
_ "runtime" // needed for go linkname(s)
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var unsafeZeroArr [1024]byte
|
||||
|
||||
type mapReqParams struct {
|
||||
ref bool
|
||||
}
|
||||
|
||||
func getMapReqParams(ti *typeInfo) (r mapReqParams) {
|
||||
r.ref = refBitset.isset(ti.elemkind)
|
||||
return
|
||||
}
|
||||
|
||||
// runtime.growslice does not work with gccgo, failing with "growslice: cap out of range" error.
|
||||
// consequently, we just call newarray followed by typedslicecopy directly.
|
||||
|
||||
func unsafeGrowslice(typ unsafe.Pointer, old unsafeSlice, cap, incr int) (v unsafeSlice) {
|
||||
size := rtsize2(typ)
|
||||
if size == 0 {
|
||||
return unsafeSlice{unsafe.Pointer(&unsafeZeroArr[0]), old.Len, cap + incr}
|
||||
}
|
||||
newcap := int(growCap(uint(cap), uint(size), uint(incr)))
|
||||
v = unsafeSlice{Data: newarray(typ, newcap), Len: old.Len, Cap: newcap}
|
||||
if old.Len > 0 {
|
||||
typedslicecopy(typ, v, old)
|
||||
}
|
||||
// memmove(v.Data, old.Data, size*uintptr(old.Len))
|
||||
return
|
||||
}
|
||||
|
||||
// runtime.{mapassign_fastXXX, mapaccess2_fastXXX} are not supported in gollvm,
|
||||
// failing with "error: undefined reference" error.
|
||||
// so we just use runtime.{mapassign, mapaccess2} directly
|
||||
|
||||
func mapSet(m, k, v reflect.Value, p mapReqParams) {
|
||||
var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
|
||||
var kptr = unsafeMapKVPtr(urv)
|
||||
urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
|
||||
var vtyp = urv.typ
|
||||
var vptr = unsafeMapKVPtr(urv)
|
||||
|
||||
urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
|
||||
mptr := rvRefPtr(urv)
|
||||
|
||||
vvptr := mapassign(urv.typ, mptr, kptr)
|
||||
typedmemmove(vtyp, vvptr, vptr)
|
||||
}
|
||||
|
||||
func mapGet(m, k, v reflect.Value, p mapReqParams) (_ reflect.Value) {
|
||||
var urv = (*unsafeReflectValue)(unsafe.Pointer(&k))
|
||||
var kptr = unsafeMapKVPtr(urv)
|
||||
urv = (*unsafeReflectValue)(unsafe.Pointer(&m))
|
||||
mptr := rvRefPtr(urv)
|
||||
|
||||
vvptr, ok := mapaccess2(urv.typ, mptr, kptr)
|
||||
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
urv = (*unsafeReflectValue)(unsafe.Pointer(&v))
|
||||
|
||||
if helperUnsafeDirectAssignMapEntry || p.ref {
|
||||
urv.ptr = vvptr
|
||||
} else {
|
||||
typedmemmove(urv.typ, urv.ptr, vvptr)
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
130
vendor/github.com/ugorji/go/codec/init.mono.go
generated
vendored
Normal file
@@ -0,0 +1,130 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
//go:build !notmono && !codec.notmono
|
||||
|
||||
package codec
|
||||
|
||||
import "io"
|
||||
|
||||
func callMake(v interface{}) {}
|
||||
|
||||
type encWriter interface{ encWriterI }
|
||||
type decReader interface{ decReaderI }
|
||||
type encDriver interface{ encDriverI }
|
||||
type decDriver interface{ decDriverI }
|
||||
|
||||
func (h *SimpleHandle) newEncoderBytes(out *[]byte) encoderI {
|
||||
return helperEncDriverSimpleBytes{}.newEncoderBytes(out, h)
|
||||
}
|
||||
|
||||
func (h *SimpleHandle) newEncoder(w io.Writer) encoderI {
|
||||
return helperEncDriverSimpleIO{}.newEncoderIO(w, h)
|
||||
}
|
||||
|
||||
func (h *SimpleHandle) newDecoderBytes(in []byte) decoderI {
|
||||
return helperDecDriverSimpleBytes{}.newDecoderBytes(in, h)
|
||||
}
|
||||
|
||||
func (h *SimpleHandle) newDecoder(r io.Reader) decoderI {
|
||||
return helperDecDriverSimpleIO{}.newDecoderIO(r, h)
|
||||
}
|
||||
|
||||
func (h *JsonHandle) newEncoderBytes(out *[]byte) encoderI {
|
||||
return helperEncDriverJsonBytes{}.newEncoderBytes(out, h)
|
||||
}
|
||||
|
||||
func (h *JsonHandle) newEncoder(w io.Writer) encoderI {
|
||||
return helperEncDriverJsonIO{}.newEncoderIO(w, h)
|
||||
}
|
||||
|
||||
func (h *JsonHandle) newDecoderBytes(in []byte) decoderI {
|
||||
return helperDecDriverJsonBytes{}.newDecoderBytes(in, h)
|
||||
}
|
||||
|
||||
func (h *JsonHandle) newDecoder(r io.Reader) decoderI {
|
||||
return helperDecDriverJsonIO{}.newDecoderIO(r, h)
|
||||
}
|
||||
|
||||
func (h *MsgpackHandle) newEncoderBytes(out *[]byte) encoderI {
|
||||
return helperEncDriverMsgpackBytes{}.newEncoderBytes(out, h)
|
||||
}
|
||||
|
||||
func (h *MsgpackHandle) newEncoder(w io.Writer) encoderI {
|
||||
return helperEncDriverMsgpackIO{}.newEncoderIO(w, h)
|
||||
}
|
||||
|
||||
func (h *MsgpackHandle) newDecoderBytes(in []byte) decoderI {
|
||||
return helperDecDriverMsgpackBytes{}.newDecoderBytes(in, h)
|
||||
}
|
||||
|
||||
func (h *MsgpackHandle) newDecoder(r io.Reader) decoderI {
|
||||
return helperDecDriverMsgpackIO{}.newDecoderIO(r, h)
|
||||
}
|
||||
|
||||
func (h *BincHandle) newEncoderBytes(out *[]byte) encoderI {
|
||||
return helperEncDriverBincBytes{}.newEncoderBytes(out, h)
|
||||
}
|
||||
|
||||
func (h *BincHandle) newEncoder(w io.Writer) encoderI {
|
||||
return helperEncDriverBincIO{}.newEncoderIO(w, h)
|
||||
}
|
||||
|
||||
func (h *BincHandle) newDecoderBytes(in []byte) decoderI {
|
||||
return helperDecDriverBincBytes{}.newDecoderBytes(in, h)
|
||||
}
|
||||
|
||||
func (h *BincHandle) newDecoder(r io.Reader) decoderI {
|
||||
return helperDecDriverBincIO{}.newDecoderIO(r, h)
|
||||
}
|
||||
|
||||
func (h *CborHandle) newEncoderBytes(out *[]byte) encoderI {
|
||||
return helperEncDriverCborBytes{}.newEncoderBytes(out, h)
|
||||
}
|
||||
|
||||
func (h *CborHandle) newEncoder(w io.Writer) encoderI {
|
||||
return helperEncDriverCborIO{}.newEncoderIO(w, h)
|
||||
}
|
||||
|
||||
func (h *CborHandle) newDecoderBytes(in []byte) decoderI {
|
||||
return helperDecDriverCborBytes{}.newDecoderBytes(in, h)
|
||||
}
|
||||
|
||||
func (h *CborHandle) newDecoder(r io.Reader) decoderI {
|
||||
return helperDecDriverCborIO{}.newDecoderIO(r, h)
|
||||
}
|
||||
|
||||
var (
|
||||
bincFpEncIO = helperEncDriverBincIO{}.fastpathEList()
|
||||
bincFpEncBytes = helperEncDriverBincBytes{}.fastpathEList()
|
||||
bincFpDecIO = helperDecDriverBincIO{}.fastpathDList()
|
||||
bincFpDecBytes = helperDecDriverBincBytes{}.fastpathDList()
|
||||
)
|
||||
|
||||
var (
|
||||
cborFpEncIO = helperEncDriverCborIO{}.fastpathEList()
|
||||
cborFpEncBytes = helperEncDriverCborBytes{}.fastpathEList()
|
||||
cborFpDecIO = helperDecDriverCborIO{}.fastpathDList()
|
||||
cborFpDecBytes = helperDecDriverCborBytes{}.fastpathDList()
|
||||
)
|
||||
|
||||
var (
|
||||
jsonFpEncIO = helperEncDriverJsonIO{}.fastpathEList()
|
||||
jsonFpEncBytes = helperEncDriverJsonBytes{}.fastpathEList()
|
||||
jsonFpDecIO = helperDecDriverJsonIO{}.fastpathDList()
|
||||
jsonFpDecBytes = helperDecDriverJsonBytes{}.fastpathDList()
|
||||
)
|
||||
|
||||
var (
|
||||
msgpackFpEncIO = helperEncDriverMsgpackIO{}.fastpathEList()
|
||||
msgpackFpEncBytes = helperEncDriverMsgpackBytes{}.fastpathEList()
|
||||
msgpackFpDecIO = helperDecDriverMsgpackIO{}.fastpathDList()
|
||||
msgpackFpDecBytes = helperDecDriverMsgpackBytes{}.fastpathDList()
|
||||
)
|
||||
|
||||
var (
|
||||
simpleFpEncIO = helperEncDriverSimpleIO{}.fastpathEList()
|
||||
simpleFpEncBytes = helperEncDriverSimpleBytes{}.fastpathEList()
|
||||
simpleFpDecIO = helperDecDriverSimpleIO{}.fastpathDList()
|
||||
simpleFpDecBytes = helperDecDriverSimpleBytes{}.fastpathDList()
|
||||
)
|
||||
313
vendor/github.com/ugorji/go/codec/init.notmono.go
generated
vendored
Normal file
@@ -0,0 +1,313 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
//go:build notmono || codec.notmono
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// This contains all the initializations of generics.
|
||||
// Putting it into one file ensures that we can build with or without generics.
|
||||
|
||||
type maker interface{ Make() }
|
||||
|
||||
func callMake(v interface{}) {
|
||||
v.(maker).Make()
|
||||
}
|
||||
|
||||
// ---- (writer.go)
|
||||
|
||||
type encWriter interface {
|
||||
bufioEncWriterM | bytesEncAppenderM
|
||||
encWriterI
|
||||
}
|
||||
|
||||
type bytesEncAppenderM struct {
|
||||
*bytesEncAppender
|
||||
}
|
||||
|
||||
func (z *bytesEncAppenderM) Make() {
|
||||
z.bytesEncAppender = new(bytesEncAppender)
|
||||
z.out = &bytesEncAppenderDefOut
|
||||
}
|
||||
|
||||
type bufioEncWriterM struct {
|
||||
*bufioEncWriter
|
||||
}
|
||||
|
||||
func (z *bufioEncWriterM) Make() {
|
||||
z.bufioEncWriter = new(bufioEncWriter)
|
||||
z.w = io.Discard
|
||||
}
|
||||
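// The wrapper-plus-Make pattern above lets shared code allocate the underlying
// struct without knowing its concrete type. A stripped-down standalone sketch of
// the same idea (illustrative names only, not the package's real types):
package main

import "fmt"

type maker interface{ Make() }

// callMake mirrors the notmono build: it asserts the maker interface and
// lets the wrapper allocate its embedded pointer.
func callMake(v interface{}) { v.(maker).Make() }

type thing struct{ n int }

type thingM struct{ *thing }

func (t *thingM) Make() { t.thing = &thing{n: 42} }

func main() {
	var tm thingM
	callMake(&tm)
	fmt.Println(tm.n) // 42
}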
|
||||
// ---- reader.go
|
||||
|
||||
type decReader interface {
|
||||
bytesDecReaderM | ioDecReaderM
|
||||
|
||||
decReaderI
|
||||
}
|
||||
|
||||
type bytesDecReaderM struct {
|
||||
*bytesDecReader
|
||||
}
|
||||
|
||||
func (z *bytesDecReaderM) Make() {
|
||||
z.bytesDecReader = new(bytesDecReader)
|
||||
}
|
||||
|
||||
type ioDecReaderM struct {
|
||||
*ioDecReader
|
||||
}
|
||||
|
||||
func (z *ioDecReaderM) Make() {
|
||||
z.ioDecReader = new(ioDecReader)
|
||||
}
|
||||
|
||||
// type helperEncWriter[T encWriter] struct{}
|
||||
// type helperDecReader[T decReader] struct{}
|
||||
// func (helperDecReader[T]) decByteSlice(r T, clen, maxInitLen int, bs []byte) (bsOut []byte) {
|
||||
|
||||
// ---- (encode.go)
|
||||
|
||||
type encDriver interface {
|
||||
simpleEncDriverM[bufioEncWriterM] |
|
||||
simpleEncDriverM[bytesEncAppenderM] |
|
||||
jsonEncDriverM[bufioEncWriterM] |
|
||||
jsonEncDriverM[bytesEncAppenderM] |
|
||||
cborEncDriverM[bufioEncWriterM] |
|
||||
cborEncDriverM[bytesEncAppenderM] |
|
||||
msgpackEncDriverM[bufioEncWriterM] |
|
||||
msgpackEncDriverM[bytesEncAppenderM] |
|
||||
bincEncDriverM[bufioEncWriterM] |
|
||||
bincEncDriverM[bytesEncAppenderM]
|
||||
|
||||
encDriverI
|
||||
}
|
||||
|
||||
// ---- (decode.go)
|
||||
|
||||
type decDriver interface {
|
||||
simpleDecDriverM[bytesDecReaderM] |
|
||||
simpleDecDriverM[ioDecReaderM] |
|
||||
jsonDecDriverM[bytesDecReaderM] |
|
||||
jsonDecDriverM[ioDecReaderM] |
|
||||
cborDecDriverM[bytesDecReaderM] |
|
||||
cborDecDriverM[ioDecReaderM] |
|
||||
msgpackDecDriverM[bytesDecReaderM] |
|
||||
msgpackDecDriverM[ioDecReaderM] |
|
||||
bincDecDriverM[bytesDecReaderM] |
|
||||
bincDecDriverM[ioDecReaderM]
|
||||
|
||||
decDriverI
|
||||
}
|
||||
|
||||
// Below: <format>.go files
|
||||
|
||||
// ---- (binc.go)
|
||||
|
||||
type bincEncDriverM[T encWriter] struct {
|
||||
*bincEncDriver[T]
|
||||
}
|
||||
|
||||
func (d *bincEncDriverM[T]) Make() {
|
||||
d.bincEncDriver = new(bincEncDriver[T])
|
||||
}
|
||||
|
||||
type bincDecDriverM[T decReader] struct {
|
||||
*bincDecDriver[T]
|
||||
}
|
||||
|
||||
func (d *bincDecDriverM[T]) Make() {
|
||||
d.bincDecDriver = new(bincDecDriver[T])
|
||||
}
|
||||
|
||||
var (
|
||||
bincFpEncIO = helperEncDriver[bincEncDriverM[bufioEncWriterM]]{}.fastpathEList()
|
||||
bincFpEncBytes = helperEncDriver[bincEncDriverM[bytesEncAppenderM]]{}.fastpathEList()
|
||||
bincFpDecIO = helperDecDriver[bincDecDriverM[ioDecReaderM]]{}.fastpathDList()
|
||||
bincFpDecBytes = helperDecDriver[bincDecDriverM[bytesDecReaderM]]{}.fastpathDList()
|
||||
)
|
||||
|
||||
// ---- (cbor.go)
|
||||
|
||||
type cborEncDriverM[T encWriter] struct {
|
||||
*cborEncDriver[T]
|
||||
}
|
||||
|
||||
func (d *cborEncDriverM[T]) Make() {
|
||||
d.cborEncDriver = new(cborEncDriver[T])
|
||||
}
|
||||
|
||||
type cborDecDriverM[T decReader] struct {
|
||||
*cborDecDriver[T]
|
||||
}
|
||||
|
||||
func (d *cborDecDriverM[T]) Make() {
|
||||
d.cborDecDriver = new(cborDecDriver[T])
|
||||
}
|
||||
|
||||
var (
|
||||
cborFpEncIO = helperEncDriver[cborEncDriverM[bufioEncWriterM]]{}.fastpathEList()
|
||||
cborFpEncBytes = helperEncDriver[cborEncDriverM[bytesEncAppenderM]]{}.fastpathEList()
|
||||
cborFpDecIO = helperDecDriver[cborDecDriverM[ioDecReaderM]]{}.fastpathDList()
|
||||
cborFpDecBytes = helperDecDriver[cborDecDriverM[bytesDecReaderM]]{}.fastpathDList()
|
||||
)
|
||||
|
||||
// ---- (json.go)
|
||||
|
||||
type jsonEncDriverM[T encWriter] struct {
|
||||
*jsonEncDriver[T]
|
||||
}
|
||||
|
||||
func (d *jsonEncDriverM[T]) Make() {
|
||||
d.jsonEncDriver = new(jsonEncDriver[T])
|
||||
}
|
||||
|
||||
type jsonDecDriverM[T decReader] struct {
|
||||
*jsonDecDriver[T]
|
||||
}
|
||||
|
||||
func (d *jsonDecDriverM[T]) Make() {
|
||||
d.jsonDecDriver = new(jsonDecDriver[T])
|
||||
}
|
||||
|
||||
var (
|
||||
jsonFpEncIO = helperEncDriver[jsonEncDriverM[bufioEncWriterM]]{}.fastpathEList()
|
||||
jsonFpEncBytes = helperEncDriver[jsonEncDriverM[bytesEncAppenderM]]{}.fastpathEList()
|
||||
jsonFpDecIO = helperDecDriver[jsonDecDriverM[ioDecReaderM]]{}.fastpathDList()
|
||||
jsonFpDecBytes = helperDecDriver[jsonDecDriverM[bytesDecReaderM]]{}.fastpathDList()
|
||||
)
|
||||
|
||||
// ---- (msgpack.go)
|
||||
|
||||
type msgpackEncDriverM[T encWriter] struct {
|
||||
*msgpackEncDriver[T]
|
||||
}
|
||||
|
||||
func (d *msgpackEncDriverM[T]) Make() {
|
||||
d.msgpackEncDriver = new(msgpackEncDriver[T])
|
||||
}
|
||||
|
||||
type msgpackDecDriverM[T decReader] struct {
|
||||
*msgpackDecDriver[T]
|
||||
}
|
||||
|
||||
func (d *msgpackDecDriverM[T]) Make() {
|
||||
d.msgpackDecDriver = new(msgpackDecDriver[T])
|
||||
}
|
||||
|
||||
var (
|
||||
msgpackFpEncIO = helperEncDriver[msgpackEncDriverM[bufioEncWriterM]]{}.fastpathEList()
|
||||
msgpackFpEncBytes = helperEncDriver[msgpackEncDriverM[bytesEncAppenderM]]{}.fastpathEList()
|
||||
msgpackFpDecIO = helperDecDriver[msgpackDecDriverM[ioDecReaderM]]{}.fastpathDList()
|
||||
msgpackFpDecBytes = helperDecDriver[msgpackDecDriverM[bytesDecReaderM]]{}.fastpathDList()
|
||||
)
|
||||
|
||||
// ---- (simple.go)
|
||||
|
||||
type simpleEncDriverM[T encWriter] struct {
|
||||
*simpleEncDriver[T]
|
||||
}
|
||||
|
||||
func (d *simpleEncDriverM[T]) Make() {
|
||||
d.simpleEncDriver = new(simpleEncDriver[T])
|
||||
}
|
||||
|
||||
type simpleDecDriverM[T decReader] struct {
|
||||
*simpleDecDriver[T]
|
||||
}
|
||||
|
||||
func (d *simpleDecDriverM[T]) Make() {
|
||||
d.simpleDecDriver = new(simpleDecDriver[T])
|
||||
}
|
||||
|
||||
var (
|
||||
simpleFpEncIO = helperEncDriver[simpleEncDriverM[bufioEncWriterM]]{}.fastpathEList()
|
||||
simpleFpEncBytes = helperEncDriver[simpleEncDriverM[bytesEncAppenderM]]{}.fastpathEList()
|
||||
simpleFpDecIO = helperDecDriver[simpleDecDriverM[ioDecReaderM]]{}.fastpathDList()
|
||||
simpleFpDecBytes = helperDecDriver[simpleDecDriverM[bytesDecReaderM]]{}.fastpathDList()
|
||||
)
|
||||
|
||||
func (h *SimpleHandle) newEncoderBytes(out *[]byte) encoderI {
|
||||
return helperEncDriver[simpleEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h)
|
||||
}
|
||||
|
||||
func (h *SimpleHandle) newEncoder(w io.Writer) encoderI {
|
||||
return helperEncDriver[simpleEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h)
|
||||
}
|
||||
|
||||
func (h *SimpleHandle) newDecoderBytes(in []byte) decoderI {
|
||||
return helperDecDriver[simpleDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h)
|
||||
}
|
||||
|
||||
func (h *SimpleHandle) newDecoder(r io.Reader) decoderI {
|
||||
return helperDecDriver[simpleDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h)
|
||||
}
|
||||
|
||||
func (h *JsonHandle) newEncoderBytes(out *[]byte) encoderI {
|
||||
return helperEncDriver[jsonEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h)
|
||||
}
|
||||
|
||||
func (h *JsonHandle) newEncoder(w io.Writer) encoderI {
|
||||
return helperEncDriver[jsonEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h)
|
||||
}
|
||||
|
||||
func (h *JsonHandle) newDecoderBytes(in []byte) decoderI {
|
||||
return helperDecDriver[jsonDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h)
|
||||
}
|
||||
|
||||
func (h *JsonHandle) newDecoder(r io.Reader) decoderI {
|
||||
return helperDecDriver[jsonDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h)
|
||||
}
|
||||
|
||||
func (h *MsgpackHandle) newEncoderBytes(out *[]byte) encoderI {
|
||||
return helperEncDriver[msgpackEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h)
|
||||
}
|
||||
|
||||
func (h *MsgpackHandle) newEncoder(w io.Writer) encoderI {
|
||||
return helperEncDriver[msgpackEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h)
|
||||
}
|
||||
|
||||
func (h *MsgpackHandle) newDecoderBytes(in []byte) decoderI {
|
||||
return helperDecDriver[msgpackDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h)
|
||||
}
|
||||
|
||||
func (h *MsgpackHandle) newDecoder(r io.Reader) decoderI {
|
||||
return helperDecDriver[msgpackDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h)
|
||||
}
|
||||
|
||||
func (h *CborHandle) newEncoderBytes(out *[]byte) encoderI {
|
||||
return helperEncDriver[cborEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h)
|
||||
}
|
||||
|
||||
func (h *CborHandle) newEncoder(w io.Writer) encoderI {
|
||||
return helperEncDriver[cborEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h)
|
||||
}
|
||||
|
||||
func (h *CborHandle) newDecoderBytes(in []byte) decoderI {
|
||||
return helperDecDriver[cborDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h)
|
||||
}
|
||||
|
||||
func (h *CborHandle) newDecoder(r io.Reader) decoderI {
|
||||
return helperDecDriver[cborDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h)
|
||||
}
|
||||
|
||||
func (h *BincHandle) newEncoderBytes(out *[]byte) encoderI {
|
||||
return helperEncDriver[bincEncDriverM[bytesEncAppenderM]]{}.newEncoderBytes(out, h)
|
||||
}
|
||||
|
||||
func (h *BincHandle) newEncoder(w io.Writer) encoderI {
|
||||
return helperEncDriver[bincEncDriverM[bufioEncWriterM]]{}.newEncoderIO(w, h)
|
||||
}
|
||||
|
||||
func (h *BincHandle) newDecoderBytes(in []byte) decoderI {
|
||||
return helperDecDriver[bincDecDriverM[bytesDecReaderM]]{}.newDecoderBytes(in, h)
|
||||
}
|
||||
|
||||
func (h *BincHandle) newDecoder(r io.Reader) decoderI {
|
||||
return helperDecDriver[bincDecDriverM[ioDecReaderM]]{}.newDecoderIO(r, h)
|
||||
}
|
||||
504
vendor/github.com/ugorji/go/codec/json.base.go
generated
vendored
Normal file
@@ -0,0 +1,504 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"math"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
//--------------------------------
|
||||
|
||||
// jsonLits and jsonLitb are defined at the package level,
|
||||
// so they are guaranteed to be stored efficiently, making
|
||||
// for better append/string comparison/etc.
|
||||
//
|
||||
// (anecdotal evidence from some benchmarking on go 1.20 devel in 20220104)
|
||||
const jsonLits = `"true"false"null"{}[]`
|
||||
|
||||
const (
|
||||
jsonLitT = 1
|
||||
jsonLitF = 6
|
||||
jsonLitN = 12
|
||||
jsonLitM = 17
|
||||
jsonLitA = 19
|
||||
)
|
||||
|
||||
var jsonLitb = []byte(jsonLits)
|
||||
var jsonNull = jsonLitb[jsonLitN : jsonLitN+4]
|
||||
var jsonArrayEmpty = jsonLitb[jsonLitA : jsonLitA+2]
|
||||
var jsonMapEmpty = jsonLitb[jsonLitM : jsonLitM+2]
|
||||
|
||||
const jsonEncodeUintSmallsString = "" +
|
||||
"00010203040506070809" +
|
||||
"10111213141516171819" +
|
||||
"20212223242526272829" +
|
||||
"30313233343536373839" +
|
||||
"40414243444546474849" +
|
||||
"50515253545556575859" +
|
||||
"60616263646566676869" +
|
||||
"70717273747576777879" +
|
||||
"80818283848586878889" +
|
||||
"90919293949596979899"
|
||||
|
||||
var jsonEncodeUintSmallsStringBytes = (*[len(jsonEncodeUintSmallsString)]byte)([]byte(jsonEncodeUintSmallsString))
|
||||
|
||||
const (
|
||||
jsonU4Chk2 = '0'
|
||||
jsonU4Chk1 = 'a' - 10
|
||||
jsonU4Chk0 = 'A' - 10
|
||||
)
|
||||
|
||||
const (
|
||||
// If !jsonValidateSymbols, decoding will be faster, by skipping some checks:
|
||||
// - If we see first character of null, false or true,
|
||||
// do not validate subsequent characters.
|
||||
// - e.g. if we see a n, assume null and skip next 3 characters,
|
||||
// and do not validate they are ull.
|
||||
// P.S. Do not expect a significant decoding boost from this.
|
||||
jsonValidateSymbols = true
|
||||
|
||||
// jsonEscapeMultiByteUnicodeSep controls whether some unicode characters
|
||||
// that are valid json but may bomb in some contexts are escaped during encoding.
|
||||
//
|
||||
// U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR.
|
||||
// Both technically valid JSON, but bomb on JSONP, so fix here unconditionally.
|
||||
jsonEscapeMultiByteUnicodeSep = true
|
||||
|
||||
// jsonNakedBoolNumInQuotedStr is used during decoding into a blank interface{}
|
||||
// to control whether we detect quoted values of bools and null where a map key is expected,
|
||||
// and treat as nil, true or false.
|
||||
jsonNakedBoolNumInQuotedStr = true
|
||||
)
|
||||
|
||||
var (
|
||||
// jsonTabs and jsonSpaces are used as caches for indents
|
||||
jsonTabs [32]byte
|
||||
jsonSpaces [128]byte
|
||||
|
||||
jsonHexEncoder hexEncoder
|
||||
// jsonTimeLayout is used to validate time layouts.
|
||||
// Unfortunately, we couldn't compare time.Time effectively, so punted.
|
||||
// jsonTimeLayout time.Time
|
||||
)
|
||||
|
||||
func init() {
|
||||
for i := 0; i < len(jsonTabs); i++ {
|
||||
jsonTabs[i] = '\t'
|
||||
}
|
||||
for i := 0; i < len(jsonSpaces); i++ {
|
||||
jsonSpaces[i] = ' '
|
||||
}
|
||||
// jsonTimeLayout, err := time.Parse(time.Layout, time.Layout)
|
||||
// halt.onerror(err)
|
||||
// jsonTimeLayout = jsonTimeLayout.Round(time.Second).UTC()
|
||||
}
|
||||
|
||||
// ----------------
|
||||
|
||||
type jsonBytesFmt uint8
|
||||
|
||||
const (
|
||||
jsonBytesFmtArray jsonBytesFmt = iota + 1
|
||||
jsonBytesFmtBase64
|
||||
jsonBytesFmtBase64url
|
||||
jsonBytesFmtBase32
|
||||
jsonBytesFmtBase32hex
|
||||
jsonBytesFmtBase16
|
||||
|
||||
jsonBytesFmtHex = jsonBytesFmtBase16
|
||||
)
|
||||
|
||||
type jsonTimeFmt uint8
|
||||
|
||||
const (
|
||||
jsonTimeFmtStringLayout jsonTimeFmt = iota + 1
|
||||
jsonTimeFmtUnix
|
||||
jsonTimeFmtUnixMilli
|
||||
jsonTimeFmtUnixMicro
|
||||
jsonTimeFmtUnixNano
|
||||
)
|
||||
|
||||
type jsonBytesFmter = bytesEncoder
|
||||
|
||||
type jsonHandleOpts struct {
|
||||
rawext bool
|
||||
// bytesFmt used during encode to determine how to encode []byte
|
||||
bytesFmt jsonBytesFmt
|
||||
// timeFmt used during encode to determine how to encode a time.Time
|
||||
timeFmt jsonTimeFmt
|
||||
// timeFmtNum used during decode to decode a time.Time from an int64 in the stream
|
||||
timeFmtNum jsonTimeFmt
|
||||
// timeFmtLayouts used on decode, to try to parse time.Time until successful
|
||||
timeFmtLayouts []string
|
||||
// byteFmters used on decode, to try to parse []byte from a UTF-8 string encoding (e.g. base64)
|
||||
byteFmters []jsonBytesFmter
|
||||
}
|
||||
|
||||
func jsonCheckTimeLayout(s string) (ok bool) {
|
||||
_, err := time.Parse(s, s)
|
||||
// t...Equal(jsonTimeLayout) always returns false - unsure why
|
||||
// return err == nil && t.Round(time.Second).UTC().Equal(jsonTimeLayout)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (x *jsonHandleOpts) reset(h *JsonHandle) {
|
||||
x.timeFmt = 0
|
||||
x.timeFmtNum = 0
|
||||
x.timeFmtLayouts = x.timeFmtLayouts[:0]
|
||||
if len(h.TimeFormat) != 0 {
|
||||
switch h.TimeFormat[0] {
|
||||
case "unix":
|
||||
x.timeFmt = jsonTimeFmtUnix
|
||||
case "unixmilli":
|
||||
x.timeFmt = jsonTimeFmtUnixMilli
|
||||
case "unixmicro":
|
||||
x.timeFmt = jsonTimeFmtUnixMicro
|
||||
case "unixnano":
|
||||
x.timeFmt = jsonTimeFmtUnixNano
|
||||
}
|
||||
x.timeFmtNum = x.timeFmt
|
||||
for _, v := range h.TimeFormat {
|
||||
if !strings.HasPrefix(v, "unix") && jsonCheckTimeLayout(v) {
|
||||
x.timeFmtLayouts = append(x.timeFmtLayouts, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
if x.timeFmt == 0 { // both timeFmt and timeFmtNum are 0
|
||||
x.timeFmtNum = jsonTimeFmtUnix
|
||||
x.timeFmt = jsonTimeFmtStringLayout
|
||||
if len(x.timeFmtLayouts) == 0 {
|
||||
x.timeFmtLayouts = append(x.timeFmtLayouts, time.RFC3339Nano)
|
||||
}
|
||||
}
|
||||
|
||||
x.bytesFmt = 0
|
||||
x.byteFmters = x.byteFmters[:0]
|
||||
var b64 bool
|
||||
if len(h.BytesFormat) != 0 {
|
||||
switch h.BytesFormat[0] {
|
||||
case "array":
|
||||
x.bytesFmt = jsonBytesFmtArray
|
||||
case "base64":
|
||||
x.bytesFmt = jsonBytesFmtBase64
|
||||
case "base64url":
|
||||
x.bytesFmt = jsonBytesFmtBase64url
|
||||
case "base32":
|
||||
x.bytesFmt = jsonBytesFmtBase32
|
||||
case "base32hex":
|
||||
x.bytesFmt = jsonBytesFmtBase32hex
|
||||
case "base16", "hex":
|
||||
x.bytesFmt = jsonBytesFmtBase16
|
||||
}
|
||||
for _, v := range h.BytesFormat {
|
||||
switch v {
|
||||
// case "array":
|
||||
case "base64":
|
||||
x.byteFmters = append(x.byteFmters, base64.StdEncoding)
|
||||
b64 = true
|
||||
case "base64url":
|
||||
x.byteFmters = append(x.byteFmters, base64.URLEncoding)
|
||||
case "base32":
|
||||
x.byteFmters = append(x.byteFmters, base32.StdEncoding)
|
||||
case "base32hex":
|
||||
x.byteFmters = append(x.byteFmters, base32.HexEncoding)
|
||||
case "base16", "hex":
|
||||
x.byteFmters = append(x.byteFmters, &jsonHexEncoder)
|
||||
}
|
||||
}
|
||||
}
|
||||
if x.bytesFmt == 0 {
|
||||
// either len==0 OR gibberish was in the first element; resolve here
|
||||
x.bytesFmt = jsonBytesFmtBase64
|
||||
if !b64 { // not present - so insert into pos 0
|
||||
x.byteFmters = append(x.byteFmters, nil)
|
||||
copy(x.byteFmters[1:], x.byteFmters[0:])
|
||||
x.byteFmters[0] = base64.StdEncoding
|
||||
}
|
||||
}
|
||||
// ----
|
||||
x.rawext = h.RawBytesExt != nil
|
||||
}
|
||||
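// A rough usage sketch for the options handled above. It assumes, as the reset
// code suggests, that JsonHandle exposes TimeFormat and BytesFormat as exported
// []string fields; treat those field names/types as assumptions from this file,
// not a guaranteed public contract.
package main

import (
	"fmt"
	"time"

	"github.com/ugorji/go/codec"
)

func main() {
	var h codec.JsonHandle
	// encode time.Time as unix millis; RFC3339Nano is still accepted when decoding strings
	h.TimeFormat = []string{"unixmilli", time.RFC3339Nano}
	// encode []byte as hex; base64 is still tried on decode (inserted at position 0 above)
	h.BytesFormat = []string{"hex"}

	var out []byte
	enc := codec.NewEncoderBytes(&out, &h)
	if err := enc.Encode(map[string]interface{}{"at": time.Now(), "raw": []byte{0xde, 0xad}}); err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}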
|
||||
var jsonEncBoolStrs = [2][2]string{
|
||||
{jsonLits[jsonLitF : jsonLitF+5], jsonLits[jsonLitT : jsonLitT+4]},
|
||||
{jsonLits[jsonLitF-1 : jsonLitF+6], jsonLits[jsonLitT-1 : jsonLitT+5]},
|
||||
}
|
||||
|
||||
func jsonEncodeUint(neg, quotes bool, u uint64, b *[48]byte) []byte {
|
||||
// MARKER: use setByteAt/byteAt to elide the bounds-checks
|
||||
// when we are sure that we don't go beyond the bounds.
|
||||
|
||||
// MARKER: copied mostly from std library: strconv/itoa.go
|
||||
// this should only be called on 64bit OS.
|
||||
|
||||
var ss = jsonEncodeUintSmallsStringBytes[:]
|
||||
|
||||
// typically, 19 or 20 bytes sufficient for decimal encoding a uint64
|
||||
var a = b[:24]
|
||||
var i = uint(len(a))
|
||||
|
||||
if quotes {
|
||||
i--
|
||||
setByteAt(a, i, '"')
|
||||
// a[i] = '"'
|
||||
}
|
||||
var is, us uint // use uint, as those fit into a register on the platform
|
||||
if cpu32Bit {
|
||||
for u >= 1e9 {
|
||||
q := u / 1e9
|
||||
us = uint(u - q*1e9) // u % 1e9 fits into a uint
|
||||
for j := 4; j > 0; j-- {
|
||||
is = us % 100 * 2
|
||||
us /= 100
|
||||
i -= 2
|
||||
setByteAt(a, i+1, byteAt(ss, is+1))
|
||||
setByteAt(a, i, byteAt(ss, is))
|
||||
}
|
||||
i--
|
||||
setByteAt(a, i, byteAt(ss, us*2+1))
|
||||
u = q
|
||||
}
|
||||
// u is now < 1e9, so is guaranteed to fit into a uint
|
||||
}
|
||||
us = uint(u)
|
||||
for us >= 100 {
|
||||
is = us % 100 * 2
|
||||
us /= 100
|
||||
i -= 2
|
||||
setByteAt(a, i+1, byteAt(ss, is+1))
|
||||
setByteAt(a, i, byteAt(ss, is))
|
||||
// a[i+1], a[i] = ss[is+1], ss[is]
|
||||
}
|
||||
|
||||
// us < 100
|
||||
is = us * 2
|
||||
i--
|
||||
setByteAt(a, i, byteAt(ss, is+1))
|
||||
// a[i] = ss[is+1]
|
||||
if us >= 10 {
|
||||
i--
|
||||
setByteAt(a, i, byteAt(ss, is))
|
||||
// a[i] = ss[is]
|
||||
}
|
||||
if neg {
|
||||
i--
|
||||
setByteAt(a, i, '-')
|
||||
// a[i] = '-'
|
||||
}
|
||||
if quotes {
|
||||
i--
|
||||
setByteAt(a, i, '"')
|
||||
// a[i] = '"'
|
||||
}
|
||||
return a[i:]
|
||||
}
|
||||
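// jsonEncodeUint above emits two decimal digits per iteration using a 200-byte
// lookup string, mirroring strconv/itoa.go. A self-contained sketch of that
// technique (quote and sign handling omitted; the example value is arbitrary):
package main

import "fmt"

func main() {
	// build the two-digits-per-entry lookup table for 00..99
	var smalls [200]byte
	for i := 0; i < 100; i++ {
		smalls[2*i] = byte('0' + i/10)
		smalls[2*i+1] = byte('0' + i%10)
	}

	u := uint64(9007199254740993) // arbitrary example value
	var a [24]byte                // 24 bytes comfortably holds a decimal uint64
	i := len(a)
	for u >= 100 {
		is := (u % 100) * 2 // index of the two ASCII digits for u%100
		u /= 100
		i -= 2
		a[i], a[i+1] = smalls[is], smalls[is+1]
	}
	// final one or two digits
	is := u * 2
	i--
	a[i] = smalls[is+1]
	if u >= 10 {
		i--
		a[i] = smalls[is]
	}
	fmt.Println(string(a[i:])) // 9007199254740993
}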
|
||||
// MARKER: checkLitErr methods to prevent the got/expect parameters from escaping
|
||||
|
||||
//go:noinline
|
||||
func jsonCheckLitErr3(got, expect [3]byte) {
|
||||
halt.errorf("expecting %s: got %s", expect, got)
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func jsonCheckLitErr4(got, expect [4]byte) {
|
||||
halt.errorf("expecting %s: got %s", expect, got)
|
||||
}
|
||||
|
||||
func jsonSlashURune(cs [4]byte) (rr uint32) {
|
||||
for _, c := range cs {
|
||||
// best to use explicit if-else
|
||||
// - not a table, etc which involve memory loads, array lookup with bounds checks, etc
|
||||
if c >= '0' && c <= '9' {
|
||||
rr = rr*16 + uint32(c-jsonU4Chk2)
|
||||
} else if c >= 'a' && c <= 'f' {
|
||||
rr = rr*16 + uint32(c-jsonU4Chk1)
|
||||
} else if c >= 'A' && c <= 'F' {
|
||||
rr = rr*16 + uint32(c-jsonU4Chk0)
|
||||
} else {
|
||||
return unicode.ReplacementChar
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
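// Note: jsonSlashURune decodes the 4 hex digits of a \uXXXX escape,
// e.g. the digits "0041" yield U+0041 ('A'); any non-hex digit causes the
// whole escape to decode to U+FFFD (assuming jsonU4Chk2/jsonU4Chk1/jsonU4Chk0
// are the usual '0', 'a'-10 and 'A'-10 offsets).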
|
||||
func jsonNakedNum(z *fauxUnion, bs []byte, preferFloat, signedInt bool) (err error) {
|
||||
// Note: jsonNakedNum is NEVER called with a zero-length []byte
|
||||
if preferFloat {
|
||||
z.v = valueTypeFloat
|
||||
z.f, err = parseFloat64(bs)
|
||||
} else {
|
||||
err = parseNumber(bs, z, signedInt)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
//----------------------
|
||||
|
||||
// JsonHandle is a handle for JSON encoding format.
|
||||
//
|
||||
// Json is comprehensively supported:
|
||||
// - decodes numbers into interface{} as int, uint or float64
|
||||
// based on how the number looks and some config parameters e.g. PreferFloat, SignedInt, etc.
|
||||
// - decode integers from float formatted numbers e.g. 1.27e+8
|
||||
// - decode any json value (numbers, bool, etc) from quoted strings
|
||||
// - configurable way to encode/decode []byte.
|
||||
// by default, encodes and decodes []byte using base64 Std Encoding
|
||||
// - UTF-8 support for encoding and decoding
|
||||
//
|
||||
// It has better performance than the encoding/json package in the standard library,
|
||||
// by leveraging the performance improvements of the codec library.
|
||||
//
|
||||
// In addition, it doesn't read more bytes than necessary during a decode, which allows
|
||||
// reading multiple values from a stream containing json and non-json content.
|
||||
// For example, a user can read a json value, then a cbor value, then a msgpack value,
|
||||
// all from the same stream in sequence.
|
||||
//
|
||||
// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are
|
||||
// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD.
|
||||
//
|
||||
// Note also that the float values for NaN, +Inf or -Inf are encoded as null,
|
||||
// as suggested by NOTE 4 of the ECMA-262 ECMAScript Language Specification 5.1 edition.
|
||||
// see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-262.pdf .
|
||||
//
|
||||
// Note the following behaviour differences vs std-library encoding/json package:
|
||||
// - struct field names matched in case-sensitive manner
|
||||
type JsonHandle struct {
|
||||
textEncodingType
|
||||
BasicHandle
|
||||
|
||||
// Indent indicates how a value is encoded.
|
||||
// - If positive, indent by that number of spaces.
|
||||
// - If negative, indent by that number of tabs.
|
||||
Indent int8
|
||||
|
||||
// IntegerAsString controls how integers (signed and unsigned) are encoded.
|
||||
//
|
||||
// Per the JSON Spec, JSON numbers are 64-bit floating point numbers.
|
||||
// Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
|
||||
// This can be mitigated by configuring how to encode integers.
|
||||
//
|
||||
// IntegerAsString interprets the following values:
|
||||
// - if 'L', then encode integers > 2^53 as a json string.
|
||||
// - if 'A', then encode all integers as a json string
|
||||
// containing the exact integer representation as a decimal.
|
||||
// - else encode all integers as a json number (default)
|
||||
IntegerAsString byte
|
||||
|
||||
// HTMLCharsAsIs controls how to encode some special characters to html: < > &
|
||||
//
|
||||
// By default, we encode them as \uXXXX
|
||||
// to prevent security holes when served from some browsers.
|
||||
HTMLCharsAsIs bool
|
||||
|
||||
// PreferFloat says that we will default to decoding a number as a float.
|
||||
// If not set, we will examine the characters of the number and decode as an
|
||||
// integer type if it doesn't have any of the characters [.eE].
|
||||
PreferFloat bool
|
||||
|
||||
// TermWhitespace says that we add a whitespace character
|
||||
// at the end of an encoding.
|
||||
//
|
||||
// The whitespace is important, especially if using numbers in a context
|
||||
// where multiple items are written to a stream.
|
||||
TermWhitespace bool
|
||||
|
||||
// MapKeyAsString says to encode all map keys as strings.
|
||||
//
|
||||
// Use this to enforce strict json output.
|
||||
// The only caveat is that a nil value is ALWAYS written as null (never as "null")
|
||||
MapKeyAsString bool
|
||||
|
||||
// _ uint64 // padding (cache line)
|
||||
|
||||
// Note: below, we store hardly-used items e.g. RawBytesExt.
|
||||
// These values below may straddle a cache line, but they are hardly-used,
|
||||
// so shouldn't contribute to false-sharing except in rare cases.
|
||||
|
||||
// RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way.
|
||||
// If not configured, raw bytes are encoded to/from base64 text.
|
||||
RawBytesExt InterfaceExt
|
||||
|
||||
// TimeFormat is an array of strings representing a time.Time format, with each one being either
|
||||
// a layout that honors the time.Time.Format specification.
|
||||
// In addition, at most one of the set below (unix, unixmilli, unixmicro, unixnano) can be specified,
|
||||
// supporting encoding and decoding time as a number relative to the time epoch of Jan 1, 1970.
|
||||
//
|
||||
// During encode of a time.Time, the first entry in the array is used (defaults to RFC 3339).
|
||||
//
|
||||
// During decode,
|
||||
// - if a string, then each of the layout formats will be tried in order until a time.Time is decoded.
|
||||
// - if a number, then the sole unix entry is used.
|
||||
TimeFormat []string
|
||||
|
||||
// BytesFormat is an array of strings representing how bytes are encoded.
|
||||
//
|
||||
// Supported values are base64 (default), base64url, base32, base32hex, base16 (synonymous with hex) and array.
|
||||
//
|
||||
// array is a special value indicating that bytes are encoded as a sequence of numbers.
|
||||
//
|
||||
// During encode of a []byte, the first entry is used (defaults to base64 if none specified).
|
||||
//
|
||||
// During decode
|
||||
// - if a string, then attempt decoding using each format in sequence until successful.
|
||||
// - if an array, then decode normally
|
||||
BytesFormat []string
|
||||
}
|
||||
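// Illustrative sketch (hypothetical helper, not part of the library): a typical
// configuration of JsonHandle using the fields documented above, followed by a
// round trip via NewEncoderBytes/NewDecoderBytes declared elsewhere in this package.
func sketchJsonHandleConfig() (out []byte, err error) {
	var h JsonHandle
	h.Indent = 2                          // pretty-print with 2 spaces
	h.IntegerAsString = 'L'               // quote integers > 2^53
	h.MapKeyAsString = true               // strict json: stringify map keys
	h.TimeFormat = []string{"2006-01-02"} // first entry is used when encoding
	h.BytesFormat = []string{"hex"}       // []byte encoded as base16 text

	v := map[string]interface{}{"n": uint64(1) << 60, "b": []byte{0xde, 0xad}}
	if err = NewEncoderBytes(&out, &h).Encode(v); err != nil {
		return
	}
	var v2 map[string]interface{}
	err = NewDecoderBytes(out, &h).Decode(&v2)
	return
}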
|
||||
func (h *JsonHandle) isJson() bool { return true }
|
||||
|
||||
// Name returns the name of the handle: json
|
||||
func (h *JsonHandle) Name() string { return "json" }
|
||||
|
||||
// func (h *JsonHandle) desc(bd byte) string { return str4byte(bd) }
|
||||
func (h *JsonHandle) desc(bd byte) string { return string(bd) }
|
||||
|
||||
func (h *JsonHandle) typical() bool {
|
||||
return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L'
|
||||
}
|
||||
|
||||
// SetInterfaceExt sets an extension
|
||||
func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
|
||||
return h.SetExt(rt, tag, makeExt(ext))
|
||||
}
|
||||
|
||||
func jsonFloatStrconvFmtPrec64(f float64) (fmt byte, prec int8) {
|
||||
fmt = 'f'
|
||||
prec = -1
|
||||
fbits := math.Float64bits(f)
|
||||
abs := math.Float64frombits(fbits &^ (1 << 63))
|
||||
if abs == 0 || abs == 1 {
|
||||
prec = 1
|
||||
} else if abs < 1e-6 || abs >= 1e21 {
|
||||
fmt = 'e'
|
||||
} else if noFrac64(fbits) {
|
||||
prec = 1
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func jsonFloatStrconvFmtPrec32(f float32) (fmt byte, prec int8) {
|
||||
fmt = 'f'
|
||||
prec = -1
|
||||
// directly handle Modf (to get fractions) and Abs (to get absolute)
|
||||
fbits := math.Float32bits(f)
|
||||
abs := math.Float32frombits(fbits &^ (1 << 31))
|
||||
if abs == 0 || abs == 1 {
|
||||
prec = 1
|
||||
} else if abs < 1e-6 || abs >= 1e21 {
|
||||
fmt = 'e'
|
||||
} else if noFrac32(fbits) {
|
||||
prec = 1
|
||||
}
|
||||
return
|
||||
}
|
||||
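// Illustrative sketch (hypothetical helper): how the (fmt, prec) pair computed
// by the helpers above would typically be combined with the standard library
// when appending a float; assumes "strconv" is imported in this file, and the
// real encoder may do this differently.
func sketchAppendFloat64(b []byte, f float64) []byte {
	fmtc, prec := jsonFloatStrconvFmtPrec64(f)
	return strconv.AppendFloat(b, f, fmtc, int(prec), 64)
}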
|
||||
var errJsonNoBd = errors.New("descBd unsupported in json")
|
||||
12482
vendor/github.com/ugorji/go/codec/json.fastpath.mono.generated.go
generated
vendored
Normal file
12482
vendor/github.com/ugorji/go/codec/json.fastpath.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
1230
vendor/github.com/ugorji/go/codec/json.go
generated
vendored
Normal file
1230
vendor/github.com/ugorji/go/codec/json.go
generated
vendored
Normal file
File diff suppressed because it is too large
8349
vendor/github.com/ugorji/go/codec/json.mono.generated.go
generated
vendored
Normal file
8349
vendor/github.com/ugorji/go/codec/json.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
52
vendor/github.com/ugorji/go/codec/json.notfastpath.mono.generated.go
generated
vendored
Normal file
52
vendor/github.com/ugorji/go/codec/json.notfastpath.mono.generated.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath)
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type fastpathEJsonBytes struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderJsonBytes, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDJsonBytes struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderJsonBytes, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsJsonBytes [0]fastpathEJsonBytes
|
||||
type fastpathDsJsonBytes [0]fastpathDJsonBytes
|
||||
|
||||
func (helperEncDriverJsonBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderJsonBytes) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverJsonBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderJsonBytes) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverJsonBytes) fastpathEList() (v *fastpathEsJsonBytes) { return }
|
||||
func (helperDecDriverJsonBytes) fastpathDList() (v *fastpathDsJsonBytes) { return }
|
||||
|
||||
type fastpathEJsonIO struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderJsonIO, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDJsonIO struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderJsonIO, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsJsonIO [0]fastpathEJsonIO
|
||||
type fastpathDsJsonIO [0]fastpathDJsonIO
|
||||
|
||||
func (helperEncDriverJsonIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderJsonIO) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverJsonIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderJsonIO) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverJsonIO) fastpathEList() (v *fastpathEsJsonIO) { return }
|
||||
func (helperDecDriverJsonIO) fastpathDList() (v *fastpathDsJsonIO) { return }
|
||||
324
vendor/github.com/ugorji/go/codec/mammoth_test.go.tmpl
generated
vendored
Normal file
324
vendor/github.com/ugorji/go/codec/mammoth_test.go.tmpl
generated
vendored
Normal file
@@ -0,0 +1,324 @@
|
||||
//go:build !codec.notmammoth
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
// Code generated from mammoth_test.go.tmpl - DO NOT EDIT.
|
||||
|
||||
package codec
|
||||
|
||||
import "testing"
|
||||
import "fmt"
|
||||
import "reflect"
|
||||
|
||||
// TestMammoth has all the different paths optimized in fastpath
|
||||
// It has all the primitives, slices and maps.
|
||||
//
|
||||
// For each of those types, it has a pointer and a non-pointer field.
|
||||
|
||||
func init() { _ = fmt.Printf } // so we can include fmt as needed
|
||||
|
||||
type TestMammoth struct {
|
||||
|
||||
{{range .Values }}{{if .Primitive -}}
|
||||
{{ .MethodNamePfx "F" true }} {{ .Primitive }}
|
||||
{{ .MethodNamePfx "Fptr" true }} *{{ .Primitive }}
|
||||
{{end}}{{end}}
|
||||
|
||||
{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}}
|
||||
{{ .MethodNamePfx "F" false }} []{{ .Elem }}
|
||||
{{ .MethodNamePfx "Fptr" false }} *[]{{ .Elem }}
|
||||
{{ .MethodNamePfx "Farr4" false }} [4]{{ .Elem }}
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
{{range .Values }}{{if not .Primitive }}{{if .MapKey -}}
|
||||
{{ .MethodNamePfx "F" false }} map[{{ .MapKey }}]{{ .Elem }}
|
||||
{{ .MethodNamePfx "Fptr" false }} *map[{{ .MapKey }}]{{ .Elem }}
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
}
|
||||
|
||||
// -----------
|
||||
|
||||
// Increase code coverage by covering all the codecgen paths, in fastpath ....
|
||||
//
|
||||
// Note: even though this is built based on fastpath, we will run these tests
|
||||
// in all modes, including notfastpath, etc.
|
||||
//
|
||||
// Add test file for creating a mammoth generated file as _mammoth_generated.go
|
||||
//
|
||||
// Now, add some types:
|
||||
// - some that implement BinaryMarshal, TextMarshal, JSONMarshal, and one that implements none of it
|
||||
// - create a wrapper type that includes TestMammoth2, with it in slices, and maps, and the custom types
|
||||
// - this wrapper object is what we work encode/decode (so that the codecgen methods are called)
|
||||
|
||||
type testMammoth2Binary uint64
|
||||
func (x testMammoth2Binary) MarshalBinary() (data []byte, err error) {
|
||||
data = make([]byte, 8)
|
||||
bigenstd.PutUint64(data, uint64(x))
|
||||
return
|
||||
}
|
||||
func (x *testMammoth2Binary) UnmarshalBinary(data []byte) (err error) {
|
||||
*x = testMammoth2Binary(bigenstd.Uint64(data))
|
||||
return
|
||||
}
|
||||
|
||||
type testMammoth2Text uint64
|
||||
func (x testMammoth2Text) MarshalText() (data []byte, err error) {
|
||||
data = []byte(fmt.Sprintf("%b", uint64(x)))
|
||||
return
|
||||
}
|
||||
func (x *testMammoth2Text) UnmarshalText(data []byte) (err error) {
|
||||
_, err = fmt.Sscanf(string(data), "%b", (*uint64)(x))
|
||||
return
|
||||
}
|
||||
|
||||
type testMammoth2Json uint64
|
||||
func (x testMammoth2Json) MarshalJSON() (data []byte, err error) {
|
||||
data = []byte(fmt.Sprintf("%v", uint64(x)))
|
||||
return
|
||||
}
|
||||
func (x *testMammoth2Json) UnmarshalJSON(data []byte) (err error) {
|
||||
_, err = fmt.Sscanf(string(data), "%v", (*uint64)(x))
|
||||
return
|
||||
}
|
||||
|
||||
type testMammoth2Basic [4]uint64
|
||||
|
||||
type TestMammoth2Wrapper struct {
|
||||
V TestMammoth
|
||||
T testMammoth2Text
|
||||
B testMammoth2Binary
|
||||
J testMammoth2Json
|
||||
C testMammoth2Basic
|
||||
M map[testMammoth2Basic]TestMammoth
|
||||
L []TestMammoth
|
||||
A [4]int64
|
||||
|
||||
Tcomplex128 complex128
|
||||
Tcomplex64 complex64
|
||||
Tbytes []uint8
|
||||
Tpbytes *[]uint8
|
||||
}
|
||||
|
||||
// -----------
|
||||
|
||||
{{range .Values }}{{if not .Primitive }}{{if not .MapKey -}}
|
||||
type {{ .MethodNamePfx "typMbs" false }} []{{ .Elem }}
|
||||
func (_ {{ .MethodNamePfx "typMbs" false }}) MapBySlice() { }
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
{{range .Values }}{{if not .Primitive }}{{if .MapKey -}}
|
||||
type {{ .MethodNamePfx "typMap" false }} map[{{ .MapKey }}]{{ .Elem }}
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
func __doTestMammothSlices(t *testing.T, h Handle) {
|
||||
{{range $i, $e := .Values }}{{if not .Primitive }}{{if not .MapKey -}}
|
||||
var v{{$i}}va [8]{{ .Elem }}
|
||||
for _, v := range [][]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .Elem }}, {{ zerocmd .Elem }}, {{ zerocmd .Elem }}, {{ nonzerocmd .Elem }} } } {
|
||||
{{/*
|
||||
// fmt.Printf(">>>> running mammoth slice v{{$i}}: %v\n", v)
|
||||
// - encode value to some []byte
|
||||
// - decode into a length-wise-equal []byte
|
||||
// - check if equal to initial slice
|
||||
// - encode ptr to the value
|
||||
// - check if encode bytes are same
|
||||
// - decode into ptrs to: nil, then 1-elem slice, equal-length, then large len slice
|
||||
// - decode into non-addressable slice of equal length, then larger len
|
||||
// - for each decode, compare elem-by-elem to the original slice
|
||||
// -
|
||||
// - rinse and repeat for a MapBySlice version
|
||||
// -
|
||||
*/ -}}
|
||||
var v{{$i}}v1, v{{$i}}v2 []{{ .Elem }}
|
||||
var bs{{$i}} []byte
|
||||
v{{$i}}v1 = v
|
||||
bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-slice-v{{$i}}")
|
||||
if v == nil {
|
||||
v{{$i}}v2 = make([]{{ .Elem }}, 2)
|
||||
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
|
||||
testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}") // should not change
|
||||
testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change
|
||||
v{{$i}}v2 = make([]{{ .Elem }}, 2)
|
||||
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
|
||||
testDeepEqualErr(v{{$i}}v2[0], v{{$i}}v2[1], t, "equal-slice-v{{$i}}-noaddr") // should not change
|
||||
testDeepEqualErr(len(v{{$i}}v2), 2, t, "equal-slice-v{{$i}}") // should not change
|
||||
} else {
|
||||
v{{$i}}v2 = make([]{{ .Elem }}, len(v))
|
||||
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}")
|
||||
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}")
|
||||
v{{$i}}v2 = make([]{{ .Elem }}, len(v))
|
||||
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-slice-v{{$i}}-noaddr") // non-addressable value
|
||||
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-noaddr")
|
||||
}
|
||||
testReleaseBytes(bs{{$i}})
|
||||
// ...
|
||||
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-slice-v{{$i}}-p")
|
||||
v{{$i}}v2 = nil
|
||||
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p")
|
||||
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-p")
|
||||
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
|
||||
testUnmarshalErr(&v{{$i}}va, bs{{$i}}, h, t, "dec-array-v{{$i}}-p-1")
|
||||
if v{{$i}}v1 == nil && v{{$i}}v2 == nil { v{{$i}}v2 = []{{ .Elem }}{} } // so we can compare to zero len slice below
|
||||
testDeepEqualErrHandle(v{{$i}}va[:len(v{{$i}}v2)], v{{$i}}v2, h, t, "equal-array-v{{$i}}-p-1")
|
||||
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
|
||||
v{{$i}}v2 = v{{$i}}va[:1:1]
|
||||
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-1")
|
||||
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-p-1")
|
||||
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
|
||||
v{{$i}}v2 = v{{$i}}va[:len(v{{$i}}v1):len(v{{$i}}v1)]
|
||||
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len")
|
||||
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-p-len")
|
||||
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
|
||||
v{{$i}}v2 = v{{$i}}va[:]
|
||||
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap")
|
||||
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-slice-v{{$i}}-p-cap")
|
||||
if len(v{{$i}}v1) > 1 {
|
||||
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
|
||||
testUnmarshalErr((&v{{$i}}va)[:len(v{{$i}}v1)], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-len-noaddr")
|
||||
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], h, t, "equal-slice-v{{$i}}-p-len-noaddr")
|
||||
v{{$i}}va = [8]{{ .Elem }}{} // clear the array
|
||||
testUnmarshalErr((&v{{$i}}va)[:], bs{{$i}}, h, t, "dec-slice-v{{$i}}-p-cap-noaddr")
|
||||
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}va[:len(v{{$i}}v1)], h, t, "equal-slice-v{{$i}}-p-cap-noaddr")
|
||||
}
|
||||
testReleaseBytes(bs{{$i}})
|
||||
// ...
|
||||
var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMbs" false }}
|
||||
v{{$i}}v2 = nil
|
||||
if v != nil { v{{$i}}v2 = make([]{{ .Elem }}, len(v)) }
|
||||
v{{$i}}v3 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v1)
|
||||
v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
|
||||
if v != nil {
|
||||
bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom")
|
||||
testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom")
|
||||
testDeepEqualErrHandle(v{{$i}}v3, v{{$i}}v4, h, t, "equal-slice-v{{$i}}-custom")
|
||||
testReleaseBytes(bs{{$i}})
|
||||
}
|
||||
bs{{$i}} = testMarshalErr(&v{{$i}}v3, h, t, "enc-slice-v{{$i}}-custom-p")
|
||||
v{{$i}}v2 = nil
|
||||
v{{$i}}v4 = {{ .MethodNamePfx "typMbs" false }}(v{{$i}}v2)
|
||||
testUnmarshalErr(&v{{$i}}v4, bs{{$i}}, h, t, "dec-slice-v{{$i}}-custom-p")
|
||||
testDeepEqualErrHandle(v{{$i}}v3, v{{$i}}v4, h, t, "equal-slice-v{{$i}}-custom-p")
|
||||
testReleaseBytes(bs{{$i}})
|
||||
}
|
||||
{{end}}{{end}}{{end}}
|
||||
}
|
||||
|
||||
func __doTestMammothMaps(t *testing.T, h Handle) {
|
||||
{{range $i, $e := .Values }}{{if not .Primitive }}{{if .MapKey -}}
|
||||
for _, v := range []map[{{ .MapKey }}]{{ .Elem }}{ nil, {}, { {{ nonzerocmd .MapKey }}:{{ zerocmd .Elem }} {{if ne "bool" .MapKey}}, {{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }} {{end}} } } {
|
||||
{{/* // fmt.Printf(">>>> running mammoth map v{{$i}}: %v\n", v) */ -}}
|
||||
var v{{$i}}v1, v{{$i}}v2 map[{{ .MapKey }}]{{ .Elem }}
|
||||
var bs{{$i}} []byte
|
||||
v{{$i}}v1 = v
|
||||
bs{{$i}} = testMarshalErr(v{{$i}}v1, h, t, "enc-map-v{{$i}}")
|
||||
if v != nil {
|
||||
v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) // reset map
|
||||
testUnmarshalErr(v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}")
|
||||
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-map-v{{$i}}")
|
||||
v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) // reset map
|
||||
testUnmarshalErr(reflect.ValueOf(v{{$i}}v2), bs{{$i}}, h, t, "dec-map-v{{$i}}-noaddr") // decode into non-addressable map value
|
||||
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-map-v{{$i}}-noaddr")
|
||||
}
|
||||
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
|
||||
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
|
||||
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-map-v{{$i}}-p-len")
|
||||
testReleaseBytes(bs{{$i}})
|
||||
bs{{$i}} = testMarshalErr(&v{{$i}}v1, h, t, "enc-map-v{{$i}}-p")
|
||||
v{{$i}}v2 = nil
|
||||
testUnmarshalErr(&v{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-nil")
|
||||
testDeepEqualErrHandle(v{{$i}}v1, v{{$i}}v2, h, t, "equal-map-v{{$i}}-p-nil")
|
||||
testReleaseBytes(bs{{$i}})
|
||||
// ...
|
||||
if v == nil { v{{$i}}v2 = nil } else { v{{$i}}v2 = make(map[{{ .MapKey }}]{{ .Elem }}, len(v)) } // reset map
|
||||
var v{{$i}}v3, v{{$i}}v4 {{ .MethodNamePfx "typMap" false }}
|
||||
v{{$i}}v3 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v1)
|
||||
v{{$i}}v4 = {{ .MethodNamePfx "typMap" false }}(v{{$i}}v2)
|
||||
if v != nil {
|
||||
bs{{$i}} = testMarshalErr(v{{$i}}v3, h, t, "enc-map-v{{$i}}-custom")
|
||||
testUnmarshalErr(v{{$i}}v4, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
|
||||
testDeepEqualErrHandle(v{{$i}}v3, v{{$i}}v4, h, t, "equal-map-v{{$i}}-p-len")
|
||||
testReleaseBytes(bs{{$i}})
|
||||
}
|
||||
type s{{$i}}T struct {
|
||||
M map[{{ .MapKey }}]{{ .Elem }}
|
||||
Mp *map[{{ .MapKey }}]{{ .Elem }}
|
||||
}
|
||||
var m{{$i}}v99 = map[{{ .MapKey }}]{{ .Elem }}{
|
||||
{{ zerocmd .MapKey }}: {{ zerocmd .Elem }},
|
||||
{{ nonzerocmd .MapKey }}:{{ nonzerocmd .Elem }},
|
||||
}
|
||||
var s{{$i}}v1, s{{$i}}v2 s{{$i}}T
|
||||
bs{{$i}} = testMarshalErr(s{{$i}}v1, h, t, "enc-map-v{{$i}}-custom")
|
||||
testUnmarshalErr(&s{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
|
||||
testDeepEqualErrHandle(s{{$i}}v1, s{{$i}}v2, h, t, "equal-map-v{{$i}}-p-len")
|
||||
testReleaseBytes(bs{{$i}})
|
||||
s{{$i}}v2 = s{{$i}}T{}
|
||||
s{{$i}}v1.M = m{{$i}}v99
|
||||
bs{{$i}} = testMarshalErr(s{{$i}}v1, h, t, "enc-map-v{{$i}}-custom")
|
||||
testUnmarshalErr(&s{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
|
||||
testDeepEqualErrHandle(s{{$i}}v1, s{{$i}}v2, h, t, "equal-map-v{{$i}}-p-len")
|
||||
testReleaseBytes(bs{{$i}})
|
||||
s{{$i}}v2 = s{{$i}}T{}
|
||||
s{{$i}}v1.Mp = &m{{$i}}v99
|
||||
bs{{$i}} = testMarshalErr(s{{$i}}v1, h, t, "enc-map-v{{$i}}-custom")
|
||||
testUnmarshalErr(&s{{$i}}v2, bs{{$i}}, h, t, "dec-map-v{{$i}}-p-len")
|
||||
testDeepEqualErrHandle(s{{$i}}v1, s{{$i}}v2, h, t, "equal-map-v{{$i}}-p-len")
|
||||
testReleaseBytes(bs{{$i}})
|
||||
}
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
}
|
||||
|
||||
func doTestMammothMapsAndSlices(t *testing.T, h Handle) {
|
||||
defer testSetup(t, &h)()
|
||||
if mh, ok := h.(*MsgpackHandle); ok {
|
||||
defer func(b bool) { mh.RawToString = b }(mh.RawToString)
|
||||
mh.RawToString = true
|
||||
}
|
||||
__doTestMammothSlices(t, h)
|
||||
__doTestMammothMaps(t, h)
|
||||
}
|
||||
|
||||
func doTestMammoth(t *testing.T, h Handle) {
|
||||
defer testSetup(t, &h)()
|
||||
if mh, ok := h.(*MsgpackHandle); ok {
|
||||
defer func(b bool) { mh.RawToString = b }(mh.RawToString)
|
||||
mh.RawToString = true
|
||||
}
|
||||
|
||||
name := h.Name()
|
||||
var b []byte
|
||||
|
||||
var m, m2 TestMammoth
|
||||
testRandomFillRV(reflect.ValueOf(&m).Elem())
|
||||
b = testMarshalErr(&m, h, t, "mammoth-"+name)
|
||||
|
||||
testUnmarshalErr(&m2, b, h, t, "mammoth-"+name)
|
||||
testDeepEqualErrHandle(&m, &m2, h, t, "mammoth-"+name)
|
||||
testReleaseBytes(b)
|
||||
|
||||
if testing.Short() {
|
||||
t.Skipf("skipping rest of mammoth test in -short mode")
|
||||
}
|
||||
|
||||
var mm, mm2 TestMammoth2Wrapper
|
||||
testRandomFillRV(reflect.ValueOf(&mm).Elem())
|
||||
b = testMarshalErr(&mm, h, t, "mammoth2-"+name)
|
||||
// os.Stderr.Write([]byte("\n\n\n\n" + string(b) + "\n\n\n\n"))
|
||||
testUnmarshalErr(&mm2, b, h, t, "mammoth2-"+name)
|
||||
testDeepEqualErrHandle(&mm, &mm2, h, t, "mammoth2-"+name)
|
||||
// testMammoth2(t, name, h)
|
||||
testReleaseBytes(b)
|
||||
}
|
||||
|
||||
{{range $i, $e := .Formats -}}
|
||||
func Test{{ . }}Mammoth(t *testing.T) {
|
||||
doTestMammoth(t, test{{ . }}H)
|
||||
}
|
||||
{{end}}
|
||||
{{range $i, $e := .Formats -}}
|
||||
func Test{{ . }}MammothMapsAndSlices(t *testing.T) {
|
||||
doTestMammothMapsAndSlices(t, test{{ . }}H)
|
||||
}
|
||||
{{end}}
|
||||
299
vendor/github.com/ugorji/go/codec/msgpack.base.go
generated
vendored
Normal file
299
vendor/github.com/ugorji/go/codec/msgpack.base.go
generated
vendored
Normal file
@@ -0,0 +1,299 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/rpc"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
const (
|
||||
mpPosFixNumMin byte = 0x00
|
||||
mpPosFixNumMax byte = 0x7f
|
||||
mpFixMapMin byte = 0x80
|
||||
mpFixMapMax byte = 0x8f
|
||||
mpFixArrayMin byte = 0x90
|
||||
mpFixArrayMax byte = 0x9f
|
||||
mpFixStrMin byte = 0xa0
|
||||
mpFixStrMax byte = 0xbf
|
||||
mpNil byte = 0xc0
|
||||
_ byte = 0xc1
|
||||
mpFalse byte = 0xc2
|
||||
mpTrue byte = 0xc3
|
||||
mpFloat byte = 0xca
|
||||
mpDouble byte = 0xcb
|
||||
mpUint8 byte = 0xcc
|
||||
mpUint16 byte = 0xcd
|
||||
mpUint32 byte = 0xce
|
||||
mpUint64 byte = 0xcf
|
||||
mpInt8 byte = 0xd0
|
||||
mpInt16 byte = 0xd1
|
||||
mpInt32 byte = 0xd2
|
||||
mpInt64 byte = 0xd3
|
||||
|
||||
// extensions below
|
||||
mpBin8 byte = 0xc4
|
||||
mpBin16 byte = 0xc5
|
||||
mpBin32 byte = 0xc6
|
||||
mpExt8 byte = 0xc7
|
||||
mpExt16 byte = 0xc8
|
||||
mpExt32 byte = 0xc9
|
||||
mpFixExt1 byte = 0xd4
|
||||
mpFixExt2 byte = 0xd5
|
||||
mpFixExt4 byte = 0xd6
|
||||
mpFixExt8 byte = 0xd7
|
||||
mpFixExt16 byte = 0xd8
|
||||
|
||||
mpStr8 byte = 0xd9 // new
|
||||
mpStr16 byte = 0xda
|
||||
mpStr32 byte = 0xdb
|
||||
|
||||
mpArray16 byte = 0xdc
|
||||
mpArray32 byte = 0xdd
|
||||
|
||||
mpMap16 byte = 0xde
|
||||
mpMap32 byte = 0xdf
|
||||
|
||||
mpNegFixNumMin byte = 0xe0
|
||||
mpNegFixNumMax byte = 0xff
|
||||
)
|
||||
|
||||
var mpTimeExtTag int8 = -1
|
||||
var mpTimeExtTagU = uint8(mpTimeExtTag)
|
||||
|
||||
var mpdescNames = map[byte]string{
|
||||
mpNil: "nil",
|
||||
mpFalse: "false",
|
||||
mpTrue: "true",
|
||||
mpFloat: "float",
|
||||
mpDouble: "float",
|
||||
mpUint8: "uint",
|
||||
mpUint16: "uint",
|
||||
mpUint32: "uint",
|
||||
mpUint64: "uint",
|
||||
mpInt8: "int",
|
||||
mpInt16: "int",
|
||||
mpInt32: "int",
|
||||
mpInt64: "int",
|
||||
|
||||
mpStr8: "string|bytes",
|
||||
mpStr16: "string|bytes",
|
||||
mpStr32: "string|bytes",
|
||||
|
||||
mpBin8: "bytes",
|
||||
mpBin16: "bytes",
|
||||
mpBin32: "bytes",
|
||||
|
||||
mpArray16: "array",
|
||||
mpArray32: "array",
|
||||
|
||||
mpMap16: "map",
|
||||
mpMap32: "map",
|
||||
}
|
||||
|
||||
func mpdesc(bd byte) (s string) {
|
||||
s = mpdescNames[bd]
|
||||
if s == "" {
|
||||
switch {
|
||||
case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax,
|
||||
bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
|
||||
s = "int"
|
||||
case bd >= mpFixStrMin && bd <= mpFixStrMax:
|
||||
s = "string|bytes"
|
||||
case bd >= mpFixArrayMin && bd <= mpFixArrayMax:
|
||||
s = "array"
|
||||
case bd >= mpFixMapMin && bd <= mpFixMapMax:
|
||||
s = "map"
|
||||
case bd >= mpFixExt1 && bd <= mpFixExt16,
|
||||
bd >= mpExt8 && bd <= mpExt32:
|
||||
s = "ext"
|
||||
default:
|
||||
s = "unknown"
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
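// Illustrative sketch (hypothetical helper): mpdesc maps a leading descriptor
// byte to a human-readable class, mainly for error messages; a few
// representative inputs, using only the constants and map above.
func sketchMpdesc() [4]string {
	return [4]string{
		mpdesc(mpNil),       // "nil"
		mpdesc(0x81),        // fixmap with 1 entry -> "map"
		mpdesc(mpFixStrMin), // "string|bytes"
		mpdesc(mpFixExt1),   // "ext"
	}
}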
|
||||
// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
|
||||
// that the backend RPC service takes multiple arguments, which have been arranged
|
||||
// in sequence in the slice.
|
||||
//
|
||||
// The Codec then passes it AS-IS to the rpc service (without wrapping it in an
|
||||
// array of 1 element).
|
||||
type MsgpackSpecRpcMultiArgs []interface{}
|
||||
|
||||
// A MsgpackContainer type specifies the different types of msgpackContainers.
|
||||
type msgpackContainerType struct {
|
||||
fixCutoff, bFixMin, b8, b16, b32 byte
|
||||
// hasFixMin, has8, has8Always bool
|
||||
}
|
||||
|
||||
var (
|
||||
msgpackContainerRawLegacy = msgpackContainerType{
|
||||
32, mpFixStrMin, 0, mpStr16, mpStr32,
|
||||
}
|
||||
msgpackContainerStr = msgpackContainerType{
|
||||
32, mpFixStrMin, mpStr8, mpStr16, mpStr32, // true, true, false,
|
||||
}
|
||||
msgpackContainerBin = msgpackContainerType{
|
||||
0, 0, mpBin8, mpBin16, mpBin32, // false, true, true,
|
||||
}
|
||||
msgpackContainerList = msgpackContainerType{
|
||||
16, mpFixArrayMin, 0, mpArray16, mpArray32, // true, false, false,
|
||||
}
|
||||
msgpackContainerMap = msgpackContainerType{
|
||||
16, mpFixMapMin, 0, mpMap16, mpMap32, // true, false, false,
|
||||
}
|
||||
)
|
||||
|
||||
//--------------------------------------------------
|
||||
|
||||
// MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
|
||||
type MsgpackHandle struct {
|
||||
binaryEncodingType
|
||||
notJsonType
|
||||
BasicHandle
|
||||
|
||||
// NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum.
|
||||
NoFixedNum bool
|
||||
|
||||
// WriteExt controls whether the new spec is honored.
|
||||
//
|
||||
// With WriteExt=true, we can encode configured extensions with extension tags
|
||||
// and encode string/[]byte/extensions in a way compatible with the new spec
|
||||
// but incompatible with the old spec.
|
||||
//
|
||||
// For compatibility with the old spec, set WriteExt=false.
|
||||
//
|
||||
// With WriteExt=false:
|
||||
// configured extensions are serialized as raw bytes (not msgpack extensions).
|
||||
// reserved byte descriptors like Str8 and those enabling the new msgpack Binary type
|
||||
// are not encoded.
|
||||
WriteExt bool
|
||||
|
||||
// PositiveIntUnsigned says to encode positive integers as unsigned.
|
||||
PositiveIntUnsigned bool
|
||||
}
|
||||
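// Illustrative sketch (hypothetical helper, not part of the library):
// configuring MsgpackHandle as documented above, then round-tripping a value
// via the constructors declared elsewhere in this package.
func sketchMsgpackHandleRoundTrip() (err error) {
	var h MsgpackHandle
	h.WriteExt = true            // honor the new msgpack spec (str8/bin types, extensions)
	h.PositiveIntUnsigned = true // encode positive integers as unsigned

	var b []byte
	in := map[string]interface{}{"k": uint64(7), "raw": []byte{1, 2, 3}}
	if err = NewEncoderBytes(&b, &h).Encode(in); err != nil {
		return
	}
	var out map[string]interface{}
	return NewDecoderBytes(b, &h).Decode(&out)
}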
|
||||
// Name returns the name of the handle: msgpack
|
||||
func (h *MsgpackHandle) Name() string { return "msgpack" }
|
||||
|
||||
func (h *MsgpackHandle) desc(bd byte) string { return mpdesc(bd) }
|
||||
|
||||
// SetBytesExt sets an extension
|
||||
func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
|
||||
return h.SetExt(rt, tag, makeExt(ext))
|
||||
}
|
||||
|
||||
//--------------------------------------------------
|
||||
|
||||
type msgpackSpecRpcCodec struct {
|
||||
*rpcCodec
|
||||
}
|
||||
|
||||
// /////////////// Spec RPC Codec ///////////////////
|
||||
func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
|
||||
// WriteRequest can write to both a Go service, and other services that do
|
||||
// not abide by the 1 argument rule of a Go service.
|
||||
// We discriminate based on if the body is a MsgpackSpecRpcMultiArgs
|
||||
var bodyArr []interface{}
|
||||
if m, ok := body.(MsgpackSpecRpcMultiArgs); ok {
|
||||
bodyArr = ([]interface{})(m)
|
||||
} else {
|
||||
bodyArr = []interface{}{body}
|
||||
}
|
||||
r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
|
||||
return c.write(r2)
|
||||
}
|
||||
|
||||
func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
|
||||
var moe interface{}
|
||||
if r.Error != "" {
|
||||
moe = r.Error
|
||||
}
|
||||
if moe != nil && body != nil {
|
||||
body = nil
|
||||
}
|
||||
r2 := []interface{}{1, uint32(r.Seq), moe, body}
|
||||
return c.write(r2)
|
||||
}
|
||||
|
||||
func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
|
||||
return c.parseCustomHeader(1, &r.Seq, &r.Error)
|
||||
}
|
||||
|
||||
func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
|
||||
return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
|
||||
}
|
||||
|
||||
func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
|
||||
if body == nil { // read and discard
|
||||
return c.read(nil)
|
||||
}
|
||||
bodyArr := []interface{}{body}
|
||||
return c.read(&bodyArr)
|
||||
}
|
||||
|
||||
func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
|
||||
if c.cls.Load().closed {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
// We read the response header by hand
|
||||
// so that the body can be decoded on its own from the stream at a later time.
|
||||
|
||||
const fia byte = 0x94 // four-item array descriptor value
|
||||
|
||||
var ba [1]byte
|
||||
var n int
|
||||
for {
|
||||
n, err = c.r.Read(ba[:])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if n == 1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
var b = ba[0]
|
||||
if b != fia {
|
||||
err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b))
|
||||
} else {
|
||||
err = c.read(&b)
|
||||
if err == nil {
|
||||
if b != expectTypeByte {
|
||||
err = fmt.Errorf("%s - expecting %v but got %x/%s", msgBadDesc, expectTypeByte, b, mpdesc(b))
|
||||
} else {
|
||||
err = c.read(msgid)
|
||||
if err == nil {
|
||||
err = c.read(methodOrError)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
//--------------------------------------------------
|
||||
|
||||
// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol
|
||||
// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
|
||||
type msgpackSpecRpc struct{}
|
||||
|
||||
// MsgpackSpecRpc implements Rpc using the communication protocol defined in
|
||||
// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
|
||||
//
|
||||
// See GoRpc documentation, for information on buffering for better performance.
|
||||
var MsgpackSpecRpc msgpackSpecRpc
|
||||
|
||||
func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
|
||||
return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
|
||||
}
|
||||
|
||||
func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
|
||||
return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
|
||||
}
|
||||
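// Illustrative sketch (hypothetical names, not part of the library): wiring
// MsgpackSpecRpc into net/rpc, including the MsgpackSpecRpcMultiArgs form
// documented earlier in this file. Assumes a connected io.ReadWriteCloser and
// a remote msgpack-rpc service exposing "Arith.Add"; "io" and "net/rpc" are
// already imported in this file.
func sketchMsgpackSpecRpcClient(conn io.ReadWriteCloser, h Handle) error {
	cl := rpc.NewClientWithCodec(MsgpackSpecRpc.ClientCodec(conn, h))
	defer cl.Close()
	var sum int
	// The multi-arg wrapper is passed AS-IS, so a non-Go service receives
	// two positional arguments instead of a single wrapped value.
	return cl.Call("Arith.Add", MsgpackSpecRpcMultiArgs{2, 3}, &sum)
}

func sketchMsgpackSpecRpcServe(conn io.ReadWriteCloser, h Handle) {
	// Services are assumed to be registered beforehand, e.g. via rpc.Register.
	rpc.ServeCodec(MsgpackSpecRpc.ServerCodec(conn, h))
}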
12482
vendor/github.com/ugorji/go/codec/msgpack.fastpath.mono.generated.go
generated
vendored
Normal file
12482
vendor/github.com/ugorji/go/codec/msgpack.fastpath.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
1019
vendor/github.com/ugorji/go/codec/msgpack.go
generated
vendored
Normal file
1019
vendor/github.com/ugorji/go/codec/msgpack.go
generated
vendored
Normal file
File diff suppressed because it is too large
8046
vendor/github.com/ugorji/go/codec/msgpack.mono.generated.go
generated
vendored
Normal file
8046
vendor/github.com/ugorji/go/codec/msgpack.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
52
vendor/github.com/ugorji/go/codec/msgpack.notfastpath.mono.generated.go
generated
vendored
Normal file
52
vendor/github.com/ugorji/go/codec/msgpack.notfastpath.mono.generated.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath)
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type fastpathEMsgpackBytes struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderMsgpackBytes, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDMsgpackBytes struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderMsgpackBytes, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsMsgpackBytes [0]fastpathEMsgpackBytes
|
||||
type fastpathDsMsgpackBytes [0]fastpathDMsgpackBytes
|
||||
|
||||
func (helperEncDriverMsgpackBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderMsgpackBytes) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverMsgpackBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderMsgpackBytes) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverMsgpackBytes) fastpathEList() (v *fastpathEsMsgpackBytes) { return }
|
||||
func (helperDecDriverMsgpackBytes) fastpathDList() (v *fastpathDsMsgpackBytes) { return }
|
||||
|
||||
type fastpathEMsgpackIO struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderMsgpackIO, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDMsgpackIO struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderMsgpackIO, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsMsgpackIO [0]fastpathEMsgpackIO
|
||||
type fastpathDsMsgpackIO [0]fastpathDMsgpackIO
|
||||
|
||||
func (helperEncDriverMsgpackIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderMsgpackIO) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverMsgpackIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderMsgpackIO) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverMsgpackIO) fastpathEList() (v *fastpathEsMsgpackIO) { return }
|
||||
func (helperDecDriverMsgpackIO) fastpathDList() (v *fastpathDsMsgpackIO) { return }
|
||||
838
vendor/github.com/ugorji/go/codec/reader.go
generated
vendored
Normal file
838
vendor/github.com/ugorji/go/codec/reader.go
generated
vendored
Normal file
@@ -0,0 +1,838 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// decReader abstracts the reading source, allowing implementations that can
|
||||
// read from an io.Reader or directly off a byte slice with zero-copying.
|
||||
type decReaderI interface {
|
||||
// readx will return a view of the []byte in one of 2 ways:
|
||||
// - direct view into []byte which decoding is happening from (if bytes)
|
||||
// - view into a mutable []byte which the ioReader is using (if IO)
|
||||
//
|
||||
// Users should directly consume the contents read, and not store for future use.
|
||||
readx(n uint) []byte
|
||||
|
||||
// skip n bytes
|
||||
skip(n uint)
|
||||
|
||||
readb([]byte)
|
||||
|
||||
// readxb will read n bytes, returning as out, and a flag stating whether
|
||||
// an internal buffer (not the view) was used.
|
||||
readxb(n uint) (out []byte, usingBuf bool)
|
||||
|
||||
readn1() byte
|
||||
readn2() [2]byte
|
||||
readn3() [3]byte
|
||||
readn4() [4]byte
|
||||
readn8() [8]byte
|
||||
// readn1eof() (v uint8, eof bool)
|
||||
|
||||
// // read up to 8 bytes at a time
|
||||
// readn(num uint8) (v [8]byte)
|
||||
|
||||
numread() uint // number of bytes read
|
||||
|
||||
// skip any whitespace characters, and return the first non-matching byte
|
||||
skipWhitespace() (token byte)
|
||||
|
||||
// jsonReadNum will read a sequence of numeric characters, checking from the last
|
||||
// read byte. It will return a sequence of numeric characters (v),
|
||||
// and the next token character (tok - returned separately).
|
||||
//
|
||||
// If an EOF is found before the next token is seen, it returns a token value of 0.
|
||||
jsonReadNum() (v []byte, token byte)
|
||||
|
||||
// jsonReadAsisChars recognizes 2 terminal characters (" or \).
|
||||
// jsonReadAsisChars will read json plain characters until it reaches a terminal char,
|
||||
// and returns a slice up to the terminal char (excluded),
|
||||
// and also returns the terminal char separately (" or \).
|
||||
jsonReadAsisChars() (v []byte, terminal byte)
|
||||
|
||||
// readUntil will read characters until it reaches a ",
|
||||
// return a slice up to " (excluded)
|
||||
jsonReadUntilDblQuote() (v []byte)
|
||||
|
||||
// skip will skip any byte that matches, and return the first non-matching byte
|
||||
// skip(accept *bitset256) (token byte)
|
||||
|
||||
// readTo will read any byte that matches, stopping once no-longer matching.
|
||||
// readTo(accept *bitset256) (out []byte)
|
||||
|
||||
// // readUntil will read characters until it reaches a stop char,
|
||||
// // return a slice up to the terminal byte (excluded)
|
||||
// readUntil(stop byte) (out []byte)
|
||||
|
||||
// only supported when reading from bytes
|
||||
// bytesReadFrom(startpos uint) []byte
|
||||
|
||||
// isBytes() bool
|
||||
resetIO(r io.Reader, bufsize int, maxInitLen int, blist *bytesFreeList)
|
||||
|
||||
resetBytes(in []byte)
|
||||
|
||||
// nextValueBytes() captures bytes read between a call to startRecording and stopRecording.
|
||||
// startRecording will always includes the last byte read.
|
||||
startRecording()
|
||||
// stopRecording will include all bytes read between the point of startRecording and now.
|
||||
stopRecording() []byte
|
||||
}
|
||||
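// Illustrative sketch (hypothetical helper): how the recording API above can
// capture the raw bytes of a single value while it is being decoded; decodeOne
// stands in for whatever drives the decoder across exactly one value.
func sketchCaptureValueBytes(r decReaderI, decodeOne func()) []byte {
	r.startRecording() // always includes the last byte already read
	decodeOne()        // consume exactly one encoded value from r
	return r.stopRecording()
}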
|
||||
// // ------------------------------------------------
|
||||
|
||||
const maxConsecutiveEmptyReads = 16 // 2 is sufficient, 16 is enough, 64 is optimal
|
||||
|
||||
// const defBufReaderSize = 4096
|
||||
|
||||
// --------------------
|
||||
|
||||
// ioReaderByteScanner contains the io.Reader and io.ByteScanner interfaces
|
||||
type ioReaderByteScanner interface {
|
||||
io.Reader
|
||||
io.ByteScanner
|
||||
}
|
||||
|
||||
// MARKER: why not separate bufioDecReader from ioDecReader?
|
||||
//
|
||||
// We tried, but only readn1 of bufioDecReader came close to being
|
||||
// inlined (at inline cost 82). All other methods were at inline cost >= 90.
|
||||
//
|
||||
// Consequently, there's no performance impact from having both together
|
||||
// (except a single if z.bufio branch, which is likely well predicted and happens
|
||||
// only once per call, right at the top).
|
||||
|
||||
// ioDecReader is a decReader that reads off an io.Reader.
|
||||
type ioDecReader struct {
|
||||
r io.Reader
|
||||
|
||||
blist *bytesFreeList
|
||||
|
||||
maxInitLen uint
|
||||
|
||||
n uint // num read
|
||||
|
||||
bufsize uint
|
||||
|
||||
bufio bool // are we buffering (rc and wc are valid)
|
||||
rbr bool // r is a byte reader
|
||||
recording bool // are we recording (src and erc are valid)
|
||||
done bool // did we reach EOF and are we done?
|
||||
|
||||
// valid when: bufio=false
|
||||
b [1]byte // tiny buffer for reading single byte (if z.br == nil)
|
||||
l byte // last byte read
|
||||
br io.ByteReader // main reader used for ReadByte
|
||||
|
||||
// valid when: bufio=true
|
||||
wc uint // write cursor
|
||||
rc uint // read cursor
|
||||
err error
|
||||
|
||||
// valid when: recording=true
|
||||
recc uint // start-recording cursor (valid: recording=true)
|
||||
|
||||
buf []byte // buffer for bufio OR recording (if !bufio)
|
||||
}
|
||||
|
||||
func (z *ioDecReader) resetBytes(in []byte) {
|
||||
halt.errorStr("resetBytes unsupported by ioDecReader")
|
||||
}
|
||||
|
||||
func (z *ioDecReader) resetIO(r io.Reader, bufsize int, maxInitLen int, blist *bytesFreeList) {
|
||||
buf := z.buf
|
||||
*z = ioDecReader{}
|
||||
z.maxInitLen = max(1024, uint(maxInitLen))
|
||||
z.blist = blist
|
||||
z.buf = blist.check(buf, max(256, bufsize))
|
||||
z.bufsize = uint(max(0, bufsize))
|
||||
z.bufio = z.bufsize > 0
|
||||
if z.bufio {
|
||||
z.buf = z.buf[:cap(z.buf)]
|
||||
} else {
|
||||
z.buf = z.buf[:0]
|
||||
}
|
||||
if r == nil {
|
||||
z.r = &eofReader
|
||||
} else {
|
||||
z.r = r
|
||||
}
|
||||
z.br, z.rbr = z.r.(io.ByteReader)
|
||||
}
|
||||
|
||||
func (z *ioDecReader) numread() uint {
|
||||
return z.n
|
||||
}
|
||||
|
||||
func (z *ioDecReader) readn2() [2]byte {
|
||||
return ([2]byte)(z.readx(2))
|
||||
// using readb forced the return value bs onto the heap, unnecessarily
|
||||
// z.readb(bs[:])
|
||||
// return
|
||||
}
|
||||
|
||||
func (z *ioDecReader) readn3() [3]byte {
|
||||
return ([3]byte)(z.readx(3))
|
||||
}
|
||||
|
||||
func (z *ioDecReader) readn4() [4]byte {
|
||||
return ([4]byte)(z.readx(4))
|
||||
}
|
||||
|
||||
func (z *ioDecReader) readn8() [8]byte {
|
||||
return ([8]byte)(z.readx(8))
|
||||
}
|
||||
|
||||
func (z *ioDecReader) readx(n uint) (bs []byte) {
|
||||
return bytesOK(z.readxb(n))
|
||||
}
|
||||
|
||||
func (z *ioDecReader) readErr() (err error) {
|
||||
err, z.err = z.err, nil
|
||||
return
|
||||
}
|
||||
|
||||
func (z *ioDecReader) checkErr() {
|
||||
halt.onerror(z.readErr())
|
||||
}
|
||||
|
||||
func (z *ioDecReader) unexpectedEOF() {
|
||||
z.checkErr()
|
||||
// if no error, still halt with unexpected EOF
|
||||
halt.error(io.ErrUnexpectedEOF)
|
||||
}
|
||||
|
||||
func (z *ioDecReader) readOne() (b byte, err error) {
|
||||
n, err := z.r.Read(z.b[:])
|
||||
if n == 1 {
|
||||
err = nil
|
||||
b = z.b[0]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// fillbuf reads a new chunk into the buffer.
|
||||
func (z *ioDecReader) fillbuf(bufsize uint) (numShift, numRead uint) {
|
||||
z.checkErr()
|
||||
bufsize = max(bufsize, z.bufsize)
|
||||
|
||||
// Slide existing data to beginning.
|
||||
if z.recording {
|
||||
numShift = z.recc // recc is always <= rc
|
||||
} else {
|
||||
numShift = z.rc
|
||||
}
|
||||
if numShift > 0 {
|
||||
numShift-- // never shift last byte read out
|
||||
}
|
||||
copy(z.buf, z.buf[numShift:z.wc])
|
||||
z.wc -= numShift
|
||||
z.rc -= numShift
|
||||
if z.recording {
|
||||
z.recc -= numShift
|
||||
}
|
||||
// add enough to allow us to read up to bufsize again iff
|
||||
// - buf is fully written
|
||||
// - NOTE: don't pre-allocate more until needed
|
||||
if uint(len(z.buf)) == z.wc {
|
||||
if bufsize+z.wc < uint(cap(z.buf)) {
|
||||
z.buf = z.buf[:uint(cap(z.buf))]
|
||||
} else {
|
||||
bufsize = max(uint(cap(z.buf)*3/2), bufsize+z.wc)
|
||||
buf := z.blist.get(int(bufsize))
|
||||
buf = buf[:cap(buf)]
|
||||
copy(buf, z.buf[:z.wc])
|
||||
z.blist.put(z.buf)
|
||||
z.buf = buf
|
||||
}
|
||||
}
|
||||
// Read new data: try a limited number of times.
|
||||
// if n == 0: try up to maxConsecutiveEmptyReads
|
||||
// if n > 0 and err == nil: try one more time (to see if we get n == 0 and EOF)
|
||||
for i := maxConsecutiveEmptyReads; i > 0; i-- {
|
||||
n, err := z.r.Read(z.buf[z.wc:])
|
||||
numRead += uint(n)
|
||||
z.wc += uint(n)
|
||||
if err != nil {
|
||||
z.err = err
|
||||
if err == io.EOF {
|
||||
z.done = true // leading to UnexpectedEOF if another Read is called
|
||||
} else if errors.Is(err, os.ErrDeadlineExceeded) {
|
||||
// os read deadline, but some bytes read: return (don't store err)
|
||||
z.err = nil // allow for a retry next time fillbuf is called
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// if z.wc == uint(len(z.buf)) {
|
||||
// return
|
||||
// }
|
||||
// only read one time if results returned
|
||||
// if n > 0 && i > 2 {
|
||||
// i = 2 // try max one more time (to see about getting EOF)
|
||||
// }
|
||||
|
||||
// Once you have some data from this read call, move on.
|
||||
// Consequently, a blocked Read has less chance of happening.
|
||||
if n > 0 {
|
||||
return
|
||||
}
|
||||
}
|
||||
z.err = io.ErrNoProgress // either no data read OR not enough data read, without an EOF
|
||||
return
|
||||
}
|
||||
|
||||
func (z *ioDecReader) readb(bs []byte) {
|
||||
if len(bs) == 0 {
|
||||
return
|
||||
}
|
||||
var err error
|
||||
var n int
|
||||
if z.bufio {
|
||||
BUFIO:
|
||||
for z.rc == z.wc {
|
||||
z.fillbuf(0)
|
||||
}
|
||||
n = copy(bs, z.buf[z.rc:z.wc])
|
||||
z.rc += uint(n)
|
||||
z.n += uint(n)
|
||||
if n == len(bs) {
|
||||
return
|
||||
}
|
||||
bs = bs[n:]
|
||||
goto BUFIO
|
||||
}
|
||||
|
||||
// -------- NOT BUFIO ------
|
||||
|
||||
var nn uint
|
||||
bs0 := bs
|
||||
READER:
|
||||
n, err = z.r.Read(bs)
|
||||
if n > 0 {
|
||||
z.l = bs[n-1]
|
||||
nn += uint(n)
|
||||
bs = bs[n:]
|
||||
}
|
||||
if len(bs) != 0 && err == nil {
|
||||
goto READER
|
||||
}
|
||||
if z.recording {
|
||||
z.buf = append(z.buf, bs0[:nn]...)
|
||||
}
|
||||
z.n += nn
|
||||
if len(bs) != 0 {
|
||||
halt.onerror(err)
|
||||
halt.errorf("ioDecReader.readb read %d out of %d bytes requested", nn, len(bs0))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (z *ioDecReader) readn1() (b uint8) {
|
||||
if z.bufio {
|
||||
for z.rc == z.wc {
|
||||
z.fillbuf(0)
|
||||
}
|
||||
b = z.buf[z.rc]
|
||||
z.rc++
|
||||
z.n++
|
||||
return
|
||||
}
|
||||
|
||||
// -------- NOT BUFIO ------
|
||||
|
||||
var err error
|
||||
if z.rbr {
|
||||
b, err = z.br.ReadByte()
|
||||
} else {
|
||||
b, err = z.readOne()
|
||||
}
|
||||
halt.onerror(err)
|
||||
z.l = b
|
||||
z.n++
|
||||
if z.recording {
|
||||
z.buf = append(z.buf, b)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (z *ioDecReader) readxb(n uint) (out []byte, useBuf bool) {
|
||||
if n == 0 {
|
||||
return zeroByteSlice, false
|
||||
}
|
||||
|
||||
if z.bufio {
|
||||
BUFIO:
|
||||
nn := int(n+z.rc) - int(z.wc)
|
||||
if nn > 0 {
|
||||
z.fillbuf(decInferLen(nn, z.maxInitLen, 1))
|
||||
goto BUFIO
|
||||
}
|
||||
pos := z.rc
|
||||
z.rc += uint(n)
|
||||
z.n += uint(n)
|
||||
out = z.buf[pos:z.rc]
|
||||
useBuf = true
|
||||
return
|
||||
}
|
||||
|
||||
// -------- NOT BUFIO ------
|
||||
|
||||
var n3 int
|
||||
var err error
|
||||
useBuf = true
|
||||
out = z.buf
|
||||
r0 := uint(len(out))
|
||||
r := r0
|
||||
nn := int(n)
|
||||
for nn > 0 {
|
||||
halt.onerror(err) // check error whenever there's more to read
|
||||
n2 := r + decInferLen(int(nn), z.maxInitLen, 1)
|
||||
if cap(out) < int(n2) {
|
||||
out2 := z.blist.putGet(out, int(n2))[:n2] // make([]byte, len2+len3)
|
||||
copy(out2, out)
|
||||
out = out2
|
||||
} else {
|
||||
out = out[:n2]
|
||||
}
|
||||
n3, err = z.r.Read(out[r:n2])
|
||||
if n3 > 0 {
|
||||
z.l = out[r+uint(n3)-1]
|
||||
nn -= n3
|
||||
r += uint(n3)
|
||||
}
|
||||
}
|
||||
z.buf = out[:r0+n]
|
||||
out = out[r0 : r0+n]
|
||||
z.n += n
|
||||
return
|
||||
}
|
||||
|
||||
func (z *ioDecReader) skip(n uint) {
|
||||
if n == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if z.bufio {
|
||||
BUFIO:
|
||||
n2 := min(n, z.wc-z.rc)
|
||||
// handle in-line, so z.buf doesn't grow much (since we're skipping)
|
||||
// ie by setting z.rc, fillbuf should keep shifting left (unless recording)
|
||||
z.rc += n2
|
||||
z.n += n2
|
||||
n -= n2
|
||||
if n > 0 {
|
||||
z.fillbuf(decInferLen(int(n+z.rc)-int(z.wc), z.maxInitLen, 1))
|
||||
goto BUFIO
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// -------- NOT BUFIO ------
|
||||
|
||||
var out []byte
|
||||
var fromBlist bool
|
||||
if z.recording {
|
||||
out = z.buf
|
||||
} else {
|
||||
nn := int(decInferLen(int(n), z.maxInitLen, 1))
|
||||
if cap(z.buf) >= nn/2 {
|
||||
out = z.buf[:cap(z.buf)]
|
||||
} else {
|
||||
fromBlist = true
|
||||
out = z.blist.get(nn)
|
||||
}
|
||||
}
|
||||
|
||||
var r uint
|
||||
var n3 int
|
||||
var err error
|
||||
nn := int(n)
|
||||
for nn > 0 {
|
||||
halt.onerror(err)
|
||||
n2 := uint(nn)
|
||||
if z.recording {
|
||||
r = uint(len(out))
|
||||
n2 = r + decInferLen(int(nn), z.maxInitLen, 1)
|
||||
if cap(out) < int(n2) {
|
||||
out2 := z.blist.putGet(out, int(n2))[:n2] // make([]byte, len2+len3)
|
||||
copy(out2, out)
|
||||
out = out2
|
||||
} else {
|
||||
out = out[:n2]
|
||||
}
|
||||
}
|
||||
n3, err = z.r.Read(out[r:n2])
|
||||
if n3 > 0 {
|
||||
z.l = out[r+uint(n3)-1]
|
||||
z.n += uint(n3)
|
||||
nn -= n3
|
||||
}
|
||||
}
|
||||
if z.recording {
|
||||
z.buf = out
|
||||
} else if fromBlist {
|
||||
z.blist.put(out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ---- JSON SPECIFIC HELPERS HERE ----
|
||||
|
||||
func (z *ioDecReader) jsonReadNum() (bs []byte, token byte) {
|
||||
var start, pos, end uint
|
||||
if z.bufio {
|
||||
// read and fill into buf, then take substring
|
||||
start = z.rc - 1 // include last byte read
|
||||
pos = start
|
||||
BUFIO:
|
||||
if pos == z.wc {
|
||||
if z.done {
|
||||
end = pos
|
||||
goto END
|
||||
}
|
||||
numshift, numread := z.fillbuf(0)
|
||||
start -= numshift
|
||||
pos -= numshift
|
||||
if numread == 0 {
|
||||
end = pos
|
||||
goto END
|
||||
}
|
||||
}
|
||||
token = z.buf[pos]
|
||||
pos++
|
||||
if isNumberChar(token) {
|
||||
goto BUFIO
|
||||
}
|
||||
end = pos - 1
|
||||
END:
|
||||
z.n += (pos - z.rc)
|
||||
z.rc = pos
|
||||
return z.buf[start:end], token
|
||||
}
|
||||
|
||||
// if not recording, add the last read byte into buf
|
||||
if !z.recording {
|
||||
z.buf = append(z.buf[:0], z.l)
|
||||
}
|
||||
start = uint(len(z.buf) - 1) // incl last byte in z.buf
|
||||
var b byte
|
||||
var err error
|
||||
|
||||
READER:
|
||||
if z.rbr {
|
||||
b, err = z.br.ReadByte()
|
||||
} else {
|
||||
b, err = z.readOne()
|
||||
}
|
||||
if err == io.EOF {
|
||||
return z.buf[start:], 0
|
||||
}
|
||||
halt.onerror(err)
|
||||
z.l = b
|
||||
z.n++
|
||||
z.buf = append(z.buf, b)
|
||||
if isNumberChar(b) {
|
||||
goto READER
|
||||
}
|
||||
return z.buf[start : len(z.buf)-1], b
|
||||
}
|
||||
|
||||
func (z *ioDecReader) skipWhitespace() (tok byte) {
|
||||
var pos uint
|
||||
if z.bufio {
|
||||
pos = z.rc
|
||||
BUFIO:
|
||||
if pos == z.wc {
|
||||
if z.done {
|
||||
z.unexpectedEOF()
|
||||
}
|
||||
numshift, numread := z.fillbuf(0)
|
||||
pos -= numshift
|
||||
if numread == 0 {
|
||||
z.unexpectedEOF()
|
||||
}
|
||||
}
|
||||
tok = z.buf[pos]
|
||||
pos++
|
||||
if isWhitespaceChar(tok) {
|
||||
goto BUFIO
|
||||
}
|
||||
z.n += (pos - z.rc)
|
||||
z.rc = pos
|
||||
return tok
|
||||
}
|
||||
|
||||
var err error
|
||||
READER:
|
||||
if z.rbr {
|
||||
tok, err = z.br.ReadByte()
|
||||
} else {
|
||||
tok, err = z.readOne()
|
||||
}
|
||||
halt.onerror(err)
|
||||
z.n++
|
||||
z.l = tok
|
||||
if z.recording {
|
||||
z.buf = append(z.buf, tok)
|
||||
}
|
||||
if isWhitespaceChar(tok) {
|
||||
goto READER
|
||||
}
|
||||
return tok
|
||||
}
|
||||
|
||||
func (z *ioDecReader) readUntil(stop1, stop2 byte) (bs []byte, tok byte) {
|
||||
var start, pos uint
|
||||
if z.bufio {
|
||||
start = z.rc
|
||||
pos = start
|
||||
BUFIO:
|
||||
if pos == z.wc {
|
||||
if z.done {
|
||||
z.unexpectedEOF()
|
||||
}
|
||||
numshift, numread := z.fillbuf(0)
|
||||
start -= numshift
|
||||
pos -= numshift
|
||||
if numread == 0 {
|
||||
z.unexpectedEOF()
|
||||
}
|
||||
}
|
||||
tok = z.buf[pos]
|
||||
pos++
|
||||
if tok == stop1 || tok == stop2 {
|
||||
z.n += (pos - z.rc)
|
||||
z.rc = pos
|
||||
return z.buf[start : pos-1], tok
|
||||
}
|
||||
goto BUFIO
|
||||
}
|
||||
|
||||
var err error
|
||||
if !z.recording {
|
||||
z.buf = z.buf[:0]
|
||||
}
|
||||
start = uint(len(z.buf))
|
||||
READER:
|
||||
if z.rbr {
|
||||
tok, err = z.br.ReadByte()
|
||||
} else {
|
||||
tok, err = z.readOne()
|
||||
}
|
||||
halt.onerror(err)
|
||||
z.n++
|
||||
z.l = tok
|
||||
z.buf = append(z.buf, tok)
|
||||
if tok == stop1 || tok == stop2 {
|
||||
return z.buf[start : len(z.buf)-1], tok
|
||||
}
|
||||
goto READER
|
||||
}
|
||||
|
||||
func (z *ioDecReader) jsonReadAsisChars() (bs []byte, tok byte) {
|
||||
return z.readUntil('"', '\\')
|
||||
}
|
||||
|
||||
func (z *ioDecReader) jsonReadUntilDblQuote() (bs []byte) {
|
||||
bs, _ = z.readUntil('"', 0)
|
||||
return
|
||||
}
|
||||
|
||||
// ---- start/stop recording ----
|
||||
|
||||
func (z *ioDecReader) startRecording() {
|
||||
z.recording = true
|
||||
// always include last byte read
|
||||
if z.bufio {
|
||||
z.recc = z.rc - 1
|
||||
} else {
|
||||
z.buf = append(z.buf[:0], z.l)
|
||||
}
|
||||
}
|
||||
|
||||
func (z *ioDecReader) stopRecording() (v []byte) {
|
||||
z.recording = false
|
||||
if z.bufio {
|
||||
v = z.buf[z.recc:z.rc]
|
||||
z.recc = 0
|
||||
} else {
|
||||
v = z.buf
|
||||
z.buf = z.buf[:0]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ------------------------------------
|
||||
|
||||
// bytesDecReader is a decReader that reads off a byte slice with zero copying
|
||||
//
|
||||
// Note: we do not try to convert indexing out of bounds to an io error.
|
||||
// Instead, we let it bubble up to the exported Encode/Decode method
|
||||
// and recover it as an io error.
|
||||
//
|
||||
// Every function here MUST defensively check bounds, either explicitly
|
||||
// or by relying on Go's implicit slice bounds checks.
|
||||
//
|
||||
// see panicValToErr(...) function in helper.go.
|
||||
type bytesDecReader struct {
|
||||
b []byte // data
|
||||
c uint // cursor
|
||||
r uint // recording cursor
|
||||
xb []byte // buffer for readxb
|
||||
}
|
||||
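// Illustrative sketch (hypothetical, not this package's panicValToErr): how an
// out-of-bounds index from the methods below can be recovered at the exported
// Encode/Decode boundary and surfaced as an io error instead of a panic.
func sketchRecoverAsIOError(decode func()) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = io.ErrUnexpectedEOF // truncated input: a read went past the end of the slice
		}
	}()
	decode()
	return
}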
|
||||
func (z *bytesDecReader) resetIO(r io.Reader, bufsize int, maxInitLen int, blist *bytesFreeList) {
|
||||
halt.errorStr("resetIO unsupported by bytesDecReader")
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) resetBytes(in []byte) {
|
||||
// it's ok to reslice a nil slice, so long as the bounds do not go past 0
|
||||
z.b = in[:len(in):len(in)] // reslicing must not go past capacity
|
||||
z.c = 0
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) numread() uint {
|
||||
return z.c
|
||||
}
|
||||
|
||||
// Note: slicing from a non-constant start position is more expensive,
|
||||
// as more computation is required to determine the new slice's start pointer.
|
||||
// However, we do it only once, and it's better than reslicing both z.b and return value.
|
||||
|
||||
func (z *bytesDecReader) readx(n uint) (bs []byte) {
|
||||
bs = z.b[z.c : z.c+n]
|
||||
z.c += n
|
||||
return
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) skip(n uint) {
|
||||
if z.c+n > uint(cap(z.b)) {
|
||||
halt.error(&outOfBoundsError{uint(cap(z.b)), z.c + n})
|
||||
}
|
||||
z.c += n
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) readxb(n uint) (out []byte, usingBuf bool) {
|
||||
return z.readx(n), false
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) readb(bs []byte) {
|
||||
copy(bs, z.readx(uint(len(bs))))
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) readn1() (v uint8) {
|
||||
v = z.b[z.c]
|
||||
z.c++
|
||||
return
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) readn2() (bs [2]byte) {
|
||||
bs = [2]byte(z.b[z.c:])
|
||||
z.c += 2
|
||||
return
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) readn3() (bs [3]byte) {
|
||||
bs = [3]byte(z.b[z.c:])
|
||||
z.c += 3
|
||||
return
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) readn4() (bs [4]byte) {
|
||||
bs = [4]byte(z.b[z.c:])
|
||||
z.c += 4
|
||||
return
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) readn8() (bs [8]byte) {
|
||||
bs = [8]byte(z.b[z.c:])
|
||||
z.c += 8
|
||||
return
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) jsonReadNum() (bs []byte, token byte) {
|
||||
start := z.c - 1 // include last byte
|
||||
i := start
|
||||
LOOP:
|
||||
// gracefully handle end of slice (~= EOF)
|
||||
if i < uint(len(z.b)) {
|
||||
if isNumberChar(z.b[i]) {
|
||||
i++
|
||||
goto LOOP
|
||||
}
|
||||
token = z.b[i]
|
||||
}
|
||||
z.c = i + 1
|
||||
bs = z.b[start:i] // byteSliceOf(z.b, start, i)
|
||||
return
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) jsonReadAsisChars() (bs []byte, token byte) {
|
||||
i := z.c
|
||||
LOOP:
|
||||
token = z.b[i]
|
||||
i++
|
||||
if token == '"' || token == '\\' {
|
||||
// z.c, i = i, z.c
|
||||
// return byteSliceOf(z.b, i, z.c-1), token
|
||||
bs = z.b[z.c : i-1]
|
||||
z.c = i
|
||||
return
|
||||
// return z.b[i : z.c-1], token
|
||||
}
|
||||
goto LOOP
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) skipWhitespace() (token byte) {
|
||||
i := z.c
|
||||
LOOP:
|
||||
// setting token before check reduces inlining cost,
|
||||
// making containerNext inlineable
|
||||
token = z.b[i]
|
||||
if !isWhitespaceChar(token) {
|
||||
z.c = i + 1
|
||||
return
|
||||
}
|
||||
i++
|
||||
goto LOOP
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) jsonReadUntilDblQuote() (out []byte) {
|
||||
i := z.c
|
||||
LOOP:
|
||||
if z.b[i] == '"' {
|
||||
out = z.b[z.c:i] // byteSliceOf(z.b, z.c, i)
|
||||
z.c = i + 1
|
||||
return
|
||||
}
|
||||
i++
|
||||
goto LOOP
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) startRecording() {
|
||||
z.r = z.c - 1
|
||||
}
|
||||
|
||||
func (z *bytesDecReader) stopRecording() (v []byte) {
|
||||
v = z.b[z.r:z.c]
|
||||
z.r = 0
|
||||
return
|
||||
}
|
||||
|
||||
type devNullReader struct{}
|
||||
|
||||
func (devNullReader) Read(p []byte) (int, error) { return 0, io.EOF }
|
||||
func (devNullReader) Close() error { return nil }
|
||||
func (devNullReader) ReadByte() (byte, error) { return 0, io.EOF }
|
||||
func (devNullReader) UnreadByte() error { return io.EOF }
|
||||
|
||||
// MARKER: readn{1,2,3,4,8} should throw an out of bounds error if past length.
|
||||
// MARKER: readn1: explicitly ensure bounds check is done
|
||||
// MARKER: readn{2,3,4,8}: ensure you slice z.b completely so we get bounds error if past end.
|
||||
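// Sketch of the bounds-check mechanism noted in the MARKER comments above (hypothetical
// helper, not part of the package): converting a slice to a fixed-size array panics when
// the slice is shorter than the array, so reading past the end surfaces as a recoverable
// panic rather than silently returning stale bytes.
func sketchReadn2(b []byte, c *uint) [2]byte {
	bs := [2]byte(b[*c:]) // panics if fewer than 2 bytes remain at *c
	*c += 2
	return bs
}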
228
vendor/github.com/ugorji/go/codec/rpc.go
generated
vendored
Normal file
@@ -0,0 +1,228 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/rpc"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
var (
|
||||
errRpcIsClosed = errors.New("rpc - connection has been closed")
|
||||
errRpcNoConn = errors.New("rpc - no connection")
|
||||
|
||||
rpcSpaceArr = [1]byte{' '}
|
||||
)
|
||||
|
||||
// Rpc provides an rpc Server or Client Codec for rpc communication.
|
||||
type Rpc interface {
|
||||
ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
|
||||
ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
|
||||
}
|
||||
|
||||
// RPCOptions holds options specific to rpc functionality
|
||||
type RPCOptions struct {
|
||||
// RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls.
|
||||
//
|
||||
// Set RPCNoBuffer=true to turn buffering off.
|
||||
//
|
||||
// Buffering can still be done if buffered connections are passed in, or
|
||||
// buffering is configured on the handle.
|
||||
//
|
||||
// Deprecated: Buffering should be configured at the Handle or by using a buffered Reader.
|
||||
// Setting this has no effect anymore (after v1.2.12 - authored 2025-05-06)
|
||||
RPCNoBuffer bool
|
||||
}
|
||||
|
||||
// rpcCodec defines the struct members and common methods.
|
||||
type rpcCodec struct {
|
||||
c io.Closer
|
||||
r io.Reader
|
||||
w io.Writer
|
||||
f ioFlusher
|
||||
nc net.Conn
|
||||
dec *Decoder
|
||||
enc *Encoder
|
||||
h Handle
|
||||
|
||||
cls atomic.Pointer[clsErr]
|
||||
}
|
||||
|
||||
func newRPCCodec(conn io.ReadWriteCloser, h Handle) *rpcCodec {
|
||||
nc, _ := conn.(net.Conn)
|
||||
f, _ := conn.(ioFlusher)
|
||||
rc := &rpcCodec{
|
||||
h: h,
|
||||
c: conn,
|
||||
w: conn,
|
||||
r: conn,
|
||||
f: f,
|
||||
nc: nc,
|
||||
enc: NewEncoder(conn, h),
|
||||
dec: NewDecoder(conn, h),
|
||||
}
|
||||
rc.cls.Store(new(clsErr))
|
||||
return rc
|
||||
}
|
||||
|
||||
func (c *rpcCodec) write(obj ...interface{}) (err error) {
|
||||
err = c.ready()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if c.f != nil {
|
||||
defer func() {
|
||||
flushErr := c.f.Flush()
|
||||
if flushErr != nil && err == nil {
|
||||
err = flushErr
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
for _, o := range obj {
|
||||
err = c.enc.Encode(o)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// defensive: ensure a space is always written after each encoding;
|
||||
// if the value was a number, encoding another value right after it
|
||||
// without a space would lead to invalid output.
|
||||
if c.h.isJson() {
|
||||
_, err = c.w.Write(rpcSpaceArr[:])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
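// Example of the space rule above (illustrative): encoding the numbers 1 and 2 back to
// back as JSON without a separator would produce "12", which reads back as one number;
// writing "1 2" keeps them as two distinct values on the stream.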
|
||||
func (c *rpcCodec) read(obj interface{}) (err error) {
|
||||
err = c.ready()
|
||||
if err == nil {
|
||||
// Setting ReadDeadline should not be necessary,
|
||||
// especially since it only works for net.Conn (not generic ioReadCloser).
|
||||
// if c.nc != nil {
|
||||
// c.nc.SetReadDeadline(time.Now().Add(1 * time.Second))
|
||||
// }
|
||||
|
||||
// Note: If nil is passed in, we should read and discard
|
||||
if obj == nil {
|
||||
// return c.dec.Decode(&obj)
|
||||
err = panicToErr(c.dec, func() { c.dec.swallow() })
|
||||
} else {
|
||||
err = c.dec.Decode(obj)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *rpcCodec) Close() (err error) {
|
||||
if c.c != nil {
|
||||
cls := c.cls.Load()
|
||||
if !cls.closed {
|
||||
// writing to the same pointer could lead to a data race (always allocate a new one)
|
||||
cls = &clsErr{closed: true, err: c.c.Close()}
|
||||
c.cls.Store(cls)
|
||||
}
|
||||
err = cls.err
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *rpcCodec) ready() (err error) {
|
||||
if c.c == nil {
|
||||
err = errRpcNoConn
|
||||
} else {
|
||||
cls := c.cls.Load()
|
||||
if cls != nil && cls.closed {
|
||||
if err = cls.err; err == nil {
|
||||
err = errRpcIsClosed
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *rpcCodec) ReadResponseBody(body interface{}) error {
|
||||
return c.read(body)
|
||||
}
|
||||
|
||||
// -------------------------------------
|
||||
|
||||
type goRpcCodec struct {
|
||||
*rpcCodec
|
||||
}
|
||||
|
||||
func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
|
||||
return c.write(r, body)
|
||||
}
|
||||
|
||||
func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
|
||||
return c.write(r, body)
|
||||
}
|
||||
|
||||
func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
|
||||
return c.read(r)
|
||||
}
|
||||
|
||||
func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
|
||||
return c.read(r)
|
||||
}
|
||||
|
||||
func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
|
||||
return c.read(body)
|
||||
}
|
||||
|
||||
// -------------------------------------
|
||||
|
||||
// goRpc is the implementation of Rpc that uses the communication protocol
|
||||
// as defined in the net/rpc package.
|
||||
type goRpc struct{}
|
||||
|
||||
// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
|
||||
//
|
||||
// Note: network connection (from net.Dial, of type io.ReadWriteCloser) is not buffered.
|
||||
//
|
||||
// For performance, you should configure WriterBufferSize and ReaderBufferSize on the handle.
|
||||
// This ensures we use an adequate buffer during reading and writing.
|
||||
// If not configured, we will internally initialize and use a buffer during reads and writes.
|
||||
// This can be turned off via the RPCNoBuffer option on the Handle.
|
||||
//
|
||||
// var handle codec.JsonHandle
|
||||
// handle.RPCNoBuffer = true // turns off attempt by rpc module to initialize a buffer
|
||||
//
|
||||
// Example 1: one way of configuring buffering explicitly:
|
||||
//
|
||||
// var handle codec.JsonHandle // codec handle
|
||||
// handle.ReaderBufferSize = 1024
|
||||
// handle.WriterBufferSize = 1024
|
||||
// var conn io.ReadWriteCloser // connection got from a socket
|
||||
// var serverCodec = GoRpc.ServerCodec(conn, handle)
|
||||
// var clientCodec = GoRpc.ClientCodec(conn, handle)
|
||||
//
|
||||
// Example 2: you can also explicitly create a buffered connection yourself,
|
||||
// and not worry about configuring the buffer sizes in the Handle.
|
||||
//
|
||||
// var handle codec.Handle // codec handle
|
||||
// var conn io.ReadWriteCloser // connection got from a socket
|
||||
// var bufconn = struct { // bufconn here is a buffered io.ReadWriteCloser
|
||||
// io.Closer
|
||||
// *bufio.Reader
|
||||
// *bufio.Writer
|
||||
// }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)}
|
||||
// var serverCodec = GoRpc.ServerCodec(bufconn, handle)
|
||||
// var clientCodec = GoRpc.ClientCodec(bufconn, handle)
|
||||
var GoRpc goRpc
|
||||
|
||||
func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
|
||||
return &goRpcCodec{newRPCCodec(conn, h)}
|
||||
}
|
||||
|
||||
func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
|
||||
return &goRpcCodec{newRPCCodec(conn, h)}
|
||||
}
|
||||
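// Usage sketch (illustrative only, not part of this package): wiring GoRpc into a
// net/rpc server for a single connection, mirroring the doc comment above. JsonHandle
// is used as in the examples there; any Handle works.
func serveConnSketch(conn io.ReadWriteCloser) {
	var handle JsonHandle
	srv := rpc.NewServer()
	// ServeCodec blocks, answering requests on this connection using the codec's wire format.
	srv.ServeCodec(GoRpc.ServerCodec(conn, &handle))
}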
97
vendor/github.com/ugorji/go/codec/simple.base.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
const (
|
||||
_ uint8 = iota
|
||||
simpleVdNil = 1
|
||||
simpleVdFalse = 2
|
||||
simpleVdTrue = 3
|
||||
simpleVdFloat32 = 4
|
||||
simpleVdFloat64 = 5
|
||||
|
||||
// each takes 4 descriptor values (ie n, n+1, n+2, n+3)
|
||||
simpleVdPosInt = 8
|
||||
simpleVdNegInt = 12
|
||||
|
||||
simpleVdTime = 24
|
||||
|
||||
// containers: each takes 8 descriptor values (ie n, n+1, n+2, ... n+7)
|
||||
simpleVdString = 216
|
||||
simpleVdByteArray = 224
|
||||
simpleVdArray = 232
|
||||
simpleVdMap = 240
|
||||
simpleVdExt = 248
|
||||
)
|
||||
|
||||
var simpledescNames = map[byte]string{
|
||||
simpleVdNil: "null",
|
||||
simpleVdFalse: "false",
|
||||
simpleVdTrue: "true",
|
||||
simpleVdFloat32: "float32",
|
||||
simpleVdFloat64: "float64",
|
||||
|
||||
simpleVdPosInt: "+int",
|
||||
simpleVdNegInt: "-int",
|
||||
|
||||
simpleVdTime: "time",
|
||||
|
||||
simpleVdString: "string",
|
||||
simpleVdByteArray: "binary",
|
||||
simpleVdArray: "array",
|
||||
simpleVdMap: "map",
|
||||
simpleVdExt: "ext",
|
||||
}
|
||||
|
||||
func simpledesc(bd byte) (s string) {
|
||||
s = simpledescNames[bd]
|
||||
if s == "" {
|
||||
s = "unknown"
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
//------------------------------------
|
||||
|
||||
// SimpleHandle is a Handle for a very simple encoding format.
|
||||
//
|
||||
// simple is a simplistic codec similar to binc, but not as compact.
|
||||
// - Encoding of a value is always preceded by the descriptor byte (bd)
|
||||
// - True, false, nil are encoded fully in 1 byte (the descriptor)
|
||||
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
|
||||
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
|
||||
// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
|
||||
// - Length of containers (strings, bytes, array, map, extensions)
|
||||
// are encoded in 0, 1, 2, 4 or 8 bytes.
|
||||
// Zero-length containers have no length encoded.
|
||||
// For others, the number of length bytes is given by pow(2, (bd%8)-1), ie 1, 2, 4 or 8
|
||||
// - maps are encoded as [bd] [length] [[key][value]]...
|
||||
// - arrays are encoded as [bd] [length] [value]...
|
||||
// - extensions are encoded as [bd] [length] [tag] [byte]...
|
||||
// - strings/bytearrays are encoded as [bd] [length] [byte]...
|
||||
// - time.Time are encoded as [bd] [length] [byte]...
|
||||
//
|
||||
// The full spec will be published soon.
|
||||
type SimpleHandle struct {
|
||||
binaryEncodingType
|
||||
notJsonType
|
||||
BasicHandle
|
||||
|
||||
// EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil
|
||||
EncZeroValuesAsNil bool
|
||||
}
|
||||
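// Worked example (illustrative, derived from encLen/EncodeString in simple.go below):
// with default handle options, the string "abc" encodes as a descriptor byte, a one-byte
// length, then the raw bytes.
//
//	0xd9 0x03 'a' 'b' 'c'   // 0xd9 = simpleVdString+1 (length fits in 1 byte), 0x03 = len("abc")
var _ = []byte{0xd9, 0x03, 'a', 'b', 'c'} // sketch of the wire bytes for "abc"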
|
||||
// Name returns the name of the handle: simple
|
||||
func (h *SimpleHandle) Name() string { return "simple" }
|
||||
|
||||
func (h *SimpleHandle) desc(bd byte) string { return simpledesc(bd) }
|
||||
|
||||
// SetBytesExt sets an extension
|
||||
func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) {
|
||||
return h.SetExt(rt, tag, makeExt(ext))
|
||||
}
|
||||
12482
vendor/github.com/ugorji/go/codec/simple.fastpath.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
749
vendor/github.com/ugorji/go/codec/simple.go
generated
vendored
Normal file
@@ -0,0 +1,749 @@
|
||||
//go:build notmono || codec.notmono
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"io"
|
||||
"math"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
type simpleEncDriver[T encWriter] struct {
|
||||
noBuiltInTypes
|
||||
encDriverNoopContainerWriter
|
||||
encDriverNoState
|
||||
encDriverContainerNoTrackerT
|
||||
encInit2er
|
||||
|
||||
h *SimpleHandle
|
||||
e *encoderBase
|
||||
// b [8]byte
|
||||
w T
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) EncodeNil() {
|
||||
e.w.writen1(simpleVdNil)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) EncodeBool(b bool) {
|
||||
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && !b {
|
||||
e.EncodeNil()
|
||||
return
|
||||
}
|
||||
if b {
|
||||
e.w.writen1(simpleVdTrue)
|
||||
} else {
|
||||
e.w.writen1(simpleVdFalse)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) EncodeFloat32(f float32) {
|
||||
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 {
|
||||
e.EncodeNil()
|
||||
return
|
||||
}
|
||||
e.w.writen1(simpleVdFloat32)
|
||||
e.w.writen4(bigen.PutUint32(math.Float32bits(f)))
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) EncodeFloat64(f float64) {
|
||||
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && f == 0.0 {
|
||||
e.EncodeNil()
|
||||
return
|
||||
}
|
||||
e.w.writen1(simpleVdFloat64)
|
||||
e.w.writen8(bigen.PutUint64(math.Float64bits(f)))
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) EncodeInt(v int64) {
|
||||
if v < 0 {
|
||||
e.encUint(uint64(-v), simpleVdNegInt)
|
||||
} else {
|
||||
e.encUint(uint64(v), simpleVdPosInt)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) EncodeUint(v uint64) {
|
||||
e.encUint(v, simpleVdPosInt)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) encUint(v uint64, bd uint8) {
|
||||
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == 0 {
|
||||
e.EncodeNil()
|
||||
return
|
||||
}
|
||||
if v <= math.MaxUint8 {
|
||||
e.w.writen2(bd, uint8(v))
|
||||
} else if v <= math.MaxUint16 {
|
||||
e.w.writen1(bd + 1)
|
||||
e.w.writen2(bigen.PutUint16(uint16(v)))
|
||||
} else if v <= math.MaxUint32 {
|
||||
e.w.writen1(bd + 2)
|
||||
e.w.writen4(bigen.PutUint32(uint32(v)))
|
||||
} else { // if v <= math.MaxUint64 {
|
||||
e.w.writen1(bd + 3)
|
||||
e.w.writen8(bigen.PutUint64(v))
|
||||
}
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) encLen(bd byte, length int) {
|
||||
if length == 0 {
|
||||
e.w.writen1(bd)
|
||||
} else if length <= math.MaxUint8 {
|
||||
e.w.writen1(bd + 1)
|
||||
e.w.writen1(uint8(length))
|
||||
} else if length <= math.MaxUint16 {
|
||||
e.w.writen1(bd + 2)
|
||||
e.w.writen2(bigen.PutUint16(uint16(length)))
|
||||
} else if int64(length) <= math.MaxUint32 {
|
||||
e.w.writen1(bd + 3)
|
||||
e.w.writen4(bigen.PutUint32(uint32(length)))
|
||||
} else {
|
||||
e.w.writen1(bd + 4)
|
||||
e.w.writen8(bigen.PutUint64(uint64(length)))
|
||||
}
|
||||
}
|
||||
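// Illustrative note (not used by the package): for a 300-byte byte array the length does
// not fit in one byte, so encLen writes simpleVdByteArray+2 followed by the big-endian
// uint16 length, i.e. the prefix 0xe2 0x01 0x2c before the payload bytes.
var _ = [3]byte{0xe2, 0x01, 0x2c} // sketch: descriptor + length prefix for a 300-byte byte array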
|
||||
func (e *simpleEncDriver[T]) EncodeExt(v interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
|
||||
var bs0, bs []byte
|
||||
if ext == SelfExt {
|
||||
bs0 = e.e.blist.get(1024)
|
||||
bs = bs0
|
||||
sideEncode(e.h, &e.h.sideEncPool, func(se encoderI) { oneOffEncode(se, v, &bs, basetype, false) })
|
||||
} else {
|
||||
bs = ext.WriteExt(v)
|
||||
}
|
||||
if bs == nil {
|
||||
e.writeNilBytes()
|
||||
goto END
|
||||
}
|
||||
e.encodeExtPreamble(uint8(xtag), len(bs))
|
||||
e.w.writeb(bs)
|
||||
END:
|
||||
if ext == SelfExt {
|
||||
e.e.blist.put(bs)
|
||||
if !byteSliceSameData(bs0, bs) {
|
||||
e.e.blist.put(bs0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) EncodeRawExt(re *RawExt) {
|
||||
e.encodeExtPreamble(uint8(re.Tag), len(re.Data))
|
||||
e.w.writeb(re.Data)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) encodeExtPreamble(xtag byte, length int) {
|
||||
e.encLen(simpleVdExt, length)
|
||||
e.w.writen1(xtag)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) WriteArrayStart(length int) {
|
||||
e.encLen(simpleVdArray, length)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) WriteMapStart(length int) {
|
||||
e.encLen(simpleVdMap, length)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) WriteArrayEmpty() {
|
||||
// e.WriteArrayStart(0) = e.encLen(simpleVdArray, 0)
|
||||
e.w.writen1(simpleVdArray)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) WriteMapEmpty() {
|
||||
// e.WriteMapStart(0) = e.encLen(simpleVdMap, 0)
|
||||
e.w.writen1(simpleVdMap)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) EncodeString(v string) {
|
||||
if e.h.EncZeroValuesAsNil && e.e.c != containerMapKey && v == "" {
|
||||
e.EncodeNil()
|
||||
return
|
||||
}
|
||||
if e.h.StringToRaw {
|
||||
e.encLen(simpleVdByteArray, len(v))
|
||||
} else {
|
||||
e.encLen(simpleVdString, len(v))
|
||||
}
|
||||
e.w.writestr(v)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) EncodeStringNoEscape4Json(v string) { e.EncodeString(v) }
|
||||
|
||||
func (e *simpleEncDriver[T]) EncodeStringBytesRaw(v []byte) {
|
||||
// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == nil {
|
||||
e.encLen(simpleVdByteArray, len(v))
|
||||
e.w.writeb(v)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) EncodeBytes(v []byte) {
|
||||
if v == nil {
|
||||
e.writeNilBytes()
|
||||
return
|
||||
}
|
||||
e.EncodeStringBytesRaw(v)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) encodeNilBytes() {
|
||||
b := byte(simpleVdNil)
|
||||
if e.h.NilCollectionToZeroLength {
|
||||
b = simpleVdArray
|
||||
}
|
||||
e.w.writen1(b)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) writeNilOr(v byte) {
|
||||
if !e.h.NilCollectionToZeroLength {
|
||||
v = simpleVdNil
|
||||
}
|
||||
e.w.writen1(v)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) writeNilArray() {
|
||||
e.writeNilOr(simpleVdArray)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) writeNilMap() {
|
||||
e.writeNilOr(simpleVdMap)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) writeNilBytes() {
|
||||
e.writeNilOr(simpleVdByteArray)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) EncodeTime(t time.Time) {
|
||||
// if e.h.EncZeroValuesAsNil && e.c != containerMapKey && t.IsZero() {
|
||||
if t.IsZero() {
|
||||
e.EncodeNil()
|
||||
return
|
||||
}
|
||||
v, err := t.MarshalBinary()
|
||||
halt.onerror(err)
|
||||
e.w.writen2(simpleVdTime, uint8(len(v)))
|
||||
e.w.writeb(v)
|
||||
}
|
||||
|
||||
//------------------------------------
|
||||
|
||||
type simpleDecDriver[T decReader] struct {
|
||||
h *SimpleHandle
|
||||
d *decoderBase
|
||||
r T
|
||||
|
||||
bdAndBdread
|
||||
// bytes bool
|
||||
|
||||
noBuiltInTypes
|
||||
// decDriverNoopNumberHelper
|
||||
decDriverNoopContainerReader
|
||||
decInit2er
|
||||
|
||||
// ds interface{} // must be *decoder[simpleDecDriverM[bytes...]]
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) readNextBd() {
|
||||
d.bd = d.r.readn1()
|
||||
d.bdRead = true
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) advanceNil() (null bool) {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
if d.bd == simpleVdNil {
|
||||
d.bdRead = false
|
||||
return true // null = true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) ContainerType() (vt valueType) {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
switch d.bd {
|
||||
case simpleVdNil:
|
||||
d.bdRead = false
|
||||
return valueTypeNil
|
||||
case simpleVdByteArray, simpleVdByteArray + 1,
|
||||
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
|
||||
return valueTypeBytes
|
||||
case simpleVdString, simpleVdString + 1,
|
||||
simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
|
||||
return valueTypeString
|
||||
case simpleVdArray, simpleVdArray + 1,
|
||||
simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
|
||||
return valueTypeArray
|
||||
case simpleVdMap, simpleVdMap + 1,
|
||||
simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
|
||||
return valueTypeMap
|
||||
}
|
||||
return valueTypeUnset
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) TryNil() bool {
|
||||
return d.advanceNil()
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) decFloat() (f float64, ok bool) {
|
||||
ok = true
|
||||
switch d.bd {
|
||||
case simpleVdFloat32:
|
||||
f = float64(math.Float32frombits(bigen.Uint32(d.r.readn4())))
|
||||
case simpleVdFloat64:
|
||||
f = math.Float64frombits(bigen.Uint64(d.r.readn8()))
|
||||
default:
|
||||
ok = false
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) decInteger() (ui uint64, neg, ok bool) {
|
||||
ok = true
|
||||
switch d.bd {
|
||||
case simpleVdPosInt:
|
||||
ui = uint64(d.r.readn1())
|
||||
case simpleVdPosInt + 1:
|
||||
ui = uint64(bigen.Uint16(d.r.readn2()))
|
||||
case simpleVdPosInt + 2:
|
||||
ui = uint64(bigen.Uint32(d.r.readn4()))
|
||||
case simpleVdPosInt + 3:
|
||||
ui = uint64(bigen.Uint64(d.r.readn8()))
|
||||
case simpleVdNegInt:
|
||||
ui = uint64(d.r.readn1())
|
||||
neg = true
|
||||
case simpleVdNegInt + 1:
|
||||
ui = uint64(bigen.Uint16(d.r.readn2()))
|
||||
neg = true
|
||||
case simpleVdNegInt + 2:
|
||||
ui = uint64(bigen.Uint32(d.r.readn4()))
|
||||
neg = true
|
||||
case simpleVdNegInt + 3:
|
||||
ui = uint64(bigen.Uint64(d.r.readn8()))
|
||||
neg = true
|
||||
default:
|
||||
ok = false
|
||||
// halt.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
|
||||
}
|
||||
// DO NOT do this check below, because callers may only want the unsigned value:
|
||||
//
|
||||
// if ui > math.MaxInt64 {
|
||||
// halt.errorf("decIntAny: Integer out of range for signed int64: %v", ui)
|
||||
// return
|
||||
// }
|
||||
return
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) DecodeInt64() (i int64) {
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
v1, v2, v3 := d.decInteger()
|
||||
i = decNegintPosintFloatNumberHelper{d}.int64(v1, v2, v3, false)
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) DecodeUint64() (ui uint64) {
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
ui = decNegintPosintFloatNumberHelper{d}.uint64(d.decInteger())
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) DecodeFloat64() (f float64) {
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
v1, v2 := d.decFloat()
|
||||
f = decNegintPosintFloatNumberHelper{d}.float64(v1, v2, false)
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
// bool can be decoded from bool only (single byte).
|
||||
func (d *simpleDecDriver[T]) DecodeBool() (b bool) {
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
if d.bd == simpleVdFalse {
|
||||
} else if d.bd == simpleVdTrue {
|
||||
b = true
|
||||
} else {
|
||||
halt.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd)
|
||||
}
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) ReadMapStart() (length int) {
|
||||
if d.advanceNil() {
|
||||
return containerLenNil
|
||||
}
|
||||
d.bdRead = false
|
||||
return d.decLen()
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) ReadArrayStart() (length int) {
|
||||
if d.advanceNil() {
|
||||
return containerLenNil
|
||||
}
|
||||
d.bdRead = false
|
||||
return d.decLen()
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) uint2Len(ui uint64) int {
|
||||
if chkOvf.Uint(ui, intBitsize) {
|
||||
halt.errorf("overflow integer: %v", ui)
|
||||
}
|
||||
return int(ui)
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) decLen() int {
|
||||
switch d.bd & 7 { // d.bd % 8 {
|
||||
case 0:
|
||||
return 0
|
||||
case 1:
|
||||
return int(d.r.readn1())
|
||||
case 2:
|
||||
return int(bigen.Uint16(d.r.readn2()))
|
||||
case 3:
|
||||
return d.uint2Len(uint64(bigen.Uint32(d.r.readn4())))
|
||||
case 4:
|
||||
return d.uint2Len(bigen.Uint64(d.r.readn8()))
|
||||
}
|
||||
halt.errorf("cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
|
||||
return -1
|
||||
}
|
||||
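// Illustrative mapping (derived from the switch above; a hypothetical table, not used by
// the package): bd&7 selects how many bytes carry the container length.
var _ = map[byte]int{0: 0 /* empty */, 1: 1, 2: 2, 3: 4, 4: 8} // length-byte count per bd&7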
|
||||
func (d *simpleDecDriver[T]) DecodeStringAsBytes() ([]byte, dBytesAttachState) {
|
||||
return d.DecodeBytes()
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) DecodeBytes() (bs []byte, state dBytesAttachState) {
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
var cond bool
|
||||
// check if this is an "array" of uint8's (see ContainerType for how an array is inferred)
|
||||
if d.bd >= simpleVdArray && d.bd <= simpleVdArray+4 {
|
||||
slen := d.ReadArrayStart()
|
||||
bs, cond = usableByteSlice(d.d.buf, slen)
|
||||
for i := 0; i < len(bs); i++ {
|
||||
bs[i] = uint8(chkOvf.UintV(d.DecodeUint64(), 8))
|
||||
}
|
||||
for i := len(bs); i < slen; i++ {
|
||||
bs = append(bs, uint8(chkOvf.UintV(d.DecodeUint64(), 8)))
|
||||
}
|
||||
if cond {
|
||||
d.d.buf = bs
|
||||
}
|
||||
state = dBytesAttachBuffer
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
clen := d.decLen()
|
||||
d.bdRead = false
|
||||
bs, cond = d.r.readxb(uint(clen))
|
||||
state = d.d.attachState(cond)
|
||||
return
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) DecodeTime() (t time.Time) {
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
if d.bd != simpleVdTime {
|
||||
halt.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd)
|
||||
}
|
||||
d.bdRead = false
|
||||
clen := uint(d.r.readn1())
|
||||
b := d.r.readx(clen)
|
||||
halt.onerror((&t).UnmarshalBinary(b))
|
||||
return
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) DecodeExt(rv interface{}, basetype reflect.Type, xtag uint64, ext Ext) {
|
||||
xbs, _, _, ok := d.decodeExtV(ext != nil, xtag)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if ext == SelfExt {
|
||||
sideDecode(d.h, &d.h.sideDecPool, func(sd decoderI) { oneOffDecode(sd, rv, xbs, basetype, false) })
|
||||
} else {
|
||||
ext.ReadExt(rv, xbs)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) DecodeRawExt(re *RawExt) {
|
||||
xbs, realxtag, state, ok := d.decodeExtV(false, 0)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
re.Tag = uint64(realxtag)
|
||||
re.setData(xbs, state >= dBytesAttachViewZerocopy)
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) decodeExtV(verifyTag bool, xtagIn uint64) (xbs []byte, xtag byte, bstate dBytesAttachState, ok bool) {
|
||||
if xtagIn > 0xff {
|
||||
halt.errorf("ext: tag must be <= 0xff; got: %v", xtagIn)
|
||||
}
|
||||
if d.advanceNil() {
|
||||
return
|
||||
}
|
||||
tag := uint8(xtagIn)
|
||||
switch d.bd {
|
||||
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
|
||||
l := d.decLen()
|
||||
xtag = d.r.readn1()
|
||||
if verifyTag && xtag != tag {
|
||||
halt.errorf("wrong extension tag. Got %b. Expecting: %v", xtag, tag)
|
||||
}
|
||||
xbs, ok = d.r.readxb(uint(l))
|
||||
bstate = d.d.attachState(ok)
|
||||
case simpleVdByteArray, simpleVdByteArray + 1,
|
||||
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
|
||||
xbs, bstate = d.DecodeBytes()
|
||||
default:
|
||||
halt.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd)
|
||||
}
|
||||
d.bdRead = false
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) DecodeNaked() {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
|
||||
n := d.d.naked()
|
||||
var decodeFurther bool
|
||||
|
||||
switch d.bd {
|
||||
case simpleVdNil:
|
||||
n.v = valueTypeNil
|
||||
case simpleVdFalse:
|
||||
n.v = valueTypeBool
|
||||
n.b = false
|
||||
case simpleVdTrue:
|
||||
n.v = valueTypeBool
|
||||
n.b = true
|
||||
case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
|
||||
if d.h.SignedInteger {
|
||||
n.v = valueTypeInt
|
||||
n.i = d.DecodeInt64()
|
||||
} else {
|
||||
n.v = valueTypeUint
|
||||
n.u = d.DecodeUint64()
|
||||
}
|
||||
case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
|
||||
n.v = valueTypeInt
|
||||
n.i = d.DecodeInt64()
|
||||
case simpleVdFloat32:
|
||||
n.v = valueTypeFloat
|
||||
n.f = d.DecodeFloat64()
|
||||
case simpleVdFloat64:
|
||||
n.v = valueTypeFloat
|
||||
n.f = d.DecodeFloat64()
|
||||
case simpleVdTime:
|
||||
n.v = valueTypeTime
|
||||
n.t = d.DecodeTime()
|
||||
case simpleVdString, simpleVdString + 1,
|
||||
simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
|
||||
n.v = valueTypeString
|
||||
n.s = d.d.detach2Str(d.DecodeStringAsBytes())
|
||||
case simpleVdByteArray, simpleVdByteArray + 1,
|
||||
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
|
||||
d.d.fauxUnionReadRawBytes(d, false, d.h.RawToString) //, d.h.ZeroCopy)
|
||||
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
|
||||
n.v = valueTypeExt
|
||||
l := d.decLen()
|
||||
n.u = uint64(d.r.readn1())
|
||||
n.l = d.r.readx(uint(l))
|
||||
// MARKER: not necessary to detach for extensions
|
||||
// var useBuf bool
|
||||
// n.l, useBuf = d.r.readxb(uint(l))
|
||||
// n.a = d.d.attachState(useBuf)
|
||||
// n.l = d.d.detach2Bytes(n.l, nil, n.a)
|
||||
case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2,
|
||||
simpleVdArray + 3, simpleVdArray + 4:
|
||||
n.v = valueTypeArray
|
||||
decodeFurther = true
|
||||
case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
|
||||
n.v = valueTypeMap
|
||||
decodeFurther = true
|
||||
default:
|
||||
halt.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd)
|
||||
}
|
||||
|
||||
if !decodeFurther {
|
||||
d.bdRead = false
|
||||
}
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) nextValueBytes() (v []byte) {
|
||||
if !d.bdRead {
|
||||
d.readNextBd()
|
||||
}
|
||||
d.r.startRecording()
|
||||
d.nextValueBytesBdReadR()
|
||||
v = d.r.stopRecording()
|
||||
d.bdRead = false
|
||||
return
|
||||
}
|
||||
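// Illustrative sketch (hypothetical helper, not part of this package): the recording
// hooks used above let nextValueBytes return the raw encoded bytes of one value without
// re-encoding it. Over a plain byte slice it reduces to remembering the cursor before
// the value and slicing up to the cursor after the value has been walked past.
func sketchRecordValue(b []byte, skipValue func(b []byte, c *uint)) []byte {
	var c uint
	start := c        // startRecording: remember where the value begins
	skipValue(b, &c)  // walk past exactly one encoded value, advancing the cursor
	return b[start:c] // stopRecording: the raw bytes that make up the value
}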
|
||||
func (d *simpleDecDriver[T]) nextValueBytesBdReadR() {
|
||||
c := d.bd
|
||||
|
||||
var length uint
|
||||
|
||||
switch c {
|
||||
case simpleVdNil, simpleVdFalse, simpleVdTrue, simpleVdString, simpleVdByteArray:
|
||||
// pass
|
||||
case simpleVdPosInt, simpleVdNegInt:
|
||||
d.r.readn1()
|
||||
case simpleVdPosInt + 1, simpleVdNegInt + 1:
|
||||
d.r.skip(2)
|
||||
case simpleVdPosInt + 2, simpleVdNegInt + 2, simpleVdFloat32:
|
||||
d.r.skip(4)
|
||||
case simpleVdPosInt + 3, simpleVdNegInt + 3, simpleVdFloat64:
|
||||
d.r.skip(8)
|
||||
case simpleVdTime:
|
||||
c = d.r.readn1()
|
||||
d.r.skip(uint(c))
|
||||
|
||||
default:
|
||||
switch c & 7 { // c % 8 {
|
||||
case 0:
|
||||
length = 0
|
||||
case 1:
|
||||
b := d.r.readn1()
|
||||
length = uint(b)
|
||||
case 2:
|
||||
x := d.r.readn2()
|
||||
length = uint(bigen.Uint16(x))
|
||||
case 3:
|
||||
x := d.r.readn4()
|
||||
length = uint(bigen.Uint32(x))
|
||||
case 4:
|
||||
x := d.r.readn8()
|
||||
length = uint(bigen.Uint64(x))
|
||||
}
|
||||
|
||||
bExt := c >= simpleVdExt && c <= simpleVdExt+7
|
||||
bStr := c >= simpleVdString && c <= simpleVdString+7
|
||||
bByteArray := c >= simpleVdByteArray && c <= simpleVdByteArray+7
|
||||
bArray := c >= simpleVdArray && c <= simpleVdArray+7
|
||||
bMap := c >= simpleVdMap && c <= simpleVdMap+7
|
||||
|
||||
if !(bExt || bStr || bByteArray || bArray || bMap) {
|
||||
halt.errorf("cannot infer value - %s 0x%x", msgBadDesc, c)
|
||||
}
|
||||
|
||||
if bExt {
|
||||
d.r.readn1() // tag
|
||||
}
|
||||
|
||||
if length == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if bArray {
|
||||
for i := uint(0); i < length; i++ {
|
||||
d.readNextBd()
|
||||
d.nextValueBytesBdReadR()
|
||||
}
|
||||
} else if bMap {
|
||||
for i := uint(0); i < length; i++ {
|
||||
d.readNextBd()
|
||||
d.nextValueBytesBdReadR()
|
||||
d.readNextBd()
|
||||
d.nextValueBytesBdReadR()
|
||||
}
|
||||
} else {
|
||||
d.r.skip(length)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ----
|
||||
//
|
||||
// The following are similar across all format files (except for the format name).
|
||||
//
|
||||
// We keep them together here, so that we can easily copy and compare.
|
||||
|
||||
// ----
|
||||
|
||||
func (d *simpleEncDriver[T]) init(hh Handle, shared *encoderBase, enc encoderI) (fp interface{}) {
|
||||
callMake(&d.w)
|
||||
d.h = hh.(*SimpleHandle)
|
||||
d.e = shared
|
||||
if shared.bytes {
|
||||
fp = simpleFpEncBytes
|
||||
} else {
|
||||
fp = simpleFpEncIO
|
||||
}
|
||||
// d.w.init()
|
||||
d.init2(enc)
|
||||
return
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) writeBytesAsis(b []byte) { e.w.writeb(b) }
|
||||
|
||||
func (e *simpleEncDriver[T]) writerEnd() { e.w.end() }
|
||||
|
||||
func (e *simpleEncDriver[T]) resetOutBytes(out *[]byte) {
|
||||
e.w.resetBytes(*out, out)
|
||||
}
|
||||
|
||||
func (e *simpleEncDriver[T]) resetOutIO(out io.Writer) {
|
||||
e.w.resetIO(out, e.h.WriterBufferSize, &e.e.blist)
|
||||
}
|
||||
|
||||
// ----
|
||||
|
||||
func (d *simpleDecDriver[T]) init(hh Handle, shared *decoderBase, dec decoderI) (fp interface{}) {
|
||||
callMake(&d.r)
|
||||
d.h = hh.(*SimpleHandle)
|
||||
d.d = shared
|
||||
if shared.bytes {
|
||||
fp = simpleFpDecBytes
|
||||
} else {
|
||||
fp = simpleFpDecIO
|
||||
}
|
||||
// d.r.init()
|
||||
d.init2(dec)
|
||||
return
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) NumBytesRead() int {
|
||||
return int(d.r.numread())
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) resetInBytes(in []byte) {
|
||||
d.r.resetBytes(in)
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) resetInIO(r io.Reader) {
|
||||
d.r.resetIO(r, d.h.ReaderBufferSize, d.h.MaxInitLen, &d.d.blist)
|
||||
}
|
||||
|
||||
// ---- (custom stanza)
|
||||
|
||||
func (d *simpleDecDriver[T]) descBd() string {
|
||||
return sprintf("%v (%s)", d.bd, simpledesc(d.bd))
|
||||
}
|
||||
|
||||
func (d *simpleDecDriver[T]) DecodeFloat32() (f float32) {
|
||||
return float32(chkOvf.Float32V(d.DecodeFloat64()))
|
||||
}
|
||||
7549
vendor/github.com/ugorji/go/codec/simple.mono.generated.go
generated
vendored
Normal file
File diff suppressed because it is too large
52
vendor/github.com/ugorji/go/codec/simple.notfastpath.mono.generated.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
//go:build !notmono && !codec.notmono && (notfastpath || codec.notfastpath)
|
||||
|
||||
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
package codec
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type fastpathESimpleBytes struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderSimpleBytes, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDSimpleBytes struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderSimpleBytes, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsSimpleBytes [0]fastpathESimpleBytes
|
||||
type fastpathDsSimpleBytes [0]fastpathDSimpleBytes
|
||||
|
||||
func (helperEncDriverSimpleBytes) fastpathEncodeTypeSwitch(iv interface{}, e *encoderSimpleBytes) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverSimpleBytes) fastpathDecodeTypeSwitch(iv interface{}, d *decoderSimpleBytes) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverSimpleBytes) fastpathEList() (v *fastpathEsSimpleBytes) { return }
|
||||
func (helperDecDriverSimpleBytes) fastpathDList() (v *fastpathDsSimpleBytes) { return }
|
||||
|
||||
type fastpathESimpleIO struct {
|
||||
rt reflect.Type
|
||||
encfn func(*encoderSimpleIO, *encFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathDSimpleIO struct {
|
||||
rt reflect.Type
|
||||
decfn func(*decoderSimpleIO, *decFnInfo, reflect.Value)
|
||||
}
|
||||
type fastpathEsSimpleIO [0]fastpathESimpleIO
|
||||
type fastpathDsSimpleIO [0]fastpathDSimpleIO
|
||||
|
||||
func (helperEncDriverSimpleIO) fastpathEncodeTypeSwitch(iv interface{}, e *encoderSimpleIO) bool {
|
||||
return false
|
||||
}
|
||||
func (helperDecDriverSimpleIO) fastpathDecodeTypeSwitch(iv interface{}, d *decoderSimpleIO) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (helperEncDriverSimpleIO) fastpathEList() (v *fastpathEsSimpleIO) { return }
|
||||
func (helperDecDriverSimpleIO) fastpathDList() (v *fastpathDsSimpleIO) { return }
|
||||
639
vendor/github.com/ugorji/go/codec/test-cbor-goldens.json
generated
vendored
Normal file
@@ -0,0 +1,639 @@
|
||||
[
|
||||
{
|
||||
"cbor": "AA==",
|
||||
"hex": "00",
|
||||
"roundtrip": true,
|
||||
"decoded": 0
|
||||
},
|
||||
{
|
||||
"cbor": "AQ==",
|
||||
"hex": "01",
|
||||
"roundtrip": true,
|
||||
"decoded": 1
|
||||
},
|
||||
{
|
||||
"cbor": "Cg==",
|
||||
"hex": "0a",
|
||||
"roundtrip": true,
|
||||
"decoded": 10
|
||||
},
|
||||
{
|
||||
"cbor": "Fw==",
|
||||
"hex": "17",
|
||||
"roundtrip": true,
|
||||
"decoded": 23
|
||||
},
|
||||
{
|
||||
"cbor": "GBg=",
|
||||
"hex": "1818",
|
||||
"roundtrip": true,
|
||||
"decoded": 24
|
||||
},
|
||||
{
|
||||
"cbor": "GBk=",
|
||||
"hex": "1819",
|
||||
"roundtrip": true,
|
||||
"decoded": 25
|
||||
},
|
||||
{
|
||||
"cbor": "GGQ=",
|
||||
"hex": "1864",
|
||||
"roundtrip": true,
|
||||
"decoded": 100
|
||||
},
|
||||
{
|
||||
"cbor": "GQPo",
|
||||
"hex": "1903e8",
|
||||
"roundtrip": true,
|
||||
"decoded": 1000
|
||||
},
|
||||
{
|
||||
"cbor": "GgAPQkA=",
|
||||
"hex": "1a000f4240",
|
||||
"roundtrip": true,
|
||||
"decoded": 1000000
|
||||
},
|
||||
{
|
||||
"cbor": "GwAAAOjUpRAA",
|
||||
"hex": "1b000000e8d4a51000",
|
||||
"roundtrip": true,
|
||||
"decoded": 1000000000000
|
||||
},
|
||||
{
|
||||
"cbor": "G///////////",
|
||||
"hex": "1bffffffffffffffff",
|
||||
"roundtrip": true,
|
||||
"decoded": 18446744073709551615
|
||||
},
|
||||
{
|
||||
"cbor": "wkkBAAAAAAAAAAA=",
|
||||
"hex": "c249010000000000000000",
|
||||
"roundtrip": true,
|
||||
"decoded": 18446744073709551616
|
||||
},
|
||||
{
|
||||
"cbor": "O///////////",
|
||||
"hex": "3bffffffffffffffff",
|
||||
"roundtrip": true,
|
||||
"decoded": -18446744073709551616,
|
||||
"skip": true
|
||||
},
|
||||
{
|
||||
"cbor": "w0kBAAAAAAAAAAA=",
|
||||
"hex": "c349010000000000000000",
|
||||
"roundtrip": true,
|
||||
"decoded": -18446744073709551617
|
||||
},
|
||||
{
|
||||
"cbor": "IA==",
|
||||
"hex": "20",
|
||||
"roundtrip": true,
|
||||
"decoded": -1
|
||||
},
|
||||
{
|
||||
"cbor": "KQ==",
|
||||
"hex": "29",
|
||||
"roundtrip": true,
|
||||
"decoded": -10
|
||||
},
|
||||
{
|
||||
"cbor": "OGM=",
|
||||
"hex": "3863",
|
||||
"roundtrip": true,
|
||||
"decoded": -100
|
||||
},
|
||||
{
|
||||
"cbor": "OQPn",
|
||||
"hex": "3903e7",
|
||||
"roundtrip": true,
|
||||
"decoded": -1000
|
||||
},
|
||||
{
|
||||
"cbor": "+QAA",
|
||||
"hex": "f90000",
|
||||
"roundtrip": true,
|
||||
"decoded": 0.0
|
||||
},
|
||||
{
|
||||
"cbor": "+YAA",
|
||||
"hex": "f98000",
|
||||
"roundtrip": true,
|
||||
"decoded": -0.0
|
||||
},
|
||||
{
|
||||
"cbor": "+TwA",
|
||||
"hex": "f93c00",
|
||||
"roundtrip": true,
|
||||
"decoded": 1.0
|
||||
},
|
||||
{
|
||||
"cbor": "+z/xmZmZmZma",
|
||||
"hex": "fb3ff199999999999a",
|
||||
"roundtrip": true,
|
||||
"decoded": 1.1
|
||||
},
|
||||
{
|
||||
"cbor": "+T4A",
|
||||
"hex": "f93e00",
|
||||
"roundtrip": true,
|
||||
"decoded": 1.5
|
||||
},
|
||||
{
|
||||
"cbor": "+Xv/",
|
||||
"hex": "f97bff",
|
||||
"roundtrip": true,
|
||||
"decoded": 65504.0
|
||||
},
|
||||
{
|
||||
"cbor": "+kfDUAA=",
|
||||
"hex": "fa47c35000",
|
||||
"roundtrip": true,
|
||||
"decoded": 100000.0
|
||||
},
|
||||
{
|
||||
"cbor": "+n9///8=",
|
||||
"hex": "fa7f7fffff",
|
||||
"roundtrip": true,
|
||||
"decoded": 3.4028234663852886e+38
|
||||
},
|
||||
{
|
||||
"cbor": "+3435DyIAHWc",
|
||||
"hex": "fb7e37e43c8800759c",
|
||||
"roundtrip": true,
|
||||
"decoded": 1.0e+300
|
||||
},
|
||||
{
|
||||
"cbor": "+QAB",
|
||||
"hex": "f90001",
|
||||
"roundtrip": true,
|
||||
"decoded": 5.960464477539063e-08
|
||||
},
|
||||
{
|
||||
"cbor": "+QQA",
|
||||
"hex": "f90400",
|
||||
"roundtrip": true,
|
||||
"decoded": 6.103515625e-05
|
||||
},
|
||||
{
|
||||
"cbor": "+cQA",
|
||||
"hex": "f9c400",
|
||||
"roundtrip": true,
|
||||
"decoded": -4.0
|
||||
},
|
||||
{
|
||||
"cbor": "+8AQZmZmZmZm",
|
||||
"hex": "fbc010666666666666",
|
||||
"roundtrip": true,
|
||||
"decoded": -4.1
|
||||
},
|
||||
{
|
||||
"cbor": "+XwA",
|
||||
"hex": "f97c00",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "Infinity"
|
||||
},
|
||||
{
|
||||
"cbor": "+X4A",
|
||||
"hex": "f97e00",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "NaN"
|
||||
},
|
||||
{
|
||||
"cbor": "+fwA",
|
||||
"hex": "f9fc00",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "-Infinity"
|
||||
},
|
||||
{
|
||||
"cbor": "+n+AAAA=",
|
||||
"hex": "fa7f800000",
|
||||
"roundtrip": false,
|
||||
"diagnostic": "Infinity"
|
||||
},
|
||||
{
|
||||
"cbor": "+n/AAAA=",
|
||||
"hex": "fa7fc00000",
|
||||
"roundtrip": false,
|
||||
"diagnostic": "NaN"
|
||||
},
|
||||
{
|
||||
"cbor": "+v+AAAA=",
|
||||
"hex": "faff800000",
|
||||
"roundtrip": false,
|
||||
"diagnostic": "-Infinity"
|
||||
},
|
||||
{
|
||||
"cbor": "+3/wAAAAAAAA",
|
||||
"hex": "fb7ff0000000000000",
|
||||
"roundtrip": false,
|
||||
"diagnostic": "Infinity"
|
||||
},
|
||||
{
|
||||
"cbor": "+3/4AAAAAAAA",
|
||||
"hex": "fb7ff8000000000000",
|
||||
"roundtrip": false,
|
||||
"diagnostic": "NaN"
|
||||
},
|
||||
{
|
||||
"cbor": "+//wAAAAAAAA",
|
||||
"hex": "fbfff0000000000000",
|
||||
"roundtrip": false,
|
||||
"diagnostic": "-Infinity"
|
||||
},
|
||||
{
|
||||
"cbor": "9A==",
|
||||
"hex": "f4",
|
||||
"roundtrip": true,
|
||||
"decoded": false
|
||||
},
|
||||
{
|
||||
"cbor": "9Q==",
|
||||
"hex": "f5",
|
||||
"roundtrip": true,
|
||||
"decoded": true
|
||||
},
|
||||
{
|
||||
"cbor": "9g==",
|
||||
"hex": "f6",
|
||||
"roundtrip": true,
|
||||
"decoded": null
|
||||
},
|
||||
{
|
||||
"cbor": "9w==",
|
||||
"hex": "f7",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "undefined"
|
||||
},
|
||||
{
|
||||
"cbor": "8A==",
|
||||
"hex": "f0",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "simple(16)"
|
||||
},
|
||||
{
|
||||
"cbor": "+Bg=",
|
||||
"hex": "f818",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "simple(24)"
|
||||
},
|
||||
{
|
||||
"cbor": "+P8=",
|
||||
"hex": "f8ff",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "simple(255)"
|
||||
},
|
||||
{
|
||||
"cbor": "wHQyMDEzLTAzLTIxVDIwOjA0OjAwWg==",
|
||||
"hex": "c074323031332d30332d32315432303a30343a30305a",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "0(\"2013-03-21T20:04:00Z\")"
|
||||
},
|
||||
{
|
||||
"cbor": "wRpRS2ew",
|
||||
"hex": "c11a514b67b0",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "1(1363896240)"
|
||||
},
|
||||
{
|
||||
"cbor": "wftB1FLZ7CAAAA==",
|
||||
"hex": "c1fb41d452d9ec200000",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "1(1363896240.5)"
|
||||
},
|
||||
{
|
||||
"cbor": "10QBAgME",
|
||||
"hex": "d74401020304",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "23(h'01020304')"
|
||||
},
|
||||
{
|
||||
"cbor": "2BhFZElFVEY=",
|
||||
"hex": "d818456449455446",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "24(h'6449455446')"
|
||||
},
|
||||
{
|
||||
"cbor": "2CB2aHR0cDovL3d3dy5leGFtcGxlLmNvbQ==",
|
||||
"hex": "d82076687474703a2f2f7777772e6578616d706c652e636f6d",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "32(\"http://www.example.com\")"
|
||||
},
|
||||
{
|
||||
"cbor": "QA==",
|
||||
"hex": "40",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "h''"
|
||||
},
|
||||
{
|
||||
"cbor": "RAECAwQ=",
|
||||
"hex": "4401020304",
|
||||
"roundtrip": true,
|
||||
"diagnostic": "h'01020304'"
|
||||
},
|
||||
{
|
||||
"cbor": "YA==",
|
||||
"hex": "60",
|
||||
"roundtrip": true,
|
||||
"decoded": ""
|
||||
},
|
||||
{
|
||||
"cbor": "YWE=",
|
||||
"hex": "6161",
|
||||
"roundtrip": true,
|
||||
"decoded": "a"
|
||||
},
|
||||
{
|
||||
"cbor": "ZElFVEY=",
|
||||
"hex": "6449455446",
|
||||
"roundtrip": true,
|
||||
"decoded": "IETF"
|
||||
},
|
||||
{
|
||||
"cbor": "YiJc",
|
||||
"hex": "62225c",
|
||||
"roundtrip": true,
|
||||
"decoded": "\"\\"
|
||||
},
|
||||
{
|
||||
"cbor": "YsO8",
|
||||
"hex": "62c3bc",
|
||||
"roundtrip": true,
|
||||
"decoded": "ü"
|
||||
},
|
||||
{
|
||||
"cbor": "Y+awtA==",
|
||||
"hex": "63e6b0b4",
|
||||
"roundtrip": true,
|
||||
"decoded": "水"
|
||||
},
|
||||
{
|
||||
"cbor": "ZPCQhZE=",
|
||||
"hex": "64f0908591",
|
||||
"roundtrip": true,
|
||||
"decoded": "𐅑"
|
||||
},
|
||||
{
|
||||
"cbor": "gA==",
|
||||
"hex": "80",
|
||||
"roundtrip": true,
|
||||
"decoded": [
|
||||
|
||||
]
|
||||
},
|
||||
{
|
||||
"cbor": "gwECAw==",
|
||||
"hex": "83010203",
|
||||
"roundtrip": true,
|
||||
"decoded": [
|
||||
1,
|
||||
2,
|
||||
3
|
||||
]
|
||||
},
|
||||
{
|
||||
"cbor": "gwGCAgOCBAU=",
|
||||
"hex": "8301820203820405",
|
||||
"roundtrip": true,
|
||||
"decoded": [
|
||||
1,
|
||||
[
|
||||
2,
|
||||
3
|
||||
],
|
||||
[
|
||||
4,
|
||||
5
|
||||
]
|
||||
]
|
||||
},
|
||||
{
|
||||
"cbor": "mBkBAgMEBQYHCAkKCwwNDg8QERITFBUWFxgYGBk=",
|
||||
"hex": "98190102030405060708090a0b0c0d0e0f101112131415161718181819",
|
||||
"roundtrip": true,
|
||||
"decoded": [
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8,
|
||||
9,
|
||||
10,
|
||||
11,
|
||||
12,
|
||||
13,
|
||||
14,
|
||||
15,
|
||||
16,
|
||||
17,
|
||||
18,
|
||||
19,
|
||||
20,
|
||||
21,
|
||||
22,
|
||||
23,
|
||||
24,
|
||||
25
|
||||
]
|
||||
},
|
||||
{
|
||||
"cbor": "oA==",
|
||||
"hex": "a0",
|
||||
"roundtrip": true,
|
||||
"decoded": {
|
||||
}
|
||||
},
|
||||
{
|
||||
"cbor": "ogECAwQ=",
|
||||
"hex": "a201020304",
|
||||
"roundtrip": true,
|
||||
"skip": true,
|
||||
"diagnostic": "{1: 2, 3: 4}"
|
||||
},
|
||||
{
|
||||
"cbor": "omFhAWFiggID",
|
||||
"hex": "a26161016162820203",
|
||||
"roundtrip": true,
|
||||
"decoded": {
|
||||
"a": 1,
|
||||
"b": [
|
||||
2,
|
||||
3
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"cbor": "gmFhoWFiYWM=",
|
||||
"hex": "826161a161626163",
|
||||
"roundtrip": true,
|
||||
"decoded": [
|
||||
"a",
|
||||
{
|
||||
"b": "c"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cbor": "pWFhYUFhYmFCYWNhQ2FkYURhZWFF",
|
||||
"hex": "a56161614161626142616361436164614461656145",
|
||||
"roundtrip": true,
|
||||
"decoded": {
|
||||
"a": "A",
|
||||
"b": "B",
|
||||
"c": "C",
|
||||
"d": "D",
|
||||
"e": "E"
|
||||
}
|
||||
},
|
||||
{
|
||||
"cbor": "X0IBAkMDBAX/",
|
||||
"hex": "5f42010243030405ff",
|
||||
"roundtrip": false,
|
||||
"skip": true,
|
||||
"diagnostic": "(_ h'0102', h'030405')"
|
||||
},
|
||||
{
|
||||
"cbor": "f2VzdHJlYWRtaW5n/w==",
|
||||
"hex": "7f657374726561646d696e67ff",
|
||||
"roundtrip": false,
|
||||
"decoded": "streaming"
|
||||
},
|
||||
{
|
||||
"cbor": "n/8=",
|
||||
"hex": "9fff",
|
||||
"roundtrip": false,
|
||||
"decoded": [
|
||||
|
||||
]
|
||||
},
|
||||
{
|
||||
"cbor": "nwGCAgOfBAX//w==",
|
||||
"hex": "9f018202039f0405ffff",
|
||||
"roundtrip": false,
|
||||
"decoded": [
|
||||
1,
|
||||
[
|
||||
2,
|
||||
3
|
||||
],
|
||||
[
|
||||
4,
|
||||
5
|
||||
]
|
||||
]
|
||||
},
|
||||
{
|
||||
"cbor": "nwGCAgOCBAX/",
|
||||
"hex": "9f01820203820405ff",
|
||||
"roundtrip": false,
|
||||
"decoded": [
|
||||
1,
|
||||
[
|
||||
2,
|
||||
3
|
||||
],
|
||||
[
|
||||
4,
|
||||
5
|
||||
]
|
||||
]
|
||||
},
|
||||
{
|
||||
"cbor": "gwGCAgOfBAX/",
|
||||
"hex": "83018202039f0405ff",
|
||||
"roundtrip": false,
|
||||
"decoded": [
|
||||
1,
|
||||
[
|
||||
2,
|
||||
3
|
||||
],
|
||||
[
|
||||
4,
|
||||
5
|
||||
]
|
||||
]
|
||||
},
|
||||
{
|
||||
"cbor": "gwGfAgP/ggQF",
|
||||
"hex": "83019f0203ff820405",
|
||||
"roundtrip": false,
|
||||
"decoded": [
|
||||
1,
|
||||
[
|
||||
2,
|
||||
3
|
||||
],
|
||||
[
|
||||
4,
|
||||
5
|
||||
]
|
||||
]
|
||||
},
|
||||
{
|
||||
"cbor": "nwECAwQFBgcICQoLDA0ODxAREhMUFRYXGBgYGf8=",
|
||||
"hex": "9f0102030405060708090a0b0c0d0e0f101112131415161718181819ff",
|
||||
"roundtrip": false,
|
||||
"decoded": [
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
4,
|
||||
5,
|
||||
6,
|
||||
7,
|
||||
8,
|
||||
9,
|
||||
10,
|
||||
11,
|
||||
12,
|
||||
13,
|
||||
14,
|
||||
15,
|
||||
16,
|
||||
17,
|
||||
18,
|
||||
19,
|
||||
20,
|
||||
21,
|
||||
22,
|
||||
23,
|
||||
24,
|
||||
25
|
||||
]
|
||||
},
|
||||
{
|
||||
"cbor": "v2FhAWFinwID//8=",
|
||||
"hex": "bf61610161629f0203ffff",
|
||||
"roundtrip": false,
|
||||
"decoded": {
|
||||
"a": 1,
|
||||
"b": [
|
||||
2,
|
||||
3
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"cbor": "gmFhv2FiYWP/",
|
||||
"hex": "826161bf61626163ff",
|
||||
"roundtrip": false,
|
||||
"decoded": [
|
||||
"a",
|
||||
{
|
||||
"b": "c"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"cbor": "v2NGdW71Y0FtdCH/",
|
||||
"hex": "bf6346756ef563416d7421ff",
|
||||
"roundtrip": false,
|
||||
"decoded": {
|
||||
"Fun": true,
|
||||
"Amt": -2
|
||||
}
|
||||
}
|
||||
]
|
||||
138
vendor/github.com/ugorji/go/codec/test.py
generated
vendored
Normal file
@@ -0,0 +1,138 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# This will create golden files in a directory passed to it.
|
||||
# A Test calls this internally to create the golden files
|
||||
# so it can process them (so we don't have to check in the files).
|
||||
|
||||
# Ensure msgpack-python and cbor are installed first, using:
|
||||
# sudo apt install python-dev (may not be necessary)
|
||||
# sudo apt install python-pip # or python3-pip
|
||||
# pip install --user msgpack-python msgpack-rpc-python cbor
|
||||
|
||||
# Ensure all "string" keys are unicode strings (else they are encoded as bytes)
|
||||
|
||||
from __future__ import print_function
import cbor, msgpack, msgpackrpc, sys, os, threading

mylocaladdr = "127.0.0.1"  # localhost.localdomain localhost 127.0.0.1

def get_test_data_list():
    # get list with all primitive types, and a combo type
    l0 = [
        -8,
        -1616,
        -32323232,
        -6464646464646464,
        192,
        1616,
        32323232,
        6464646464646464,
        192,
        -3232.0,
        -6464646464.0,
        3232.0,
        6464.0,
        6464646464.0,
        160.0,
        1616.0,
        False,
        True,
        u"null",
        None,
        u"some&day>some<day",
        1328176922000002000,
        u"",
        -2206187877999998000,
        u"bytestring",
        270,
        u"none",
        -2013855847999995777,
        # -6795364578871345152,
    ]
    l1 = [
        {"true": True,
         "false": False},
        {"true": u"True",
         "false": False,
         "uint16(1616)": 1616},
        {"list": [1616, 32323232, True, -3232.0, {"TRUE": True, "FALSE": False}, [True, False]],
         "int32": 32323232, "bool": True,
         "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
         "SHORT STRING": u"1234567890"},
        {True: "true", 138: False, "false": 200}
    ]

    l = []
    l.extend(l0)
    l.append(l0)
    l.append(1)
    l.extend(l1)
    return l

def build_test_data(destdir):
    l = get_test_data_list()
    for i in range(len(l)):
        # packer = msgpack.Packer()
        serialized = msgpack.dumps(l[i])
        with open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb') as f:
            f.write(serialized)
        serialized = cbor.dumps(l[i])
        with open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb') as f:
            f.write(serialized)

def doRpcServer(port, stopTimeSec):
    class EchoHandler(object):
        def Echo123(self, msg1, msg2, msg3):
            return ("1:%s 2:%s 3:%s" % (msg1.decode("utf-8"), msg2.decode("utf-8"), msg3.decode("utf-8")))
        def EchoStruct(self, msg):
            return ("%s" % msg)

    addr = msgpackrpc.Address(mylocaladdr, port)
    server = msgpackrpc.Server(EchoHandler())
    server.listen(addr)
    # run thread to stop it after stopTimeSec seconds if > 0
    if stopTimeSec > 0:
        def myStopRpcServer():
            server.stop()
        t = threading.Timer(stopTimeSec, myStopRpcServer)
        t.start()
    server.start()

def doRpcClientToPythonSvc(port):
    address = msgpackrpc.Address(mylocaladdr, port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print(client.call("Echo123", "A1", "B2", "C3"))
    print(client.call("EchoStruct", {"A": "Aa", "B": "Bb", "C": "Cc"}))

# def doCheckSocket(port):
#     print(">>>> port: ", port, " <<<<<")
#     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     result = sock.connect_ex(('127.0.0.1', port))
#     if result == 0:
#         print("\t>>>> Port is open")
#     else:
#         print("\t>>>> Port is not open")
#     sock.close()

def doRpcClientToGoSvc(port):
    # doCheckSocket(port)
    address = msgpackrpc.Address(mylocaladdr, port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print(client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"]))
    print(client.call("TestRpcInt.EchoStruct", {"A": "Aa", "B": "Bb", "C": "Cc"}))

def doMain(args):
    if len(args) == 2 and args[0] == "testdata":
        build_test_data(args[1])
    elif len(args) == 3 and args[0] == "rpc-server":
        doRpcServer(int(args[1]), int(args[2]))
    elif len(args) == 2 and args[0] == "rpc-client-python-service":
        doRpcClientToPythonSvc(int(args[1]))
    elif len(args) == 2 and args[0] == "rpc-client-go-service":
        doRpcClientToGoSvc(int(args[1]))
    else:
        print("Usage: test.py " +
              "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")

if __name__ == "__main__":
    doMain(sys.argv[1:])
239
vendor/github.com/ugorji/go/codec/writer.go
generated
vendored
Normal file
@@ -0,0 +1,239 @@
// Copyright (c) 2012-2020 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.

package codec

import (
	"io"
)

const maxConsecutiveEmptyWrites = 16 // 2 is sufficient, 16 is enough, 64 is optimal

// encWriter abstracts writing to a byte array or to an io.Writer.
type encWriterI interface {
	writeb([]byte)
	writestr(string)
	writeqstr(string) // write string wrapped in quotes ie "..."
	writen1(byte)

	// add convenience functions for writing 2,4
	writen2(byte, byte)
	writen4([4]byte)
	writen8([8]byte)

	// isBytes() bool
	end()

	resetIO(w io.Writer, bufsize int, blist *bytesFreeList)
	resetBytes(in []byte, out *[]byte)
}

// ---------------------------------------------
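// bufioEncWriter implements encWriterI over an io.Writer, buffering writes and
// flushing when the buffer fills up. Its counterpart below, bytesEncAppender,
// implements encWriterI by appending directly to a caller-supplied byte slice.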
type bufioEncWriter struct {
	w io.Writer

	buf []byte

	n int

	b [16]byte // scratch buffer and padding (cache-aligned)
}

// MARKER: use setByteAt/byteAt to elide the bounds-checks
// when we are sure that we don't go beyond the bounds.

func (z *bufioEncWriter) resetBytes(in []byte, out *[]byte) {
	halt.errorStr("resetBytes is unsupported by bufioEncWriter")
}
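// resetIO (re)initializes the writer for streaming to w: it keeps the current
// buffer when its capacity is at least bufsize (minimum 16); otherwise it returns
// the old buffer to the shared bytesFreeList and takes a large-enough one from it.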
func (z *bufioEncWriter) resetIO(w io.Writer, bufsize int, blist *bytesFreeList) {
	z.w = w
	z.n = 0
	// use minimum bufsize of 16, matching the array z.b and accommodating writen methods (where n <= 8)
	bufsize = max(16, bufsize) // max(byteBufSize, bufsize)
	if cap(z.buf) < bufsize {
		if len(z.buf) > 0 && &z.buf[0] != &z.b[0] {
			blist.put(z.buf)
		}
		if len(z.b) > bufsize {
			z.buf = z.b[:]
		} else {
			z.buf = blist.get(bufsize)
		}
	}
	z.buf = z.buf[:cap(z.buf)]
}
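// flushErr writes the buffered bytes to the underlying io.Writer. On a short write,
// the unwritten tail is shifted to the front of the buffer and the write is retried
// a bounded number of times; if the buffer still is not drained, io.ErrShortWrite is returned.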
func (z *bufioEncWriter) flushErr() (err error) {
	var n int
	for i := maxConsecutiveEmptyReads; i > 0; i-- {
		n, err = z.w.Write(z.buf[:z.n])
		z.n -= n
		if z.n == 0 || err != nil {
			return
		}
		// at this point: z.n > 0 && err == nil
		if n > 0 {
			copy(z.buf, z.buf[n:z.n+n])
		}
	}
	return io.ErrShortWrite // OR io.ErrNoProgress: not enough (or no) data written
}

func (z *bufioEncWriter) flush() {
	halt.onerror(z.flushErr())
}

func (z *bufioEncWriter) writeb(s []byte) {
LOOP:
	a := len(z.buf) - z.n
	if len(s) > a {
		z.n += copy(z.buf[z.n:], s[:a])
		s = s[a:]
		z.flush()
		goto LOOP
	}
	z.n += copy(z.buf[z.n:], s)
}

func (z *bufioEncWriter) writestr(s string) {
	// z.writeb(bytesView(s)) // inlined below
LOOP:
	a := len(z.buf) - z.n
	if len(s) > a {
		z.n += copy(z.buf[z.n:], s[:a])
		s = s[a:]
		z.flush()
		goto LOOP
	}
	z.n += copy(z.buf[z.n:], s)
}
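// writeqstr writes s wrapped in double-quotes, i.e. `"` + s + `"`, flushing to the
// underlying writer whenever the quoted string does not fit in the remaining buffer space.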
func (z *bufioEncWriter) writeqstr(s string) {
	// z.writen1('"')
	// z.writestr(s)
	// z.writen1('"')

	if z.n+len(s)+2 > len(z.buf) {
		z.flush()
	}
	setByteAt(z.buf, uint(z.n), '"')
	// z.buf[z.n] = '"'
	z.n++
LOOP:
	a := len(z.buf) - z.n
	if len(s)+1 > a {
		z.n += copy(z.buf[z.n:], s[:a])
		s = s[a:]
		z.flush()
		goto LOOP
	}
	z.n += copy(z.buf[z.n:], s)
	setByteAt(z.buf, uint(z.n), '"')
	// z.buf[z.n] = '"'
	z.n++
}

func (z *bufioEncWriter) writen1(b1 byte) {
	if 1 > len(z.buf)-z.n {
		z.flush()
	}
	setByteAt(z.buf, uint(z.n), b1)
	// z.buf[z.n] = b1
	z.n++
}

func (z *bufioEncWriter) writen2(b1, b2 byte) {
	if 2 > len(z.buf)-z.n {
		z.flush()
	}
	setByteAt(z.buf, uint(z.n+1), b2)
	setByteAt(z.buf, uint(z.n), b1)
	// z.buf[z.n+1] = b2
	// z.buf[z.n] = b1
	z.n += 2
}

func (z *bufioEncWriter) writen4(b [4]byte) {
	if 4 > len(z.buf)-z.n {
		z.flush()
	}
	// setByteAt(z.buf, uint(z.n+3), b4)
	// setByteAt(z.buf, uint(z.n+2), b3)
	// setByteAt(z.buf, uint(z.n+1), b2)
	// setByteAt(z.buf, uint(z.n), b1)
	copy(z.buf[z.n:], b[:])
	z.n += 4
}

func (z *bufioEncWriter) writen8(b [8]byte) {
	if 8 > len(z.buf)-z.n {
		z.flush()
	}
	copy(z.buf[z.n:], b[:])
	z.n += 8
}

func (z *bufioEncWriter) endErr() (err error) {
	if z.n > 0 {
		err = z.flushErr()
	}
	return
}

func (z *bufioEncWriter) end() {
	halt.onerror(z.endErr())
}

// ---------------------------------------------

var bytesEncAppenderDefOut = []byte{}

// bytesEncAppender implements encWriter and can write to a byte slice.
type bytesEncAppender struct {
	b   []byte
	out *[]byte
}

func (z *bytesEncAppender) writeb(s []byte) {
	z.b = append(z.b, s...)
}
func (z *bytesEncAppender) writestr(s string) {
	z.b = append(z.b, s...)
}
func (z *bytesEncAppender) writeqstr(s string) {
	z.b = append(append(append(z.b, '"'), s...), '"')
	// z.b = append(z.b, '"')
	// z.b = append(z.b, s...)
	// z.b = append(z.b, '"')
}
func (z *bytesEncAppender) writen1(b1 byte) {
	z.b = append(z.b, b1)
}
func (z *bytesEncAppender) writen2(b1, b2 byte) {
	z.b = append(z.b, b1, b2)
}

func (z *bytesEncAppender) writen4(b [4]byte) {
	z.b = append(z.b, b[:]...)
	// z.b = append(z.b, b1, b2, b3, b4) // prevents inlining encWr.writen4
}

func (z *bytesEncAppender) writen8(b [8]byte) {
	z.b = append(z.b, b[:]...)
	// z.b = append(z.b, b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7])
}

func (z *bytesEncAppender) end() {
	*(z.out) = z.b
}

func (z *bytesEncAppender) resetBytes(in []byte, out *[]byte) {
	z.b = in[:0]
	z.out = out
}

func (z *bytesEncAppender) resetIO(w io.Writer, bufsize int, blist *bytesFreeList) {
	halt.errorStr("resetIO is unsupported by bytesEncAppender")
}
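For orientation, a minimal sketch (not part of the vendored sources) of how these two write paths are exercised through the package's public API: codec.NewEncoderBytes produces output into a byte slice (the role bytesEncAppender plays above), while codec.NewEncoder streams to an io.Writer (the buffered bufioEncWriter path). The handle and sample value below are arbitrary choices for illustration.

package main

import (
	"bytes"
	"fmt"

	"github.com/ugorji/go/codec"
)

func main() {
	var jh codec.JsonHandle // any Handle (JsonHandle, CborHandle, MsgpackHandle, ...) is used the same way

	v := map[string]interface{}{"Fun": true, "Amt": -2}

	// Encode into a byte slice.
	var b []byte
	if err := codec.NewEncoderBytes(&b, &jh).Encode(v); err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	// Encode to an io.Writer (buffered internally).
	var buf bytes.Buffer
	if err := codec.NewEncoder(&buf, &jh).Encode(v); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}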